| column | dtype | observed range / distinct values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2 to 616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 69 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4 to 63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k to 686M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2 to 10.3M |
| extension | string | 246 classes |
| content | string | length 2 to 10.3M |
| authors | list | length 1 |
| author_id | string | length 0 to 212 |
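A minimal sketch (not part of the original table; the dataset id below is a placeholder, not the real name) of how a dataset with this schema could be streamed and inspected with the `datasets` library:

```python
# Sketch only: "org/this-code-dataset" is a placeholder id.
from datasets import load_dataset

ds = load_dataset("org/this-code-dataset", split="train", streaming=True)
for row in ds.take(5):
    # Each row pairs repo/commit metadata with the raw file text in `content`.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
```

The preview rows below follow the same column order.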
8ed3f5a01919bae2b65fd2af71bf60e498544062
|
342031941504a79471af32eeaacbc8d345eebca0
|
/Zadanie2/loty/models.py
|
40f8bcb4123db053bead55fbd3a28bdefefe93c7
|
[] |
no_license
|
Kammsy/appWWW
|
8ba72043cbfbf299d42598567198ae94c50737ff
|
14bfbf5d9441ae1a889619a352f4ca1c6c69edd6
|
refs/heads/master
| 2020-03-27T22:48:09.911652
| 2018-09-04T17:46:58
| 2018-09-04T17:46:58
| 147,261,886
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 714
|
py
|
#coding=utf-8
from django.db import models


class Pasazer(models.Model):
    imie = models.CharField(max_length=42)
    nazwisko = models.CharField(max_length=42)

    class Meta:
        unique_together = (('imie', 'nazwisko'),)


class Samolot(models.Model):
    znaki_rejestracyjne = models.CharField(max_length=50, unique=True)
    liczba_miejsc = models.IntegerField()


class Lot(models.Model):
    lotnisko_startu = models.CharField(max_length=100)
    czas_startu = models.DateTimeField()
    lotnisko_ladowania = models.CharField(max_length=100)
    czas_ladowania = models.DateTimeField()
    samolot = models.ForeignKey(Samolot, on_delete=models.CASCADE)
    pasazerowie = models.ManyToManyField(Pasazer)
|
[
"sz383558@students.mimuw.edu.pl"
] |
sz383558@students.mimuw.edu.pl
|
13a0dc94977bf3bd54cb59287a041c7289d7e809
|
37585359073b3f2a6fe1d7fe9b10dfa4486bc02e
|
/djeer/urls.py
|
0ad86affc913a58e6ff776c57274fe5fb6cf5e43
|
[] |
no_license
|
rubyhome-solutions/djeer
|
9aad145200de05bc23838029781745744ec51938
|
c745b7f0faf89d3240bd45ce8545658e28dd315b
|
refs/heads/master
| 2021-06-11T09:53:32.665115
| 2017-02-26T00:04:23
| 2017-02-26T00:04:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 984
|
py
|
from django.conf.urls import url, include
from djeer_auth import views
from django.contrib import admin

urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^$', views.main_page, name="main"),
    url(r'^lostpet_admin_login/$', views.login, name="login"),
    url(r'^logout/$', views.logout, name="logout"),
    url(r'^signup/(?P<person>[0-9a-z]+)$', views.signup, name="signup"),
    url(r'^signup_djeer/(?P<person>[0-9a-z]+)$', views.signup_djeer, name="signup_djeer"),
    url(r'^auth/', include('djeer_auth.urls')),
    url(r'^profile/', include('profile.urls')),
    url(r'^events/', include('events.urls')),
    url(r'^search/', include('search.urls')),
    url(r'^chat/', include('message.urls')),
    url(r'^booking/', include('bookings.urls')),
    url(r'^help/', include('help.urls')),
    url(r'^djeer/index/$', views.index_page, name="index"),
    url(r'^accounts/', include('allauth.urls')),
    url(r'^tinymce/$', include('tinymce.urls')),
]
|
[
"coralisland8327@gmail.com"
] |
coralisland8327@gmail.com
|
97f69ec49c421509df9b9ef2b9c6785bdb0dafc5
|
9ee327caec1165ff7c70ddb2d792388e5b6be3b5
|
/src/utils.py
|
3f6760f2bbaa8f8ed1b93a1ca48119143e7a1ec2
|
[] |
no_license
|
nicktao9/AgriculturalDiseaseClassification
|
62a0f5c1b8301c431e6c4435abcb4dda0897210b
|
f505aad04b7d421bbb2d2c91f75e02813a2f8dc7
|
refs/heads/master
| 2020-04-06T18:00:42.357728
| 2018-11-15T09:28:42
| 2018-11-15T09:28:42
| 157,681,631
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,289
|
py
|
import sys, os, time
import visdom
import time, glob
import numpy as np
import cv2, json, shutil
import logging
from tqdm import tqdm


""" Create a new dir """
def create_folder(fd):
    if not os.path.exists(fd):
        os.makedirs(fd)


""" Visualizer Module """
class Visualizer(object):
    """
    Wraps the basic visdom operations, but the native visdom interface is
    still available via `self.vis.function`
    """
    def __init__(self, env='default', **kwargs):
        self.vis = visdom.Visdom(env=env, **kwargs)
        # index of the point being plotted, i.e. the x coordinate
        # stores ('loss', 23), meaning the 23rd point of 'loss'
        self.index = {}
        self.log_text = ''

    def reinit(self, env='default', **kwargs):
        """
        Change the visdom configuration
        """
        self.vis = visdom.Visdom(env=env, **kwargs)
        return self

    def plot_many(self, d):
        """
        Plot several values at once
        @params d: dict (name, value) i.e. ('loss', 0.11)
        """
        for k, v in d.items():
            self.plot(k, v)

    def img_many(self, d):
        for k, v in d.items():
            self.img(k, v)

    def plot(self, name, y, **kwargs):
        """
        self.plot('loss', 1.00)
        """
        x = self.index.get(name, 0)
        self.vis.line(Y=np.array([y]), X=np.array([x]),
                      win=name,
                      opts=dict(title=name),
                      update=None if x == 0 else 'append',
                      **kwargs
                      )
        self.index[name] = x + 1

    def img(self, name, img_, **kwargs):
        """
        self.img('input_img', t.Tensor(64, 64))
        self.img('input_imgs', t.Tensor(3, 64, 64))
        self.img('input_imgs', t.Tensor(100, 1, 64, 64))
        self.img('input_imgs', t.Tensor(100, 3, 64, 64), nrows=10)
        !!!don't ~~self.img('input_imgs', t.Tensor(100, 64, 64), nrows=10)~~!!!
        """
        self.vis.images(img_.cpu().numpy(),
                        win=name,
                        opts=dict(title=name),
                        **kwargs
                        )

    def log(self, info, win='log_text'):
        """
        self.log({'loss': 1, 'lr': 0.0001})
        """
        self.log_text += ('[{time}] {info} <br>'.format(
            time=time.strftime('%m%d_%H%M%S'),
            info=info))
        self.vis.text(self.log_text, win)

    def __getattr__(self, name):
        return getattr(self.vis, name)


""" log module """
class log(object):
    """
    Write log messages
    log = log()
    log.printf("This is a good start {}".format(1))
    """
    def __init__(self,
                 level=logging.DEBUG,
                 format1='%(asctime)s %(filename)s : %(levelname)s %(message)s',
                 datefmt='%Y-%m-%d %A %H:%M:%S',
                 filename=os.path.join("../Result/", "log.txt"),
                 filemode='w'):
        logging.basicConfig(
            level=level,        # log level written to the file; messages above this level are all output
            format=format1,     # format of the log output
            datefmt=datefmt,    # timestamp format
            filename=filename,  # log file name
            filemode=filemode)  # write mode, "w" or "a"

    def printf(self, str):
        logging.info(str)


def img2classfication(input_json_path, input_file_path, outputs_folders_path):
    """Put the pictures listed in the json file into the folder of their corresponding label
    Args:
        input_json_path: original json path
        input_file_path: folder holding all images
        outputs_folders_path: output path for the label folders
    Returns:
        different label folders in outputs_folders_path
    """
    with open(input_json_path, 'r') as f:
        data_dict = json.load(f)
    with tqdm(total=len(data_dict), unit='pic') as pbar:
        for data in data_dict:
            data_name = data['image_id']
            data_label = data['disease_class']
            create_folder(outputs_folders_path + "/" + str(data_label))
            shutil.copy(input_file_path + "/" + data_name,
                        outputs_folders_path + "/" + str(data_label) + "/" + data_name)
            pbar.update(1)


if __name__ == "__main__":
    img2classfication("../../datasets/ai_challenge/val_set/val_annotations.json",
                      "../../datasets/ai_challenge/val_set/images/",
                      "../../datasets/ai_challenge/new_val_set/")
|
[
"taolianjie007@163.com"
] |
taolianjie007@163.com
|
2358001a8b8e2de4a23fb90ca93ca55a3ac626f2
|
a2a9260526e0c2a20cb08bcad693d8bddb47d420
|
/lib/python2.7/site-packages/allauth/socialaccount/providers/facebook/tests.py
|
854d1c9745eeae47e94d54a3ac1351b41485b93a
|
[] |
no_license
|
shubh3794/Processing-Payment
|
42df51fb0e582d573fbfd030125f27b8c4464263
|
81c0223889a450f4d023d91eb2890e385afd198b
|
refs/heads/master
| 2021-01-10T03:50:38.727002
| 2015-11-25T20:56:41
| 2015-11-25T20:56:41
| 46,883,262
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,194
|
py
|
import json
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.test.client import RequestFactory
from allauth.socialaccount.tests import create_oauth2_tests
from allauth.tests import MockedResponse, patch
from allauth.socialaccount.models import SocialAccount
from allauth.socialaccount import providers
from allauth.socialaccount.providers import registry
from allauth.account import app_settings as account_settings
from allauth.account.models import EmailAddress
from allauth.utils import get_user_model
from .provider import FacebookProvider
@override_settings(
SOCIALACCOUNT_AUTO_SIGNUP=True,
ACCOUNT_SIGNUP_FORM_CLASS=None,
LOGIN_REDIRECT_URL='/accounts/profile/',
ACCOUNT_EMAIL_VERIFICATION=account_settings
.EmailVerificationMethod.NONE,
SOCIALACCOUNT_PROVIDERS={
'facebook': {
'AUTH_PARAMS': {},
'VERIFIED_EMAIL': False}})
class FacebookTests(create_oauth2_tests(registry.by_id(FacebookProvider.id))):
facebook_data = """
{
"id": "630595557",
"name": "Raymond Penners",
"first_name": "Raymond",
"last_name": "Penners",
"email": "raymond.penners@gmail.com",
"link": "https://www.facebook.com/raymond.penners",
"username": "raymond.penners",
"birthday": "07/17/1973",
"work": [
{
"employer": {
"id": "204953799537777",
"name": "IntenCT"
}
}
],
"timezone": 1,
"locale": "nl_NL",
"verified": true,
"updated_time": "2012-11-30T20:40:33+0000"
}"""
def get_mocked_response(self, data=None):
if data is None:
data = self.facebook_data
return MockedResponse(200, data)
def test_username_conflict(self):
User = get_user_model()
User.objects.create(username='raymond.penners')
self.login(self.get_mocked_response())
socialaccount = SocialAccount.objects.get(uid='630595557')
self.assertEqual(socialaccount.user.username, 'raymond')
def test_username_based_on_provider(self):
self.login(self.get_mocked_response())
socialaccount = SocialAccount.objects.get(uid='630595557')
self.assertEqual(socialaccount.user.username, 'raymond.penners')
def test_username_based_on_provider_with_simple_name(self):
data = '{"id": "1234567", "name": "Harvey McGillicuddy"}'
self.login(self.get_mocked_response(data=data))
socialaccount = SocialAccount.objects.get(uid='1234567')
self.assertEqual(socialaccount.user.username, 'harvey')
def test_media_js(self):
provider = providers.registry.by_id(FacebookProvider.id)
request = RequestFactory().get(reverse('account_login'))
request.session = {}
script = provider.media_js(request)
self.assertTrue('"appId": "app123id"' in script)
def test_login_by_token(self):
resp = self.client.get(reverse('account_login'))
with patch('allauth.socialaccount.providers.facebook.views'
'.requests') as requests_mock:
mocks = [self.get_mocked_response().json()]
requests_mock.get.return_value.json \
= lambda: mocks.pop()
resp = self.client.post(reverse('facebook_login_by_token'),
data={'access_token': 'dummy'})
self.assertRedirects(resp, 'http://testserver/accounts/profile/',
fetch_redirect_response=False)
@override_settings(
SOCIALACCOUNT_PROVIDERS={
'facebook': {
'AUTH_PARAMS': {'auth_type': 'reauthenticate'},
'VERIFIED_EMAIL': False}})
def test_login_by_token_reauthenticate(self):
resp = self.client.get(reverse('account_login'))
nonce = json.loads(resp.context['fb_data'])['loginOptions']['auth_nonce']
with patch('allauth.socialaccount.providers.facebook.views'
'.requests') as requests_mock:
mocks = [self.get_mocked_response().json(),
{'auth_nonce': nonce}]
requests_mock.get.return_value.json \
= lambda: mocks.pop()
resp = self.client.post(reverse('facebook_login_by_token'),
data={'access_token': 'dummy'})
self.assertRedirects(resp, 'http://testserver/accounts/profile/',
fetch_redirect_response=False)
@override_settings(
SOCIALACCOUNT_PROVIDERS={
'facebook': {
'VERIFIED_EMAIL': True}})
def test_login_verified(self):
emailaddress = self._login_verified()
self.assertTrue(emailaddress.verified)
def test_login_unverified(self):
emailaddress = self._login_verified()
self.assertFalse(emailaddress.verified)
def _login_verified(self):
resp = self.login(self.get_mocked_response())
return EmailAddress.objects.get(email='raymond.penners@gmail.com')
|
[
"shubh.aggarwal.37@gmail.com"
] |
shubh.aggarwal.37@gmail.com
|
4a47b93956005fbcfc0bdd1b548ae397f80962e4
|
ac255a4ddae0644b7599531b69d5cd34c855358c
|
/yt-corey-schafer/2-strings.py
|
193a1f3e3c2c590b987bf78bde4c03a746338470
|
[] |
no_license
|
mickosav/python
|
23bc992a83324395f88b8e6a6cf7d6a9f42c9f1e
|
16c381c9edcdcb29f835d02eac8e15d84c186fa9
|
refs/heads/master
| 2020-08-25T09:09:33.938495
| 2020-03-10T12:21:06
| 2020-03-10T12:21:06
| 216,989,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
message = 'Hello Micko'
message_multiline = '''bla bla blalalll
hello lsakdjfl '''
print(message)
print(message_multiline)
print(len(message)) # length
print(type(message)) # -> <class 'str'>
print(message[0])
# slicing:
print(message[0:5]) # from (including) - to (excluding)
print(message[:5]) # from start
print(message[6:]) # from 6 to the end
# string methods
print(message.lower())
print(message.upper())
print(message.count('Micko'))
print(message.find('Hello'))
print(message.find('asdfasdfasdf')) # -> -1
message_pera = message.replace('Micko', 'Pera')  # replace doesn't mutate state - it returns a new string
print(message_pera)
# concatenation
gretting = 'Hello'
name = 'Mickon'
print(gretting + ', ' + name + '. Welcome!')
print('{}, {}. Welcome!'.format(gretting, name))
# f-strings (available from v3.6)
print(f'{gretting}, {name.upper()}. Welcome!')
|
[
"mickosavovic@gmail.com"
] |
mickosavovic@gmail.com
|
e604b54ca604aa75d3c6c934157d6ccf8bcba3f2
|
85ce6f5af5ecbaa3b3f1455cfd91c672bdec595b
|
/Foundational_data_structures/Array.py
|
bdfd9c4473e1c60413895684516729d16afede6f
|
[] |
no_license
|
c1lo/DataStructures_Algorithms
|
26eeee2e0d9feae75a38a16145e89e1713773ca1
|
e62fb71884791682dd8677ee2ac8aebe58ea87b0
|
refs/heads/master
| 2021-01-25T07:18:51.670186
| 2014-09-04T15:53:02
| 2014-09-04T15:53:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,886
|
py
|
__author__ = 'xiyilo'


class Array(object):
    """
    extending Python lists - an array class
    """
    def __init__(self, length=0, baseIndex=0):
        assert length >= 0
        self._data = [None for i in xrange(length)]
        self._baseIndex = baseIndex

    def __copy__(self):
        result = Array(len(self._data))
        for i, datum in enumerate(self._data):
            result._data[i] = datum
        result._baseIndex = self._baseIndex
        return result

    def getOffset(self, index):
        offset = index - self._baseIndex
        if offset < 0 or offset >= len(self._data):
            raise IndexError
        return offset

    def __getitem__(self, item):
        return self._data[self.getOffset(item)]

    def __setitem__(self, key, value):
        self._data[self.getOffset(key)] = value

    # Array Properties
    def getData(self):
        return self._data

    data = property(
        fget=lambda self: self.getData()
    )

    def getBaseIndex(self):
        return self._baseIndex

    def setBaseIndex(self, baseIndex):
        self._baseIndex = baseIndex

    baseIndex = property(
        fget=lambda self: self.getBaseIndex(),
        fset=lambda self, value: self.setBaseIndex(value)
    )

    # Resizing an array
    def __len__(self):
        return len(self._data)

    def setLength(self, value):
        if len(self._data) != value:
            newData = [None for i in xrange(value)]
            m = min(len(self._data), value)
            for i in xrange(m):
                newData[i] = self._data[i]
            self._data = newData

    length = property(
        fget=lambda self: self.__len__(),
        fset=lambda self, value: self.setLength(value)
    )


if __name__ == '__main__':
    from copy import copy
    a = Array(5)
    b = copy(a)
    print a
    print a[0]
    print id(a)
    print(b)
    print id(b)
|
[
"xiyigm@gmail.com"
] |
xiyigm@gmail.com
|
cde76863a99e655e46b43112532dd7da3bcc13d4
|
1bde0c807f17fc431b04b4b9cb338ee3acd34b7d
|
/.history/predict_20210713124241.py
|
7c01488a87ec48a2cd94cb2965aba58fb29d0d56
|
[] |
no_license
|
Harrysibbenga/Pytorch-NLP
|
cf9d7e6376d5e19929e6703c3342c81c1a128be1
|
6f22f6ac5f2bf37f27ed2d6285f3a154eda4b566
|
refs/heads/main
| 2023-06-19T22:43:35.513874
| 2021-07-14T19:45:15
| 2021-07-14T19:45:15
| 385,595,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 822
|
py
|
from model import *
from data import *
import sys

rnn = torch.load('char-rnn-classification.pt')


# Just return an output given a line
def evaluate(line_tensor):
    hidden = rnn.initHidden()
    for i in range(line_tensor.size()[0]):
        output, hidden = rnn(line_tensor[i], hidden)
    return output


def predict(line, n_predictions=3):
    output = evaluate(Variable(lineToTensor(line)))
    # Get top N categories
    topv, topi = output.data.topk(n_predictions, 1, True)
    predictions = []
    for i in range(n_predictions):
        value = topv[0][i]
        category_index = topi[0][i]
        print('(%.2f) %s' % (value, all_categories[category_index]))
        predictions.append([value, all_categories[category_index]])
    return predictions


if __name__ == '__main__':
    predict(sys.argv[1])
|
[
"sibbengaharry@gmail.com"
] |
sibbengaharry@gmail.com
|
60c9db8974c4fcdaf3f0fc22cf0b0a1ad6083ca1
|
5095a2cbc3fea5b63b6f3cabf4ae1bd930cdb479
|
/영동/16_숨바꼭질.py
|
68332c98c92b749a3fb174e52d26d5d881b07e15
|
[] |
no_license
|
syeeuns/week03
|
a198150d94caf772d6421b4adf6d8e28793853db
|
cf40b994fa285800854bac07b7ef86ad5dbdf35a
|
refs/heads/master
| 2023-02-06T13:13:11.012967
| 2020-12-31T04:14:10
| 2020-12-31T04:14:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
from collections import deque

N, K = map(int, input().split())
Max = 10**5 + 1
queue = deque([N])
D = [[-1]*2 for _ in range(Max)]
D[N][0] = 0
T = [K]
z = K
while queue:
    v = queue.popleft()
    for newv in [v-1, v+1, v*2]:
        if 0 <= newv < Max and D[newv][0] == -1:
            queue.append(newv)
            D[newv][0] = D[v][0] + 1
            D[newv][1] = v
while D[z][1] != -1:
    T.append(D[z][1])
    z = D[z][1]
print(D[K][0])
print(*reversed(T))
|
[
"zeroistfilm@naver.com"
] |
zeroistfilm@naver.com
|
a1a6c80785ed6ba981a14c5ae605f4d0ebb50942
|
916482a48a92247a3735e11ebb60448e1d90453f
|
/YellowFin-master/parsing/nbest_parse.py
|
601548575904ea662212b964829ee9c430001700
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
w-zm/DeepAndCrossNetwork-TF
|
bb32c5cc2ad7e26ca29dbc9213ebaa4e156075c6
|
f2e117b7167dd797b3470f2c2a7db660a9cb4964
|
refs/heads/master
| 2020-05-31T17:38:47.522642
| 2019-06-12T02:26:00
| 2019-06-12T02:26:00
| 190,413,924
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
from __future__ import print_function
import fileinput
from bllipparser import RerankingParser, Tree

if __name__ == '__main__':
    rrp = RerankingParser()
    parser = 'wsj/WSJ-PTB3/parser'
    rrp.load_parser_model(parser)
    for line in fileinput.input():
        tokens = Tree(line).tokens()
        nbest = rrp.parse(tokens)
        print(len(nbest))
        for tree in nbest:
            print(tree.ptb_parse)
|
[
"354242964@qq.com"
] |
354242964@qq.com
|
56a5981bb2b820c0c1889dba526616846c4b1fb6
|
2afb28eabc992f4f3fec48f0ec91be7459a51a31
|
/flask/models/knn.py
|
9a39613331e9a5ae7ecbbda59501f4ef0e85e8c7
|
[
"MIT"
] |
permissive
|
LanceZhu/deep-learning
|
d3088d40d4c7f04ee82d23c339d46ac69a222bab
|
a1d8554397844ab097e1ae89686acb6d4dcc524d
|
refs/heads/master
| 2023-02-04T10:14:26.634857
| 2020-12-24T06:57:52
| 2020-12-24T06:57:52
| 315,268,712
| 0
| 0
|
MIT
| 2020-12-05T14:28:49
| 2020-11-23T09:48:57
|
Python
|
UTF-8
|
Python
| false
| false
| 574
|
py
|
# -*- coding: utf-8 -*-
import pickle
from torchvision import transforms

input = open('models/state/knn.pkl', 'rb')
clf = pickle.load(input)
input.close()


def transfrom_image(image):
    transform = transforms.Compose([
        # transforms.Resize((28, 28)),
        # transforms.CenterCrop(28),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    return transform(image).unsqueeze(0)


def predict(image):
    tensor = transfrom_image(image)
    tensor = tensor.view(-1, 28*28)
    data = tensor.numpy()
    result = clf.predict(data)
    return float(result[0])
|
[
"2937443168@qq.com"
] |
2937443168@qq.com
|
43d4d44a3129344e934b24556ad12f994123e98b
|
114b2f71e553abc33f7774de00391488c74b1563
|
/websocket/tests.py
|
e6715e95a399eef9667284201cceb8de914e38ce
|
[] |
no_license
|
279zlj/Autotest_project
|
06589305df696b21e05a4f521a56c8170ec7b327
|
60576bfe56d9a4e76a590bfa3176e196d06dbeed
|
refs/heads/master
| 2022-12-14T03:42:49.933047
| 2018-08-28T03:15:53
| 2018-08-28T03:15:53
| 146,384,029
| 0
| 0
| null | 2022-12-08T02:23:10
| 2018-08-28T03:00:05
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,608
|
py
|
from django.test import TestCase
from client.c_client import Client
# Create your tests here.
import json
import sqlite3
import time
# def save_data(data_response):
# con = sqlite3.connect("D:\Autotestproject\Autotest_project\db.sqlite3")
# cur = con.cursor()
# cur.execute('insert into \
# auto_config_infos \
# (server_num,board,cpu,fc_card,gpu,hard_disk,hba,inspect_time,memory_bank,net_card,raid,status,vga) \
# values\
# ({},{},{},{},{},{},{},{},{},{},{},{},{})'.format(data_response["server_num"], data_response["board"],
# data_response["cpu"], data_response["fc_card"],
# data_response["gpu"], data_response["hard_disk"],
# data_response["hba"], data_response["inspect_time"],
# data_response["memory_bank"], data_response["net_card"], data_response["raid"],
# data_response["status"], data_response["vga"]))
# cur.execute('select * from auto_config_infos')
# print(cur.fetchall())
# con.commit()
# cur.close()
# d = {"raid": "", "fc_card": "", "status": 1, "checkstate": 0, "server_num": "23dfgtr", "ip_info": "127.0.0.1",
# "state": "success", "cpu": "CpuNum:1/CpuCore:4/CpuInfo:8Intel(R)Xeon(R)CPUE3-1230v3@3.30GHz",
# "inspect_time": 1534484708.3386865, "vga": "Graphics:LeadTekResearchInc.GK107[GeForceGT640][107d:2737]\r\n",
# "memory_bank": "MemInfo:*ASRock*Intel*Transcend/MemSize:4096MB/MemFreq:1600MT/s", "gpu": "", "hba": "",
# "hard_disk": "DiskInfo:WDCWD10EURX-73C/DiskNum:1/DiskVer:/01.01A01",
# "net_card": "NetBoardInfo:IntelCorporationEthernetConnection(2)I218-V",
# "board": "BoardName:Z97Pro3/BisoVersion:P1.20"}
d = {"board": "BoardName:Z97Pro3/BisoVersion:P1.20", "hba": "",
"net_card": "NetBoardInfo:IntelCorporationEthernetConnection(2)I218-V", "raid": "dash_raid",
"vga": "Graphics:LeadTekResearchInc.GK107[GeForceGT640][107d:2737]\r\n", "ip_info": "127.0.0.1", "checkstate": 0,
"memory_bank": "MemInfo:*ASRock*Intel*Transcend/MemSize:4096MB/MemFreq:1600MT/s", "state": "success",
"hard_disk": "DiskInfo:WDCWD10EURX-73C/DiskNum:1/DiskVer:/01.01A01",
"cpu": "CpuNum:1/CpuCore:4/CpuInfo:8Intel(R)Xeon(R)CPUE3-1230v3@3.30GHz", "gpu": "", "status": 1,
"inspect_time": 1534486359.491127, "server_num": "23dfgtr", "fc_card": ""}
def save_data(data_response):
    print(data_response)
    con = sqlite3.connect("D:\Autotestproject\Autotest_project\db.sqlite3")
    cur = con.cursor()
    params = [data_response["server_num"], data_response["board"], data_response["cpu"], data_response["memory_bank"],
              data_response["hard_disk"], data_response["raid"], data_response["vga"], data_response["gpu"],
              data_response["hba"], data_response["net_card"], data_response["fc_card"], data_response["inspect_time"],
              data_response["ip_info"], data_response["checkstate"], data_response["state"], data_response["status"]]
    cur.execute('insert into auto_config_infos \
        (server_num,board,cpu,memory_bank,hard_disk,raid,vga,gpu,hba,net_card,fc_card,inspect_time,ip_info,checkstate,state,status) \
        values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)', params)
    con.commit()
    cur.execute('select * from auto_config_infos')
    cur.close()


save_data(d)
|
[
"13414101644@163.com"
] |
13414101644@163.com
|
613e5477e30db6c4b12634643777cb4c219c3749
|
8cc910f12ccacd2926a1b26a4f2e10d107a99c8b
|
/bots/tejbot1.py
|
87bc56f743d27dc573f4d13f7273138df56bbeb2
|
[] |
no_license
|
gkanwar/pokerbots2018
|
a25f8e9e0e3eceb093363a5fbfee40d3fdc2d671
|
ce7632c8cd98d1e2971b5e67d6ba2c63daa3c396
|
refs/heads/master
| 2021-06-03T09:54:59.220651
| 2021-01-23T06:05:02
| 2021-01-23T06:05:27
| 136,264,598
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,785
|
py
|
import sys
import random
card_map = {'J': 0, 'Q': 1, 'K': 2, 'A': 3}
# simple probs for bets
pbet = [0.0, 0.0, 0.5, 1.0]
def get_msg():
return sys.stdin.readline().strip()
def get_msg_named(name):
msg = get_msg()
assert msg == name
def get_field():
return get_msg().split(': ')
def get_field_named(name):
field,val = get_field()
assert field == name
return val
def get_init_hand():
hand = int(get_field_named('Hand'))
card = card_map[get_field_named('Cards')]
return hand,card
def get_action():
act,val = get_field_named('Action').split()
val = int(val)
assert act in ("PASS", "BET", "FOLD", "BLIND")
return act,val
def get_play_actions():
acts = []
for i in xrange(3):
acts.append(get_action())
return acts
def get_showdown():
return get_field_named('Showdown').split(',')
def get_pots():
out = []
pots = get_field_named('Pots').split()
for pot in pots:
val,winner = map(int, pot.split(','))
out.append((val,winner))
return out
def get_init_round():
money = map(int, get_field_named('Money').split(','))
blinds = map(int, get_field_named('Blinds').split(','))
button = int(get_field_named('Button'))
end_prob = map(int, get_field_named('EndProb').split(','))
return money,blinds,button,end_prob
def get_end_actions():
return [get_field_named('EndAction') for i in xrange(3)]
def get_bankrolls():
bs = get_field_named('Bankrolls').split(',')
return map(int, bs)
def get_num_hands():
return int(get_field_named('NumHands'))
def with_prob(p):
return random.random() < p
def update_wagered(acts, wagered):
for i in xrange(len(wagered)):
act,val = acts[i]
if act == "BET": wagered[i] = val
def get_call_amt(acts):
amt = 0
for act,val in acts:
if act == "BET":
amt = max(amt, val)
return amt
def fl(): sys.stdout.flush()
### Start brain
get_msg_named('init_round')
money,blinds,button,end_prob = get_init_round()
print "READY"
fl()
while True:
msg = get_msg()
if msg == 'end_round': break
assert msg == 'init_hand'
# Init hand
folded = False
hand,card = get_init_hand()
wagered = [0]*3
for i in xrange(len(blinds)):
pos = (i + button) % 3
wagered[pos] = blinds[i]
cur_bet = wagered[0]
print "READY"
fl()
# First play
get_msg_named('play')
acts = get_play_actions()
update_wagered(acts, wagered)
call_amt = max(wagered)
if with_prob(pbet[card]):
cur_bet = 2
print "BET", cur_bet
elif cur_bet == call_amt: # always check
print "BET", cur_bet
else:
folded = True
print "FOLD", cur_bet
fl()
msg = get_msg()
if msg == "play":
acts = get_play_actions()
update_wagered(acts, wagered)
call_amt = max(wagered)
if call_amt == cur_bet: # check
print "BET", cur_bet
elif not folded: # we were in a check/fold, so fold
print "FOLD", cur_bet
else: # already folded
print "PASS", cur_bet
fl()
get_msg_named('end_hand')
# End hand
acts = get_play_actions()
update_wagered(acts, wagered)
for i in xrange(len(wagered)):
money[i] -= wagered[i]
get_showdown()
pots = get_pots()
for val,winner in pots:
money[winner] += val
print "OK"
fl()
get_end_actions() # assume no rebuy for now
print "Money:", ",".join(map(str, money))
fl()
# End round
bs = get_bankrolls()
num_hands = get_num_hands()
print >>sys.stderr, "Final bankrolls:", ",".join(map(str, bs))
print >>sys.stderr, "Total hands played:", num_hands
# gotta be polite
print "Thank you dealer, have a nice day!"
|
[
"gurtej@mit.edu"
] |
gurtej@mit.edu
|
7c06a594c1fc00a552171de2cf59119db21e4ad4
|
0fa4657ec9211364c7a8429a24ce7a18d5e4549e
|
/FRNN_pytorch_TTLSTM_noiseaware/loader.py
|
4bffc012284d422f2d66b3cd5126071d9db859d4
|
[] |
no_license
|
ge-dong/plasma-python
|
beefbf92fd02f3c255357f1a80b832d940bf640e
|
101d810988b39979418fb3dc5973fb4c273313c8
|
refs/heads/master
| 2021-08-20T05:04:18.185294
| 2021-07-14T16:29:02
| 2021-07-14T16:29:02
| 170,406,172
| 1
| 0
| null | 2019-02-12T23:15:43
| 2019-02-12T23:15:43
| null |
UTF-8
|
Python
| false
| false
| 36,776
|
py
|
'''
#########################################################
This file containts classes to handle data processing
Author: Julian Kates-Harbeck, jkatesharbeck@g.harvard.edu
This work was supported by the DOE CSGF program.
#########################################################
'''
from __future__ import print_function, division
import numpy as np
from shots import Shot
import multiprocessing as mp
# import pdb
import sys
class Loader(object):
'''A Python class to ...
The length of shots in e.g. JET data varies by orders of magnitude. For
data parallel synchronous training it is essential that the amounts of training
data passed to each model replica are about the same size. Therefore, a
patching technique is introduced.
A patch is a subset of shot's time/signal profile having a fixed length,
equal among all patches. Patch size is approximately equal to the minimum
shot length. More precisely: it is equal to the max(1,
min_len//rnn_length)*rnn_length - the largest number less or equal to the
minimum shot length divisible by the LSTM model length. If minimum shot
length is less than the rnn_length, then the patch length is equal to the
rnn_length
'''
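# Worked example of the patch-length rule above (illustrative numbers, not taken
# from any config in this repo): with rnn_length = 128 and a minimum shot length
# of 700 samples, the patch length is max(1, 700 // 128) * 128 = 5 * 128 = 640.
# If the shortest shot had only 50 samples (< rnn_length), the rule gives
# max(1, 50 // 128) * 128 = 1 * 128 = 128, i.e. the rnn_length itself.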
def __init__(self, conf, normalizer=None):
self.conf = conf
self.stateful = conf['model']['stateful']
self.normalizer = normalizer
self.verbose = True
def set_inference_mode(self, val):
self.normalizer.set_inference_mode(val)
def training_batch_generator(self, shot_list):
"""The method implements a training batch generator as a Python
generator with a while-loop. It iterates indefinitely over the
data set and returns one mini-batch of data at a time.
NOTE: Can be inefficient during distributed training because one
process loading data will cause all other processes to stall.
Argument list:
- shot_list:
Returns:
- One mini-batch of data and label as a Numpy array: X[start:end],
y[start:end]
- reset_states_now: boolean flag indicating when to reset state
during stateful RNN training
- num_so_far,num_total: number of samples generated so far and the
total dataset size as per shot_list
"""
batch_size = self.conf['training']['batch_size']
num_at_once = self.conf['training']['num_shots_at_once']
epoch = 0
num_so_far = 0
while True:
# the list of all shots
shot_list.shuffle()
# split the list into equal-length sublists (random shots will be
# reused to make them equal length).
shot_sublists = shot_list.sublists(num_at_once, equal_size=True)
num_total = len(shot_list)
for (i, shot_sublist) in enumerate(shot_sublists):
# produce a list of equal-length chunks from this set of shots
X_list, y_list = self.load_as_X_y_list(shot_sublist)
# Each chunk will be a multiple of the batch size
for j, (X, y) in enumerate(zip(X_list, y_list)):
num_examples = X.shape[0]
assert(num_examples % batch_size == 0)
num_chunks = num_examples//batch_size
"""
The method produces batch-sized training data X and labels
y as Numpy arrays to feed during training.
Mini-batch dimensions are (num_examples, num_timesteps,
num_dimensions_of_data) also num_examples has to be
divisible by the batch_size. The i-th example and the
(batchsize + 1)-th example are consecutive in time, so we
do not reset the RNN internal state unless we start a new
chunk.
"""
for k in range(num_chunks):
# epoch_end = (i == len(shot_sublists) - 1 and j ==
# len(X_list) -1 and k == num_chunks - 1)
reset_states_now = (k == 0)
start = k*batch_size
end = (k + 1)*batch_size
num_so_far += 1.0 * \
len(shot_sublist)/(len(X_list)*num_chunks)
yield X[start:end], y[start:end], reset_states_now, num_so_far, num_total
epoch += 1
def fill_training_buffer(self, Xbuff, Ybuff, end_indices, shot,
is_first_fill=False):
sig, res = self.get_signal_result_from_shot(shot)
length = self.conf['model']['length']
if is_first_fill: # cut signal to random position
cut_idx = np.random.randint(res.shape[0]-length+1)
sig = sig[cut_idx:]
res = res[cut_idx:]
sig_len = res.shape[0]
sig_len = (sig_len // length)*length # make divisible by lenth
assert(sig_len > 0)
batch_idx = np.where(end_indices == 0)[0][0]
if sig_len > Xbuff.shape[1]:
Xbuff = self.resize_buffer(Xbuff, sig_len+length)
Ybuff = self.resize_buffer(Ybuff, sig_len+length)
Xbuff[batch_idx, :sig_len, :] = sig[-sig_len:]
Ybuff[batch_idx, :sig_len, :] = res[-sig_len:]
end_indices[batch_idx] += sig_len
# print("Filling buffer at index {}".format(batch_idx))
return Xbuff, Ybuff, batch_idx
def return_from_training_buffer(self, Xbuff, Ybuff, end_indices):
length = self.conf['model']['length']
end_indices -= length
assert(np.all(end_indices >= 0))
X = 1.0*Xbuff[:, :length, :]
Y = 1.0*Ybuff[:, :length, :]
self.shift_buffer(Xbuff, length)
self.shift_buffer(Ybuff, length)
return X, Y
def shift_buffer(self, buff, length):
buff[:, :-length, :] = buff[:, length:, :]
def resize_buffer(self, buff, new_length, dtype=None):
if dtype is None:
dtype = self.conf['data']['floatx']
old_length = buff.shape[1]
batch_size = buff.shape[0]
num_signals = buff.shape[2]
new_buff = np.zeros((batch_size, new_length, num_signals), dtype=dtype)
new_buff[:, :old_length, :] = buff
# print("Resizing buffer to new length {}".format(new_length))
return new_buff
def inference_batch_generator_full_shot(self, shot_list):
"""
The method implements a training batch generator as a Python generator
with a while-loop.
It iterates indefinitely over the data set and returns one mini-batch
of data at a time.
NOTE: Can be inefficient during distributed training because one
process loading data will cause all other processes to stall.
Argument list:
- shot_list:
Returns:
- One mini-batch of data and label as a Numpy array:
X[start:end],y[start:end]
- reset_states_now: boolean flag indicating when to reset state
during stateful RNN training
- num_so_far,num_total: number of samples generated so far and
the total dataset size as per shot_list
"""
batch_size = self.conf['model']['pred_batch_size']
sig, res = self.get_signal_result_from_shot(shot_list.shots[0])
Xbuff = np.zeros((batch_size,) + sig.shape,
dtype=self.conf['data']['floatx'])
Ybuff = np.zeros((batch_size,) + res.shape,
dtype=self.conf['data']['floatx'])
Maskbuff = np.zeros((batch_size,) + res.shape,
dtype=self.conf['data']['floatx'])
disr = np.zeros(batch_size, dtype=bool)
lengths = np.zeros(batch_size, dtype=int)
# epoch = 0
num_total = len(shot_list)
num_so_far = 0
# returned = False
# num_steps = 0
batch_idx = 0
np.seterr(all='raise')
# warmup_steps = self.conf['training']['batch_generator_warmup_steps']
# is_warmup_period = num_steps < warmup_steps
# is_first_fill = num_steps < batch_size
while True:
# the list of all shots
# shot_list.shuffle()
for i in range(num_total):
shot = shot_list.shots[i]
sig, res = self.get_signal_result_from_shot(shot)
sig_len = res.shape[0]
if sig_len > Xbuff.shape[1]: # resize buffer if needed
old_len = Xbuff.shape[1]
Xbuff = self.resize_buffer(Xbuff, sig_len)
Ybuff = self.resize_buffer(Ybuff, sig_len)
Maskbuff = self.resize_buffer(Maskbuff, sig_len)
Maskbuff[:, old_len:, :] = 0.0
Xbuff[batch_idx, :, :] = 0.0
Ybuff[batch_idx, :, :] = 0.0
Maskbuff[batch_idx, :, :] = 0.0
Xbuff[batch_idx, :sig_len, :] = sig
Ybuff[batch_idx, :sig_len, :] = res
Maskbuff[batch_idx, :sig_len, :] = 1.0
disr[batch_idx] = shot.is_disruptive_shot()
lengths[batch_idx] = res.shape[0]
batch_idx += 1
if batch_idx == batch_size:
num_so_far += batch_size
x1 = 1.0*Xbuff
x2 = 1.0*Ybuff
x3 = 1.0*Maskbuff
x4 = disr & True
x5 = 1*lengths
yield x1, x2, x3, x4, x5, num_so_far, num_total
batch_idx = 0
def training_batch_generator_full_shot_partial_reset(self, shot_list):
"""
The method implements a training batch generator as a Python generator
with a while-loop. It iterates indefinitely over the data set and
returns one mini-batch of data at a time.
NOTE: Can be inefficient during distributed training because one
process loading data will cause all other processes to stall.
Argument list:
- shot_list:
Returns:
- One mini-batch of data and label as a Numpy array:
X[start:end],y[start:end]
- reset_states_now: boolean flag indicating when to reset state
during stateful RNN training
- num_so_far,num_total: number of samples generated so far and the
total dataset size as per shot_list
"""
batch_size = self.conf['training']['batch_size']
sig, res = self.get_signal_result_from_shot(shot_list.shots[0])
Xbuff = np.empty((batch_size,) + sig.shape,
dtype=self.conf['data']['floatx'])
Ybuff = np.empty((batch_size,) + res.shape,
dtype=self.conf['data']['floatx'])
Maskbuff = np.empty((batch_size,) + res.shape,
dtype=self.conf['data']['floatx'])
# epoch = 0
num_total = len(shot_list)
num_so_far = 0
batch_idx = 0
# warmup_steps = self.conf['training']['batch_generator_warmup_steps']
# is_warmup_period = num_steps < warmup_steps
# is_first_fill = num_steps < batch_size
while True:
# the list of all shots
shot_list.shuffle()
for i in range(num_total):
shot = self.sample_shot_from_list_given_index(shot_list, i)
sig, res = self.get_signal_result_from_shot(shot)
sig_len = res.shape[0]
if sig_len > Xbuff.shape[1]: # resize buffer if needed
old_len = Xbuff.shape[1]
Xbuff = self.resize_buffer(Xbuff, sig_len)
Ybuff = self.resize_buffer(Ybuff, sig_len)
Maskbuff = self.resize_buffer(Maskbuff, sig_len)
Maskbuff[:, old_len:, :] = 0.0
Xbuff[batch_idx, :, :] = 0.0
Ybuff[batch_idx, :, :] = 0.0
Maskbuff[batch_idx, :, :] = 0.0
Xbuff[batch_idx, :sig_len, :] = sig
Ybuff[batch_idx, :sig_len, :] = res
Maskbuff[batch_idx, :sig_len, :] = 1.0
batch_idx += 1
if batch_idx == batch_size:
num_so_far += batch_size
yield (1.0*Xbuff, 1.0*Ybuff, 1.0*Maskbuff, num_so_far,
num_total)
batch_idx = 0
def sample_shot_from_list_given_index(self, shot_list, i):
if self.conf['training']['ranking_difficulty_fac'] == 1.0:
if self.conf['data']['equalize_classes']:
shot = shot_list.sample_equal_classes()
else:
shot = shot_list.shots[i]
else: # draw the shot weighted
shot = shot_list.sample_weighted()
return shot
def training_batch_generator_partial_reset(self, shot_list):
"""
The method implements a training batch generator as a Python generator
with a while-loop. It iterates indefinitely over the data set and
returns one mini-batch of data at a time.
NOTE: Can be inefficient during distributed training because one
process loading data will cause all other processes to stall.
Argument list:
- shot_list:
Returns:
- One mini-batch of data and label as a Numpy array: X[start:end],
y[start:end]
- reset_states_now: boolean flag indicating when to reset state
during stateful RNN training
- num_so_far,num_total: number of samples generated so far and the
total dataset size as per shot_list
"""
batch_size = self.conf['training']['batch_size']
# length = self.conf['model']['length']
sig, res = self.get_signal_result_from_shot(shot_list.shots[0])
Xbuff = np.empty((batch_size,) + sig.shape,
dtype=self.conf['data']['floatx'])
Ybuff = np.empty((batch_size,) + res.shape,
dtype=self.conf['data']['floatx'])
end_indices = np.zeros(batch_size, dtype=np.int)
batches_to_reset = np.ones(batch_size, dtype=np.bool)
# epoch = 0
num_total = len(shot_list)
num_so_far = 0
returned = False
num_steps = 0
warmup_steps = self.conf['training']['batch_generator_warmup_steps']
is_warmup_period = num_steps < warmup_steps
is_first_fill = num_steps < batch_size
while True:
# the list of all shots
shot_list.shuffle()
for i in range(len(shot_list)):
if self.conf['training']['ranking_difficulty_fac'] == 1.0:
if self.conf['data']['equalize_classes']:
shot = shot_list.sample_equal_classes()
else:
# TODO(KGF): test my merge of the "jdev" branch for the
# following line into this set of conditionals
shot = self.sample_shot_from_list_given_index(
shot_list, i)
# shot = shot_list.shots[i]
else: # draw the shot weighted
shot = shot_list.sample_weighted()
while not np.any(end_indices == 0):
X, Y = self.return_from_training_buffer(
Xbuff, Ybuff, end_indices)
yield (X, Y, batches_to_reset, num_so_far, num_total,
is_warmup_period)
returned = True
num_steps += 1
is_warmup_period = num_steps < warmup_steps
is_first_fill = num_steps < batch_size
batches_to_reset[:] = False
Xbuff, Ybuff, batch_idx = self.fill_training_buffer(
Xbuff, Ybuff, end_indices, shot, is_first_fill)
batches_to_reset[batch_idx] = True
if returned and not is_warmup_period:
num_so_far += 1
# epoch += 1
def fill_batch_queue(self, shot_list, queue):
print("Starting thread to fill queue")
gen = self.training_batch_generator_partial_reset(shot_list)
while True:
ret = next(gen)
queue.put(ret, block=True, timeout=-1)
def training_batch_generator_process(self, shot_list):
queue = mp.Queue()
proc = mp.Process(target=self.fill_batch_queue,
args=(shot_list, queue))
proc.start()
while True:
yield queue.get(True)
proc.join()
queue.close()
def load_as_X_y_list(self, shot_list, verbose=False,
prediction_mode=False):
"""
The method turns a ShotList into a set of equal-sized patches which
contain a number of examples that is a multiple of the batch size.
Initially, shots are "light", meaning signal and disruption related
attributes are not filled.
By invoking Loader.get_signals_results_from_shotlist the
shot information is filled and stored in the object in memory. Next,
patches are made, finally patches are arranged into batch input shape
expected by RNN model.
Performs calls to: get_signals_results_from_shotlist, make_patches,
arange_patches
Argument list:
- shot_list: a ShotList
- verbose: TO BE DEPRECATED, self.verbose data member is used instead
- prediction_mode: unused
Returns:
- X_list,y_list: lists of Numpy arrays of batch input shape
"""
# TODO(KGF): check tuple unpack
(signals, results,
total_length) = self.get_signals_results_from_shotlist(shot_list)
sig_patches, res_patches = self.make_patches(signals, results)
X_list, y_list = self.arange_patches(sig_patches, res_patches)
effective_length = len(res_patches)*len(res_patches[0])
if self.verbose:
print('multiplication factor: {}'.format(
1.0 * effective_length / total_length))
print('effective/total length : {}/{}'.format(
effective_length, total_length))
print('patch length: {} num patches: {}'.format(
len(res_patches[0]), len(res_patches)))
return X_list, y_list
def load_as_X_y_pred(self, shot_list, verbose=False,
custom_batch_size=None):
(signals, results, shot_lengths,
disruptive) = self.get_signals_results_from_shotlist(
shot_list, prediction_mode=True)
sig_patches, res_patches = self.make_prediction_patches(signals,
results)
X, y = self.arange_patches_single(sig_patches, res_patches,
prediction_mode=True,
custom_batch_size=custom_batch_size)
return X, y, shot_lengths, disruptive
def get_signals_results_from_shotlist(self, shot_list,
prediction_mode=False):
prepath = self.conf['paths']['processed_prepath']
use_signals = self.conf['paths']['use_signals']
signals = []
results = []
disruptive = []
shot_lengths = []
total_length = 0
for shot in shot_list:
assert(isinstance(shot, Shot))
assert(shot.valid)
shot.restore(prepath)
if self.normalizer is not None:
self.normalizer.apply(shot)
else:
print('Warning, no normalization. ',
'Training data may be poorly conditioned')
if self.conf['training']['use_mock_data']:
signal, ttd = self.get_mock_data()
try:
target_description=self.conf['training']['target_description']
except:
# print(sys.exc_info())
target_description=['Locked mode amplitude']
try:
predict_time=self.conf['training']['predict_time']
except:
# print(sys.exc_info())
predict_time=30
try:
predict_mode=self.conf['training']['predict_mode']
# ttd, signal = shot.get_data_arrays_lmtarget(
# use_signals, self.conf['data']['floatx'],predict_time=predict_time,predict_mode=predict_mode,target_description=target_description)
except:
# print(sys.exc_info())
# predict_mode='shift_target'
ttd, signal = shot.get_data_arrays(
use_signals, self.conf['data']['floatx'])
# if len(ttd) < self.conf['model']['length']:
# print(ttd)
# print(shot)
# print(shot.number)
total_length += len(ttd)
signals.append(signal)
shot_lengths.append(len(ttd))
disruptive.append(shot.is_disruptive)
if len(ttd.shape) == 1:
results.append(np.expand_dims(ttd, axis=1))
else:
results.append(ttd)
shot.make_light()
if not prediction_mode:
return signals, results, total_length
else:
return signals, results, shot_lengths, disruptive
def get_signal_result_from_shot(self, shot, prediction_mode=False):
prepath = self.conf['paths']['processed_prepath']
use_signals = self.conf['paths']['use_signals']
assert(isinstance(shot, Shot))
assert(shot.valid)
shot.restore(prepath)
if self.normalizer is not None:
self.normalizer.apply(shot)
else:
print('Warning, no normalization. ',
'Training data may be poorly conditioned')
if self.conf['training']['use_mock_data']:
signal, ttd = self.get_mock_data()
try:
target_description=self.conf['training']['target_description']
except:
#print(sys.exc_info())
target_description='Locked mode amplitude'
try:
predict_time=self.conf['training']['predict_time']
except:
#print(sys.exc_info())
predict_time=30
try:
predict_mode=self.conf['training']['predict_mode']
# ttd, signal = shot.get_data_arrays_lmtarget(
# use_signals, self.conf['data']['floatx'],predict_time=predict_time,predict_mode=predict_mode,target_description=target_description)
except:
#print(sys.exc_info())
#predict_mode='shift_target'
ttd, signal = shot.get_data_arrays(
use_signals, self.conf['data']['floatx'])
if (ttd.shape[0]) < self.conf['model']['length']:
print(ttd)
print(shot)
print(shot.number)
print("Shot must be at least as long as the RNN length.")
exit(1)
if len(ttd.shape) == 1:
ttd = np.expand_dims(ttd, axis=1)
shot.make_light()
if not prediction_mode:
return signal, ttd
else:
return signal, ttd, shot.is_disruptive
def batch_output_to_array(self, output, batch_size=None):
if batch_size is None:
batch_size = self.conf['model']['pred_batch_size']
assert(output.shape[0] % batch_size == 0)
num_chunks = output.shape[0] // batch_size
num_timesteps = output.shape[1]
feature_size = output.shape[2]
outs = []
for patch_idx in range(batch_size):
out = np.empty((num_chunks*num_timesteps, feature_size))
for chunk in range(num_chunks):
out[chunk*num_timesteps:(chunk + 1)*num_timesteps, :] = output[
chunk * batch_size + patch_idx, :, :]
outs.append(out)
return outs
def make_deterministic_patches(self, signals, results):
num_timesteps = self.conf['model']['length']
sig_patches = []
res_patches = []
min_len = self.get_min_len(signals, num_timesteps)
for sig, res in zip(signals, results):
(sig_patch,
res_patch) = self.make_deterministic_patches_from_single_array(
sig, res, min_len)
sig_patches += sig_patch
res_patches += res_patch
return sig_patches, res_patches
def make_deterministic_patches_from_single_array(self, sig, res, min_len):
sig_patches = []
res_patches = []
if len(sig) <= min_len:
print('signal length: {}'.format(len(sig)))
assert(min_len <= len(sig))
for start in range(0, len(sig)-min_len, min_len):
sig_patches.append(sig[start:start+min_len])
res_patches.append(res[start:start+min_len])
sig_patches.append(sig[-min_len:])
res_patches.append(res[-min_len:])
return sig_patches, res_patches
def make_random_patches(self, signals, results, num):
num_timesteps = self.conf['model']['length']
sig_patches = []
res_patches = []
min_len = self.get_min_len(signals, num_timesteps)
for i in range(num):
idx = np.random.randint(len(signals))
sig_patch, res_patch = self.make_random_patch_from_array(
signals[idx], results[idx], min_len)
sig_patches.append(sig_patch)
res_patches.append(res_patch)
return sig_patches, res_patches
def make_random_patch_from_array(self, sig, res, min_len):
start = np.random.randint(len(sig) - min_len+1)
return sig[start:start+min_len], res[start:start+min_len]
def get_min_len(self, arrs, length):
min_len = min([len(a) for a in arrs]
+ [self.conf['training']['max_patch_length']])
min_len = max(1, min_len // length) * length
return min_len
def get_max_len(self, arrs, length):
max_len = max([len(a) for a in arrs])
max_len = int(np.ceil(1.0*max_len / length) * length)
return max_len
def make_patches(self, signals, results):
"""A patch is a subset of shot's time/signal profile having a fixed
length, equal among all patches. Patch size is approximately equal to
the minimum shot length. More precisely: it is equal to the max(1,
min_len//rnn_length)*rnn_length - the largest number less or equal to
the minimum shot length divisible by the LSTM model length. If minimum
shot length is less than the rnn_length, then the patch length is equal
to the rnn_length
Since shot lengths are not multiples of the minimum shot length in
general, some non-deterministic fraction of patches is created. See:
Deterministic patching:
Random patching:
Argument list:
- signals: a list of 1D Numpy array of doubles containing signal
values (a plasma property). Numpy arrays are shot-sized
- results: a list of 1D Numpy array of doubles containing disruption
times or -1 if a shot is non-disruptive. Numpy arrays are shot-sized
NOTE: signals and results are parallel lists. Since Arrays are
shot-sized, the shape varies across the list
Returns:
- sig_patches_det + sig_patches_rand: (concatenated) list of 1D Numpy
arrays of doubles containing signal values. Numpy arrays are
patch-sized
- res_patches_det + res_patches_rand: (concatenated) a list of 1D
Numpy array of doubles containing disruption times or -1 if a shot is
non-disruptive. Numpy arrays are patch-sized
NOTE: sig_patches_det + sig_patches_rand and res_patches_det +
res_patches_rand are parallel lists. All arrays in the list have
identical shapes.
"""
total_num = self.conf['training']['batch_size']
sig_patches_det, res_patches_det = self.make_deterministic_patches(
signals, results)
num_already = len(sig_patches_det)
total_num = int(np.ceil(1.0 * num_already / total_num)) * total_num
num_additional = total_num - num_already
assert(num_additional >= 0)
sig_patches_rand, res_patches_rand = self.make_random_patches(
signals, results, num_additional)
if self.verbose:
print(
'random to deterministic ratio: {}/{}'.format(num_additional,
num_already))
return (sig_patches_det + sig_patches_rand,
res_patches_det + res_patches_rand)
def make_prediction_patches(self, signals, results):
# total_num = self.conf['training']['batch_size']
num_timesteps = self.conf['model']['pred_length']
sig_patches = []
res_patches = []
max_len = self.get_max_len(signals, num_timesteps)
for sig, res in zip(signals, results):
sig_patches.append(Loader.pad_array_to_length(sig, max_len))
res_patches.append(Loader.pad_array_to_length(res, max_len))
return sig_patches, res_patches
@staticmethod
def pad_array_to_length(arr, length):
dlength = max(0, length - arr.shape[0])
tuples = [(0, dlength)]
for l in arr.shape[1:]:
tuples.append((0, 0))
return np.pad(arr, tuples, mode='constant', constant_values=0)
def arange_patches(self, sig_patches, res_patches):
num_timesteps = self.conf['model']['length']
batch_size = self.conf['training']['batch_size']
assert(len(sig_patches) % batch_size == 0) # fixed number of batches
# divisible by length of RNN sequence
assert(len(sig_patches[0]) % num_timesteps == 0)
num_batches = len(sig_patches) // batch_size
# patch_length = len(sig_patches[0])
zipped = list(zip(sig_patches, res_patches))
np.random.shuffle(zipped)
sig_patches, res_patches = zip(*zipped)
X_list = []
y_list = []
for i in range(num_batches):
X, y = self.arange_patches_single(
sig_patches[i*batch_size:(i+1)*batch_size],
res_patches[i*batch_size:(i+1)*batch_size])
X_list.append(X)
y_list.append(y)
return X_list, y_list
def arange_patches_single(self, sig_patches, res_patches,
prediction_mode=False, custom_batch_size=None):
if prediction_mode:
num_timesteps = self.conf['model']['pred_length']
batch_size = self.conf['model']['pred_batch_size']
else:
num_timesteps = self.conf['model']['length']
batch_size = self.conf['training']['batch_size']
return_sequences = self.conf['model']['return_sequences']
if custom_batch_size is not None:
batch_size = custom_batch_size
assert(len(sig_patches) == batch_size)
assert(len(sig_patches[0]) % num_timesteps == 0)
num_chunks = len(sig_patches[0]) // num_timesteps
num_dimensions_of_data = sig_patches[0].shape[1]
if len(res_patches[0].shape) == 1:
num_answers = 1
else:
num_answers = res_patches[0].shape[1]
X = np.zeros(
(num_chunks*batch_size,
num_timesteps,
num_dimensions_of_data))
if return_sequences:
y = np.zeros((num_chunks*batch_size, num_timesteps, num_answers))
else:
y = np.zeros((num_chunks*batch_size, num_answers))
for chunk_idx in range(num_chunks):
src_start = chunk_idx*num_timesteps
src_end = (chunk_idx+1)*num_timesteps
for patch_idx in range(batch_size):
X[chunk_idx*batch_size + patch_idx, :,
:] = sig_patches[patch_idx][src_start:src_end]
if return_sequences:
y[chunk_idx*batch_size + patch_idx, :,
:] = res_patches[patch_idx][src_start:src_end]
else:
y[chunk_idx*batch_size + patch_idx,
:] = res_patches[patch_idx][src_end-1]
return X, y
def load_as_X_y(self, shot, verbose=False, prediction_mode=False):
assert(isinstance(shot, Shot))
assert(shot.valid)
prepath = self.conf['paths']['processed_prepath']
return_sequences = self.conf['model']['return_sequences']
shot.restore(prepath)
if self.normalizer is not None:
self.normalizer.apply(shot)
else:
print('Warning, no normalization. ',
'Training data may be poorly conditioned')
signals = shot.signals
ttd = shot.ttd
if self.conf['training']['use_mock_data']:
signals, ttd = self.get_mock_data()
# if not self.stateful:
# X,y = self.array_to_path_and_external_pred(signals,ttd)
# else:
X, y = self.array_to_path_and_external_pred_cut(
signals, ttd, return_sequences=return_sequences,
prediction_mode=prediction_mode)
shot.make_light()
return X, y # X,y
def get_mock_data(self):
signals = np.linspace(0, 4*np.pi, 10000)
rand_idx = np.random.randint(6000)
lgth = np.random.randint(1000, 3000)
signals = signals[rand_idx:rand_idx+lgth]
# ttd[-100:] = 1
signals = np.vstack([signals]*8)
signals = signals.T
signals[:, 0] = 0.5 + 0.5*np.sin(signals[:, 0])
signals[:, 1] = 0.5 # + 0.5*cos(signals[:,1])
signals[:, 2] = 0.5 + 0.5*np.sin(2*signals[:, 2])
signals[:, 3:] *= 0
offset = 100
ttd = 0.0*signals[:, 0]
ttd[offset:] = 1.0*signals[:-offset, 0]
mask = ttd > np.mean(ttd)
ttd[~mask] = 0
# mean(signals[:,:2],1)
return signals, ttd
def array_to_path_and_external_pred_cut(
self,
arr,
res,
return_sequences=False,
prediction_mode=False):
num_timesteps = self.conf['model']['length']
skip = self.conf['model']['skip']
if prediction_mode:
num_timesteps = self.conf['model']['pred_length']
if not return_sequences:
num_timesteps = 1
skip = num_timesteps # batchsize = 1!
assert(np.shape(arr)[0] == np.shape(res)[0])
num_chunks = len(arr) // num_timesteps
arr = arr[-num_chunks*num_timesteps:]
res = res[-num_chunks*num_timesteps:]
assert(np.shape(arr)[0] == np.shape(res)[0])
X = []
y = []
i = 0
chunk_range = range(num_chunks-1)
i_range = range(1, num_timesteps+1, skip)
if prediction_mode:
chunk_range = range(num_chunks)
i_range = range(1)
for chunk in chunk_range:
for i in i_range:
start = chunk*num_timesteps + i
assert(start + num_timesteps <= len(arr))
X.append(arr[start:start+num_timesteps, :])
if return_sequences:
y.append(res[start:start+num_timesteps])
else:
y.append(res[start+num_timesteps-1:start+num_timesteps])
X = np.array(X)
y = np.array(y)
if len(np.shape(X)) == 1:
X = np.expand_dims(X, axis=len(np.shape(X)))
if return_sequences:
y = np.expand_dims(y, axis=len(np.shape(y)))
return X, y
@staticmethod
def get_batch_size(batch_size, prediction_mode):
if prediction_mode:
return 1
else:
return batch_size # Loader.get_num_skips(length,skip)
@staticmethod
def get_num_skips(length, skip):
return 1 + (length-1)//skip
class ProcessGenerator(object):
def __init__(self, generator):
self.generator = generator
self.proc = mp.Process(target=self.fill_batch_queue)
self.queue = mp.Queue()
self.proc.start()
def fill_batch_queue(self):
print("Starting process to fetch data")
count = 0
while True:
self.queue.put(next(self.generator), True)
count += 1
def __next__(self):
return self.queue.get(True)
def next(self):
return self.__next__()
def __exit__(self):
self.proc.terminate()
self.queue.close()
|
[
"noreply@github.com"
] |
ge-dong.noreply@github.com
|
a783106d7327f6e60fc8d1e993174170369d1c56
|
635f10f50b3dc5304c000c1e195e5a1d7504a8e6
|
/gpet_api_test/dir_config.py
|
5cd909cd922b1fb888c8332d7d98929782778c0b
|
[] |
no_license
|
zhihuia/TestProjects
|
1bd6125c59c34fc980a7b612b935c90accd78ba2
|
87dd88aeca91f54f64c7165fd516eb3b4d331885
|
refs/heads/master
| 2020-03-24T22:45:51.518962
| 2018-12-20T12:12:46
| 2018-12-20T12:12:46
| 143,101,687
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
import os
cur_dir = os.path.split(os.path.abspath(__file__))[0]  # split off the file's directory from its file name
# os.path.abspath(__file__) gets the full path of the current file; os.path.split returns a tuple
testcase_dir = cur_dir.replace("Common", "TestCases")
testdata_dir = cur_dir.replace("Common", "TestDatas")
htmlreport_dir = cur_dir.replace("Common", "HtmlTestReport")
logs_dir = cur_dir.replace("Common", "Logs")
config_dir = cur_dir.replace("Common", "Config")
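# Illustrative example (assumed layout, not taken from this repo): if this file
# sat at D:\TestProjects\gpet_api_test\Common\dir_config.py, cur_dir would be
# "D:\TestProjects\gpet_api_test\Common" and testcase_dir would resolve to
# "D:\TestProjects\gpet_api_test\TestCases" via the "Common" -> "TestCases" replace.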
|
[
"moonlightzhihui@163.com"
] |
moonlightzhihui@163.com
|
9b1a6b196b0755fa7bff3b62d8a6d7f89638b11f
|
b9a9f1d9a3ef5871b6bd3835e5f2c8f78d79654d
|
/python/matrix.py
|
7092a8ca4eedcf7d38d5afc1456621f1acfe90fa
|
[] |
no_license
|
Dreamer-WangYiQiang/ExtendedKalmanFilter
|
d09c0283c2cce1210f702506ea4b6b4e4da7729c
|
177a79258f80d187b7056fce48e81450a47b7d30
|
refs/heads/master
| 2022-04-10T15:00:37.196774
| 2017-06-13T14:46:27
| 2017-06-13T14:46:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,774
|
py
|
"""matrix class from Udacity robotics course"""
from math import *
class matrix:
# implements basic operations of a matrix class
def __init__(self, value):
self.value = value
self.dimx = len(value)
self.dimy = len(value[0])
if value == [[]]:
self.dimx = 0
def zero(self, dimx, dimy):
# check if valid dimensions
if dimx < 1 or dimy < 1:
raise ValueError("Invalid size of matrix")
else:
self.dimx = dimx
self.dimy = dimy
self.value = [[0 for row in range(dimy)] for col in range(dimx)]
def identity(self, dim):
# check if valid dimension
if dim < 1:
raise ValueError("Invalid size of matrix")
else:
self.dimx = dim
self.dimy = dim
self.value = [[0 for row in range(dim)] for col in range(dim)]
for i in range(dim):
self.value[i][i] = 1
def show(self):
for i in range(self.dimx):
print(self.value[i])
print(' ')
def __add__(self, other):
# check if correct dimensions
if self.dimx != other.dimx or self.dimy != other.dimy:
raise ValueError("Matrices must be of equal dimensions to add")
else:
# add if correct dimensions
res = matrix([[]])
res.zero(self.dimx, self.dimy)
for i in range(self.dimx):
for j in range(self.dimy):
res.value[i][j] = self.value[i][j] + other.value[i][j]
return res
def __sub__(self, other):
# check if correct dimensions
if self.dimx != other.dimx or self.dimy != other.dimy:
raise ValueError("Matrices must be of equal dimensions to subtract")
else:
# subtract if correct dimensions
res = matrix([[]])
res.zero(self.dimx, self.dimy)
for i in range(self.dimx):
for j in range(self.dimy):
res.value[i][j] = self.value[i][j] - other.value[i][j]
return res
def __mul__(self, other):
# check if correct dimensions
if self.dimy != other.dimx:
raise ValueError("Matrices must be m*n and n*p to multiply")
else:
# multiply if correct dimensions
res = matrix([[]])
res.zero(self.dimx, other.dimy)
for i in range(self.dimx):
for j in range(other.dimy):
for k in range(self.dimy):
res.value[i][j] += self.value[i][k] * other.value[k][j]
return res
def transpose(self):
# compute transpose
res = matrix([[]])
res.zero(self.dimy, self.dimx)
for i in range(self.dimx):
for j in range(self.dimy):
res.value[j][i] = self.value[i][j]
return res
# Thanks to Ernesto P. Adorio for use of Cholesky and CholeskyInverse functions
def Cholesky(self, ztol=1.0e-5):
# Computes the upper triangular Cholesky factorization of
# a positive definite matrix.
res = matrix([[]])
res.zero(self.dimx, self.dimx)
for i in range(self.dimx):
S = sum([(res.value[k][i])**2 for k in range(i)])
d = self.value[i][i] - S
if abs(d) < ztol:
res.value[i][i] = 0.0
else:
if d < 0.0:
raise ValueError("Matrix not positive-definite")
res.value[i][i] = sqrt(d)
for j in range(i+1, self.dimx):
S = sum([res.value[k][i] * res.value[k][j] for k in range(self.dimx)])
if abs(S) < ztol:
S = 0.0
res.value[i][j] = (self.value[i][j] - S)/res.value[i][i]
return res
def CholeskyInverse(self):
# Computes inverse of matrix given its Cholesky upper Triangular
# decomposition of matrix.
res = matrix([[]])
res.zero(self.dimx, self.dimx)
# Backward step for inverse.
for j in reversed(range(self.dimx)):
tjj = self.value[j][j]
S = sum([self.value[j][k]*res.value[j][k] for k in range(j+1, self.dimx)])
res.value[j][j] = 1.0/tjj**2 - S/tjj
for i in reversed(range(j)):
res.value[j][i] = res.value[i][j] = -sum([self.value[i][k]*res.value[k][j] for k in range(i+1, self.dimx)])/self.value[i][i]
return res
def inverse(self):
aux = self.Cholesky()
res = aux.CholeskyInverse()
return res
def __repr__(self):
return repr(self.value)
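# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original file).
# Exercises the operations above on a small positive-definite system.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    A = matrix([[2., 1.], [1., 2.]])      # symmetric positive-definite
    b = matrix([[1.], [0.]])
    x = A.inverse() * b                   # solve A x = b via the Cholesky-based inverse
    (A * x).show()                        # ~[1.0] / [0.0], i.e. recovers b
    (A - A.transpose()).show()            # zero matrix: A is symmetric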
|
[
"wilbur@MacBook-Pro.local"
] |
wilbur@MacBook-Pro.local
|
30fff796f8fcfc32f7125d4357b9e6bb99834655
|
9222711ab110cf180997e8b3af8e784ebd4ebf9c
|
/Message.py
|
317ece43fd5ba1d8df5508bfa254fa92fe0925d2
|
[] |
no_license
|
MaxHXie/twittersentiment
|
e2184786b8f256f29abdc4ab5eb6792a1f8f5d63
|
18b6b2884f3a72a7974caa478456671245128f89
|
refs/heads/master
| 2020-04-17T09:23:25.299362
| 2019-01-19T18:53:59
| 2019-01-19T18:53:59
| 166,455,712
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,536
|
py
|
#Now we want to make a class for each datapoint, i.e. messages
import re
from nltk.stem import WordNetLemmatizer
from nltk.tokenize.casual import TweetTokenizer
from nltk import pos_tag
from nltk.corpus import wordnet
import copy
class Message:
def __init__(self, text, true_sentiment=None, n=2):
try:
self.n_grams = self.grammify(self.tokenize(self.sanitize(text)), n)
self.POS_grams = self.POS(self.n_grams)
self.true_sentiment = true_sentiment
self.bad_row = False
except:
print('Bad row: ' + text)
self.bad_row = True
def get_wordnet_pos(self, treebank_tag):
if treebank_tag.startswith('J'):
return wordnet.ADJ
elif treebank_tag.startswith('V'):
return wordnet.VERB
elif treebank_tag.startswith('N'):
return wordnet.NOUN
elif treebank_tag.startswith('R'):
return wordnet.ADV
else:
return wordnet.NOUN
def sanitize(self, text):
replace_list = [
["can't", "cannot"],
["won't", "will not"],
["don't","do not"],
["cannot", "can not"],
["isn't", "is not"],
["wasn't", "was not"],
["weren't", "were not"]
]
for word, replace in replace_list:
text = text.replace(word,replace)
text = re.sub(r"http\S+", "", text)
text = re.sub(r"@\S+", "", text)
text = text.strip("'")
text = text.strip('"')
text = text.strip()
text = text.lower()
#remove URL-links (www.google.com) - DONE
#remove twitter special words - ?
#Strip the text of any spaces or tabs in the beginning/end of the string
#Ta bort hashtags?
return text
def tokenize(self, text):
#Make a list where each word is an element, text_list = text.split(' ')
#Lemmatize each word. Exception: We want "better" to become its lemma "good" but "best" should stay "best".
#There are nltk methods for this. Look at https://www.youtube.com/watch?v=uoHVztKY6S4
#Remove the articles 'a', 'an', 'the'
#Also split on punctuation marks so that, "I like, fish" becomes ['I', 'like', ',', 'fish'] = token_list
tweettokenizer = TweetTokenizer();
lemmatizer = WordNetLemmatizer();
token_list = tweettokenizer.tokenize(text)
try:
token_list.remove('a');
token_list.remove('an');
token_list.remove('the');
except ValueError:
pass
pos_list = pos_tag(token_list)
pos_listwordnet = [(word[0], self.get_wordnet_pos(word[1])) for word in pos_list]
for i in range(len(token_list)):
token_list[i] = lemmatizer.lemmatize(token_list[i] ,pos=pos_listwordnet[i][1])
if len(token_list) == 1:
token_list.append('.')
return token_list
def grammify(self, text_list, n):
#Construct n-grams as 2D lists, here is an example if n=2 [['I', 'like'], ['like', ','], [',', 'fish']]
#Negations such as no and not must merge with the previous and the next word, and be treated as one word.
#“I do not like fish” will form the bigrams [['I', 'do+not'], ['do+not', 'like'], ['not+like', 'fish']] = n_grams
#Treat all smileys as "words"
def peek_next():
try:
return text_list[index+gram_index+1]
except IndexError:
return False
def peek_this():
try:
return text_list[index+gram_index]
except IndexError:
return False
def peek_prev():
try:
return text_list[index+gram_index-1]
except IndexError:
return False
n_grams = []
index = 0
gram_list = []
gram_index = 0
while index < len(text_list):
prev_word = peek_prev()
this_word = peek_this()
next_word = peek_next()
if next_word != False and this_word != False:
if this_word.lower() == 'not':
gram_list.append(this_word + "+" + next_word)
gram_index += 2
else:
if next_word.lower() == 'not':
gram_list.append(this_word + "+" + next_word)
gram_index += 2
else:
gram_list.append(this_word)
gram_index += 1
else:
if this_word != False:
gram_list.append(this_word)
n_grams.append(gram_list)
return n_grams
if len(gram_list) >= n:
n_grams.append(gram_list)
gram_list = []
gram_index = 0
index += 1
def POS(self, n_grams):
#Take in a 2D list of n_grams. It shouldn't matter how big n is.
#Make a similarly formatted list but with the POS tags instead
#Exception: all punctuations get the tag BR
#Exception: all positive smileys get the tag PS :) =) :D =D ;) ;D :3
#Exception: all negative smileys get the tag NS :( =( :*( =*(
#Exception: the word not+[word] need to become the POS tag of [word]
#['I', 'do+not'], ['do+not', 'like'], ['not+like', 'fish'] becomes
#['PR', 'VB'], ['VB', 'ADV'], ['ADV', 'NN'] = POS_grams
#I recommend using an nltk library method for this one, as we want it to be 100% correct.
POS_grams = copy.deepcopy(n_grams)
n = len(n_grams[0])
for index1 in range(len(n_grams)):
for index2 in range(n):
uw = n_grams[index1][index2]
if uw == '?' or uw == '.' or uw == ',' or uw == ';' or uw == ':' or uw == '!':
POS_grams[index1][index2] = 'BR'
elif uw == ':)' or uw == ':D' or uw == ';)' or uw == ';D' or uw == '=)' or uw == '=D' or uw == ':-)' or uw == ':-D' or uw == ';-)':
POS_grams[index1][index2] = 'PS'
elif uw == ':(' or uw == ':/' or uw == ';(' or uw == '=(' or uw == '=/' or uw == ':-/' or uw == ':-(' or uw == ';/':
POS_grams[index1][index2] = 'NS'
else:
POS_grams[index1][index2] = pos_tag([uw])[0][1]
return POS_grams
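# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original file).
# Assumes the NLTK data used above is available, e.g. after
# nltk.download('wordnet') and nltk.download('averaged_perceptron_tagger').
# ---------------------------------------------------------------------------
# m = Message("I do not like fish :(", true_sentiment="negative", n=2)
# print(m.n_grams)    # bigrams with negations merged, e.g. containing 'do+not'
# print(m.POS_grams)  # matching POS tags; the ':(' smiley maps to 'NS'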
|
[
"noreply@github.com"
] |
MaxHXie.noreply@github.com
|
8f3fd56d7fcc53dff4b0a0f3e9943652e4108514
|
9e70af68afebb05e66381e4f6a978eb1b7589c46
|
/mywork/testsurface.py
|
9c9f460c955f27a1e39564e43c413a7f23928d5c
|
[] |
no_license
|
OuYangMinOa/space-Physic
|
cbca14b3948ac204c35327fbb9cc6ab9eb704d76
|
e8e6ec7379a8cecb944add6dbbbee213dae1b018
|
refs/heads/master
| 2022-11-11T08:09:59.844261
| 2020-06-28T20:14:24
| 2020-06-28T20:14:24
| 275,657,789
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import numpy as np
from netCDF4 import Dataset
import os
m = Basemap()
m.drawcoastlines()
m.fillcontinents()
m.drawparallels(np.arange(-90,90,30),labels=[1,1,0,1], fontsize=8)
m.drawmeridians(np.arange(0,360,30),labels=[1,1,0,1], rotation=45, fontsize=8)
plt.xlabel('Longitude', labelpad=40)
plt.ylabel('Latitude', labelpad=40)
x = np.arange(-90,90,1)
y = np.arange(-180,180,1)
x,y = np.meshgrid(x,y)
z = np.sin(x/100)**10 + np.cos(10 + y*x/10000) * np.cos(x/100)
##mappable = plt.cm.ScalarMappable(cmap=plt.cm.viridis)
##mappable.set_array(z)
print(z.shape)
m.pcolormesh(y,x,z,zorder=2,cmap='Spectral_r',alpha=0.7)
plt.colorbar()
plt.show()
|
[
"noreply@github.com"
] |
OuYangMinOa.noreply@github.com
|
0dd0fb2d347482fcc39221d04b6a381dab6cd16f
|
d87acfc6fa8dcf71ac26eebbd6069a938222efc3
|
/captum/attr/_core/lrp.py
|
b40829da9c388417e8b280a4ee94afa8983edfd2
|
[
"BSD-3-Clause"
] |
permissive
|
pytorch/captum
|
aedeec58d34c7611ae8928144e9f2314f820c1ca
|
945c582cc0b08885c4e2bfecb020abdfac0122f3
|
refs/heads/master
| 2023-09-04T08:49:54.120380
| 2023-07-08T00:30:37
| 2023-07-08T00:30:37
| 204,734,444
| 4,230
| 491
|
BSD-3-Clause
| 2023-09-08T17:58:15
| 2019-08-27T15:34:41
|
Python
|
UTF-8
|
Python
| false
| false
| 18,328
|
py
|
#!/usr/bin/env python3
import typing
from collections import defaultdict
from typing import Any, cast, List, Tuple, Union
import torch.nn as nn
from captum._utils.common import (
_format_output,
_format_tensor_into_tuples,
_is_tuple,
_register_backward_hook,
_run_forward,
)
from captum._utils.gradient import (
apply_gradient_requirements,
undo_gradient_requirements,
)
from captum._utils.typing import Literal, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._utils.attribution import GradientAttribution
from captum.attr._utils.common import _sum_rows
from captum.attr._utils.custom_modules import Addition_Module
from captum.attr._utils.lrp_rules import EpsilonRule, PropagationRule
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
from torch.utils.hooks import RemovableHandle
class LRP(GradientAttribution):
r"""
Layer-wise relevance propagation is based on a backward propagation
mechanism applied sequentially to all layers of the model. Here, the
model output score represents the initial relevance which is decomposed
into values for each neuron of the underlying layers. The decomposition
is defined by rules that are chosen for each layer, involving its weights
and activations. Details on the model can be found in the original paper
[https://doi.org/10.1371/journal.pone.0130140]. The implementation is
inspired by the tutorial of the same group
[https://doi.org/10.1016/j.dsp.2017.10.011] and the publication by
Ancona et al. [https://openreview.net/forum?id=Sy21R9JAW].
"""
def __init__(self, model: Module) -> None:
r"""
Args:
model (Module): The forward function of the model or any modification of
it. Custom rules for a given layer need to be defined as attribute
`module.rule` and need to be of type PropagationRule. If no rule is
specified for a layer, a pre-defined default rule for the module type
is used.
"""
GradientAttribution.__init__(self, model)
self.model = model
self._check_rules()
@property
def multiplies_by_inputs(self) -> bool:
return True
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: Literal[False] = False,
verbose: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
...
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
*,
return_convergence_delta: Literal[True],
verbose: bool = False,
) -> Tuple[TensorOrTupleOfTensorsGeneric, Tensor]:
...
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: bool = False,
verbose: bool = False,
) -> Union[
TensorOrTupleOfTensorsGeneric, Tuple[TensorOrTupleOfTensorsGeneric, Tensor]
]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which relevance is
propagated. If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (tuple, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
verbose (bool, optional): Indicates whether information on application
of rules is printed during propagation.
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**
or 2-element tuple of **attributions**, **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The propagated relevance values with respect to each
input feature. The values are normalized by the output score
value (sum(relevance)=1). To obtain values comparable to other
methods or implementations these values need to be multiplied
by the output score. Attributions will always
be the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned. The sum of attributions
is one and does not correspond to the prediction score as in other
implementations.
- **delta** (*Tensor*, returned if return_convergence_delta=True):
Delta is calculated per example, meaning that the number of
elements in the returned delta tensor is equal to the number of
examples in the inputs.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities. It has one
>>> # Conv2D and a ReLU layer.
>>> net = ImageClassifier()
>>> lrp = LRP(net)
>>> input = torch.randn(3, 3, 32, 32)
>>> # Attribution size matches input size: 3x3x32x32
>>> attribution = lrp.attribute(input, target=5)
"""
self.verbose = verbose
self._original_state_dict = self.model.state_dict()
self.layers: List[Module] = []
self._get_layers(self.model)
self._check_and_attach_rules()
self.backward_handles: List[RemovableHandle] = []
self.forward_handles: List[RemovableHandle] = []
is_inputs_tuple = _is_tuple(inputs)
inputs = _format_tensor_into_tuples(inputs)
gradient_mask = apply_gradient_requirements(inputs)
try:
# 1. Forward pass: Change weights of layers according to selected rules.
output = self._compute_output_and_change_weights(
inputs, target, additional_forward_args
)
# 2. Forward pass + backward pass: Register hooks to configure relevance
# propagation and execute back-propagation.
self._register_forward_hooks()
normalized_relevances = self.gradient_func(
self._forward_fn_wrapper, inputs, target, additional_forward_args
)
relevances = tuple(
normalized_relevance
* output.reshape((-1,) + (1,) * (normalized_relevance.dim() - 1))
for normalized_relevance in normalized_relevances
)
finally:
self._restore_model()
undo_gradient_requirements(inputs, gradient_mask)
if return_convergence_delta:
return (
_format_output(is_inputs_tuple, relevances),
self.compute_convergence_delta(relevances, output),
)
else:
return _format_output(is_inputs_tuple, relevances) # type: ignore
def has_convergence_delta(self) -> bool:
return True
def compute_convergence_delta(
self, attributions: Union[Tensor, Tuple[Tensor, ...]], output: Tensor
) -> Tensor:
"""
Here, we use the completeness property of LRP: The relevance is conserved
during the propagation through the models' layers. Therefore, the difference
between the sum of attribution (relevance) values and model output is taken as
the convergence delta. It should be zero for functional attribution. However,
when rules with an epsilon value are used for stability reasons, relevance is
absorbed during propagation and the convergence delta is non-zero.
Args:
attributions (Tensor or tuple[Tensor, ...]): Attribution scores that
are precomputed by an attribution algorithm.
Attributions can be provided in form of a single tensor
or a tuple of those. It is assumed that attribution
tensor's dimension 0 corresponds to the number of
examples, and if multiple input tensors are provided,
the examples must be aligned appropriately.
output (Tensor): The output value with respect to which
the attribution values are computed. This value corresponds to
the target score of a classification model. The given tensor
should only have a single element.
Returns:
*Tensor*:
- **delta** Difference of relevance in output layer and input layer.
"""
if isinstance(attributions, tuple):
for attr in attributions:
summed_attr = cast(
Tensor, sum(_sum_rows(attr) for attr in attributions)
)
else:
summed_attr = _sum_rows(attributions)
return output.flatten() - summed_attr.flatten()
def _get_layers(self, model: Module) -> None:
for layer in model.children():
if len(list(layer.children())) == 0:
self.layers.append(layer)
else:
self._get_layers(layer)
def _check_and_attach_rules(self) -> None:
for layer in self.layers:
if hasattr(layer, "rule"):
layer.activations = {} # type: ignore
layer.rule.relevance_input = defaultdict(list) # type: ignore
layer.rule.relevance_output = {} # type: ignore
pass
elif type(layer) in SUPPORTED_LAYERS_WITH_RULES.keys():
layer.activations = {} # type: ignore
layer.rule = SUPPORTED_LAYERS_WITH_RULES[type(layer)]() # type: ignore
layer.rule.relevance_input = defaultdict(list) # type: ignore
layer.rule.relevance_output = {} # type: ignore
elif type(layer) in SUPPORTED_NON_LINEAR_LAYERS:
layer.rule = None # type: ignore
else:
raise TypeError(
(
f"Module of type {type(layer)} has no rule defined and no "
"default rule exists for this module type. Please, set a rule "
"explicitly for this module and assure that it is appropriate "
"for this type of layer."
)
)
def _check_rules(self) -> None:
for module in self.model.modules():
if hasattr(module, "rule"):
if (
not isinstance(module.rule, PropagationRule)
and module.rule is not None
):
raise TypeError(
(
f"Please select propagation rules inherited from class "
f"PropagationRule for module: {module}"
)
)
def _register_forward_hooks(self) -> None:
for layer in self.layers:
if type(layer) in SUPPORTED_NON_LINEAR_LAYERS:
backward_handles = _register_backward_hook(
layer, PropagationRule.backward_hook_activation, self
)
self.backward_handles.extend(backward_handles)
else:
forward_handle = layer.register_forward_hook(
layer.rule.forward_hook # type: ignore
)
self.forward_handles.append(forward_handle)
if self.verbose:
print(f"Applied {layer.rule} on layer {layer}")
def _register_weight_hooks(self) -> None:
for layer in self.layers:
if layer.rule is not None:
forward_handle = layer.register_forward_hook(
layer.rule.forward_hook_weights # type: ignore
)
self.forward_handles.append(forward_handle)
def _register_pre_hooks(self) -> None:
for layer in self.layers:
if layer.rule is not None:
forward_handle = layer.register_forward_pre_hook(
layer.rule.forward_pre_hook_activations # type: ignore
)
self.forward_handles.append(forward_handle)
def _compute_output_and_change_weights(
self,
inputs: Tuple[Tensor, ...],
target: TargetType,
additional_forward_args: Any,
) -> Tensor:
try:
self._register_weight_hooks()
output = _run_forward(self.model, inputs, target, additional_forward_args)
finally:
self._remove_forward_hooks()
# Register pre_hooks that pass the initial activations from before weight
# adjustments as inputs to the layers with adjusted weights. This procedure
# is important for graph generation in the 2nd forward pass.
self._register_pre_hooks()
return output
def _remove_forward_hooks(self) -> None:
for forward_handle in self.forward_handles:
forward_handle.remove()
def _remove_backward_hooks(self) -> None:
for backward_handle in self.backward_handles:
backward_handle.remove()
for layer in self.layers:
if hasattr(layer.rule, "_handle_input_hooks"):
for handle in layer.rule._handle_input_hooks: # type: ignore
handle.remove()
if hasattr(layer.rule, "_handle_output_hook"):
layer.rule._handle_output_hook.remove() # type: ignore
def _remove_rules(self) -> None:
for layer in self.layers:
if hasattr(layer, "rule"):
del layer.rule
def _clear_properties(self) -> None:
for layer in self.layers:
if hasattr(layer, "activation"):
del layer.activation
def _restore_state(self) -> None:
self.model.load_state_dict(self._original_state_dict) # type: ignore
def _restore_model(self) -> None:
self._restore_state()
self._remove_backward_hooks()
self._remove_forward_hooks()
self._remove_rules()
self._clear_properties()
def _forward_fn_wrapper(self, *inputs: Tensor) -> Tensor:
"""
Wraps a forward function with addition of zero as a workaround to
https://github.com/pytorch/pytorch/issues/35802 discussed in
https://github.com/pytorch/captum/issues/143#issuecomment-611750044
#TODO: Remove when bugs are fixed
"""
adjusted_inputs = tuple(
input + 0 if input is not None else input for input in inputs
)
return self.model(*adjusted_inputs)
SUPPORTED_LAYERS_WITH_RULES = {
nn.MaxPool1d: EpsilonRule,
nn.MaxPool2d: EpsilonRule,
nn.MaxPool3d: EpsilonRule,
nn.Conv2d: EpsilonRule,
nn.AvgPool2d: EpsilonRule,
nn.AdaptiveAvgPool2d: EpsilonRule,
nn.Linear: EpsilonRule,
nn.BatchNorm2d: EpsilonRule,
Addition_Module: EpsilonRule,
}
SUPPORTED_NON_LINEAR_LAYERS = [nn.ReLU, nn.Dropout, nn.Tanh]
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
82e414689a322c8ca38f282d715bc5d4643919dd
|
03437ed05cb3af69634d7c845d59cdeac479029a
|
/dashboard.py
|
1e5f1316706e7db975754ea684c19830f6b53142
|
[] |
no_license
|
theotheo/whooshhack
|
0ff4d9bc17a01ed2757b40a9e90ab8772822f45d
|
e5710eac8fea5bcbfa413496cba28fc520f9ceba
|
refs/heads/main
| 2023-06-25T03:58:43.568016
| 2021-07-18T16:31:48
| 2021-07-18T16:31:48
| 387,151,076
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
# %%
from keplergl import KeplerGl
map_ = KeplerGl(height=500)
map_
# %%
map_.add_data(data=gdf_lines, name="segments")
# %%
import geopandas as gpd
gpd.read_file('sample_route2.json')
# %%
points = gpd.read_file('points_sample.json')
map_.add_data(data=points, name='points')
# %%
map_
# %%
with open('kepler_config.json', 'w') as f:
f.write(str(map_.config)) # map_.config is a dict, so convert it to text before writing
|
[
"ibelyalov@yandex.ru"
] |
ibelyalov@yandex.ru
|
7b57a819c8050165d39822e26efb9a39926a2dc9
|
52b979c22057c06f12c7b84e4b9d3b52d3aa01db
|
/17340027-姚洁倩-p2/test.py
|
aceb539f2754eb560d2e5dc42a4a5c5ab8cbb443
|
[] |
no_license
|
EssieYiu/Python
|
c845866f3312ebe7927f4d19b55c5d119c2d850b
|
9f2e66882bfe1ddb8d98f213c5e227e0f3148f6e
|
refs/heads/master
| 2023-04-15T03:42:37.205622
| 2019-07-17T15:44:47
| 2019-07-17T15:44:47
| 180,075,367
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 123
|
py
|
def error():
raise ValueError
if __name__ == "__main__":
try:
error()
except:
raise ValueError
|
[
"31929622+EssieYiu@users.noreply.github.com"
] |
31929622+EssieYiu@users.noreply.github.com
|
35bfbf1eff018366c3caf857dbf59ab9b9487ae2
|
abdc7868bc1c788f86ce261e162181cce2c434ee
|
/app/src/auth/routes.py
|
8325a61a9ee4d06ff272e57932458f842f96a315
|
[] |
no_license
|
felipefujioka/shared-pool
|
0b0e6419cf2acd143c24e89ce0c7dd40cd863164
|
faa55462e7d45e42bdf30817212fe4bb6fb0a8a4
|
refs/heads/master
| 2021-07-09T14:52:15.416093
| 2017-10-11T03:24:39
| 2017-10-11T03:24:39
| 106,208,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,110
|
py
|
import flask
from main import app
from db.util import session
from auth.models.user import User
from auth.models.password_credential import PasswordCredential
import scrypt
import decorators.validate
import os
import errors
create_user_schema = {
"type": "object",
"required": [
'name',
'email',
'password',
'confirmation_password'
],
"additional_properties": False,
"properties": {
"email": {
"type": "string",
"pattern": "^(([^<>()\[\]\\.,;:\s@""]+(\.[^<>()\[\]\\.,;:\s@""]+)*)|(\.+))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$"
},
"name": {
"type": "string"
},
"password": {
"type": "string",
"minimum_length": 8
},
"confirmation_password": {
"type": "string",
"minimum_length": 8
}
}
}
def hash_password(password, maxtime=0.5, datalength=64):
return scrypt.encrypt(os.urandom(datalength), password, maxtime=maxtime)
@app.route('/users', methods=['GET'])
def list_users():
users = session.query(User).all()
return str(dict(zip(users.keys(), users)))
@app.route('/users', methods=['POST'])
@decorators.validate.validate_body(create_user_schema)
def create_user():
body = flask.request.parsed_body
if body.get('password') != body.get('confirmation_password'):
raise errors.UnprocessableEntityError('password and confirmation does not match')
try:
new_user = User(
name=body.get('name'),
email=body.get('email')
)
session.add(new_user)
session.flush() # make sure new_user is written so it can be queried back
user = session.query(User).filter(User.email == body.get('email')).first()
new_password = PasswordCredential(
user_id=user.id,
password_hash=hash_password(body.get('password'))
)
session.add(new_password)
session.commit()
except Exception as e:
session.rollback()
raise errors.UnprocessableEntityError('DB error: {}'.format(e))
return 'OK'
|
[
"felipefujioka@gmail.com"
] |
felipefujioka@gmail.com
|
2b7b1e3cfa9dbc03cc1d297534895c8a4362ab7a
|
3fa27b3ad1c1ca90f2bcf311d89fe8c2ca241cb4
|
/Location/models.py
|
86164283ca0e8eeb3356a4bbbb87c92cee61e3a0
|
[] |
no_license
|
emperorDuke/django-backend-for-ecommerce
|
717e15d7be899abcd5a4b7a7d2203c612f001aeb
|
83c1ca4d016d876a5c8711ac5cdc448d5a4a533d
|
refs/heads/master
| 2023-02-10T08:57:17.852721
| 2021-01-02T15:49:07
| 2021-01-02T15:49:07
| 271,039,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 869
|
py
|
from django.db import models
from django.utils.translation import gettext_lazy as _
from phonenumber_field.modelfields import PhoneNumberField
# Create your models here
class Location(models.Model):
address = models.CharField(_('address'), max_length=100, blank=False)
city = models.CharField(_('city'), max_length=50, blank=False)
country = models.CharField(_('country'), max_length=50, blank=False)
zip_code = models.CharField(_('zip code'), max_length=50, blank=True)
state = models.CharField(_('state'), max_length=50, blank=False)
phone_number = PhoneNumberField(_('Phone number'), blank=False)
added_at = models.DateField(auto_now=True)
class Meta:
unique_together = ('address', 'city', 'state')
db_table = 'location'
def __str__(self):
return '%s, %s, %s' % (self.city, self.state, self.country)
|
[
"effiomduke@gmail.com"
] |
effiomduke@gmail.com
|
1da1fd2e67831c1909851618b9f0eaf85642ad9e
|
3b451e8de359659a1648b95f86372549f29a193c
|
/super_z_avg_across_songs.py
|
b70e68647b04cb7f39162ec24336c5be45ffe3df
|
[] |
no_license
|
jamalw/music_event_structures_bucket
|
33dc0c5ffba46ab5a978a04268f88103148d2476
|
8ad5e9b2d767a3771cc618c061fbcb22f942cd51
|
refs/heads/master
| 2022-08-22T00:20:45.859935
| 2022-08-15T14:48:22
| 2022-08-15T14:48:22
| 126,224,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,313
|
py
|
import numpy as np
import sys
import os
import nibabel as nib
import glob
import scipy.stats as st
datadir = '/jukebox/norman/jamalw/MES/prototype/link/scripts/data/searchlight_output/HMM_searchlight_K_sweep_srm/'
nii_template = nib.load('/jukebox/norman/jamalw/MES/subjects/MES_022817_0/analysis/run1.feat/trans_filtered_func_data.nii')
# Collect searchlight files
k = ['20']
for i in range(len(k)):
fn = datadir + 'avg_z_k'+k[i]+'_across_songs.nii.gz'
global_outputs_all = nib.load(fn).get_data()
# Reshape data
z_scores_reshaped = np.nan_to_num(np.reshape(global_outputs_all,(91*109*91)))
# Mask data with nonzeros
mask = z_scores_reshaped != 0
z_scores_reshaped[mask] = st.zscore(z_scores_reshaped[mask])
#z_scores_reshaped[mask] = -np.log(st.norm.sf(z_scores_reshaped[mask]))
# Reshape data back to original shape
#neg_log_p_values = np.reshape(z_scores_reshaped,(91,109,91))
super_z_avg_z = np.reshape(z_scores_reshaped,(91,109,91))
# Plot and save searchlight results
maxval = np.max(super_z_avg_z)
minval = np.min(super_z_avg_z)
img = nib.Nifti1Image(super_z_avg_z, affine=nii_template.affine)
img.header['cal_min'] = minval
img.header['cal_max'] = maxval
nib.save(img,datadir + '/avg_superz_k'+k[i]+'_across_songs.nii.gz')
|
[
"jamalw@princeton.edu"
] |
jamalw@princeton.edu
|
db51449844ace1e60c6dbd69885b32550e1fbfec
|
67bb016404d1edbbe6e5c7e630d60131dac8fd54
|
/modules/system/modules.py
|
ee6c81e1ad3fa956e69768d42dcb4f74e79c4b60
|
[] |
no_license
|
switchswap/discord-senko
|
ad6a6def1a3fe70c3bae3616a33242aba4e9e307
|
b525cb7f9ffc90cffa75edb05673f81a39adc209
|
refs/heads/master
| 2022-01-06T22:15:19.617826
| 2019-04-26T16:54:12
| 2019-04-26T16:54:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,765
|
py
|
import traceback
from discord.ext import commands
from discord.ext.commands import ExtensionError
from core.util.globals import module_dir
class Modules(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.group(name='module')
async def module_group(self, ctx):
if ctx.invoked_subcommand is None:
await ctx.send('Invalid Module command.')
@module_group.command(name='load')
async def module_load(self, ctx, name: str):
try:
self.bot.load_extension(module_dir+"."+name)
await ctx.send(f'Loaded module {name}')
print(f'Loaded module {name}')
except ExtensionError:
await ctx.send(f'Failed to load module {name}')
print(f'Failed to load module {name}')
traceback.print_exc()
@module_group.command(name='unload')
async def module_unload(self, ctx, name: str):
try:
self.bot.unload_extension(module_dir+"."+name)
await ctx.send(f'Unloaded module {name}')
print(f'Unloaded module {name}')
except ExtensionError:
await ctx.send(f'Failed to unload module {name}')
print(f'Failed to unload module {name}')
traceback.print_exc()
@module_group.command(name='reload')
async def module_reload(self, ctx, name: str):
try:
self.bot.reload_extension(module_dir+"."+name)
await ctx.send(f'Reloaded module {name}')
print(f'Reloaded module {name}')
except ExtensionError:
await ctx.send(f'Failed to reload module {name}')
print(f'Failed to reload module {name}')
traceback.print_exc()
def setup(bot):
bot.add_cog(Modules(bot))
|
[
"Jannis.Becker@stud.hshl.de"
] |
Jannis.Becker@stud.hshl.de
|
5a4f5930ce92934a8286651c8650f298be3f3fb1
|
9c03b9188b711097fb40c2170626ac762b3d9aaf
|
/python3/mymodule_demo.py
|
dc80ee27f1d36287a6decc400a9009bbcfc1f352
|
[] |
no_license
|
andrewnezhivy/learn_python
|
9e92a826864b1876f5f24aff47b980b4ecc25c93
|
6f37d32469ed84681c05afaf34cade4eb411f3d8
|
refs/heads/master
| 2020-12-03T02:45:14.690632
| 2016-09-02T02:50:58
| 2016-09-02T02:50:58
| 66,676,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 80
|
py
|
import mymodule
mymodule.say_hi()
print('версия', mymodule.__version__)
|
[
"andrew.nezhivy@yandex.com"
] |
andrew.nezhivy@yandex.com
|
3203acaae91aa63994c898368f666ee31aab675b
|
d0d31ff386f0295075a786ad15c04c171fc979cb
|
/weeklyc/database.py
|
884b6e609d5898e2d8753b96e0244ea7ba22bcac
|
[] |
no_license
|
ewitz/EPA_Water_App
|
e5c2770b983f9e47ca837e5c61ec55be3657e447
|
8b644cefc70a03ddb5734692eda661b7dbe5271e
|
refs/heads/master
| 2020-05-26T02:30:26.424241
| 2013-06-07T19:23:11
| 2013-06-07T19:23:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,523
|
py
|
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine('sqlite:///weeklyc.db', echo=True)
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
from weeklyc.models import *
def init_db():
Base.metadata.create_all(engine)
def kill_db():
Base.metadata.drop_all(engine)
def setup_db():
from weeklyc.models import *
from weeklyc.views import bcrypt
""" User Creation
"""
dennis = User(login='skinner927',
password = bcrypt.generate_password_hash('bigboobs'))
db_session.add(dennis)
luke = User(login='rastii',
password = bcrypt.generate_password_hash('asdfqwer'))
db_session.add(luke)
db_session.commit()
""" Challenge Creation
"""
pwnme = Challenge(name="Pwnme",
link="/static/files/pwnme",
flag="butthurt")
banana = Challenge(name="Banana",
link="/static/files/banana",
flag="rotten")
db_session.add(pwnme)
db_session.add(banana)
db_session.commit()
""" Challenge Submissions
"""
dennis.submissions.append(pwnme)
dennis.submissions.append(banana)
luke.submissions.append(banana)
db_session.commit()
|
[
"emilliot@mail.usf.edu"
] |
emilliot@mail.usf.edu
|
504ef30ee0168bc45ae83697c4235349baea0e97
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03636/s442033431.py
|
c0c4bfde22c431f0bcb92965231ba4426799433b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 107
|
py
|
strings = list(input())
str_len = len(strings)
print(strings[0] + str(str_len - 2) + strings[str_len - 1])
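# Hedged worked example (added for illustration): for the input
# "internationalization" (length 20) the program prints "i18n".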
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
634479c49cc8bd8a070f9692c2ad94c1f8c9c7f0
|
2b71e4b33896bde83aa9daa3c56042505cb63220
|
/xeri.py
|
e6015eabaf3a920f04a261cc1d89382d8b259d24
|
[] |
no_license
|
xarkouts/xeri-se-python
|
9fea568ac4b1509c5db8ef94361ec6d1adf375f6
|
5c0720d571bb55cf1454e9026f070f02ac124d30
|
refs/heads/main
| 2023-05-08T02:31:30.286608
| 2021-05-23T21:51:05
| 2021-05-23T21:51:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,384
|
py
|
import random
katigories={"Σπαθι","Κουπα","Καρο","Μπαστουνι"}
fila={1,2,3,4,5,6,7,8,9,10,"Βαλες","Νταμα","Ριγας"}
trapoyla={(k,f) for k in katigories for f in fila}
del katigories
del fila
trapoyla=list(trapoyla)
xeri1=[]
xeri2=[]
trapezi=[]
paixtis=2
kerdismena_1=[]
kerdismena_2=[]
def mirasma(t=False):
for _ in range(1,7):
xarti=random.randrange(0,len(trapoyla))
xeri1.append(trapoyla.pop(xarti))
xarti=random.randrange(0,len(trapoyla))
xeri2.append(trapoyla.pop(xarti))
if t==True:
for _ in range(1,5):
xarti=random.randrange(len(trapoyla))
trapezi.append(trapoyla.pop(xarti))
def paixtes(paixtis):
if paixtis==2:
return 1
elif paixtis==1:
return 2
def paixnidi (paixtis):
print(f"Παιζει ο παιχτις {paixtis}")
print(trapezi)
print("-"*6)
if paixtis==1:
print(xeri1)
epilogi=input("Διαέξε να ριξεις ενα χαρτι στο τραπεζι απο το 0 μεχρι το "+str(len(xeri1)-1)+"\n")
while True:
if epilogi.isdigit():
epilogi=int(epilogi)
if epilogi>=0 and epilogi<=(len(xeri1)-1):
break
print("H επιλογη που επιλεξατε ηταν λαθασμενη")
epilogi=input("Διαέξε να ριξεις ενα χαρτι στο τραπεζι απο το 0 μεχρι το "+str(len(xeri1)-1)+"\n")
trapezi.insert(0,xeri1.pop(epilogi))
elif paixtis==2:
print(xeri2)
epilogi=input("Διαέξε να ριξεις ενα χαρτι στο τραπεζι απο το 0 μεχρι το "+str(len(xeri2)-1)+"\n")
while True:
if epilogi.isdigit():
epilogi=int(epilogi)
if epilogi>=0 and epilogi<=(len(xeri2)-1):
break
print("H επιλογη που επιλεξατε ηταν λαθασμενη")
epilogi=input("Διαέξε να ριξεις ενα χαρτι στο τραπεζι απο το 0 μεχρι το "+str(len(xeri2)-1)+"\n")
trapezi.insert(0,xeri2.pop(epilogi))
if len(xeri1)==0 and len(xeri2)==0:
return
def ta_perni(paixtis):
if len(trapezi)<2:
return
if paixtis==1:
if trapezi[0][1]==trapezi[1][1]:
for x1 in trapezi:
kerdismena_1.append(x1)
if len(trapezi)==2:
kerdismena_1.append("Ξερη με "+str(trapezi[0][1]))
trapezi.clear()
if len(kerdismena_1)>0:
print("Τα κερδιμσενα χαρτια του παιχτη 1 ειναι ",kerdismena_1)
elif trapezi[0][1]=="Βαλες":
for x1 in trapezi:
kerdismena_1.append(x1)
trapezi.clear()
if len(kerdismena_1)>0:
print("Τα κερδιμσενα χαρτια του παιχτη 1 ειναι ",kerdismena_1)
elif paixtis==2:
if trapezi[0][1]==trapezi[1][1]:
for x2 in trapezi:
kerdismena_2.append(x2)
if len(trapezi)==2:
kerdismena_2.append("Ξερη με "+str(trapezi[0][1]))
trapezi.clear()
if len(kerdismena_2)>0:
print("Τα κερδιμσενα χαρτια του παιχτη 2 ειναι ",kerdismena_2)
elif trapezi[0][1]=="Βαλες":
for x2 in trapezi:
kerdismena_2.append(x2)
trapezi.clear()
if len(kerdismena_2)>0:
print("Τα κερδιμσενα χαρτια του παιχτη 2 ειναι ",kerdismena_2)
def metrima_ponton(l,s):
xeres=[]
p=0
ponti_xeron=0
fiogoyres=0
kalo_10=0
kalo_2=0
dekargia=0
asoi=0
for g in l:
if type(g)==str:
xeres.append(g)
for d in xeres:
l.remove(d)
for xeri in xeres:
if xeri=="Ξερη με Βαλες":
ponti_xeron+=20
else:
ponti_xeron+=10
for i in l:
if i[1]=="Βαλες" or i[1]=="Νταμα" or i[1]=="Ριγας":
fiogoyres+=1
continue
if i[0]=="Καρο" and i[1]==10:
kalo_10=2
continue
elif i[0]=="Σπαθι" and i[1]==10 or i[0]=="Κουπα" and i[1]==10 or i[0]=="Μπαστουνι" and i[1]==10:
dekargia+=1
continue
elif i[0]=="Σπαθι" and i[1]==2:
kalo_2=1
continue
if i[1]==1:
asoi+=1
continue
xeres.clear()
for k in s:
if type(k)==str:
xeres.append(k)
for de in xeres:
s.remove(de)
xeres.clear()
p=ponti_xeron+fiogoyres+dekargia+asoi+kalo_2+kalo_10
if len(l)<len(s):
return p
elif len(l)==len(s):
return p
else:
return p+3
mirasma(True)
while True:
paixtis=paixtes(paixtis)
paixnidi(paixtis)
ta_perni(paixtis)
if len(xeri1)==0 and len(xeri2)==0:
mirasma()
print("Απομενου τοσα χαρτια στη τραπουλα",len(trapoyla))
if len(trapoyla)==0:
if paixtis==1:
if len(trapoyla)==0:
for x1 in trapezi:
kerdismena_1.append(x1)
trapezi.clear()
elif paixtis==2:
if len(trapoyla)==0:
for x1 in trapezi:
kerdismena_2.append(x1)
trapezi.clear()
print("το τραπεζει ειναι",trapezi)
break
pontoi_1=metrima_ponton(kerdismena_1,kerdismena_2)
pontoi_2=metrima_ponton(kerdismena_2,kerdismena_1)
if pontoi_1>pontoi_2:
print(f"nikise o paixtis 1 me pontoys {pontoi_1} enanti toy paiti 2 me poyntoys {pontoi_2}")
else:
print(f"nikise o paixtis 2 me pontoys {pontoi_2} enanti toy paiti 1 me poyntoys {pontoi_1}")
|
[
"noreply@github.com"
] |
xarkouts.noreply@github.com
|
e2a883be7a61493ac52b48a563af686087b2640a
|
9dba277eeb0d5e9d2ac75e2e17ab5b5eda100612
|
/19100101/sundyyang/d11_training1.py
|
1583902d982bb0769ab8ae4731fc7504566a7143
|
[] |
no_license
|
shen-huang/selfteaching-python-camp
|
e8410bfc06eca24ee2866c5d890fd063e9d4be89
|
459f90c9f09bd3a3df9e776fc64dfd64ac65f976
|
refs/heads/master
| 2022-05-02T05:39:08.932008
| 2022-03-17T07:56:30
| 2022-03-17T07:56:30
| 201,287,222
| 9
| 6
| null | 2019-08-08T15:34:26
| 2019-08-08T15:34:25
| null |
UTF-8
|
Python
| false
| false
| 875
|
py
|
# day 11
import requests
import getpass
import yagmail
from pyquery import PyQuery
from mymodule import stats_word
# Extract the body text of the WeChat official-account article
response = requests.get('https://mp.weixin.qq.com/s/pLmuGoc4bZrMNl7MSoWgiA')
document = PyQuery(response.text)
content = document('#js_content').text()
# Use the stats_word module to extract the 100 most common words
day11 = stats_word.stats_text_cn(content)
content = day11.most_common(100)
day11_1 = str(day11)
# print(day11_1)
# Mailbox settings
user = input('请输入你的邮箱:') # sender's email account
password = getpass.getpass('请输入发件人邮箱密码(可复制粘贴):') # SMTP authorization code of the sender's mailbox
recipient = input('请输入收件人邮箱:')
smtp = "smtp.163.com" # SMTP server address
# print(user,password,recipient) # sanity check
# Send the email
yag = yagmail.SMTP(user,password,smtp)
yag.send(recipient,'19100101 sundyyang',day11_1)
|
[
"6396023+realcaiying@users.noreply.github.com"
] |
6396023+realcaiying@users.noreply.github.com
|
093ee2463b1a86d455d7fd106f214c73722c2ee1
|
503d2f8f5f5f547acb82f7299d86886691966ca5
|
/atcoder/abc172_a.py
|
b5c6641365ed028613ce1898499a4b6fd7f432d3
|
[] |
no_license
|
Hironobu-Kawaguchi/atcoder
|
3fcb649cb920dd837a1ced6713bbb939ecc090a9
|
df4b55cc7d557bf61607ffde8bda8655cf129017
|
refs/heads/master
| 2023-08-21T14:13:13.856604
| 2023-08-12T14:53:03
| 2023-08-12T14:53:03
| 197,216,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
# https://atcoder.jp/contests/abc172/tasks/abc172_a
# import sys
# def input(): return sys.stdin.readline().rstrip()
# input = sys.stdin.readline
# input = sys.stdin.buffer.readline
# from numba import njit
# from functools import lru_cache
# sys.setrecursionlimit(10 ** 7)
# @njit('(i8,i8[::1],i4[::1])', cache=True)
# @njit(cache=True)
# def main():
# # @lru_cache(None)
# # def dfs():
# # return
# A, B = map(int, input().split())
# print(A*B)
# return
# main()
a = int(input())
ans = a + a*a + a*a*a
print(ans)
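# Hedged worked example (added for illustration): for a = 2 the answer is
# 2 + 4 + 8 = 14; for a = 10 it is 10 + 100 + 1000 = 1110.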
# S = input()
# n = int(input())
# N, K = map(int, input().split())
# l = list(map(int, (input().split())))
# A = [[int(i) for i in input().split()] for _ in range(N)]
|
[
"hironobukawaguchi3@gmail.com"
] |
hironobukawaguchi3@gmail.com
|
a5dfe239bc5a69803127534a8d77267c8a7e6705
|
2307893a2b706d3e3e387cb5f20117213460fc21
|
/Exercises/Module 05/exercise_33.py
|
c894365e2e769d909613264428b94517d486702a
|
[] |
no_license
|
caiquemarinho/python-course
|
a5e3447b676365e5f49297a51830ce5fee089199
|
2863f77dff77712422d0f60adf00483806fcc040
|
refs/heads/master
| 2023-06-09T10:53:44.988594
| 2021-06-21T13:28:04
| 2021-06-21T13:28:04
| 378,922,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 802
|
py
|
"""
Receive a product and change its price according to the table.
0 - 50$ -> 5%
50 - 100 -> 10%
100 + ->15%
"""
print('Insert the product price')
product = float(input())
# Finding out the new price of the product.
if product < 50:
new_price = product+(product*0.05)
elif (product >= 50) and (product <= 100):
new_price = product+(product*0.10)
else:
new_price = product+(product*0.15)
print(new_price)
# Message to the user.
if new_price <= 80:
print(f'Cheap! The new price is {new_price}')
elif (new_price > 80) and (new_price <= 120):
print(f'Decent price! The new price is {new_price}')
elif (new_price > 120) and (new_price <= 200):
print(f'Expensive! The new price is {new_price}')
elif new_price > 200:
print(f'Very Expensive! The new price is {new_price}')
|
[
"caiquealfonso@gmail.com"
] |
caiquealfonso@gmail.com
|
06f0db0e55ef80709480a68ce7ca0c408e0efc04
|
c73f1e73c937e556c96b072d99019b7af94d113f
|
/P1/UDPServer.py
|
6d9dab5ba29a78b3bdd7b6de3e92862e6c909a09
|
[] |
no_license
|
sicsic1997/Retele-Tema2
|
33c24c0e715a2374ce0442048a006e1038bb9f08
|
21b2f912a78ddfb56b4e7240ac10af7fe12dbe46
|
refs/heads/master
| 2020-03-18T20:20:40.841762
| 2018-05-28T21:12:10
| 2018-05-28T21:12:10
| 135,209,115
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,055
|
py
|
from socket import *
import pickle
import hashlib
import sys
import os
import math
import time
serverIP="0.0.0.0"
serverPort=10000
serverAddress=(serverIP, serverPort)
serverSocket=socket(AF_INET,SOCK_DGRAM)
serverSocket.bind(serverAddress)
serverSocket.settimeout(3)
print "Ready to serve"
#initializes packet variables
expectedseqnum=1
ACK=1
ack = []
#RECEIVES DATA
f = open("output", "wb")
endoffile = False
lastpktreceived = time.time()
starttime = time.time()
while True:
try:
rcvpkt=[]
packet,clientAddress= serverSocket.recvfrom(4096)
rcvpkt = pickle.loads(packet)
# check value of checksum received (c) against checksum calculated (h) - NOT CORRUPT
c = rcvpkt[-1]
del rcvpkt[-1]
h = hashlib.md5()
h.update(pickle.dumps(rcvpkt))
if c == h.digest():
# check value of expected seq number against seq number received - IN ORDER
if(rcvpkt[0]==expectedseqnum):
print "Received inorder", expectedseqnum
#print "Data:", rcvpkt[1]
if rcvpkt[1]:
#f.write(rcvpkt[1])
print "Success for: ", rcvpkt[1]
else:
endoffile = True
#print "Interm1"
expectedseqnum = expectedseqnum + 1
# create ACK (seqnum,checksum)
sndpkt = []
sndpkt.append(expectedseqnum)
#print "Interm2"
h = hashlib.md5()
h.update(pickle.dumps(sndpkt))
sndpkt.append(h.digest())
#print "Preparing to send ACK"
serverSocket.sendto(pickle.dumps(sndpkt), clientAddress)
print "New Ack", expectedseqnum
else:
# default? discard packet and resend ACK for most recently received inorder pkt
print "Received out of order", rcvpkt[0]
sndpkt = []
sndpkt.append(expectedseqnum)
h = hashlib.md5()
h.update(pickle.dumps(sndpkt))
sndpkt.append(h.digest())
serverSocket.sendto(pickle.dumps(sndpkt), clientAddress)
print "Ack", expectedseqnum
else:
print "error detected"
except:
if endoffile:
if(time.time()-lastpktreceived>3):
break
endtime = time.time()
f.close()
print 'FILE TRANSFER SUCCESSFUL'
print "TIME TAKEN " , str(endtime - starttime)
|
[
"vlad.coteanu@mindit.ro"
] |
vlad.coteanu@mindit.ro
|
1c7014d54f9884059212afadf8db1114596f9b5d
|
5d43552534b89c81e57e0c04c59af5c94b223d71
|
/studentinfoproject/testapp/migrations/0001_initial.py
|
62296d817476b2000e9135829959dd80ebfaf74a
|
[] |
no_license
|
Prachithakur27/Django
|
d9fee9ac4647c75e0a22f2dd7256e73d9591fc2f
|
7e1d7cd8f8a65d0ba711ef1937538749497b4231
|
refs/heads/master
| 2023-01-02T07:49:35.084678
| 2020-10-31T15:08:18
| 2020-10-31T15:08:18
| 286,936,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
# Generated by Django 3.0.8 on 2020-08-15 20:06
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rollno', models.IntegerField()),
('name', models.CharField(max_length=30)),
('dob', models.DateField()),
('marks', models.IntegerField()),
('email', models.EmailField(max_length=254)),
('phonenumber', models.IntegerField()),
('address', models.TextField()),
],
),
]
|
[
"prachi.pthakur27@gmail.com"
] |
prachi.pthakur27@gmail.com
|
bd453b1be551ac68730e7f3d719b9c710a458ed0
|
a5a49c0dadc7cf42800c7004cb2324416aed671c
|
/fakecsv/urls.py
|
1e73f8da05cb7a77d32ace2bcbadc0085295da12
|
[] |
no_license
|
imanov21/csv_gen
|
6064f778df9d40b942a5c1c07868fd3312d554af
|
23ee39340606954c50f64745b33ffb9efaa69a65
|
refs/heads/master
| 2023-05-28T18:05:26.021467
| 2021-06-10T14:22:23
| 2021-06-10T14:22:23
| 375,718,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 958
|
py
|
from django.urls import path
from fakecsv.views import DataSchemasListView, \
DataSchemaDeleteView, DataSchemaCreateView, DataSchemaUpdateView, \
data_sets_view, generate_csv, download_csv, check_task_status
urlpatterns = [
path('', DataSchemasListView.as_view(),
name='data_schema_list'),
path('<int:pk>/data_sets/generate_csv/', generate_csv,
name='generate_csv'),
path('<int:pk>/data_sets/download_csv/<int:id>/', download_csv,
name='download_csv'),
path('delete/<int:pk>/', DataSchemaDeleteView.as_view(),
name='delete_data_schema'),
path('create/', DataSchemaCreateView.as_view(),
name='create'),
path('update/<int:pk>/', DataSchemaUpdateView.as_view(),
name='update_data_schema'),
path('<int:pk>/data_sets/', data_sets_view,
name='data_sets_list'),
path('check_task_status/<str:task_id>/', check_task_status,
name='check_task_status'),
]
|
[
"antony.imanov@gmail.com"
] |
antony.imanov@gmail.com
|
7086db00aaf5f716ceea92538aadd40b33777e8e
|
d6e481f8dea60a0814888b47f540be375771b758
|
/train.py
|
3e7d7744033e0a558dba13ac2991ab04125d344e
|
[] |
no_license
|
Gogs2/Depression-Detection-in-Reddit-Posts
|
2d6185d65ce10d67cb321fb8d6aadf88a4ee6711
|
6570a776160bbbbf257c1efbc95b706663a3991a
|
refs/heads/master
| 2020-08-07T13:27:51.150881
| 2019-10-07T19:24:33
| 2019-10-07T19:24:33
| 213,468,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,239
|
py
|
import pandas as pd
import collections
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics import recall_score, precision_score, accuracy_score, f1_score
from sklearn.utils import class_weight
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
# Function that prints metrics for given predictions and true values
def printMetrics(y_true,y_pred):
print("Accuracy: ",accuracy_score(y_true,y_pred))
print("Recall: ",recall_score(y_true,y_pred))
print("Precision score: ", precision_score(y_true,y_pred))
print("F1 Score: ",f1_score(y_true,y_pred))
# Load the data
data_train = pd.read_csv('train_clean.csv')
data_test = pd.read_csv('test_clean.csv')
# Transform into tf-idf vectors
tfidf = TfidfVectorizer( analyzer='word', max_features=40000)
X_train = tfidf.fit_transform(data_train['Text']).astype('float16')
X_test = tfidf.transform(data_test['Text']).astype('float16')
# Transform the targets from dataframe to list
Y_train = data_train['Target'].tolist()
Y_test = data_test['Target'].tolist()
def build_model():
model = Sequential()
model.add(Dense(256, input_dim=40000, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(3000, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(2600, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(2200, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(900, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
return model
# Add class weigths to the model since we are dealing with an unbalanced dataset
class_weights = class_weight.compute_class_weight('balanced', np.unique(Y_train), Y_train)
print("Class weights:")
print(class_weights)
# Fit and predict
estimator = KerasClassifier(build_fn=build_model, epochs=30,batch_size=32)
estimator.fit(X_train,Y_train, class_weight=class_weights)
Y_pred = estimator.predict(X_test)
printMetrics(Y_test, Y_pred)
|
[
"gjorgji.chepujnoski@gmail.com"
] |
gjorgji.chepujnoski@gmail.com
|
4a13df491ffd209b82623d757ca0aafda56fb96b
|
02fc29f7c9f4fd6cbe3a6afbbef1554fb0e7a3ca
|
/genienlp/model_utils/optimizer.py
|
123188628fbaed151cbeb5440050586a280ffac2
|
[
"BSD-3-Clause"
] |
permissive
|
stanford-oval/genienlp
|
8f8f14924dc379e413de4e37ae206ed0a391e8cc
|
8ad64f6d20f45b31c8d1de262570ecc840316c58
|
refs/heads/master
| 2023-07-14T01:59:30.072402
| 2023-04-04T18:19:57
| 2023-04-04T18:19:57
| 237,029,113
| 82
| 24
|
NOASSERTION
| 2023-06-26T13:57:29
| 2020-01-29T16:26:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,349
|
py
|
import math
from functools import partial
import torch
from transformers import (
Adafactor,
AdamW,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def get_transformer_learning_rate(i, *, dimension, warmup):
i += 1
return 1.0 / math.sqrt(dimension) * min(1 / math.sqrt(i), i / (warmup * math.sqrt(warmup)))
def get_sgd_learning_rate(i, *, warmup):
i += 1
return min(math.sqrt(warmup) / math.sqrt(i), i / warmup)
def init_opt(args, model, logger):
num_training_steps = sum(args.train_iterations) // args.gradient_accumulation_steps
if args.optimizer == 'adam':
# Adam with transformer schedule has a different set of default hyperparameters:
if args.lr_schedule == 'transformer':
opt = torch.optim.Adam(
model.params, lr=args.lr_multiply, betas=(0.9, 0.98), eps=1e-9, weight_decay=args.weight_decay
)
else:
opt = torch.optim.Adam(
model.params, lr=args.lr_multiply, betas=(args.beta0, 0.999), weight_decay=args.weight_decay
)
elif args.optimizer == 'adamw':
opt = AdamW(model.params, lr=args.lr_multiply, weight_decay=args.weight_decay)
elif args.optimizer == 'adafactor':
opt = Adafactor(
model.params, lr=args.lr_multiply, weight_decay=args.weight_decay, relative_step=False, scale_parameter=False
)
elif args.optimizer == 'radam':
import radam
if args.warmup > 1:
logger.warning('With RAdam optimizer, warmup is never applied')
opt = radam.RAdam(model.params, lr=args.lr_multiply, betas=(args.beta0, 0.999), weight_decay=args.weight_decay)
elif args.optimizer == 'sgd':
opt = torch.optim.SGD(model.params, lr=args.lr_multiply, weight_decay=args.weight_decay)
else:
raise ValueError('Invalid optimizer.')
if args.lr_schedule == 'transformer':
lr_lambda = partial(get_transformer_learning_rate, dimension=args.dimension, warmup=args.warmup)
scheduler = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda)
elif args.lr_schedule == 'constant':
scheduler = get_constant_schedule_with_warmup(opt, num_warmup_steps=args.warmup)
elif args.lr_schedule == 'linear':
scheduler = get_linear_schedule_with_warmup(
opt,
num_training_steps=num_training_steps,
num_warmup_steps=args.warmup,
)
elif args.lr_schedule == 'polynomial':
scheduler = get_polynomial_decay_schedule_with_warmup(
opt,
num_training_steps=num_training_steps,
num_warmup_steps=args.warmup,
lr_end=args.lr_poly_end,
power=args.lr_poly_power,
)
elif args.lr_schedule == 'cosine':
scheduler = get_cosine_schedule_with_warmup(
opt,
num_training_steps=num_training_steps,
num_warmup_steps=args.warmup,
num_cycles=0.5,
)
elif args.lr_schedule == 'sgd':
lr_lambda = partial(get_sgd_learning_rate, warmup=args.warmup)
scheduler = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda)
else:
raise ValueError('Invalid learning rate scheduler.')
return opt, scheduler
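# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original file).
# The namespace attributes mirror the `args` fields read above; the tiny model
# and all values are made up for the example.
# ---------------------------------------------------------------------------
# from types import SimpleNamespace
# toy = torch.nn.Linear(4, 2)
# toy.params = list(toy.parameters())  # init_opt reads model.params
# args = SimpleNamespace(train_iterations=[1000], gradient_accumulation_steps=1,
#                        optimizer='adamw', lr_schedule='linear', lr_multiply=1e-4,
#                        weight_decay=0.0, beta0=0.9, warmup=100)
# opt, scheduler = init_opt(args, toy, logger=None)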
|
[
"mehrad@stanford.edu"
] |
mehrad@stanford.edu
|
85075fe45559a79929a08d27a2d5af734d36dc76
|
abd70ecf747decdd24077b544657642f6b07cdee
|
/method/utils/adjustAccounting.py
|
f5963fee92b58c259c3476bde88a3eca5c59a669
|
[] |
no_license
|
mokusen/chms
|
3925ffeb52d7f3b314682a80ede0c1d9d7e951d5
|
60144b224b82464d1c7014b9bfcb6502e94b47d1
|
refs/heads/master
| 2020-04-09T02:30:52.792981
| 2019-01-09T15:05:45
| 2019-01-09T15:05:45
| 159,943,760
| 0
| 0
| null | 2018-12-29T04:49:10
| 2018-12-01T12:46:49
|
Python
|
UTF-8
|
Python
| false
| false
| 570
|
py
|
def adjust_accounting(adjust_list):
"""
Convert the values to the proper types before handing the row to the SQL layer.
Parameters
----------
adjust_list : list
[id, use, money, year, month, day]
Returns
-------
adjust_list : list
[id(int), use(str), money(int), year(int), month(int), day(int)]
"""
adjust_list[0] = int(adjust_list[0])
adjust_list[2] = int(adjust_list[2])
adjust_list[3] = int(adjust_list[3])
adjust_list[4] = int(adjust_list[4])
adjust_list[5] = int(adjust_list[5])
return adjust_list
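# Minimal usage sketch (illustrative values only): numeric fields come back as int,
# the use/description field stays a string.
if __name__ == '__main__':
    print(adjust_accounting(['7', 'groceries', '1200', '2019', '1', '9']))
    # -> [7, 'groceries', 1200, 2019, 1, 9]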
|
[
"kikawa.tech@gmail.com"
] |
kikawa.tech@gmail.com
|
70020790ef3e5c40c205d85214c30921aa11259a
|
d0f78233d2436c7976256171696af005ce085118
|
/blogs/api_urls.py
|
46e09ada4938bbe11ce755bc0f6cd5b136e8b913
|
[] |
no_license
|
mvelezserrano/wordplease
|
487cfccea864b6f4d3cd14b20d1c86760748ed5f
|
37c7507082d7df14c256a73f5124561dcc16c8fc
|
refs/heads/master
| 2021-01-16T18:44:25.180055
| 2015-07-26T20:41:01
| 2015-07-26T20:41:01
| 39,739,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
# -*- coding: utf-8 -*-
from blogs.api import PostViewSet, CreatePostViewSet, BlogViewSet
from django.conf.urls import include, url
from rest_framework.routers import SimpleRouter
# API router (no trailing slashes)
router = SimpleRouter(trailing_slash=False)
router.register(r'blogs/(?P<user>[A-Za-z0-9]+)', PostViewSet)
router.register(r'blogs/new-post', CreatePostViewSet)  # for creating posts
router.register(r'blogs', BlogViewSet)
urlpatterns = [
# API URLs
url(r'1.0/', include(router.urls)),
]
|
[
"mvelezserrano@outlook.es"
] |
mvelezserrano@outlook.es
|
63dacef7d72dc5d5511e9f17298d93a569363f34
|
3e3444911c9771186034a2ee68f3f3eae6dfdd4d
|
/binarySearch.py
|
b30f5bf6dbf0cb7ffaff7b71e21c653efc5e1f9b
|
[] |
no_license
|
ChristopherParke/FunWithAlgorithms
|
d3ab9ad59b40d5ab7901cbeb408beb840453496a
|
44b09c8e8a9034b2662df181211c86c483d48834
|
refs/heads/master
| 2022-02-14T06:54:08.204590
| 2019-09-04T00:09:39
| 2019-09-04T00:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 843
|
py
|
import math
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]
xmin = 0
xmax = len(primes) - 1
target = 67
iteration = 0
def binarySearch(primes, xmin, xmax, target, iteration=0):
    """
    Binary search: a common algorithm for finding a target in a list
    that is already sorted. Halves the search window on every pass.
    """
    while xmin <= xmax:
        iteration = iteration + 1
        arrVal = math.floor((xmin + xmax) / 2)
        if primes[arrVal] < target:
            xmin = arrVal + 1
        elif primes[arrVal] > target:
            xmax = arrVal - 1
        else:
            print("I found it in " + str(iteration) + " tries. It's value " + str(primes[arrVal]) + " at index " + str(arrVal))
            return arrVal
    print("Target " + str(target) + " is not in the list.")
    return -1
binarySearch(primes, xmin, xmax, target, iteration)
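# A second, illustrative call: 68 is not in the list, so the rewritten search
# reports a miss instead of finishing silently.
binarySearch(primes, 0, len(primes) - 1, 68)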
|
[
"christopherparke@zoho.com"
] |
christopherparke@zoho.com
|
7135c0afb6b60fbba69ff48e0365b1ccc6c5dcdd
|
a21d9303e155fdb9845d67f0a888b70a9cd95d51
|
/api/serializer.py
|
943d43f63eeef0a362803fb204eca85f09acf12c
|
[] |
no_license
|
saisai/Implementation_logger
|
dafa188507b64a3776b9228d37c22ae20a5a87f2
|
373f233cebf64a8c7dba8a0d75812f6480fbe61a
|
refs/heads/master
| 2020-08-05T22:52:52.295716
| 2019-10-04T05:31:06
| 2019-10-04T05:31:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 661
|
py
|
from rest_framework import serializers
from api.models import *
class groupSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = group
fields = ('groupName', 'config')
class devicesSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = devices
fields = ('uuid', 'group', 'hashes')
class configurationSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = config
fields = ('name', 'config')
class zipFilesSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = zipFiles
fields = ('zipfile', 'device')
|
[
"jdejonge@protonmail.ch"
] |
jdejonge@protonmail.ch
|
63f0d8e6153c3ec5586eebed9a64301c1e43753b
|
2985230e6542361c1a63ff4bea0283951f256599
|
/setup.py
|
f7fed0d0b923b39819d1a0339aa179b360100e97
|
[] |
no_license
|
Stonesth/TOS-3087
|
7053c486e87e91d35bd1f15748d3e5e23348f742
|
8c4fd5c3d5f45751f77eba67d6aac1f21c3f711a
|
refs/heads/main
| 2023-02-26T11:13:08.338497
| 2021-02-01T15:35:48
| 2021-02-01T15:35:48
| 334,964,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
from cx_Freeze import setup, Executable
setup(
name = "tos-3087",
version = "0.1",
description = "",
executables = [Executable("tos-3087.py")]
)
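# Typical cx_Freeze workflow (usage note): running `python setup.py build`
# places the frozen executable under the build/ directory.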
|
[
"pierre.thonon@gmail.com"
] |
pierre.thonon@gmail.com
|
e8bb363682e5f8e63ac0e56a986076f1e43f91b6
|
7722c0c2d616af965124120c76bed472900d48bf
|
/build/pi_io/cmake/pi_io-genmsg-context.py
|
093469fa89dd16ff069e8425074390da5c5219da
|
[] |
no_license
|
EdisonAltamirano/Turtlebot_Mapping
|
f1cfb190b5a239d0e0bb4932e766cce1ec6c42ba
|
cfb98058745e5fbf84b2388254dbad2d045362ef
|
refs/heads/master
| 2022-11-13T11:48:27.915328
| 2020-07-06T00:02:45
| 2020-07-06T00:02:45
| 276,209,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/edison/turtle_ws1/src/pi_io/msg/gpio_input.msg"
services_str = "/home/edison/turtle_ws1/src/pi_io/srv/gpio_output.srv"
pkg_name = "pi_io"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "pi_io;/home/edison/turtle_ws1/src/pi_io/msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
[
"a00825234@itesm.mx"
] |
a00825234@itesm.mx
|
44978164b8c3e675e86d3cda878b105b9f15fec3
|
5edf9d401178bbf8839119904a8998f8bc676293
|
/minesweeper/minesweeper.py
|
6e8c52045ff76ecbe7899a756cdb841842017f5d
|
[] |
no_license
|
cheikh93/MES_PROJET_IA
|
33a22c8c4223340e89811cc12c8b4e7675ec99aa
|
ae230402a1469adfda3dcdd3faf1181795261b4e
|
refs/heads/main
| 2023-06-05T06:16:13.820961
| 2021-06-29T00:03:31
| 2021-06-29T00:03:31
| 348,394,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,505
|
py
|
import itertools
import random
class Minesweeper():
"""
Minesweeper game representation
"""
def __init__(self, height=8, width=8, mines=8):
# Set initial width, height, and number of mines
self.height = height
self.width = width
self.mines = set()
# Initialize an empty field with no mines
self.board = []
for i in range(self.height):
row = []
for j in range(self.width):
row.append(False)
self.board.append(row)
# Add mines randomly
while len(self.mines) != mines:
i = random.randrange(height)
j = random.randrange(width)
if not self.board[i][j]:
self.mines.add((i, j))
self.board[i][j] = True
# At first, player has found no mines
self.mines_found = set()
def print(self):
"""
Prints a text-based representation
of where mines are located.
"""
for i in range(self.height):
print("--" * self.width + "-")
for j in range(self.width):
if self.board[i][j]:
print("|X", end="")
else:
print("| ", end="")
print("|")
print("--" * self.width + "-")
def is_mine(self, cell):
i, j = cell
return self.board[i][j]
def nearby_mines(self, cell):
"""
Returns the number of mines that are
within one row and column of a given cell,
not including the cell itself.
"""
# Keep count of nearby mines
count = 0
# Loop over all cells within one row and column
for i in range(cell[0] - 1, cell[0] + 2):
for j in range(cell[1] - 1, cell[1] + 2):
# Ignore the cell itself
if (i, j) == cell:
continue
# Update count if cell in bounds and is mine
if 0 <= i < self.height and 0 <= j < self.width:
if self.board[i][j]:
count += 1
return count
def won(self):
"""
Checks if all mines have been flagged.
"""
return self.mines_found == self.mines
class Sentence():
"""
Logical statement about a Minesweeper game
A sentence consists of a set of board cells,
and a count of the number of those cells which are mines.
"""
def __init__(self, cells, count):
self.cells = set(cells)
self.count = count
def __eq__(self, other):
return self.cells == other.cells and self.count == other.count
def __str__(self):
return f"{self.cells} = {self.count}"
def known_mines(self):
"""
Returns the set of all cells in self.cells known to be mines.
"""
#raise NotImplementedError
if len(self.cells) == self.count:
if len(self.cells) != 0:
return self.cells
return None
def known_safes(self):
"""
Returns the set of all cells in self.cells known to be safe.
"""
#raise NotImplementedError
if self.count == 0:
if len(self.cells) != 0:
return self.cells
return None
    def mark_mine(self, cell):
        """
        Updates internal knowledge representation given the fact that
        a cell is known to be a mine.
        """
        # Remove the mine from the sentence and lower the mine count accordingly.
        if cell in self.cells:
            self.cells.remove(cell)
            self.count -= 1
    def mark_safe(self, cell):
        """
        Updates internal knowledge representation given the fact that
        a cell is known to be safe.
        """
        # A safe cell is simply dropped; the mine count stays unchanged.
        if cell in self.cells:
            self.cells.remove(cell)
class MinesweeperAI():
"""
Minesweeper game player
"""
def __init__(self, height=8, width=8):
# Set initial height and width
self.height = height
self.width = width
# Keep track of which cells have been clicked on
self.moves_made = set()
# Keep track of cells known to be safe or mines
self.mines = set()
self.safes = set()
# List of sentences about the game known to be true
self.knowledge = []
def mark_mine(self, cell):
"""
Marks a cell as a mine, and updates all knowledge
to mark that cell as a mine as well.
"""
self.mines.add(cell)
for sentence in self.knowledge:
sentence.mark_mine(cell)
def mark_safe(self, cell):
"""
Marks a cell as safe, and updates all knowledge
to mark that cell as safe as well.
"""
self.safes.add(cell)
for sentence in self.knowledge:
sentence.mark_safe(cell)
def add_knowledge(self, cell, count):
"""
Called when the Minesweeper board tells us, for a given
safe cell, how many neighboring cells have mines in them.
This function should:
1) mark the cell as a move that has been made
2) mark the cell as safe
3) add a new sentence to the AI's knowledge base
based on the value of `cell` and `count`
4) mark any additional cells as safe or as mines
if it can be concluded based on the AI's knowledge base
5) add any new sentences to the AI's knowledge base
if they can be inferred from existing knowledge
"""
#raise NotImplementedError
#1
self.moves_made.add(cell)
#2
self.mark_safe(cell)
#3
cells = []
for i in range(cell[0] - 1, cell[0] + 2):
for j in range(cell[1] - 1, cell[1] + 2):
# Ignore the cell itself
if (i, j) == cell:
continue
# Update count if cell in bounds and is mine
if 0 <= i < self.height and 0 <= j < self.width:
                    # cells already accounted for need no further consideration
if (i, j) in self.mines:
count -= 1
elif (i, j) not in self.safes:
cells.append((i, j))
self.knowledge.append(Sentence(cells, count))
#4
for sentence in self.knowledge:
safe = sentence.known_safes()
mine = sentence.known_mines()
if safe != None:
self.safes = self.safes.union(safe)
if mine != None:
self.mines = self.mines.union(mine)
#5
n = len(self.knowledge)
for i in range(n):
for j in range(i + 1, n):
s1 = self.knowledge[i]
s2 = self.knowledge[j]
if s1.cells.issubset(s2.cells):
cells = s2.cells - s1.cells
s = Sentence(s2.cells - s1.cells, s2.count - s1.count)
if s not in self.knowledge:
self.knowledge.append(s)
elif s2.cells.issubset(s1.cells):
cells = s1.cells - s2.cells
s = Sentence(s1.cells - s2.cells, s1.count - s2.count)
if s not in self.knowledge:
self.knowledge.append(s)
def make_safe_move(self):
"""
Returns a safe cell to choose on the Minesweeper board.
The move must be known to be safe, and not already a move
that has been made.
This function may use the knowledge in self.mines, self.safes
and self.moves_made, but should not modify any of those values.
"""
#raise NotImplementedError
for cell in self.safes:
if cell not in self.moves_made:
return cell
return None
def make_random_move(self):
"""
Returns a move to make on the Minesweeper board.
Should choose randomly among cells that:
1) have not already been chosen, and
2) are not known to be mines
"""
#raise NotImplementedError
for i in range(self.height):
for j in range(self.width):
if (i, j) not in self.moves_made and (i, j) not in self.mines:
return (i, j)
return None
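# Minimal headless game-loop sketch (not part of the original assignment code):
# the AI plays a known-safe move when it has one, otherwise a fallback move.
if __name__ == '__main__':
    game = Minesweeper(height=8, width=8, mines=8)
    ai = MinesweeperAI(height=8, width=8)
    for _ in range(20):
        move = ai.make_safe_move() or ai.make_random_move()
        if move is None:
            break
        if game.is_mine(move):
            print("Hit a mine at", move)
            break
        ai.add_knowledge(move, game.nearby_mines(move))
    print("Moves made:", sorted(ai.moves_made))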
|
[
"noreply@github.com"
] |
cheikh93.noreply@github.com
|
c7c31a8ca0369dd548fae8e2fb9f805dc9644a40
|
45ba180dd7069a34aea8f64090c09582cfe88f81
|
/build/universal_robot/ur3_e_moveit_config/catkin_generated/pkg.installspace.context.pc.py
|
5dd389a4fa0bacbccb19bcb7da5760746da17ed6
|
[] |
no_license
|
ylliu/aubo_robot
|
a1e40977c57e77a3c6a4d1e0de40cecc5c56280d
|
fec6eee61063b0655f741ef4437c70c16aaa052b
|
refs/heads/main
| 2023-03-28T08:46:04.851614
| 2021-03-28T10:33:44
| 2021-03-28T10:33:44
| 349,738,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ur3_e_moveit_config"
PROJECT_SPACE_DIR = "/home/ylliu/catkin_ws/install"
PROJECT_VERSION = "1.2.7"
|
[
"1065656821@qq.com"
] |
1065656821@qq.com
|
3a332e60dcd0d31aa40dd5f4b190704f255a6135
|
9d484077026b7fcf26188d77281f573eaec1f1d3
|
/scripts/adhoc/dendrogram_cuts.py
|
fef1177f5d92df2b34a79f14c35a9204d2660122
|
[] |
no_license
|
gaberosser/qmul-bioinf
|
603d0fe1ed07d7233f752e9d8fe7b02c7cf505fe
|
3cb6fa0e763ddc0a375fcd99a55eab5f9df26fe3
|
refs/heads/master
| 2022-02-22T06:40:29.539333
| 2022-02-12T00:44:04
| 2022-02-12T00:44:04
| 202,544,760
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
# Imports added so the plotting calls resolve; this fragment still assumes a
# precomputed linkage matrix `z` and the project-local `clustering` module.
import matplotlib.pyplot as plt
from scipy.cluster import hierarchy

plt.figure(figsize=(10, 4))
ax = plt.gca()
hierarchy.dendrogram(z, no_labels=True, color_threshold=0., above_threshold_color='k')
ax.axis('off')
plt.tight_layout()
plt.savefig('methyl_dendrogram_%dcuts.png' % 0, dpi=200)
for i in range(2, 7):
c = clustering.dendrogram_threshold_by_nclust(z, i)
plt.figure(figsize=(10, 4))
ax = plt.gca()
hierarchy.dendrogram(z, no_labels=True, color_threshold=c, above_threshold_color='k')
ax.axhline(c, c='gray', ls='--')
ax.axis('off')
plt.tight_layout()
plt.savefig('methyl_dendrogram_%dcuts.png' % i, dpi=200)
|
[
"gabriel.rosser@gmail.com"
] |
gabriel.rosser@gmail.com
|
ded348637281439cb1638b6252d242ecc3bfbddc
|
1aa49533c27f114840b528d9ab71dc7322120da0
|
/utils.py
|
31bce31e3917f3045269b2590ecdaeb448ca4fee
|
[
"MIT"
] |
permissive
|
yassineAlouini/image-recognition-as-a-service
|
a18afb2a99dd1d9e3da3e90086d8dbb9d3289bc9
|
c85502cdb3828e128415a92cb0564671f038cca8
|
refs/heads/master
| 2020-09-05T06:53:57.373156
| 2017-12-26T10:07:12
| 2017-12-26T10:07:12
| 94,414,726
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
import logging
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
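# Usage sketch: modules importing this file share the configured root logger,
# so plain logging calls pick up the timestamped format above.
if __name__ == '__main__':
    logging.getLogger('image-recognition').debug('handler and formatter are active')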
|
[
"yassinealouini@outlook.com"
] |
yassinealouini@outlook.com
|
aa1b71d24e45defa75b428b775fb1f2ecb43ba1d
|
0c4bb0b0c91a6bfb8c6ea529b93a303661bcd625
|
/python/table_parse/table_data.py
|
c6dd3b8e0345ebfde4e1382caa04ecbcd3604d16
|
[] |
no_license
|
shahrukhqasim/TIES
|
28de4bd962624856e58f5293b5a91427132fa38d
|
279ce12ef5303f9428cf72d3f01a1922c7c5d584
|
refs/heads/master
| 2021-08-23T16:29:02.057736
| 2017-12-05T17:34:55
| 2017-12-05T17:34:55
| 106,605,384
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 844
|
py
|
class TableData:
def __init__(self, tokens_embeddings, tokens_rects, neighbor_distance_matrix, tokens_neighbor_matrix,
tokens_share_row_matrix, tokens_share_col_matrix, tokens_share_cell_matrix, neighbors_same_row,
neighbors_same_col, neighbors_same_cell, conv_features):
self.embeddings = tokens_embeddings
self.rects = tokens_rects
self.distances = neighbor_distance_matrix
self.neighbor_graph = tokens_neighbor_matrix
self.row_share = tokens_share_row_matrix
self.col_share = tokens_share_col_matrix
self.cell_share = tokens_share_cell_matrix
self.neighbors_same_row = neighbors_same_row
self.neighbors_same_col = neighbors_same_col
self.neighbors_same_cell = neighbors_same_cell
self.conv_features = conv_features
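# Minimal construction sketch (illustrative shapes only; real inputs come from
# the table-parsing pipeline upstream).
if __name__ == '__main__':
    import numpy as np
    n = 3  # number of tokens
    square = np.zeros((n, n))
    td = TableData(np.zeros((n, 8)), np.zeros((n, 4)), square, square,
                   square, square, square, square, square, square,
                   np.zeros((n, 16)))
    print(td.embeddings.shape, td.neighbor_graph.shape)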
|
[
"ishahrukhqasim@gmail.com"
] |
ishahrukhqasim@gmail.com
|
c78b87a98e2583ddb065568d9450bf3ea41e6846
|
f0db1e2d4a48bfa745edf3a7b817eb6ff8a9ea4a
|
/test.py
|
01a12cd20b363f6f23ce534c58d2eea35608c181
|
[] |
no_license
|
AlainBBenoist/GoogleAnalytics
|
15a7a0e9d73544a2bbdff6dfdc561f4b2eaf7911
|
a52de78e1ae652c24d44b0bacdd1a7b5ed4aff8f
|
refs/heads/master
| 2023-08-22T03:20:55.325180
| 2021-10-01T13:29:44
| 2021-10-01T13:29:44
| 410,386,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,876
|
py
|
# Google libraries
from oauth2client.service_account import ServiceAccountCredentials
from apiclient.discovery import build
class GAReporter() :
def __init__(self, keyfile) :
        self.credentials = ServiceAccountCredentials.from_json_keyfile_name(keyfile, ['https://www.googleapis.com/auth/analytics.readonly'])
# Build the service object.
self.analytics = build('analyticsreporting', 'v4', credentials=self.credentials)
def report(self, view_id, start_date, end_date, metrics=None, dimensions=None) :
if not metrics:
metrics = [ 'users', 'newUsers', 'sessions', 'sessionsPerUser', 'pageviews', 'pageviewsPerSession', 'avgSessionDuration', 'sessionsPerUser', 'bounceRate', ]
if not dimensions:
dimensions = [ 'medium', 'source', ] # 'deviceCategory',
body = {
'reportRequests': [
{ 'viewId': view_id,
'dateRanges': [{'startDate': start_date, 'endDate': end_date}],
'metrics': [ { "expression": 'ga:'+metric } for metric in metrics ],
'dimensions':[ { "name": 'ga:'+dimension } for dimension in dimensions ],
}
]
}
response = self.analytics.reports().batchGet(body=body).execute()
return response
def print(self, response):
row_list = []
# Get each collected report
for report in response.get('reports', []):
# Set column headers
column_header = report.get('columnHeader', {})
dimension_headers = column_header.get('dimensions', [])
metric_headers = column_header.get('metricHeader', {}).get('metricHeaderEntries', [])
# Get each row in the report
for row in report.get('data', {}).get('rows', []):
# create dict for each row
row_dict = {}
dimensions = row.get('dimensions', [])
date_range_values = row.get('metrics', [])
# Fill dict with dimension header (key) and dimension value (value)
for header, dimension in zip(dimension_headers, dimensions):
row_dict[header] = dimension
# Fill dict with metric header (key) and metric value (value)
for i, values in enumerate(date_range_values):
for metric, value in zip(metric_headers, values.get('values')):
                        # Store ints as int and floats as float
if ',' in value or '.' in value:
row_dict[metric.get('name')] = float(value)
else:
row_dict[metric.get('name')] = int(value)
row_list.append(row_dict)
return row_list
if __name__ == '__main__':
import datetime
KEYFILE='analytics-dibutade.json'
VIEW_ID='141366074'
START_DATE='2021-01-01'
END_DATE='2021-08-31'
# Calculate yesterdate
start_date = datetime.date.today() - datetime.timedelta(days=1)
print(start_date.isoformat())
# Create a Google Analytics reporter
ga = GAReporter(KEYFILE)
# Get a view
response = ga.report(VIEW_ID, START_DATE, END_DATE)
#print(response)
#for result in ga.print(response):
# print('{:32.32s} {:32.32s} {:d}'.format(result['ga:medium'], result['ga:source'], result['ga:sessions']))
print('===============================')
response = ga.report(VIEW_ID, start_date.isoformat(), start_date.isoformat(), metrics=['pageviews', ], dimensions=['pagePath',])
#print(response)
for result in ga.print(response):
print('{:96.96s} {:d}'.format(result['ga:pagePath'], result['ga:pageviews']))
print('===============================')
response = ga.report(VIEW_ID, start_date.isoformat(), start_date.isoformat(), metrics=['totalEvents', ], dimensions=['eventAction',])
#print(response)
for result in ga.print(response):
print('{:96.96s} {:d}'.format(result['ga:eventAction'], result['ga:totalEvents']))
print('===============================')
response = ga.report(VIEW_ID, start_date.isoformat(), start_date.isoformat(), metrics=['impressions', 'adClicks', 'adCost' ], dimensions=['adGroup',])
#print(response)
for result in ga.print(response):
print('{:96.96s} {:8d} {:8d} {:6.2f}'.format(result['ga:adGroup'], result['ga:impressions'], result['ga:adClicks'], result['ga:adCost']))
print('===============================')
response = ga.report(VIEW_ID, start_date.isoformat(), start_date.isoformat(), metrics=['searchResultViews' ], dimensions=['searchKeyword',])
#print(response)
for result in ga.print(response):
print('{:96.96s} {:8d}'.format(result['ga:searchKeyword'], result['ga:searchResultViews']))
|
[
"alain.b.benoist@gmail.com"
] |
alain.b.benoist@gmail.com
|
480869005801ed9c8f1556d209302349a88fc7b2
|
68381f1b06f056f8c897723e2f1a06fcd7c0215c
|
/notebooks/data_tools.py
|
790087d7786f7d530fdba440e7a954ffb21a7832
|
[] |
no_license
|
djzelenak/lcmap-science
|
fa0632d82cd59fa65eba351db4a2cb980bf19f3a
|
79c37ec77ca1e4154a6e529712cf71cb9487f681
|
refs/heads/master
| 2020-12-13T10:14:00.447787
| 2019-06-10T15:18:00
| 2019-06-10T15:18:00
| 234,386,798
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,332
|
py
|
"""Some helpful functions for working with TAP-exported data"""
import sys
import warnings
import datetime as dt
import pandas as pd
import numpy as np
from collections import OrderedDict
# warnings.simplefilter('ignore')
mask_values = (1, 96, 112, 160, 176, 224, 352, 368, 416, 432, 480, 864, 880, 928, 944, 992)
def assemble(timeseries, ind, bands):
"""
Populate n-number of arrays using appropriate row and column locations
Args:
timeseries (array_like): A series of tuples, each containing a chip of data in the time series
bands (Iterable): Collection of band names that matches chipmunk bands
ind (int): The index location for the target date in each array_like object within the time series
Returns:
Dict[str: np.ndarray]
"""
out = {b: np.zeros(shape=(100, 100), dtype=np.int) for b in bands}
for t in timeseries:
coord_x = t[0][2]
coord_y = t[0][3]
chip_coord_x = t[0][0]
chip_coord_y = t[0][1]
col = int((coord_x - chip_coord_x) / 30)
row = int((chip_coord_y - coord_y) / 30)
for b in bands:
out[b][row][col] = t[1][b][ind]
return out
def temporal(df, ascending=True, field='dates'):
"""
Sort the input data frame based on time
Args:
df (pd.DataFrame): The input data
ascending (bool): Whether or not to sort in ascending order
field (str): The data frame field containing datetime objects
Returns:
pd.DataFrame
"""
    return df.sort_values(field, ascending=ascending).reset_index(drop=True)
def sort_on(df, field, ascending=True):
"""
A more open-ended sorting function, may be used on a specified field
Args:
df (pd.DataFrame): The input data
field (str): The field to sort on
ascending (bool): Whether or not to sort in ascending order
Returns:
pd.DataFrame
"""
    return df.sort_values(field, ascending=ascending).reset_index(drop=True)
def dates(df, params, field='dates'):
"""
Return an inclusive sliced portion of the input data frame based on a min and max date
Args:
df (pd.DataFrame): The input data
params (Tuple[dt.datetime, dt.datetime]): Dates, must be in order of MIN, MAX
field (str): The date field used to find matching values
Returns:
pd.DataFrame
"""
_min, _max = params
return df[(df[field] >= _min) & (df[field] <= _max)].reset_index(drop=True)
def years(df):
"""
Get an array of unique years in the current time series
Args:
df (pd.DataFrame): The input data frame
Returns:
np.ndarray
"""
return df['dates'].apply(lambda x: (x.timetuple()).tm_year).unique()
def date_range(params):
"""
Generate date ranges for a seasonal time series
Args:
params (dict): Arguments for the pandas date_range function
    Returns:
        pd.DatetimeIndex
    """
return pd.date_range(**params)
def seasons(df, start_mon, start_day, end_mon, end_day, periods=None, freq='D', **kwargs):
"""
Args:
df:
start_mon:
start_day:
end_mon:
end_day:
periods:
freq:
**kwargs:
Returns:
"""
return OrderedDict([(y,
date_range({'start': dt.datetime(y, start_mon, start_day),
'end': dt.datetime(y, end_mon, end_day),
'periods': periods,
'freq': freq}))
for y in years(df)])
def stats(arr):
"""
Return the statistics for an input array of values
Args:
arr (np.ndarray)
Returns:
OrderedDict
"""
try:
        return OrderedDict([('min', arr.min()),
('max', arr.max()),
('mean', arr.mean()),
('std', arr.std())])
except ValueError: # Can happen if the input array is empty
return OrderedDict([('min', None),
('max', None),
('mean', None),
('std', None)])
def get_seasonal_info(df, params):
"""
A wrapper function for easily returning the statistics on a seasonal basis for a given field of the data frame
Args:
df (pd.DataFrame)
params (dict)
Returns:
OrderedDict
"""
__seasons = seasons(df, **params)
return OrderedDict([
(y, stats(
values(
mask(
dates(df, (__seasons[y][0], __seasons[y][-1])), **params
), **params
)
)
)
for y in years(df)
])
def values(df, field, **kwargs):
"""
Return values from a specific field of the data frame within a given time extent
Args:
df (pd.DataFrame): The exported TAP tool data
field (str): The field representing the column name
Returns:
np.ndarray: An array of the time-specified values
"""
return df[field].values
def plot_data(d, field):
"""
Return the x and y series to be used for plotting
Args:
d (OrderedDict)
field (str)
Returns:
Tuple[list, list]:
[0] The x-series
[1] The y-series
"""
return ([year for year in d.keys() if d[year][field] is not None],
[i[field] for k, i in d.items() if i[field] is not None])
def mask(df, vals=mask_values, mask_field='qa', **kwargs):
"""
Remove rows from the data frame that match a condition
Args:
df (pd.DataFrame): The input data
vals (List[Number[int, float]]): The values used to filter the data frame, rows == value will be removed!
mask_field (str): The field to use for filtering
Returns:
pd.DataFrame
"""
return df[~df[mask_field].isin(np.array(vals))].reset_index(drop=True)
def nearest_date(array, date):
"""
Find the index value in the array to the nearest matching date, date may therefore not be a value
within the array
Args:
array (array_like): The input data
date (Tuple[int, int, int]): The date to look for given as (Year, Month, M-day)
Returns:
int
"""
date = dt.datetime(*date).toordinal()
array = np.asarray(array)
return (np.abs(array - date)).argmin()
def spectral_signature(df, date):
"""
Args:
df:
date:
Returns:
"""
index = nearest_date(df.dates.apply(lambda x: x.toordinal()), date)
return df.iloc[index]
def plot_spectral(series):
"""
Args:
series:
Returns:
"""
keys = ['blues', 'greens', 'reds', 'nirs', 'swir1s', 'swir2s']
vals = [series[key] for key in keys]
return keys, vals
def load_csv(csv_file, dates_field='dates', use_datetime=True):
"""
Args:
csv_file (str): The full path to the input csv file
dates_field (str): The name of the dates column
use_datetime (bool): Whether or not to use datetime format
Returns:
pd.DataFrame
"""
return pd.read_csv(csv_file,
parse_dates=[dates_field],
infer_datetime_format=use_datetime).drop(columns='Unnamed: 0')
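# End-to-end sketch (assumes a hypothetical TAP export 'example_tap_export.csv'
# with 'dates', 'qa' and band columns): load the export, then pull per-year
# summer statistics for the 'greens' band; QA-flagged rows are masked inside the call.
if __name__ == '__main__':
    _df = load_csv('example_tap_export.csv')
    _params = {'start_mon': 6, 'start_day': 1, 'end_mon': 8, 'end_day': 31, 'field': 'greens'}
    print(get_seasonal_info(_df, _params))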
|
[
"dzelenak@contractor.usgs.gov"
] |
dzelenak@contractor.usgs.gov
|
92c8a0f78906a2f80b77ac6550c0ec6dda1495c7
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_generated/models/_azure_video_analyzerfor_edge_enums.py
|
35371307bdbb6776cf7c3d240b1ac8a296876400
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080
| 2023-06-08T02:53:04
| 2023-06-08T02:53:04
| 222,384,897
| 1
| 0
|
MIT
| 2023-09-08T08:38:48
| 2019-11-18T07:09:24
|
Python
|
UTF-8
|
Python
| false
| false
| 8,408
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class GrpcExtensionDataTransferMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Data transfer mode: embedded or sharedMemory.
"""
#: Media samples are embedded into the gRPC messages. This mode is less efficient but it requires
#: a simpler implementations and can be used with plugins which are not on the same node as the
#: Video Analyzer module.
EMBEDDED = "embedded"
#: Media samples are made available through shared memory. This mode enables efficient data
#: transfers but it requires that the extension plugin to be co-located on the same node and
#: sharing the same shared memory space.
SHARED_MEMORY = "sharedMemory"
class H264Profile(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The H264 Profile
"""
BASELINE = "Baseline"
MAIN = "Main"
EXTENDED = "Extended"
HIGH = "High"
class ImageFormatRawPixelFormat(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Pixel format to be applied to the raw image.
"""
#: Planar YUV 4:2:0, 12bpp, (1 Cr and Cb sample per 2x2 Y samples).
YUV420_P = "yuv420p"
#: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian.
RGB565_BE = "rgb565be"
#: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian.
RGB565_LE = "rgb565le"
#: Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined.
RGB555_BE = "rgb555be"
#: Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined.
RGB555_LE = "rgb555le"
#: Packed RGB 8:8:8, 24bpp, RGBRGB.
RGB24 = "rgb24"
#: Packed RGB 8:8:8, 24bpp, BGRBGR.
BGR24 = "bgr24"
#: Packed ARGB 8:8:8:8, 32bpp, ARGBARGB.
ARGB = "argb"
#: Packed RGBA 8:8:8:8, 32bpp, RGBARGBA.
RGBA = "rgba"
#: Packed ABGR 8:8:8:8, 32bpp, ABGRABGR.
ABGR = "abgr"
#: Packed BGRA 8:8:8:8, 32bpp, BGRABGRA.
BGRA = "bgra"
class ImageScaleMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Describes the image scaling mode to be applied. Default mode is 'pad'.
"""
#: Preserves the same aspect ratio as the input image. If only one image dimension is provided,
#: the second dimension is calculated based on the input image aspect ratio. When 2 dimensions are
#: provided, the image is resized to fit the most constraining dimension, considering the input
#: image size and aspect ratio.
PRESERVE_ASPECT_RATIO = "preserveAspectRatio"
#: Pads the image with black horizontal stripes (letterbox) or black vertical stripes (pillar-box)
#: so the image is resized to the specified dimensions while not altering the content aspect
#: ratio.
PAD = "pad"
#: Stretches the original image so it resized to the specified dimensions.
STRETCH = "stretch"
class LivePipelineState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Current pipeline state (read-only).
"""
#: The live pipeline is idle and not processing media.
INACTIVE = "inactive"
#: The live pipeline is transitioning into the active state.
ACTIVATING = "activating"
#: The live pipeline is active and able to process media. If your data source is not available,
#: for instance, if your RTSP camera is powered off or unreachable, the pipeline will still be
#: active and periodically retrying the connection. Your Azure subscription will be billed for the
#: duration in which the live pipeline is in the active state.
ACTIVE = "active"
#: The live pipeline is transitioning into the inactive state.
DEACTIVATING = "deactivating"
class MotionDetectionSensitivity(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Motion detection sensitivity: low, medium, high.
"""
#: Low sensitivity.
LOW = "low"
#: Medium sensitivity.
MEDIUM = "medium"
#: High sensitivity.
HIGH = "high"
class MPEG4Profile(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The MPEG4 Profile
"""
#: Simple Profile.
SP = "SP"
#: Advanced Simple Profile.
ASP = "ASP"
class ObjectTrackingAccuracy(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Object tracker accuracy: low, medium, high. Higher accuracy leads to higher CPU consumption in
average.
"""
#: Low accuracy.
LOW = "low"
#: Medium accuracy.
MEDIUM = "medium"
#: High accuracy.
HIGH = "high"
class OnvifSystemDateTimeType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""An enum value determining whether the date time was configured using NTP or manual.
"""
NTP = "Ntp"
MANUAL = "Manual"
class OutputSelectorOperator(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The operator to compare properties by.
"""
#: The property is of the type defined by value.
IS_ENUM = "is"
#: The property is not of the type defined by value.
IS_NOT = "isNot"
class OutputSelectorProperty(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The property of the data stream to be used as the selection criteria.
"""
#: The stream's MIME type or subtype: audio, video or application.
MEDIA_TYPE = "mediaType"
class ParameterType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Type of the parameter.
"""
#: The parameter's value is a string.
STRING = "string"
#: The parameter's value is a string that holds sensitive information.
SECRET_STRING = "secretString"
#: The parameter's value is a 32-bit signed integer.
INT = "int"
#: The parameter's value is a 64-bit double-precision floating point.
DOUBLE = "double"
#: The parameter's value is a boolean value that is either true or false.
BOOL = "bool"
class RtspTransport(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When using TCP, the RTP
packets are interleaved on the TCP RTSP connection. When using HTTP, the RTSP messages are
exchanged through long lived HTTP connections, and the RTP packages are interleaved in the HTTP
connections alongside the RTSP messages.
"""
#: HTTP transport. RTSP messages are exchanged over long running HTTP requests and RTP packets are
#: interleaved within the HTTP channel.
HTTP = "http"
#: TCP transport. RTSP is used directly over TCP and RTP packets are interleaved within the TCP
#: channel.
TCP = "tcp"
class SpatialAnalysisOperationFocus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The operation focus type.
"""
#: The center of the object.
CENTER = "center"
#: The bottom center of the object.
BOTTOM_CENTER = "bottomCenter"
#: The footprint.
FOOTPRINT = "footprint"
class SpatialAnalysisPersonCountEventTrigger(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The event trigger type.
"""
#: Event trigger.
EVENT = "event"
#: Interval trigger.
INTERVAL = "interval"
class SpatialAnalysisPersonDistanceEventTrigger(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The event trigger type.
"""
#: Event trigger.
EVENT = "event"
#: Interval trigger.
INTERVAL = "interval"
class SpatialAnalysisPersonZoneCrossingEventType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The event type.
"""
#: Zone crossing event type.
ZONE_CROSSING = "zoneCrossing"
#: Zone dwell time event type.
ZONE_DWELL_TIME = "zoneDwellTime"
class VideoEncoding(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The video codec used by the Media Profile.
"""
#: The Media Profile uses JPEG encoding.
JPEG = "JPEG"
#: The Media Profile uses H264 encoding.
H264 = "H264"
#: The Media Profile uses MPEG4 encoding.
MPEG4 = "MPEG4"
|
[
"noreply@github.com"
] |
openapi-env-test.noreply@github.com
|
5562336d1e9f8350768f81a204bd1b9ce5e6dcb9
|
f8b8b485b8d29a01a81c1bfbc631a496a3a88a02
|
/app/authentication.py
|
8b1eac797d7e714b87a085d772e5ee81e47eb25c
|
[] |
no_license
|
samuelquitiang/guane-intern-fastapi
|
2ca061ba8d496685a57ed17814c5ecf17972b36c
|
f83302ddf0871aa938dfe4972f2dd05c0150c4dd
|
refs/heads/master
| 2023-04-09T17:11:18.098223
| 2021-04-09T22:12:40
| 2021-04-09T22:12:40
| 356,331,667
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,303
|
py
|
from fastapi import APIRouter, status, HTTPException, Depends
import schemas, token_
from passlib.context import CryptContext
from fastapi.security import OAuth2PasswordRequestForm
router = APIRouter(tags=['Authentication'])
# Method to hash the password
myctx = CryptContext(schemes=["bcrypt"], deprecated="auto")
password = 'contraseña'
# For all the admin users the password must be hashed
hashed_password = myctx.hash(password)
# Creation of an admin who can enter the dogs data
admin_users = [] # List of the admins
admin1 = schemas.Admin_Login(username='first_admin', password=hashed_password)
admin_users.append(admin1.dict())
@router.get('/api/admins')
def admis():
return ' The registered admin_users are: ', admin_users
@router.post('/api/login/{username}')
def add_admin(username: str, password: str,
get_current_user:
schemas.Admin_Login = Depends(token_.get_current_user)):
hashed_password = myctx.hash(password)
new_admin_user = schemas.Admin_Login(username=username,
password=hashed_password)
admin_users.append(new_admin_user.dict())
return 'Successfully added'
@router.delete('/api/login/{username}')
def destroy_dog(username: str, get_current_user:
schemas.Admin_Login = Depends(token_.get_current_user)):
    # Gets the index of the admin user with the given username and deletes it from the list
user = list(filter(lambda us: us['username'] == username, admin_users))
to_del = admin_users.index(user[0])
admin_users.pop(to_del)
return 'Done'
@router.post('/login', include_in_schema=False)
def login(request: OAuth2PasswordRequestForm = Depends()):
user = list(filter(lambda us: us['username'] == request.username,
admin_users))
if not user:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f'Invalid Credentials')
if not myctx.verify(request.password, user[0]['password']):
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f'Incorrect Password')
access_token = token_.create_token(data={'sub': user[0]['username']})
return {'access_token': access_token, 'token_type': 'bearer'}
|
[
"samuel.quitiang@udea.edu.co"
] |
samuel.quitiang@udea.edu.co
|
ee86cc6478906dfe72ddb3e9b8b4af23f8613796
|
01532ebb2bb5721a4191765faa6edffebfab9464
|
/gen_data_V-Z.py
|
eab540e355fc189673e0fa784b0721899ad4b64a
|
[] |
no_license
|
sarc007/2-OpenCV_3_KNN_Character_Recognition_Python-master
|
bb52c6d5776b17fd4337579299519e989146ac11
|
6f2f2e139ce2c4182cae8db1f36b67e4f498b9f4
|
refs/heads/master
| 2022-12-13T02:07:59.443287
| 2019-10-22T10:26:41
| 2019-10-22T10:26:41
| 213,817,682
| 0
| 0
| null | 2023-09-06T23:34:26
| 2019-10-09T04:04:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,389
|
py
|
# Python 3 code to build an HDF5 character dataset from labelled image folders
# (each sub-directory name is used as the character label)
import os
import cv2
import numpy as np
from skimage import io
import h5py
# Function to generate an MNIST-style HDF5 dataset from images
def main():
src_dir = "E:\\OPEN_1\\V-Z"
dst_dir = "E:\\OPEN_1\\data"
data_file = dst_dir + "\\" + "V-Z.hdf5"
np_intValidChars = np.asarray(intValidChars)
with h5py.File(data_file, "w") as f:
dset = f.create_dataset("class", data=np_intValidChars)
j = 1
all_images = []
all_labels = []
for dirname in os.listdir(src_dir):
i = 1
# ord(dirname) :
for image_path in os.listdir(src_dir + "\\" + dirname):
img = cv2.imread(src_dir + "\\" + dirname + "\\" + image_path, 0)
all_images.append(img)
all_labels.append(ord(dirname))
print("Directory " + dirname + " Adding file : " + image_path + " out of " + str(
len(os.listdir(src_dir + "\\" + dirname))) + " To Array Creating HD5 File ")
            # advance the per-directory and global counters
i += 1
j += 1
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
x_data = np.array(all_images)
y_data = np.array(all_labels)
# tuple_xy = [x_data, y_data]
# tuple_xy = np.array(tuple_xy)
with h5py.File(data_file, "a") as f:
dset_lbl = f.create_dataset("img_labels", data=y_data)
dset_img = f.create_dataset("img_dataset", data=x_data)
# dset_img_lbl = f.create_dataset("img_lbl_ds", data=tuple_xy)
with h5py.File(data_file, 'r') as f:
class_arr = f['class'][:]
labels_arr = f['img_labels'][:]
image_arr = f['img_dataset'][:]
print(class_arr.shape)
print(labels_arr.shape)
print(image_arr.shape)
# Driver Code
if __name__ == '__main__':
# Calling main() function
intValidChars = [ord('0'), ord('1'), ord('2'), ord('3'), ord('4'), ord('5'), ord('6'), ord('7'), ord('8'), ord('9'),
ord('A'), ord('B'), ord('C'), ord('D'), ord('E'), ord('F'), ord('G'), ord('H'), ord('I'), ord('J'),
ord('K'), ord('L'), ord('M'), ord('N'), ord('O'), ord('P'), ord('Q'), ord('R'), ord('S'), ord('T'),
ord('U'), ord('V'), ord('W'), ord('X'), ord('Y'), ord('Z')]
WIDTH = 28
HEIGHT = 28
main()
|
[
"maazansari11@gmail.com"
] |
maazansari11@gmail.com
|
9dd9b9617da72ba3de93cf4b9abe346b3b89264e
|
eb80e96d29d2aa8329b87626fa22cc81c2abc723
|
/youtube_download.py
|
16a3f800bdbb7732ad22e8e9d5d3bd78d30b601e
|
[] |
no_license
|
ahsanGoheer/Tools
|
7229f8a7e0c686ce2ebf9731ee232d26382a856e
|
9217ee303a204e0f8e6d8e26bce3a20068bb57b9
|
refs/heads/master
| 2020-06-24T04:53:41.394784
| 2019-09-13T19:02:46
| 2019-09-13T19:02:46
| 198,854,366
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,004
|
py
|
from pytube import YouTube
from tqdm import tqdm
#file_size=0
save_path ="/home/ahsangoheer/Documents/Tools/Videos"
# def progress_Check(stream = None, chunk = None, file_handle = None, remaining = None):
# #Gets the percentage of the file that has been downloaded.
# percent = (100*(remaining-file_size))/file_size
# print("{:00.0f}% downloaded".format(percent))
mylinks=list(open('links.txt','r'))
for link in mylinks:
try:
yt = YouTube(link) #,on_progress_callback=progress_Check)
print("Downloading: {} \n".format(yt.title))
    except Exception:
        print('There was an error while connecting!')
        continue  # skip this link; yt would be undefined below otherwise
streams = yt.streams.all()
for i in streams:
print(str(i)+'\n')
itag = input('Enter the itag number of the desired format : ')
desired_stream=yt.streams.get_by_itag(itag)
desired_stream.download(save_path)
print('{} Downloaded!'.format(yt.title))
file_size=0
print('Process Complete! Files are located in {}'.format(save_path))
|
[
"noreply@github.com"
] |
ahsanGoheer.noreply@github.com
|
21bc8422cad06b064ce9d777977c5ed65f260aa3
|
465ef9a288d5902348e930a8f1193acfacd64127
|
/alerts/alerter.py
|
a8ffafcf1c1df22e9b7e11ff61d3297283db01d3
|
[] |
no_license
|
syffer/ineedanapartment
|
4b876b6fa2461e2402645735a57d44ce1a91399c
|
2c0430d1c3b4b9d90040812292f8702c8e36414c
|
refs/heads/master
| 2022-12-08T09:46:44.805570
| 2020-08-30T17:46:37
| 2020-08-30T17:46:37
| 289,482,517
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
# coding: utf-8
class Alerter(object):
def alert(self, locations):
raise NotImplementedError("subclasses must override this method")
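# Minimal concrete subclass sketch: a console alerter that just prints the
# locations it receives (a stand-in for mail/push implementations).
class ConsoleAlerter(Alerter):
    def alert(self, locations):
        for location in locations:
            print("new apartment found:", location)
if __name__ == '__main__':
    ConsoleAlerter().alert(["1 rue de l'exemple", "2 avenue imaginaire"])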
|
[
"maxime.nicolas2@hotmail.fr"
] |
maxime.nicolas2@hotmail.fr
|
42d3f7d37df6743f60d81d69b57e407b29202290
|
fa8227796c7525f5dfd347f3c480a9a8fa0fa0b1
|
/backend/hharvard/hharvard/urls.py
|
1da5dbd43c3d01c103021c6ada2764309131af4b
|
[] |
no_license
|
CauaneAndrade/HackHarvard2021
|
515f8d635dcaa3796251d315a986f93919ff7296
|
42f6e031a6d74f1b8e1bbbe143ae33a76fbb8dde
|
refs/heads/main
| 2023-08-16T06:18:08.429155
| 2021-10-10T14:57:47
| 2021-10-10T14:57:47
| 413,943,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
from django.contrib import admin
from django.urls import path
from django.conf.urls import include
from rest_framework import routers
from rest_framework_jwt.views import obtain_jwt_token
from app import viewsets
router = routers.DefaultRouter()
router.register(r'users', viewsets.UserViewSet)
urlpatterns = [
path('', include(router.urls)),
path('api-auth/', include('rest_framework.urls')),
path('api-token-auth/', obtain_jwt_token, name="api-token-auth"),
]
|
[
"cauane.emanuela@hotmail.com"
] |
cauane.emanuela@hotmail.com
|
34397dfc2620cb2ec545a7ad9cb93a808013afbd
|
0917f0312bdc934d586b5471bda0f26ea2cd1dc6
|
/src/user_io.py
|
48e4c290764f8b6599b782be9ee8a72271c716eb
|
[
"MIT"
] |
permissive
|
S0S-90/geocachingTooly
|
1897b64f0c71af781807354362a1656e13756be9
|
a6ed356d0187dd517a9436a83bded3752d488db5
|
refs/heads/master
| 2021-06-02T09:01:34.592866
| 2019-04-09T10:00:30
| 2019-04-09T10:00:30
| 68,519,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,000
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""This file contains the user interface."""
import ownfunctions
import geocache
import os
PATH = [r"E:/Garmin", r"F:/Garmin", r"G:/Garmin", r"H:/Garmin", r"/media/{}/GARMIN/garmin/".format(os.environ["USER"])]
CODING = "cp1252" # coding of cmd (cp1252 recommended)
EDITORNAME = "notepad.exe" # name (+ path) of standard text editor
def general_output(string):
"""prints string on cmd"""
string = ownfunctions.replace_signs(string)
print(string)
def general_input(string):
"""asks for user input (by the use of string) and returns it (as string)"""
return input(string)
def map_menu():
"""map menu"""
print("\nWas moechtest du als naechstes tun?")
print("1: Alle auf dem Geraet gespeicherten Geocaches auf Karte zeigen (INTERNET!!!)")
print("2: https://www.geocaching.com/map aufrufen (INTERNET!!!)")
print("3: https://www.google.de/maps aufrufen (INTERNET!!!)")
inp = input(">> ")
if inp == "1":
return "show_on_map"
elif inp == "2":
return "gc-maps"
elif inp == "3":
return "google-maps"
def show_main_menu(found_exists):
"""prints main menu"""
print("\nWas moechtest du als naechstes tun?")
print("1: Geocaches aktualisieren")
print("2: Alle auf dem Geraet gespeicherten Geocaches sortieren und anzeigen")
print("3: Wegpunkt-Menue")
print("4: Karten-Menue")
print("5: Beschreibung fuer einen bestimmten Cache anzeigen (GC-Code erforderlich)")
print("6: Geocaches durchsuchen")
if found_exists:
print("7: Alle gefundenen Caches anzeigen")
print("8: Programm verlassen")
else:
print("7: Programm verlassen")
def main_menu(found_exists):
"""prints main menu and asks for user input
returns task that is chosen by user input"""
show_main_menu(found_exists)
inp = input(">> ")
if inp == "1":
return "update"
elif inp == "2":
return "show_all"
elif inp == "3":
return "show_waypoints"
elif inp == "4":
return "map-menu"
elif inp == "5":
return "show_one"
elif inp == "6":
return "search"
elif inp == "7" and found_exists:
return "show_founds"
elif inp == "8" and found_exists:
return "exit"
elif inp == "7" and not found_exists:
return "exit"
else:
print("Ungueltige Eingabe!")
def sort_caches():
"""asks for criterion and if the caches should be sorted forwards or backwards
returns list with two elements
first element: string that corresponds to criterion
second element: True (sorting backwards) or False (sorting forwards)"""
criterions = ["gccode", "name", "type", "difficulty", "terrain", "size", "downloaddate", "available", "distance"]
print("\nWonach sollen die Geocaches sortiert werden?")
print("1: GC-Code")
print("2: Name")
print("3: Cache-Typ")
print("4: D-Wertung")
print("5: T-Wertung")
print("6: Groesse")
print("7: Download-Datum")
print("8: Verfuegbarkeit")
print("9: Abstand von einer bestimmten Position (Koordinaten erforderlich)")
input_criterion = input(">> ")
if input_criterion == "0":
print("Ungueltige Eingabe: Sortierung erfolgt nach GC-Code")
criterion = "gccode"
else:
try:
criterion = criterions[int(input_criterion)-1]
except IndexError:
print("Ungueltige Eingabe: Sortierung erfolgt nach GC-Code")
criterion = "gccode"
except ValueError:
print("Ungueltige Eingabe: Sortierung erfolgt nach GC-Code")
criterion = "gccode"
print("In welche Richtung sollen die Caches sortiert werden?")
print("1: aufsteigend")
print("2: absteigend")
input_direction = input(">> ")
if input_direction == "2":
backward = True
else:
backward = False
return [criterion, backward]
def search():
"""asks for criterion by which search should be performed and returns it (as string)"""
criterions = ["name", "description", "type", "difficulty", "terrain",
"size", "downloaddate", "available", "attribute", "distance"]
print("\nWonach willst du suchen?")
print("1: Name")
print("2: Beschreibung")
print("3: Cache-Typ")
print("4: D-Wertung")
print("5: T-Wertung")
print("6: Groesse")
print("7: Download-Datum")
print("8: Verfuegbarkeit")
print("9: Attribut")
print("10: Abstand von einer bestimmten Position (Koordinaten erforderlich)")
inp = input(">> ")
if inp == "0":
print("Ungueltige Eingabe")
else:
try:
return criterions[int(inp)-1]
except IndexError:
print("Ungueltige Eingabe")
except ValueError:
print("Ungueltige Eingabe")
def search_type():
"""asks for cachetype which should be searched and returns it (as string)"""
print("Gib den Cachetyp ein, nach dem du suchen willst.")
print("Moegliche Typen: Traditional Cache, Multi-cache, Mystery Cache, EarthCache, Letterbox Hybrid, Event Cache, "
"Wherigo Cache, Geocaching HQ, Unknown Type")
print("Achtung! Gross- und Kleinschreibung beachten!")
inp = input(">> ")
return inp
def search_attribute(existing_attributes):
"""asks for attribute which should be searched and returns it (as string)"""
print("Gib das Attribut ein, nach dem du suchen willst.")
attr_string = ", ".join(existing_attributes)
print("Moegliche Attribute: {}".format(attr_string))
inp = input(">> ")
return inp
def actions_after_search():
"""asks for next action after a search, returns this action as a string"""
print("\nWas moechtest du als naechstes tun?")
print("1: Alle Suchergebnisse erneut anzeigen (bei evtl. Loeschen nicht aktualisiert)")
print("2: Alle Suchergebnisse loeschen")
print("3: Alle Suchergebnisse auf Karte zeigen (INTERNET!!!)")
print("4: Beschreibung fuer eines der Suchergebnisse anzeigen")
print("5: zurueck")
inp = input(">> ")
if inp == "1":
return "show_again"
elif inp == "2":
return "delete"
elif inp == "3":
return "show_on_map"
elif inp == "4":
return "show_one"
elif inp == "5":
return "back"
else:
print("Ungueltige Eingabe")
def actions_with_founds():
"""asks after showing the found caches what to do next
returns the next action as a string"""
print("\nWas moechtest du als naechstes tun?")
print("1: Gefundene Caches auf geocaching.com loggen (ueber den Upload von drafts / fieldnotes, INTERNET!!!)")
print("2: Alle gefundenen Caches loeschen")
print("3: zurueck")
inp = input(">> ")
if inp == "1":
return "log"
elif inp == "2":
return "delete"
elif inp == "3":
return "exit"
def confirm_deletion():
"""asks before deleting caches if they should really be deleted
returns True for yes and False for no"""
inp = input("\nWillst du den / die ausgewaehlten Cache(s) wirklich loeschen? (y/n) ")
if inp == "y":
return True
else:
return False
def confirm_deletion_wpt():
"""asks before deleting waypoints if they should really be deleted
returns True for yes and False for no"""
inp = input("\nWillst du den ausgewaehlten Wegpunkt wirklich loeschen? (y/n) ")
if inp == "y":
return True
else:
return False
def waypoint_menu(waypoints_exist):
"""asks what to do with waypoints"""
print("\nWas moechtest du als naechstes tun?")
print("1: Wegpunkte hinzufuegen")
if waypoints_exist:
print("2: Wegpunkte zu Geocaches zuordnen oder loeschen")
print("3: nichts")
else:
print("2: nichts")
inp = input(">> ")
if inp == "1":
return "add"
elif inp == "2" and waypoints_exist:
return "assign"
else:
return "continue"
def choose_cache(suggestions, more_options):
"""asks to which of a list of suggested caches the waypoint should be assigned
return either the chosen cache or the string 'other'"""
if type(suggestions) != list:
raise TypeError
if len(suggestions) > 0:
print("Zu welchem der folgenden Caches moechtest du den Wegpunkt zuordnen?")
else:
print("Keine Vorschlaege vorhanden. Was nun?")
for i, s in enumerate(suggestions):
if type(s) != geocache.Geocache:
raise TypeError
print("{}: {} ({})".format(i+1, s.name, s.gccode))
print("{}: zu anderem Geocache zuordnen (GC-Code erforderlich)".format(len(suggestions)+1))
if more_options:
print("{}: Wegpunkt loeschen".format(len(suggestions)+2))
print("{}: nichts tun".format(len(suggestions)+3))
else:
print("{}: Wegpunkt doch nicht zuordnen".format(len(suggestions)+2))
inp = input(">> ")
try:
sug = suggestions[int(inp)-1]
except IndexError:
if int(inp) == len(suggestions)+1:
sug = "other"
elif int(inp) == len(suggestions)+2 and more_options:
sug = "delete"
else:
sug = "continue"
except ValueError:
sug = "continue"
return sug
def show_one(waypoints):
"""asks after showing one cache what to do next
waypoints = True, if shown cache has waypoints, False if not
returns the next action as a string"""
print("\nWas moechtest du als naechstes tun?")
print("1: diesen Cache loeschen")
print("2: diesen Cache auf geocaching.com oeffnen (INTERNET!!!)")
print("3: Abstand dieses Caches zu einer bestimmten Position berechnen")
print("4: Position des Caches auf der Karte https://www.geocaching.com/map anzeigen (INTERNET!!!)")
print("5: Position des Caches auf der Karte https://www.google.de/maps anzeigen (INTERNET!!!)")
if waypoints:
print("6: diesen Cache mit allen Wegpunkten auf Karte zeigen (INTERNET!!!)")
print("7: zurueck")
else:
print("6: zurueck")
inp = input(">> ")
if inp == "1":
return "delete"
elif inp == "2":
return "gc.com"
elif inp == "3":
return "dist"
elif inp == "4":
return "gc-map"
elif inp == "5":
return "googlemaps"
elif inp == "6" and waypoints:
return "mapcustomizer"
def coordinates_input():
"""asks for coordinates, returns input as a string"""
print("Gib die Koordinaten ein (Format: X XX°XX.XXX, X XXX°XX.XXX oder URL (google maps oder geocaching.com/map))")
coords = input(">> ")
return coords
def wpt_ask_for_name_and_coords():
"""asks for name and coordinates of waypoint that should be created"""
name = input("Gib den Namen des Wegpunkts ein: ")
print("Gib die Koordinaten ein (Format: X XX°XX.XXX, X XXX°XX.XXX)")
coordstr = input(">> ")
return name, coordstr
def ask_for_path():
"""asks for the path to the GPS-device and returns it
if no path is specified: returns the standard PATH"""
print("\nGib den Pfad zum GPS-Geraet ein (NICHT zum Unterordner 'GPX').")
print("Falls Standardpfad uebernommen werden soll: keine Eingabe")
inp = input(">> ")
if inp == "":
return "default"
else:
return inp
def ask_for_waypoints():
"""asks if waypoints should be shown on map"""
inp = input("\nSollen auch Wegpunkte auf der Karte angezeigt werden? (y/n) ")
if inp == "y":
return True
else:
return False
def show_on_map_start(one, free_waypoints):
"""explains how the task 'show_on_map' works and asks for path to texteditor
returns path to texteditor or - if no path is specified - the standard EDITORNAME
one = True if only one geocache (with waypoints), False if several geocaches
free_waypoints = True if free waypoints are shown (i.e. waypoints that don't belong to a cache)
only if all caches are shown and waypoints should be shown
"""
print("\nNach dem Klicken werden sich mehrere Fenster oeffnen. "
"Eines davon ist der Editor, das andere die Seite mapcustomizer.com in deinem Browser.")
print("Um den Cache / die Caches auf der Karte anzuzeigen, kopiere den vollstaendigen Inhalt der Textdatei "
"aus deinem Editor in das Feld 'Bulk Entry' im Browser.")
if not one: # if more than one geocache
print("Die Caches werden in folgenden Farben angezeigt:")
print("Gruen: Traditional Cache")
print("Rot: Multi-cache")
print("Blau: Mystery Cache")
print("Braun: EarthCache")
print("Grau: Letterbox, Geocaching HQ")
z = "Gelb: Event Cache, Wherigo Cache"
if free_waypoints:
z += ", Wegpunkte"
print(z)
print("Pink: unbekannter Typ")
print("Gib nun den Pfad zu deinem Editor an: (bei Benutzung von Windows sollte das unnoetig sein)")
inp = input(">> ")
if inp == "":
return EDITORNAME
else:
return inp
def show_on_map_end():
"""asks for another input before leaving task 'show_on_map'"""
print("Schliesse den Editor und druecke Enter.")
input(">> ")
# string for main.py
GPS_NOT_FOUND = "GPS-Geraet nicht unter folgender Pfadangabe zu finden"
# string collection for gps_content.py
INVALID_INPUT = "Achtung! Ungueltige Eingabe."
WARNING_BROKEN_FILE = "Achtung! Kaputte Datei"
GEOCACHES = "Geocaches"
WAYPOINTS = "Wegpunkte"
AND = "und"
ON_DEVICE = "auf dem Geraet."
NO_WAYPOINTS_ON_DEVICE = "Keine Wegpunkte auf dem Geraet."
NO_CACHES_ON_DEVICE = "Keine Caches auf dem Geraet."
NAME_TO_LONG = "Name zu lang."
NOT_ALLOWED_SIGNS = "Name enthaelt ungueltige Zeichen."
COORDINATES_WRONG = "Koordinaten fehlerhaft."
NO_WAYPOINT_CREATED = "Kein Wegpunkt wurde erstellt."
CURRENT_WAYPOINT = "Aktueller Wegpunkt"
INPUT_GCCODE = "Gib den GC-Code ein: "
WAYPOINT_LEFT_OUT = "Wegpunkt wird uebersprungen."
ASSIGN_WAYPOINT_TO_CACHE = "Willst du den Wegpunkt einem Cache zuordnen?"
ADD_WAYPOINT = "Moechtest du einen weiteren Wegpunkt erstellen?"
GC_DOES_NOT_EXIST = "Dieser GC-Code existiert nicht."
SEARCH_FOR = "Suche nach... "
MIN_MAX_SEPERATED_BY_KOMMA = "Minimaler und maximaler Wert (mit Komma voneinander getrennt)"
POSSIBLE_SIZES = "Moegliche Groessen"
DATE_SEPERATED_BY_KOMMA = "Fruehestes und spaetestes Datum (mit Komma voneinander getrennt). Format: DD.MM.YYYY"
CACHES_AVAILABLE_OR_NOT = "Moechtest du die Caches sehen, die verfuegbar sind, oder die, die nicht verfuegbar sind? (y/n) "
DIST_SEPERATED_BY_KOMMA = "Minimale und maximale Distanz in Kilometern (mit Komma voneinander getrennt): "
NO_CACHES_FOUND = "keine Geocaches gefunden"
WARNING_LOG_INFO = "WARNUNG! Bei Fortfahren werden auch Log-Informationen ueber Caches geloescht, \
die nicht gefunden wurden."
LEAVE_PROGRAMME = "Willst du das Programm verlassen? (y/n) "
|
[
"susanneTsauer@gmx.net"
] |
susanneTsauer@gmx.net
|
dce5b05d3471948b064364cc6d832b137033b937
|
94a4d676a7a5ba490e04552be8758a22af2abd2b
|
/smsf.py
|
e5d8934d145c96f36a25e72a24addd7f73dd61e0
|
[] |
no_license
|
stdwarf/asterisk
|
90f95499dfde749b2e507b63e1b5a20dc9cb738f
|
2fa525f979848f0daf598452d6fd7321af6a9691
|
refs/heads/master
| 2021-01-22T04:40:32.589865
| 2013-05-16T07:27:42
| 2013-05-16T07:27:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,667
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os,sys,re,sqlite,datetime
import smtplib
from smtplib import SMTP
from email.MIMEText import MIMEText
from email.Header import Header
from email.Utils import parseaddr, formataddr
q="'"
# file generated for keeping track of the SIM cards' ICCIDs
localpath='/var/lib/asterisk/scripts/iccid.txt'
smscmdre = re.compile(r'.{0,}p(?P<pin>\d{1,})#(?P<exten>\d{1,})#.{0,}')
inboxdir = '/var/spool/asterisk/smsincoming/'
# SMS database
conn = sqlite.connect('/var/log/asterisk/polygator-sms.db',autocommit=True)
c = conn.cursor()
# data for the outgoing emails
emailbody = """SMS FROM: %s\r\nSENT: %s\r\n\r\n%s """
sent_from = 'root@gsm.lc'
sent_to = 'user@domain.lc'
server="mail1.domain.lc"
# function that sends an email
def send_email(sender, recipient, subject, body):
header_charset = 'UTF-8'
for body_charset in 'US-ASCII', 'ISO-8859-1', 'UTF-8':
try:
body.encode(body_charset)
except UnicodeError:
pass
else:
break
sender_name, sender_addr = parseaddr(sender)
recipient_name, recipient_addr = parseaddr(recipient)
sender_name = str(Header(unicode(sender_name), header_charset))
recipient_name = str(Header(unicode(recipient_name), header_charset))
sender_addr = sender_addr.encode('ascii')
recipient_addr = recipient_addr.encode('ascii')
msg = MIMEText(body.encode(body_charset), 'plain', body_charset)
msg['From'] = formataddr((sender_name, sender_addr))
msg['To'] = formataddr((recipient_name, recipient_addr))
msg['Subject'] = Header(unicode(subject), header_charset)
smtp = smtplib.SMTP("msmail1.polo.lc")
smtp.sendmail(sender, recipient, msg.as_string())
smtp.close()
def escaper(string,symbols):
for symbol in symbols: string = string.replace(symbol,'')
return string
#with open(localpath) as inf:
# parse the file
fl = open(localpath,"r")
for line in fl.xreadlines():
words = (line.split(' '))
iccid = words[2]
chan = words[1]
f = float(iccid)
if f == 0:
continue
else:
tablename = iccid.rstrip() + "-inbox"
#print tablename
# open the database and read out the stored data
c.execute("select msgid,oaname,sent,partid,part,partof,content from %s" % (q + tablename + q) )
data = c.fetchall()
smshash = {}
for row in data:
if row[0] in smshash:
smshash[row[0]].append(list(row))
else:
smshash[row[0]] = [ list( row ) ]
smslist = []
for smskey in smshash.keys():
if smshash[smskey][0][5] == len(smshash[smskey]):
smsmp = sorted(smshash[smskey], key = lambda node: node[4])
smslist.append( smsmp )
for smsmp in smslist:
smsid = smsmp[0][0]
sender = smsmp[0][1]
sendtime = datetime.datetime.fromtimestamp(int(smsmp[0][2]))
smstext = ''
for x in smsmp:
smstext+=escaper(x[6].decode('utf-8'),'\r\n')
#print smstext
#print sender
#print sendtime
emailbody = """SMS FROM: %s\r\nSENT: %s\r\n\r\n%s """
try:
# send_email("%s",'user@domain.lc', "%s", "%s" % (sender, sendtime, smstext) )
send_email('%s<root@domain.lc>' % os.uname()[1], [' user@domain.lc ', ' user2@domain.lc '], iccid, emailbody % (sender, sendtime, smstext) )
#print 'Email sent'
except:
continue
try:
c.execute("delete from '%s' where msgid=%s;" % ( tablename, str(smsid) ) )
except:
continue
try:
os.unlink( os.path.join(inboxdir,sms) )
except:
continue
#print (' script DONE')
|
[
"stdwarf@gmail.com"
] |
stdwarf@gmail.com
|
a27cd524172294776c06edc2d24389936548aba7
|
652c587eb1f846901619d09dd2aab421c5939f06
|
/N6_While_Loop/049.py
|
8de63f81aafab58d6d7377f14ed0d132dbe48013
|
[] |
no_license
|
Lightman-EL/Python-by-Example---Nikola-Lacey
|
0bfd55acf0b4f46dac2608c2bdb657fcb0b74e35
|
374ea1d710933ac0fd315326c4963044792ac934
|
refs/heads/master
| 2022-11-19T19:46:53.902389
| 2020-07-08T19:07:28
| 2020-07-08T19:07:28
| 278,171,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
counts = 0
compnum = 50
while True:  # compnum never changes, so loop until the correct guess breaks out
number = int(input("Enter a number: \n"))
counts += 1
if number > 50:
print("Too high")
elif number < 50:
print("Too low")
elif number == compnum:
break
print("Well done, you took", counts, " attempts.")
|
[
"noreply@github.com"
] |
Lightman-EL.noreply@github.com
|
8a9b786a69a3422594bf5a9aa764cfdb148778f8
|
d2e9d86c6ea1bf7c9d79afcc90ea9585fd362650
|
/HashSetAndHashMaps/HashMap/HashMap.py
|
06935effab9aee1d779ad53a9195446f52fea739
|
[] |
no_license
|
Ailloviee/data_structures_and_algorithms_review
|
1993018f18673e800d94ada64c30264737c33c03
|
7d2f00d78c43d01393d567076e8ffdf5ab7c1297
|
refs/heads/main
| 2023-05-04T00:53:31.628449
| 2021-05-20T03:12:37
| 2021-05-20T03:12:37
| 362,901,923
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,884
|
py
|
class Bucket:
def __init__(self):
self.bucket = []
def get(self, key):
for (k, v) in self.bucket:
if k == key:
return v
return -1
def update(self, key, value):
found = False
for i, kv in enumerate(self.bucket):
if key == kv[0]:
self.bucket[i] = (key, value)
found = True
break
if not found:
self.bucket.append((key, value))
def remove(self, key):
for i, kv in enumerate(self.bucket):
if key == kv[0]:
del self.bucket[i]
class MyHashMap(object):
def __init__(self):
"""
Initialize your data structure here.
"""
# better to be a prime number, less collision
self.key_space = 2069
self.hash_table = [Bucket() for i in range(self.key_space)]
def put(self, key, value):
"""
value will always be non-negative.
:type key: int
:type value: int
:rtype: None
"""
hash_key = key % self.key_space
self.hash_table[hash_key].update(key, value)
def get(self, key):
"""
Returns the value to which the specified key is mapped, or -1 if this map contains no mapping for the key
:type key: int
:rtype: int
"""
hash_key = key % self.key_space
return self.hash_table[hash_key].get(key)
def remove(self, key):
"""
Removes the mapping of the specified value key if this map contains a mapping for the key
:type key: int
:rtype: None
"""
hash_key = key % self.key_space
self.hash_table[hash_key].remove(key)
# Your MyHashMap object will be instantiated and called as such:
# obj = MyHashMap()
# obj.put(key,value)
# param_2 = obj.get(key)
# obj.remove(key)
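# A hedged collision sketch (keys below are illustrative): with key_space = 2069,
# keys 1 and 2070 hash to the same bucket (2070 % 2069 == 1), so both pairs live in one
# Bucket and are told apart by the linear scan inside Bucket.get/update/remove.
# obj = MyHashMap()
# obj.put(1, 10)
# obj.put(2070, 20)   # collides with key 1, appended to the same bucket
# obj.get(1)          # -> 10
# obj.get(2070)       # -> 20
# obj.remove(1)
# obj.get(1)          # -> -1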
|
[
"j68zheng@uwaterloo.ca"
] |
j68zheng@uwaterloo.ca
|
c162da296af968a3490c034f866fcdeef0524e9a
|
05bc1cf1498af8b14a42cfe3f2403d063a083148
|
/python_lecture/control_flow.py
|
5658fcb726edf625ab29d65c580106da3320380f
|
[] |
no_license
|
kevinjaworski/python_notes
|
24f0159bf6a15d4f05d19f85a27ce2926257fb52
|
acc004a72debd710a96eb30d8e77ff0c937a95a6
|
refs/heads/main
| 2023-02-11T03:04:45.779894
| 2021-01-14T15:35:57
| 2021-01-14T15:35:57
| 329,657,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
# if block
x = 0
if x > 0 :
print("Hello")
elif x == 0 :
print("World")
else:
print('blank')
#ternary
is_on = 1
message = "I am on" if is_on == 1 else "I am stuck" if is_on == 3 else "I am off"
print(message)
a, b, c, d = False, True, True, True
print(c and d)
print((a and b and not a) or (not b) or (b and a) or (a and not a and not b))
|
[
"kmjaworski15@my.trine.edu"
] |
kmjaworski15@my.trine.edu
|
eefb5209d4d3b571ac8ea971a93683b56c8bc052
|
5a013bed0f815912aa85aac46250dd6043f65151
|
/Graph optimization problem.py
|
60dcb6c127c8bc92b137431895b507814b19eef2
|
[] |
no_license
|
Manijehkomeili/my-py-codes
|
2eee174556a9e2c18dfd8ddef2e5b5fd97527c7a
|
449ffbed7621d27a9c1c912ccee43fba24a9d096
|
refs/heads/master
| 2023-07-19T04:59:49.691786
| 2021-09-16T10:26:32
| 2021-09-16T10:26:32
| 404,399,114
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,525
|
py
|
"""
This is a sample graph optimization problem solved with DFS and BFS.
The result is shown in the test graph section at the bottom.
DFS: Depth-First Search
BFS: Breadth-First Search
@author: Manijeh Komeili
"""
class Node(object):
def __init__(self, name):
self.name = name
def getName(self):
return self.name
def __str__(self):
return self.name
class Edge(object):
def __init__(self, src, des):
self.src = src
self.des = des
def getSource(self):
return self.src
def getDestination(self):
return self.des
def __str__(self):
return self.src.getName() + '->' + self.des.getName()
class WeightedEdge(Edge):
def __init__(self, src, des, weight=1.0):
self.src = src
self.des = des
self.weight = weight
def getWeight(self):
return self.weight
def __str__(self):
return self.src.getName() + '-> (' + str(self.weight) + ')' + self.des.getName()
class Digraph(object):
def __init__(self):
self.nodes = []
self.edges = {}
def addNode(self, node):
if node in self.nodes:
            raise ValueError('This node already exists')
else:
self.nodes.append(node)
self.edges[node] = []
def addEdge(self, edge):
src = edge.getSource()
des = edge.getDestination()
if not (src in self.nodes and des in self.nodes):
            raise ValueError('These nodes do not exist in this graph')
self.edges[src].append(des)
def childrenOf(self, node):
return self.edges[node]
def hasNode(self, node):
return node in self.nodes
def __str__(self):
result = ''
for src in self.nodes:
for dest in self.edges[src]:
result = result + src.getName() + '->' + dest.getName() + '\n'
return result[:-1]
class Graph(Digraph):
def addEdge(self, edge):
Digraph.addEdge(self, edge)
reverse = Edge(edge.getDestination(), edge.getSource())
Digraph.addEdge(self, reverse)
def printPath(path):
result = ''
for i in range(len(path)):
result = result + str(path[i])
if i != len(path) - 1:
result = result + '->'
return result
def DFS(graph, start, end, path, shortest, printing=False):
path = path + [start]
if printing:
print('current Depth-First search (DFS):', printPath(path))
if start == end:
return path
for node in graph.childrenOf(start):
if node not in path:
if shortest == None or len(path) < len(shortest):
newpath = DFS(graph, node, end, path, shortest, printing)
if newpath != None:
shortest = newpath
return shortest
def shortestPath(graph, start, end, printing=False):
return DFS(graph, start, end, [], None, printing)
def BFS(graph, start, end, printing=False):
initPath = [start]
exploredPath = [initPath]
if printing:
        print('current Breadth-First search (BFS):', printPath(initPath))
while len(exploredPath) != 0:
tmpPath = exploredPath.pop(0)
        print('current Breadth-First search (BFS):', printPath(tmpPath))
lastNode = tmpPath[-1]
if lastNode == end:
return tmpPath
for nextNode in graph.childrenOf(lastNode):
if nextNode not in tmpPath:
newPath = tmpPath + [nextNode]
exploredPath.append(newPath)
return None
def testDFS_BFS():
nodes = []
for name in range(7):
nodes.append(Node(str(name)))
g = Digraph()
for n in nodes:
g.addNode(n)
g.addEdge(Edge(nodes[0], nodes[1]))
g.addEdge(Edge(nodes[1], nodes[2]))
g.addEdge(Edge(nodes[2], nodes[3]))
g.addEdge(Edge(nodes[2], nodes[4]))
g.addEdge(Edge(nodes[3], nodes[4]))
g.addEdge(Edge(nodes[3], nodes[5]))
g.addEdge(Edge(nodes[3], nodes[6]))
g.addEdge(Edge(nodes[4], nodes[6]))
g.addEdge(Edge(nodes[1], nodes[0]))
g.addEdge(Edge(nodes[3], nodes[1]))
g.addEdge(Edge(nodes[0], nodes[2]))
g.addEdge(Edge(nodes[3], nodes[6]))
g.addEdge(Edge(nodes[4], nodes[1]))
spath_DFS = shortestPath(g, nodes[0], nodes[6], printing=True)
print('The shortest path between start and end with DFS method: ', printPath(spath_DFS))
spath_BFS = BFS(g, nodes[0], nodes[6])
print('The shortest path between start and end with BFS method: ', printPath(spath_BFS))
testDFS_BFS()
|
[
"89593933+Manijehkomeili@users.noreply.github.com"
] |
89593933+Manijehkomeili@users.noreply.github.com
|
d5fc2e9c95367713fde53a9b10a7e522573cc1da
|
4fe1dc7170d2d44e2c9988c71b08f66d469ee4b8
|
/Appendices/E/ejE5.py
|
77ce7a4cada9e676303b27e369f41adfd4fb3073
|
[] |
no_license
|
ftorresi/PythonLearning
|
53c0689a6f3e7e219a6314a673a318b25cda82d1
|
f2aeb5f81d9090a5a5aa69a8d1203688e9f01adf
|
refs/heads/master
| 2023-01-12T00:40:05.806774
| 2020-11-13T14:33:08
| 2020-11-13T14:33:08
| 267,460,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,930
|
py
|
"""Now we solve the ODE problem u - 10u' = 0 u(0)= 0.2 in [0,20] using HEUN's method"""
import numpy as np
import matplotlib.pyplot as plt
#Exact solution
def exact_u(t):
return 0.2*np.exp(0.1*t)
#u'=f(u,t) as a class
class f:
def __init__(self):
pass
def __call__(self,u,t):
return 0.1*u
#Heun's Method as a class
class Heun:
def __init__(self, f, U0, T, n):
if not callable(f):
raise TypeError('f is %s, not a function' % type(f))
self.f, self.U0, self.T, self.n = f, U0, T, n
self.dt = T/float(n)
self.u = np.zeros(n+1)
self.t = np.linspace(0,T,n+1)
def solve(self):
"""Compute solution for 0 <= t <= T."""
self.u[0] = float(self.U0)
for k in range(self.n):
self.k = k
self.u[k+1] = self.advance()
return self.u, self.t
def advance(self):
"""Advance the solution one time step."""
u, dt, f, k, t = self.u, self.dt, self.f, self.k, self.t
f_eval=f(u[k], t[k])
u_mid= u[k] + dt*f_eval
u_new = u[k] + 0.5*dt*(f_eval+f(u_mid, t[k+1]))
return u_new
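# A brief note on the update implemented in Heun.advance (no new behaviour, just a sketch):
#   predictor: u_star  = u_k + dt * f(u_k, t_k)
#   corrector: u_{k+1} = u_k + dt/2 * ( f(u_k, t_k) + f(u_star, t_{k+1}) )
# This makes the scheme second-order accurate, in contrast to a first-order forward Euler step.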
#Parameters
T=20
U0=0.2
#Plot exact solution
tgrid=np.linspace(0,T,2001)
uexact=exact_u(tgrid)
plt.plot(tgrid, uexact, "r-", label="Exact Solution")
#Numerical calculations and plots
nlist=[4,40,400]
f_init=f()
for n in nlist:
solver=Heun(f=f_init, U0=U0, T=T, n=n)
sol, t = solver.solve()
plt.plot(t, sol, "--", label="dt=%g"%(t[1]-t[0]))
plt.legend()
plt.title("u-10u'=0, u(0)=0.2 with Heun's method")
plt.xlabel("t")
plt.ylabel("u(t)")
plt.savefig("ejE5.png")
#Save to file (only last solution)
with open("ejE5.out","w") as outfile:
outfile.write("Numerical Solution to u-10u'=0, u(0)=0.2 with Heun's method\n")
outfile.write(" t u(t)\n")
for i in range(len(t)):
outfile.write("%5.2f %7.4f\n"%(t[i], sol[i]))
|
[
"noreply@github.com"
] |
ftorresi.noreply@github.com
|
da0893ee12c8121cd392ce0dc9e6689b63cfe317
|
bdb3ab1d81958651d568859336f1a9eba74e279b
|
/load_data_for_nn.py
|
181a0fdb426c1ef4cdaf7df3dc6c30b9e93c2c87
|
[] |
no_license
|
mishabuch/Assignment2
|
3f7e153b5aecc38e8b533c61cef9719263dfa924
|
aeb0f8ee35a5ddcd4ef9018fa07135c4c5cbadea
|
refs/heads/main
| 2023-03-24T01:51:32.196899
| 2021-03-14T17:22:13
| 2021-03-14T17:22:13
| 347,669,619
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,698
|
py
|
# This is a sample Python script.
# Press ⌃R to execute it or replace it with your code.
# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.
from abc import ABC
from scipy.io import arff
import numpy as np
from sklearn import preprocessing
import sklearn.model_selection as model_selection
import pandas as pd
from sklearn.preprocessing import StandardScaler
class DataSetLoaderBaseClass(ABC):
def __init__(self, file_path, randomness):
self.test_x = None
self.test_y = None
self.train_x = None
self.train_y = None
self.validation_x = None
self.validation_y = None
self.x = None
self.y = None
self.feature_names = None
self.target_labels = None
self.data = None
self.dataset_name = None
self.randomness = randomness
self.file_path = file_path
class DiabetesDataSet(DataSetLoaderBaseClass):
def __init__(self, split_value=0.2, file_path='datasets/diabetes/messidor_features.arff', randomness=1):
super().__init__(file_path, randomness)
dataset = arff.loadarff(file_path)
df = pd.DataFrame(dataset[0])
attributes = pd.DataFrame(dataset[1])
self.data = df
self.dataset_name = 'Diabetes Data Set'
self.file_path = file_path
self.randomness = randomness
self.target_labels = attributes[-1:][0].to_list().pop()
self.feature_names = attributes[0:-1][0].to_list()
# assign x and y
self.x = np.array(self.data.iloc[:, 0:-1])
self.y = np.array(self.data.iloc[:, -1])
# Standardizing data
scaler = StandardScaler()
self.x = scaler.fit_transform(self.x)
self.y = self.y.astype('int')
self.train_x, self.test_x, self.train_y, self.test_y = model_selection.train_test_split(
self.x, self.y, test_size=split_value, random_state=self.randomness, stratify=self.y
)
self.train_x, self.validation_x, self.train_y, self.validation_y = model_selection.train_test_split(
            self.train_x, self.train_y, test_size=split_value, random_state=self.randomness, stratify=self.train_y
)
        tst = pd.DataFrame(np.hstack((self.test_x, self.test_y.reshape(-1, 1))))
        trg = pd.DataFrame(np.hstack((self.train_x, self.train_y.reshape(-1, 1))))
        val = pd.DataFrame(np.hstack((self.validation_x, self.validation_y.reshape(-1, 1))))
tst.to_csv('test.csv',index=False,header=False)
trg.to_csv('train.csv',index=False,header=False)
val.to_csv('validation.csv',index=False,header=False)
def load_datasets():
datasetDiabetes = DiabetesDataSet()
return datasetDiabetes
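# Minimal usage sketch (assumes the default messidor_features.arff path exists and that
# pandas/scikit-learn are installed; the attributes are the ones defined above):
# ds = load_datasets()
# print(ds.train_x.shape, ds.validation_x.shape, ds.test_x.shape)  # standardized feature splits
# print(ds.train_y[:5])                                            # integer class labels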
|
[
"noreply@github.com"
] |
mishabuch.noreply@github.com
|
1aa763525c70cc0133f8bd2527f150fc9b95b289
|
bf17a8783aed198468b6feb505e611a9fc336f72
|
/scriptlets/Wrapper.py
|
826bb31227e99e800e5d85885d0988539e7da44d
|
[] |
no_license
|
Mew1626/QOscript
|
6c2db2aca370502dbe474ca1a09a59a43cfb3147
|
fb52672eb744901ecb25d8b66788630980d8ce61
|
refs/heads/master
| 2020-07-14T20:25:57.438538
| 2019-09-13T17:47:48
| 2019-09-13T17:47:48
| 205,394,471
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,368
|
py
|
import os
import datetime
from tkinter import *
from tkinter import filedialog
from tkinter import ttk
root = Tk()
reports = StringVar()
under10min = StringVar()
monthselection = 0
warantycsv = ""
purchasecsv = ""
evalcsv = ""
loncsv = ""
def wrap():
path = os.path.dirname(os.path.realpath(__file__))
os.chdir(path)
os.chdir("../")
excelpath = os.getcwd()
os.system('C:/Anaconda3/python.exe "' + os.path.join(path, "QO1.py") + '" "' + os.path.join(excelpath, "Quality_Objective_1_2019.xlsx") + '" ' + reports.get() + " " + under10min.get() + " " + str(month.current()+2))
os.system('C:/Anaconda3/python.exe "' + os.path.join(path, "QO2.py") + '" "' + os.path.join(excelpath, "Quality_Objective_2_2019.xlsx") + '" "' + purchasecsv + '" "' + warantycsv+ '"')
os.system('C:/Anaconda3/python.exe "' + os.path.join(path, "QO3.py") + '" "' + os.path.join(excelpath, "Quality_Objective_3_2019.xlsx") + '" "' + evalcsv + '"')
os.system('C:/Anaconda3/python.exe "' + os.path.join(path, "QO4.py") + '" "' + os.path.join(excelpath, "Quality_Objective_4_2019.xlsx") + '" "' + loncsv + '"')
def browse(iden):
global warantycsv, purchasecsv, evalcsv, loncsv
root.filename = filedialog.askopenfilename(initialdir = "C:\\Users\\iScree Laptop\\Downloads",title = "Select file",filetypes = (("csv file","*.csv"),("all files","*.*")))
if iden == 0:
purchasecsv = root.filename
pur.set(root.filename)
elif iden == 1:
warantycsv = root.filename
war.set(root.filename)
elif iden == 2:
evalcsv = root.filename
eva.set(root.filename)
elif iden == 3:
loncsv = root.filename
lon.set(root.filename)
def gui():
root.title("Quality Objectives")
mainframe = ttk.Frame(root, padding="3 3 12 12")
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
reports_entry = ttk.Entry(mainframe, width=7, textvariable=reports)
under10min_entry = ttk.Entry(mainframe, width=7, textvariable=under10min)
reports_entry.grid(column=1, row= 2, sticky= (W, E))
under10min_entry.grid(column=2, row= 2, sticky= (W, E))
ttk.Label(mainframe, text="Reports").grid(column=1, row= 1, sticky= (W, E))
ttk.Label(mainframe, text="Under 10 Min").grid(column=2, row= 1, sticky= (W, E))
months = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
ttk.Label(mainframe, text="Month:").grid(column=1, row= 3, sticky= (W, E))
monthvar = StringVar()
global month
month = ttk.Combobox(mainframe, textvariable=monthvar)
month['values'] = months
month.grid(column=2, row= 3, sticky= (W, E))
month.current(datetime.date.today().month - 2)
ttk.Button(mainframe, text="Execute", command=wrap).grid(column=3, row= 2, sticky= (W, E))
global pur, war, eva, lon
pur = StringVar()
war = StringVar()
eva = StringVar()
lon = StringVar()
ttk.Label(mainframe, text="Purchase CSV").grid(column=1, row= 4, sticky= (W, E))
ttk.Entry(mainframe, textvariable=pur).grid(column=2, row=4, sticky=(W, E))
ttk.Button(mainframe, text="Browse", command=lambda:browse(0)).grid(column=3, row= 4, sticky= (W, E))
ttk.Label(mainframe, text="Waranty CSV").grid(column=1, row= 5, sticky= (W, E))
ttk.Entry(mainframe, textvariable=war).grid(column=2, row=5, sticky=(W, E))
ttk.Button(mainframe, text="Browse", command=lambda:browse(1)).grid(column=3, row= 5, sticky= (W, E))
ttk.Label(mainframe, text="Eval CSV").grid(column=1, row= 6, sticky= (W, E))
ttk.Entry(mainframe, textvariable=eva).grid(column=2, row=6, sticky=(W, E))
ttk.Button(mainframe, text="Browse", command=lambda:browse(2)).grid(column=3, row= 6, sticky= (W, E))
ttk.Label(mainframe, text="Lon CSV").grid(column=1, row= 7, sticky= (W, E))
ttk.Entry(mainframe, textvariable=lon).grid(column=2, row=7, sticky=(W, E))
ttk.Button(mainframe, text="Browse", command=lambda:browse(3)).grid(column=3, row= 7, sticky= (W, E))
for child in mainframe.winfo_children():
child.grid_configure(padx=5, pady=5)
root.mainloop()
def main():
gui()
main()
|
[
"noreply@github.com"
] |
Mew1626.noreply@github.com
|
6ac8aceb9e1f7b8a43ff8b98d98019bdd41c55d5
|
4c73bf392aa28319e87d5cbb425d13b85ae6fda3
|
/exercise/quiz/celeb/solution/main.py
|
f02d626e6018051980126f9aedfb8038acb60a26
|
[] |
no_license
|
potpath/algo-class
|
3b9f08e39867e4857a5f5ca12536b85b58cd7f52
|
93638d99f48bce991aa0abbe569947032cc597d0
|
refs/heads/master
| 2021-01-24T14:22:44.676921
| 2014-10-06T11:27:48
| 2014-10-06T11:27:48
| 24,842,788
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
N = int(raw_input())
K = [[int(i) for i in raw_input().split()] for i in range(N)]
R = [sum(K[i]) for i in range(N)] #sum each row
C = [sum([K[i][j] for i in range(N)]) for j in range(N)] #sum each column
ans = [i+1 for i in range(N) if R[i]==0 and C[i]==N-1]
print ans[0] if len(ans)>0 else 0
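# Why this works (explanatory comments only, the logic above is unchanged):
# K is the N x N "knows" matrix. A celebrity knows nobody, so their row sum R[i] is 0,
# and is known by all other guests, so their column sum C[i] is N-1. The comprehension
# collects every 1-based index satisfying both conditions, and 0 is printed when none exists.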
|
[
"GroupDoll"
] |
GroupDoll
|
a5ca0dba0718aa09e17d7542056fc9af17a7eb38
|
49c2492d91789b3c2def7d654a7396e8c6ce6d9f
|
/ROS/catkin_ws/build/dyros_simulator/dataspeed_can_tools/catkin_generated/pkg.installspace.context.pc.py
|
aaf615e0d8cdbcc7a35dbfeacc60e39121b30380
|
[] |
no_license
|
DavidHan008/lockdpwn
|
edd571165f9188e0ee93da7222c0155abb427927
|
5078a1b08916b84c5c3723fc61a1964d7fb9ae20
|
refs/heads/master
| 2021-01-23T14:10:53.209406
| 2017-09-02T18:02:50
| 2017-09-02T18:02:50
| 102,670,531
| 0
| 2
| null | 2017-09-07T00:11:33
| 2017-09-07T00:11:33
| null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-ldataspeed_can_tools".split(';') if "-ldataspeed_can_tools" != "" else []
PROJECT_NAME = "dataspeed_can_tools"
PROJECT_SPACE_DIR = "/home/dyros-vehicle/gitrepo/lockdpwn/ROS/catkin_ws/install"
PROJECT_VERSION = "1.0.4"
|
[
"gyurse@gmail.com"
] |
gyurse@gmail.com
|
561487e1803023123990db3ae20bc96d55ac9143
|
6c054175f35bb975c2f6bc4bce00243541b9a75a
|
/wk5/ch9_6Exercise.py
|
4b419a12b0e1302325ae870e6ee3de6a4e91f38c
|
[] |
no_license
|
kayfay/python_informatics_course
|
629c12c951312ae8f34d315e6aed0f6c22bebc02
|
86d7fccb006a46f0d9189ab5867560c788fddbd6
|
refs/heads/master
| 2020-03-27T00:34:48.608787
| 2018-08-22T00:57:58
| 2018-08-22T00:57:58
| 145,635,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,020
|
py
|
#File: ch9_6Exercise.py
#Programmer: Allen Tools
#Date: 02/09/2017
#Course ID: LIS 4930 Python for Informatics
#Purpose: Write a function called is_abecedarian that returns True
#         if a word's letters are in alphabetical order; count how many such words there are.
# reads a word from the wordlist
wordlist = open('/home/atools/Downloads/pythonLIS/sourcefiles/wk5/words.txt')
totalAbecedarians = 0 # here's our counter
def is_abecedarian(word):
"""
a function to compare last letter to current letter in a loop
"""
previousLetter = word[0] # sets the starting letter
for currentLetter in word:
if currentLetter < previousLetter: # tests iteration letter
return False
previousLetter = currentLetter # updates the starting letter
return True
for word in wordlist:
word = word.strip()
if is_abecedarian(word):
totalAbecedarians += 1
print("The total number of Abecedarians in the wordlist is", totalAbecedarians)
|
[
"allenchristophertools@gmail.com"
] |
allenchristophertools@gmail.com
|
8b794d9621af4cabd7f72b4a547be012ca0d18ff
|
f8afc5020d9757437f17574e16e15501016fb60a
|
/sawtooth-core/families/settings/sawtooth_settings/protobuf/client_transaction_pb2.py
|
199ee2ff6ecbba49d372fbc7ce1cbcd156d2a63d
|
[
"Zlib",
"MIT",
"Apache-2.0"
] |
permissive
|
prog-nov/hyperledger-sawtooth
|
cdbf168c2b893905d95d99fcead4122bbf6b2307
|
b2d8e1f37293a3ede2e43f18248d643023e4f2de
|
refs/heads/master
| 2023-03-17T02:51:05.317383
| 2018-11-07T18:08:29
| 2018-11-07T18:08:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 13,916
|
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: sawtooth_settings/protobuf/client_transaction.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from sawtooth_settings.protobuf import transaction_pb2 as sawtooth__settings_dot_protobuf_dot_transaction__pb2
from sawtooth_settings.protobuf import client_list_control_pb2 as sawtooth__settings_dot_protobuf_dot_client__list__control__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='sawtooth_settings/protobuf/client_transaction.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n3sawtooth_settings/protobuf/client_transaction.proto\x1a,sawtooth_settings/protobuf/transaction.proto\x1a\x34sawtooth_settings/protobuf/client_list_control.proto\"\x95\x01\n\x1c\x43lientTransactionListRequest\x12\x0f\n\x07head_id\x18\x01 \x01(\t\x12\x17\n\x0ftransaction_ids\x18\x02 \x03(\t\x12%\n\x06paging\x18\x03 \x01(\x0b\x32\x15.ClientPagingControls\x12$\n\x07sorting\x18\x04 \x03(\x0b\x32\x13.ClientSortControls\"\xce\x02\n\x1d\x43lientTransactionListResponse\x12\x35\n\x06status\x18\x01 \x01(\x0e\x32%.ClientTransactionListResponse.Status\x12\"\n\x0ctransactions\x18\x02 \x03(\x0b\x32\x0c.Transaction\x12\x0f\n\x07head_id\x18\x03 \x01(\t\x12%\n\x06paging\x18\x04 \x01(\x0b\x32\x15.ClientPagingResponse\"\x99\x01\n\x06Status\x12\x10\n\x0cSTATUS_UNSET\x10\x00\x12\x06\n\x02OK\x10\x01\x12\x12\n\x0eINTERNAL_ERROR\x10\x02\x12\r\n\tNOT_READY\x10\x03\x12\x0b\n\x07NO_ROOT\x10\x04\x12\x0f\n\x0bNO_RESOURCE\x10\x05\x12\x12\n\x0eINVALID_PAGING\x10\x06\x12\x10\n\x0cINVALID_SORT\x10\x07\x12\x0e\n\nINVALID_ID\x10\x08\"5\n\x1b\x43lientTransactionGetRequest\x12\x16\n\x0etransaction_id\x18\x01 \x01(\t\"\xd0\x01\n\x1c\x43lientTransactionGetResponse\x12\x34\n\x06status\x18\x01 \x01(\x0e\x32$.ClientTransactionGetResponse.Status\x12!\n\x0btransaction\x18\x02 \x01(\x0b\x32\x0c.Transaction\"W\n\x06Status\x12\x10\n\x0cSTATUS_UNSET\x10\x00\x12\x06\n\x02OK\x10\x01\x12\x12\n\x0eINTERNAL_ERROR\x10\x02\x12\x0f\n\x0bNO_RESOURCE\x10\x05\x12\x0e\n\nINVALID_ID\x10\x08\x42\x31\n\x15sawtooth.sdk.protobufP\x01Z\x16\x63lient_transaction_pb2b\x06proto3')
,
dependencies=[sawtooth__settings_dot_protobuf_dot_transaction__pb2.DESCRIPTOR,sawtooth__settings_dot_protobuf_dot_client__list__control__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_CLIENTTRANSACTIONLISTRESPONSE_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='ClientTransactionListResponse.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STATUS_UNSET', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OK', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL_ERROR', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_READY', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NO_ROOT', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NO_RESOURCE', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_PAGING', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_SORT', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_ID', index=8, number=8,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=489,
serialized_end=642,
)
_sym_db.RegisterEnumDescriptor(_CLIENTTRANSACTIONLISTRESPONSE_STATUS)
_CLIENTTRANSACTIONGETRESPONSE_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='ClientTransactionGetResponse.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STATUS_UNSET', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OK', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL_ERROR', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NO_RESOURCE', index=3, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_ID', index=4, number=8,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=821,
serialized_end=908,
)
_sym_db.RegisterEnumDescriptor(_CLIENTTRANSACTIONGETRESPONSE_STATUS)
_CLIENTTRANSACTIONLISTREQUEST = _descriptor.Descriptor(
name='ClientTransactionListRequest',
full_name='ClientTransactionListRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='head_id', full_name='ClientTransactionListRequest.head_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transaction_ids', full_name='ClientTransactionListRequest.transaction_ids', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='paging', full_name='ClientTransactionListRequest.paging', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sorting', full_name='ClientTransactionListRequest.sorting', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=156,
serialized_end=305,
)
_CLIENTTRANSACTIONLISTRESPONSE = _descriptor.Descriptor(
name='ClientTransactionListResponse',
full_name='ClientTransactionListResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='ClientTransactionListResponse.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transactions', full_name='ClientTransactionListResponse.transactions', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='head_id', full_name='ClientTransactionListResponse.head_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='paging', full_name='ClientTransactionListResponse.paging', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_CLIENTTRANSACTIONLISTRESPONSE_STATUS,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=308,
serialized_end=642,
)
_CLIENTTRANSACTIONGETREQUEST = _descriptor.Descriptor(
name='ClientTransactionGetRequest',
full_name='ClientTransactionGetRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transaction_id', full_name='ClientTransactionGetRequest.transaction_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=644,
serialized_end=697,
)
_CLIENTTRANSACTIONGETRESPONSE = _descriptor.Descriptor(
name='ClientTransactionGetResponse',
full_name='ClientTransactionGetResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='ClientTransactionGetResponse.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transaction', full_name='ClientTransactionGetResponse.transaction', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_CLIENTTRANSACTIONGETRESPONSE_STATUS,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=700,
serialized_end=908,
)
_CLIENTTRANSACTIONLISTREQUEST.fields_by_name['paging'].message_type = sawtooth__settings_dot_protobuf_dot_client__list__control__pb2._CLIENTPAGINGCONTROLS
_CLIENTTRANSACTIONLISTREQUEST.fields_by_name['sorting'].message_type = sawtooth__settings_dot_protobuf_dot_client__list__control__pb2._CLIENTSORTCONTROLS
_CLIENTTRANSACTIONLISTRESPONSE.fields_by_name['status'].enum_type = _CLIENTTRANSACTIONLISTRESPONSE_STATUS
_CLIENTTRANSACTIONLISTRESPONSE.fields_by_name['transactions'].message_type = sawtooth__settings_dot_protobuf_dot_transaction__pb2._TRANSACTION
_CLIENTTRANSACTIONLISTRESPONSE.fields_by_name['paging'].message_type = sawtooth__settings_dot_protobuf_dot_client__list__control__pb2._CLIENTPAGINGRESPONSE
_CLIENTTRANSACTIONLISTRESPONSE_STATUS.containing_type = _CLIENTTRANSACTIONLISTRESPONSE
_CLIENTTRANSACTIONGETRESPONSE.fields_by_name['status'].enum_type = _CLIENTTRANSACTIONGETRESPONSE_STATUS
_CLIENTTRANSACTIONGETRESPONSE.fields_by_name['transaction'].message_type = sawtooth__settings_dot_protobuf_dot_transaction__pb2._TRANSACTION
_CLIENTTRANSACTIONGETRESPONSE_STATUS.containing_type = _CLIENTTRANSACTIONGETRESPONSE
DESCRIPTOR.message_types_by_name['ClientTransactionListRequest'] = _CLIENTTRANSACTIONLISTREQUEST
DESCRIPTOR.message_types_by_name['ClientTransactionListResponse'] = _CLIENTTRANSACTIONLISTRESPONSE
DESCRIPTOR.message_types_by_name['ClientTransactionGetRequest'] = _CLIENTTRANSACTIONGETREQUEST
DESCRIPTOR.message_types_by_name['ClientTransactionGetResponse'] = _CLIENTTRANSACTIONGETRESPONSE
ClientTransactionListRequest = _reflection.GeneratedProtocolMessageType('ClientTransactionListRequest', (_message.Message,), dict(
DESCRIPTOR = _CLIENTTRANSACTIONLISTREQUEST,
__module__ = 'sawtooth_settings.protobuf.client_transaction_pb2'
# @@protoc_insertion_point(class_scope:ClientTransactionListRequest)
))
_sym_db.RegisterMessage(ClientTransactionListRequest)
ClientTransactionListResponse = _reflection.GeneratedProtocolMessageType('ClientTransactionListResponse', (_message.Message,), dict(
DESCRIPTOR = _CLIENTTRANSACTIONLISTRESPONSE,
__module__ = 'sawtooth_settings.protobuf.client_transaction_pb2'
# @@protoc_insertion_point(class_scope:ClientTransactionListResponse)
))
_sym_db.RegisterMessage(ClientTransactionListResponse)
ClientTransactionGetRequest = _reflection.GeneratedProtocolMessageType('ClientTransactionGetRequest', (_message.Message,), dict(
DESCRIPTOR = _CLIENTTRANSACTIONGETREQUEST,
__module__ = 'sawtooth_settings.protobuf.client_transaction_pb2'
# @@protoc_insertion_point(class_scope:ClientTransactionGetRequest)
))
_sym_db.RegisterMessage(ClientTransactionGetRequest)
ClientTransactionGetResponse = _reflection.GeneratedProtocolMessageType('ClientTransactionGetResponse', (_message.Message,), dict(
DESCRIPTOR = _CLIENTTRANSACTIONGETRESPONSE,
__module__ = 'sawtooth_settings.protobuf.client_transaction_pb2'
# @@protoc_insertion_point(class_scope:ClientTransactionGetResponse)
))
_sym_db.RegisterMessage(ClientTransactionGetResponse)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\025sawtooth.sdk.protobufP\001Z\026client_transaction_pb2'))
# @@protoc_insertion_point(module_scope)
|
[
"tupt@qsoftvietnam.com"
] |
tupt@qsoftvietnam.com
|
4fc3011a76a8c9902eca9bf38d2151c3a856bb67
|
e5a12238b92363468c2260082e784c317d864134
|
/RegularExpression.py
|
34931a08db360980cbec01200fc779186a9c10df
|
[] |
no_license
|
KrupaPatel02/MetroNet
|
751d3e48f71a22dda627408e1f21def317b01644
|
53d7c7ecd6412d885367fe26a6dd2d32fe585497
|
refs/heads/master
| 2020-07-02T22:27:05.281739
| 2019-08-10T22:09:08
| 2019-08-10T22:09:08
| 201,687,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 750
|
py
|
import re
from datetime import datetime
regex = r"20\d{2}(-|\/)((0[1-9])|(1[0-2]))(-|\/)((0[1-9])|([1-2][0-9])|(3[0-1]))(T|\s)(([0-1][0-9])|(2[0-3])):([0-5][0-9]):([0-5][0-9])"
regex1 = r"([12]\d{3}-(0[1-9]|1[0-2])-(0[1-9]|[12]\d|3[01]))"
def validateTimeStamp(timestamp):
print("Started validating")
if re.search(regex, timestamp) or re.search(regex1, timestamp):
print("Valid")
else:
print("Not valid")
datetime1 = "2017-02-14 19:30:52"
validateTimeStamp(datetime1)
datetime2 = "2017-02-14"
validateTimeStamp(datetime2)
# current date and time
# now = datetime.now()
# timestamp = datetime.timestamp(now)
#
# date_obj = datetime.fromtimestamp(timestamp)
# validateTimeStamp(date_obj)
# validateTimeStamp(timestamp)
|
[
"krupahp90@gmail.com"
] |
krupahp90@gmail.com
|
25e142e4f0055291c25003b1ada95a8fb4e3b38f
|
9f73613f8317e05d484ce6b6fde5c96a37a09b85
|
/apps/bus/translation.py
|
296414d127f227e3c8cba0cd68b060b3b26c5e88
|
[] |
no_license
|
alladot/amurauto
|
d88e74e9200dd1edf642dcda796c7e154f6405bc
|
cf46a64392e1ea33628f462a3ac80bd511361dd7
|
refs/heads/master
| 2020-04-06T13:50:26.237441
| 2019-10-08T03:27:29
| 2019-10-08T03:27:29
| 157,516,546
| 0
| 0
| null | 2019-10-08T03:27:30
| 2018-11-14T08:33:44
|
Python
|
UTF-8
|
Python
| false
| false
| 370
|
py
|
# -*- coding: utf-8 -*-
from modeltranslation.translator import register, TranslationOptions
from bus.models import Bus
@register(Bus)
class BusTranslationOptions(TranslationOptions):
"""
    Internationalization settings for the fields of the Bus model
"""
fields = (
'title',
'content',
)
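# With this registration, django-modeltranslation adds per-language variants of the fields
# above (e.g. title_en / title_ru, assuming those languages appear in settings.LANGUAGES);
# the exact suffixes depend on the project's language settings, so treat this as a sketch.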
|
[
"alla.dotsenko@fogstream.ru"
] |
alla.dotsenko@fogstream.ru
|
7cdeb29429d3ea1c04319e3e4093cb11fc57445e
|
b0def47943423226b42fb86533c97ed864368622
|
/src/part2.py
|
775817c8de86cddb83f6acdc6485f6a0e15932a1
|
[] |
no_license
|
causztic/hmm
|
072b9d447ba15f436b1b0746b31530c1414ffce7
|
894724f20a72f34fb265c5f48f12f3b4ab2b9c4f
|
refs/heads/master
| 2020-04-07T22:30:00.716421
| 2018-12-09T08:58:42
| 2018-12-09T08:58:42
| 158,773,322
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,082
|
py
|
import numpy as np
from itertools import groupby
UNKNOWN_TOKEN = "#UNK#"
def prepare_data(file):
"""
Prepare the file, and returns a list of lists of "{observation} {label}"
file : the name of the file to read
"""
lines = [line for line in file]
chunks = (list(g) for k, g in groupby(lines, key=lambda x: x != '\n') if k)
return [[observation.rstrip('\n') for observation in chunk] for chunk in chunks]
def get_observation_set(sequence, add_unknown_token=False):
"""
Reads the sequence and returns a set() of observations.
sequence : a list of lists of either "{observation} {label}" or "{observation}"
"""
observation_set = set()
if add_unknown_token:
observation_set.add(UNKNOWN_TOKEN)
# flatten the lists
    sequence = (item for sublist in sequence for item in sublist)
for item in sequence:
observation = item.rsplit(" ", 1)[0]
        observation_set.add(observation)
return observation_set
def get_label_set(training_sequence):
"""
Reads the sequence and returns a set() of labels.
training_sequence : a list of lists of "{observation} {label}"
"""
label_set = set()
# flatten the lists
training_sequence = (item for sublist in training_sequence for item in sublist)
for item in training_sequence:
label = item.rsplit(" ", 1)[1]
label_set.add(label)
return label_set
def estimate_emissions(sequence):
"""
Estimates the emission parameters from the given training sequence, without smoothing.
sequence : a list of lists of "{observation} {label}"
"""
label_counts = {} # count of every unique label
emission_counts = {} # count of label -> observation
results = {} # MLE results
observations = set() # track the observations in the training set
# flatten the list
sequence = (item for sublist in sequence for item in sublist)
for item in sequence:
observation, label = item.rsplit(" ", 1)
observations.add(observation)
if item in emission_counts:
emission_counts[item] += 1
else:
emission_counts[item] = 1
if label in label_counts:
label_counts[label] += 1
else:
label_counts[label] = 1
# here we estimate the emission parameters using MLE after obtaining the counts.
# count(label -> observation) / count(label)
for key, emission_count in emission_counts.items():
observation, label = key.rsplit(" ", 1)
results[f"{label} -> {observation}"] = emission_count / float(label_counts[label])
return results, list(observations), label_counts, emission_counts
def smooth_emissions(sequence, observations, label_counts, emission_counts):
"""
Estimates the emission parameters from the sequence (all the sentences) from the testing set,
and a given set of observations from the training set.
sequence : a list of lists of "{observation} {label}" from the testing set
observations : observations from the training set
label_counts : { label: count } from the training set
emission_counts: { "{observation} {label}": count } from the training set
"""
labels = list(label_counts)
B = np.zeros((len(label_counts), len(observations) + 1)) # + 1 to accomodate for UNKNOWN_TOKEN.
k = 1 # set k to 1 according to question
# flatten the list
sequence = (item for sublist in sequence for item in sublist)
for item in sequence:
if item not in observations:
# new observation, add to count of unknowns
k += 1
# If the observation appears in the training set i.e. it appeared in emission_counts.
for key, emission_count in emission_counts.items():
observation, label = key.rsplit(" ", 1)
probability = float(emission_count) / (label_counts[label] + k)
B[labels.index(label), observations.index(observation)] = probability
# If observation is #UNK#. i.e. for every label, we just add in a new condition #UNK#|y.
# This would be 0 if there are no #UNK#.
for label, label_count in label_counts.items():
B[labels.index(label), -1] = float(k) / (label_count + k)
return B
def predict_labels(locale, B, observations, label_counts):
"""
Get most probable label -> observation, and write to file.
locale : locale of the dataset. should be either SG, EN, CN, or FR
B : K by K matrix of emission probabilities.
observations : a list of observations in the training data
label_counts : { label -> count }
"""
labels = list(label_counts)
training_set = [line.rstrip("\n")
for line in open(f"./../data/{locale}/dev.in")]
file = open(f"./../data/{locale}/dev.p2.out", "w")
for line in training_set:
if not line.strip():
file.write("\n")
else:
# TODO: bottleneck here due to argmax.
if line in observations:
# if the observation is in our observations, we take the most probable label.
label_value = labels[np.argmax(B[:,observations.index(line)])]
else:
# take the unknown's value.
label_value = labels[np.argmax(B[:,-1])]
file.write(f"{line} {label_value}\n")
file.close()
if __name__ == "__main__":
for locale in ["EN", "FR", "CN", "SG"]:
DATA = open(f"./../data/{locale}/train")
training_set = prepare_data(DATA)
_results, observations, label_counts, emission_counts = estimate_emissions(
training_set)
TEST_DATA = open(f"./../data/{locale}/dev.in")
testing_set = prepare_data(TEST_DATA)
# with the test data, we are able to smooth out the emissions.
B = smooth_emissions(
testing_set, observations, label_counts, emission_counts)
# we perform argmax on each observation to get the most probable label for each observation.
predict_labels(locale, B, observations, label_counts)
|
[
"limyaojie93@gmail.com"
] |
limyaojie93@gmail.com
|
3c3df6bdd3f4186cae34a915ada8acc2cdde2af6
|
d347ff7a54fe608e9e262c12d28c928e6e3989d7
|
/data_preprocessing.py
|
d07d692859a175b6ec7a15ddcdcb94ca03b27282
|
[] |
no_license
|
NeverGX/SiamRPN
|
374cc6c60f7a0fc8367f8dbe81b5e8184e78705b
|
9224a6ecfb23986d7202d110d568a7ab30ad1345
|
refs/heads/master
| 2023-06-19T18:03:43.235531
| 2020-09-14T07:15:55
| 2020-09-14T07:15:55
| 295,334,657
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,138
|
py
|
import cv2
import numpy as np
import argparse
import glob
import os
from tqdm import tqdm
from concurrent import futures
from config import config
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir',type=str, default='/home/wangkh/Downloads/got-10k/full_data/train_data', help='got_10k data dir')
parser.add_argument('--output_dir',type=str, default='/home/wangkh/Downloads/got-10k/crop_train_data', help='got_10k output dir')
# parser.add_argument('--data_dir',type=str, default='/home/wangkh/o/train', help='got_10k data dir')
# parser.add_argument('--output_dir',type=str, default='/home/wangkh/o/crop_train', help='got_10k output dir')
parser.add_argument('--num_processings',type=int, default=None)
arg = parser.parse_args()
datapath = arg.data_dir
outputpath = arg.output_dir
num_processings = arg.num_processings
cfg = config
def _init_video(video):
frame_name_list = glob.glob(os.path.join(datapath, video)+'/**.jpg')
frame_name_list.sort()
try:
gt_file = os.path.join(datapath, video, 'groundtruth.txt')
try:
gt = np.loadtxt(gt_file, dtype=float, delimiter=',')
except:
gt = np.loadtxt(gt_file, dtype=float)
except:
gt_file = os.path.join(datapath, video, 'groundtruth_rect.txt')
try:
gt = np.loadtxt(gt_file, dtype=float, delimiter=',')
except:
gt = np.loadtxt(gt_file, dtype=float)
n_frames = len(frame_name_list)
assert n_frames == len(gt), 'Number of frames and number of GT lines should be equal.'
return gt, frame_name_list, n_frames
def get_center(x):
return (x - 1.) / 2.
def xyxy2cxcywh(bbox):
return get_center(bbox[0]+bbox[2]), \
get_center(bbox[1]+bbox[3]), \
(bbox[2]-bbox[0]), \
(bbox[3]-bbox[1])
def crop_and_pad(img, cx, cy, model_sz, original_sz, img_mean=None):
xmin = cx - original_sz // 2
xmax = cx + original_sz // 2
ymin = cy - original_sz // 2
ymax = cy + original_sz // 2
im_h, im_w, _ = img.shape
left = right = top = bottom = 0
if xmin < 0:
left = int(abs(xmin))
if xmax > im_w:
right = int(xmax - im_w)
if ymin < 0:
top = int(abs(ymin))
if ymax > im_h:
bottom = int(ymax - im_h)
xmin = int(max(0, xmin))
xmax = int(min(im_w, xmax))
ymin = int(max(0, ymin))
ymax = int(min(im_h, ymax))
im_patch = img[ymin:ymax, xmin:xmax]
if left != 0 or right !=0 or top!=0 or bottom!=0:
if img_mean is None:
img_mean = tuple(map(int, img.mean(axis=(0, 1))))
im_patch = cv2.copyMakeBorder(im_patch, top, bottom, left, right,
cv2.BORDER_CONSTANT, value=img_mean)
if model_sz != original_sz:
im_patch = cv2.resize(im_patch, (model_sz, model_sz))
return im_patch
def get_instance_image(img, bbox, size_z, size_x, context_amount, img_mean=None):
cx, cy, w, h = xyxy2cxcywh(bbox)
wc_z = w + context_amount * (w+h)
hc_z = h + context_amount * (w+h)
s_z = np.sqrt(wc_z * hc_z)
s_x = s_z * (size_x / size_z)
scale_x = size_x / s_x # = size_x * size_z / ( s_z * size_x)= size_z / s_z = scale_z
instance_img = crop_and_pad(img, cx, cy, size_x, s_x, img_mean)
w_x = w * scale_x
h_x = h * scale_x
return instance_img, w_x, h_x
def crop_video(video):
gt, frame_name_list, n_frames = _init_video(video)
savepath = os.path.join(outputpath, video)
if not os.path.exists(savepath):
os.mkdir(savepath)
counter = 0
for i in range(n_frames):
img = cv2.imread(frame_name_list[i])
img_mean = tuple(map(int, img.mean(axis=(0, 1))))
bbox = gt[i]
if bbox[2]<=1 or bbox[3]<=1:
counter = counter + 1
continue
if np.sqrt((bbox[2] + 0.5 * (bbox[2]+bbox[3])) * (bbox[3] + 0.5 * (bbox[2]+bbox[3]))) > img.shape[0]*img.shape[1]:
counter = counter + 1
continue
if not max(bbox[2]/bbox[3], bbox[3]/bbox[2]) <= 10:
counter = counter + 1
continue
assert bbox[2]>0 and bbox[3]>0
bbox = [int(bbox[0]), int(bbox[1]), int(bbox[0]+bbox[2]), int(bbox[1]+bbox[3])]
instance_img, w_x, h_x = get_instance_image(img, bbox,
cfg.exemplar_size, cfg.instance_size, cfg.context_amount,
img_mean)
cv2.imwrite(savepath+"/{:0>8d}.x={:.2f}={:.2f}=.jpg".format(i+1-counter, w_x, h_x), instance_img)
def main():
videos = os.listdir(datapath)
videos.sort()
# crop_video(videos[0])
with futures.ProcessPoolExecutor(max_workers=None) as executor:
fs = [executor.submit(crop_video, video) for video in videos]
bar = tqdm(futures.as_completed(fs), total=len(videos))
for f in bar:
bar.set_description('Loading:')
#or in this way
# results = executor.map(crop_video,videos)
if __name__ == '__main__':
main()
|
[
"15058715723@163.com"
] |
15058715723@163.com
|
cd97fc37637ebbea191dfde0b5de158f4d957ec8
|
6189f34eff2831e3e727cd7c5e43bc5b591adffc
|
/WebMirror/management/rss_parser_funcs/feed_parse_extractNorthbladetlBlogspotCom.py
|
6ae6636f041f4c6171da2a37228f9012d3e74405
|
[
"BSD-3-Clause"
] |
permissive
|
fake-name/ReadableWebProxy
|
24603660b204a9e7965cfdd4a942ff62d7711e27
|
ca2e086818433abc08c014dd06bfd22d4985ea2a
|
refs/heads/master
| 2023-09-04T03:54:50.043051
| 2023-08-26T16:08:46
| 2023-08-26T16:08:46
| 39,611,770
| 207
| 20
|
BSD-3-Clause
| 2023-09-11T15:48:15
| 2015-07-24T04:30:43
|
Python
|
UTF-8
|
Python
| false
| false
| 562
|
py
|
def extractNorthbladetlBlogspotCom(item):
'''
Parser for 'northbladetl.blogspot.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
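# Hypothetical usage sketch: extractVolChapterFragmentPostfix and buildReleaseMessageWithType
# are imported from the surrounding codebase, and the item shape below is an assumption.
# extractNorthbladetlBlogspotCom({'title': 'Some Novel c12', 'tags': ['PRC']})
#   -> a 'translated' release message when a chapter number is parsed from the title
# extractNorthbladetlBlogspotCom({'title': 'Chapter 3 preview', 'tags': ['PRC']})
#   -> None, because titles containing "preview" are skipped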
|
[
"something@fake-url.com"
] |
something@fake-url.com
|
aeda73e4de7393ca198519384998e625a5a63d26
|
6f33381dcb19a042d916b4a452f9cb7438729798
|
/jabba/graphs/legend.py
|
6767d1bc8f8140580a6220c89e4327bd31cd22ab
|
[
"MIT"
] |
permissive
|
puppetlabs/jabba
|
8308adf5be9ba25efb414f384bf3568854be55e2
|
71c1d008ab497020fba6ffa12a600721eb3f5ef7
|
refs/heads/master
| 2023-06-13T09:17:49.274408
| 2017-06-30T11:02:27
| 2017-06-30T11:02:27
| 185,443,592
| 0
| 1
| null | 2019-05-07T16:54:03
| 2019-05-07T16:54:02
| null |
UTF-8
|
Python
| false
| false
| 939
|
py
|
import graphviz as gv
class Legend(object):
"""
GraphViz graph for rendering legend
"""
def __init__(self):
# Legend is presented as map (name -> settings)
self.items = {}
def add_item(self, name, settings):
self.items[name] = settings
def render(self):
legend = gv.Digraph('cluster_legend')
legend.body.extend(['label="Legend"'])
for name, settings in self.items.items():
legend.node("{}-1".format(name), label="")
legend.node("{}-2".format(name), label="")
# format label so it doesn't overlap with edge
label = " {}".format(name)
legend.edge("{}-1".format(name), "{}-2".format(name), label=label, **settings)
legend_wrapper = gv.Digraph('cluster_legend_wrapper')
legend_wrapper.subgraph(legend)
legend_wrapper.body.extend(['style=invis'])
return legend_wrapper
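# Minimal usage sketch (names and edge settings below are illustrative, not part of this module):
# legend = Legend()
# legend.add_item("passed", {"color": "green"})
# legend.add_item("failed", {"color": "red", "style": "dashed"})
# graph = gv.Digraph("G")
# graph.subgraph(legend.render())   # embed the wrapped legend cluster into a larger graph
# graph.render("example", format="png")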
|
[
"rebovykin@gmail.com"
] |
rebovykin@gmail.com
|
c73062c7a81aad5fa911ce01f8de736e1bb69024
|
7fc2977b09fa8fc7c26a5efc9788a6170b6ca7e5
|
/Stylying_Example.py
|
4022f2aa5a6c6defee1c144fae5c49ba9ba9fb74
|
[] |
no_license
|
fes7713/Qt_Template
|
d701996991b4ee7e59054f92e72628eaa307a541
|
bb0f602ce77a171673be824a9b7d12a0f70eb01b
|
refs/heads/main
| 2023-06-25T20:28:07.130729
| 2021-07-23T07:18:26
| 2021-07-23T07:18:26
| 388,712,708
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,518
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Stylying_Example.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
# APP Imports
import sys
import os
import platform
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import (QCoreApplication, QPropertyAnimation, QDate, QDateTime, QMetaObject, QObject, QPoint, QRect,
QSize, QTime, QTimer, QUrl, Qt, QEvent)
from PyQt5.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont, QFontDatabase, QIcon, QKeySequence,
QLinearGradient, QPalette, QPainter, QPixmap, QRadialGradient)
from PyQt5.QtWidgets import *
class MainWindow(QMainWindow):
def __init__(self):
        super().__init__()
# MainWindow.setObjectName("MainWindow")
self.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(self)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.main_frame = QtWidgets.QFrame(self.centralwidget)
self.main_frame.setStyleSheet("background-color: rgb(60, 60, 60);\n"
"border-radius : 20px")
self.main_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.main_frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.main_frame.setObjectName("main_frame")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.main_frame)
self.verticalLayout_2.setContentsMargins(50, 64, 70, 50)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.frame_8 = QtWidgets.QFrame(self.main_frame)
self.frame_8.setMaximumSize(QtCore.QSize(16777215, 120))
self.frame_8.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_8.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_8.setObjectName("frame_8")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.frame_8)
self.verticalLayout_3.setContentsMargins(19, 0, 9, 14)
self.verticalLayout_3.setSpacing(0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.result_frame = QtWidgets.QFrame(self.frame_8)
self.result_frame.setMaximumSize(QtCore.QSize(16777215, 70))
self.result_frame.setStyleSheet("background-color: transparent;")
self.result_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.result_frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.result_frame.setObjectName("result_frame")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.result_frame)
self.horizontalLayout_2.setContentsMargins(94, 20, 57, 5)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.display_line = QtWidgets.QLineEdit(self.result_frame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.display_line.sizePolicy().hasHeightForWidth())
self.display_line.setSizePolicy(sizePolicy)
self.display_line.setMinimumSize(QtCore.QSize(0, 40))
self.display_line.setStyleSheet("QLineEdit {\n"
" border: 2px solid rgb(180, 180, 180);\n"
" border-radius: 10px;\n"
" padding: 0 8px;\n"
" background-color: transparent;\n"
" color: rgb(255, 255, 255);\n"
" font: bold 15px;\n"
"}")
self.display_line.setText("")
self.display_line.setReadOnly(True)
self.display_line.setObjectName("display_line")
self.horizontalLayout_2.addWidget(self.display_line)
self.button_ok = QtWidgets.QPushButton(self.result_frame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.button_ok.sizePolicy().hasHeightForWidth())
self.button_ok.setSizePolicy(sizePolicy)
self.button_ok.setMinimumSize(QtCore.QSize(50, 50))
self.button_ok.setMaximumSize(QtCore.QSize(50, 70))
self.button_ok.setStyleSheet("QPushButton {\n"
" background-color: transparent;\n"
" color: rgb(255, 255, 255);\n"
" font: bold 15px;\n"
" padding: 6px;\n"
"}\n"
"\n"
"\n"
"QPushButton:pressed {\n"
" color: rgb(180, 180, 180);\n"
"}")
self.button_ok.setObjectName("button_ok")
self.horizontalLayout_2.addWidget(self.button_ok)
self.verticalLayout_3.addWidget(self.result_frame)
self.verticalLayout_2.addWidget(self.frame_8)
self.keypad_frame = QtWidgets.QFrame(self.main_frame)
self.keypad_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.keypad_frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.keypad_frame.setObjectName("keypad_frame")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.keypad_frame)
self.verticalLayout_6.setContentsMargins(30, -1, 30, 10)
self.verticalLayout_6.setSpacing(0)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.frame_123 = QtWidgets.QFrame(self.keypad_frame)
self.frame_123.setStyleSheet("background-color: transparent;")
self.frame_123.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_123.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_123.setObjectName("frame_123")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.frame_123)
self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_3.setSpacing(0)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.button_1 = QtWidgets.QPushButton(self.frame_123)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.button_1.sizePolicy().hasHeightForWidth())
self.button_1.setSizePolicy(sizePolicy)
self.button_1.setMinimumSize(QtCore.QSize(80, 80))
self.button_1.setMaximumSize(QtCore.QSize(80, 80))
self.button_1.setStyleSheet("QPushButton {\n"
" background-color: transparent;\n"
" color: rgb(255, 255, 255);\n"
" border-style: solid;\n"
" border-width: 2px;\n"
" border-radius: 40px;\n"
" border-color: rgb(180, 180, 180);\n"
" font: 35px;\n"
" padding: 6px;\n"
"}\n"
"\n"
"\n"
"QPushButton:hover{\n"
" background-color:rgb(85, 85, 85);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(180, 180, 180);\n"
" border-color: rgb(180, 180, 180);\n"
"}")
self.button_1.setObjectName("button_1")
self.horizontalLayout_3.addWidget(self.button_1)
self.button_2 = QtWidgets.QPushButton(self.frame_123)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.button_2.sizePolicy().hasHeightForWidth())
self.button_2.setSizePolicy(sizePolicy)
self.button_2.setMinimumSize(QtCore.QSize(80, 80))
self.button_2.setMaximumSize(QtCore.QSize(80, 80))
self.button_2.setStyleSheet("QPushButton {\n"
" background-color: transparent;\n"
" color: rgb(255, 255, 255);\n"
" border-style: solid;\n"
" border-width: 2px;\n"
" border-radius: 40px;\n"
" border-color: rgb(180, 180, 180);\n"
" font: 35px;\n"
" padding: 6px;\n"
"}\n"
"\n"
"\n"
"QPushButton:hover{\n"
" background-color:rgb(85, 85, 85);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(180, 180, 180);\n"
" border-color: rgb(180, 180, 180);\n"
"}")
self.button_2.setObjectName("button_2")
self.horizontalLayout_3.addWidget(self.button_2)
self.button_3 = QtWidgets.QPushButton(self.frame_123)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.button_3.sizePolicy().hasHeightForWidth())
self.button_3.setSizePolicy(sizePolicy)
self.button_3.setMinimumSize(QtCore.QSize(80, 80))
self.button_3.setMaximumSize(QtCore.QSize(80, 80))
font = QtGui.QFont()
font.setPointSize(-1)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.button_3.setFont(font)
self.button_3.setStyleSheet("QPushButton {\n"
" background-color: transparent;\n"
" color: rgb(255, 255, 255);\n"
" border-style: solid;\n"
" border-width: 2px;\n"
" border-radius: 40px;\n"
" border-color: rgb(180, 180, 180);\n"
" font: 35px;\n"
" padding: 6px;\n"
"}\n"
"\n"
"\n"
"QPushButton:hover{\n"
" background-color:rgb(85, 85, 85);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(180, 180, 180);\n"
" border-color: rgb(180, 180, 180);\n"
"}")
self.button_3.setObjectName("button_3")
self.horizontalLayout_3.addWidget(self.button_3)
self.verticalLayout_6.addWidget(self.frame_123)
self.verticalLayout_2.addWidget(self.keypad_frame)
self.verticalLayout.addWidget(self.main_frame)
self.title_bar = QtWidgets.QFrame(self.centralwidget)
self.title_bar.setMaximumSize(QtCore.QSize(16777215, 50))
self.title_bar.setStyleSheet("background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(4, 160, 255, 255), stop:1 rgb(1, 212, 255));\n"
"border-color: none;")
self.title_bar.setFrameShape(QtWidgets.QFrame.NoFrame)
self.title_bar.setFrameShadow(QtWidgets.QFrame.Raised)
self.title_bar.setObjectName("title_bar")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.title_bar)
self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_4.setSpacing(0)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.title_container_2 = QtWidgets.QFrame(self.title_bar)
self.title_container_2.setStyleSheet("background:transparent")
self.title_container_2.setFrameShape(QtWidgets.QFrame.NoFrame)
self.title_container_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.title_container_2.setObjectName("title_container_2")
self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.title_container_2)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.label_2 = QtWidgets.QLabel(self.title_container_2)
font = QtGui.QFont()
font.setFamily("Segoe UI")
font.setPointSize(12)
self.label_2.setFont(font)
self.label_2.setStyleSheet("color: rgb(56, 56, 56);")
self.label_2.setObjectName("label_2")
self.horizontalLayout_7.addWidget(self.label_2)
self.horizontalLayout_4.addWidget(self.title_container_2)
self.top_right_btns = QtWidgets.QFrame(self.title_bar)
self.top_right_btns.setMaximumSize(QtCore.QSize(200, 16777215))
self.top_right_btns.setStyleSheet("background:transparent")
self.top_right_btns.setFrameShape(QtWidgets.QFrame.NoFrame)
self.top_right_btns.setFrameShadow(QtWidgets.QFrame.Raised)
self.top_right_btns.setObjectName("top_right_btns")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.top_right_btns)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.restoreButton = QtWidgets.QPushButton(self.top_right_btns)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.restoreButton.sizePolicy().hasHeightForWidth())
self.restoreButton.setSizePolicy(sizePolicy)
self.restoreButton.setStyleSheet("QPushButton{\n"
" background: none;\n"
" border-radius: 5px;\n"
"}\n"
"QPushButton:hover{\n"
" background-color:rgb(255, 105, 105);\n"
" border-radius: 5px;\n"
"}")
self.restoreButton.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icons/Binary_clock_assets/fullscreen.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.restoreButton.setIcon(icon)
self.restoreButton.setIconSize(QtCore.QSize(32, 32))
self.restoreButton.setObjectName("restoreButton")
self.horizontalLayout_5.addWidget(self.restoreButton)
self.minimizeButton = QtWidgets.QPushButton(self.top_right_btns)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.minimizeButton.sizePolicy().hasHeightForWidth())
self.minimizeButton.setSizePolicy(sizePolicy)
self.minimizeButton.setStyleSheet("QPushButton{\n"
" background: none;\n"
" border-radius: 5px;\n"
"}\n"
"QPushButton:hover{\n"
" background-color:rgb(170, 255, 127);\n"
" border-radius: 5px;\n"
"}")
self.minimizeButton.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/icons/Binary_clock_assets/minimize.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.minimizeButton.setIcon(icon1)
self.minimizeButton.setIconSize(QtCore.QSize(32, 32))
self.minimizeButton.setObjectName("minimizeButton")
self.horizontalLayout_5.addWidget(self.minimizeButton)
self.closeButton = QtWidgets.QPushButton(self.top_right_btns)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.closeButton.sizePolicy().hasHeightForWidth())
self.closeButton.setSizePolicy(sizePolicy)
self.closeButton.setStyleSheet("QPushButton{\n"
" background: none;\n"
" border-radius: 5px;\n"
"}\n"
"QPushButton:hover{\n"
" background-color: rgb(0, 92, 157);\n"
" border-radius: 5px;\n"
"}")
self.closeButton.setText("")
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/icons/Binary_clock_assets/X.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.closeButton.setIcon(icon2)
self.closeButton.setIconSize(QtCore.QSize(32, 32))
self.closeButton.setObjectName("closeButton")
self.horizontalLayout_5.addWidget(self.closeButton)
self.horizontalLayout_4.addWidget(self.top_right_btns)
self.verticalLayout.addWidget(self.title_bar)
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton.sizePolicy().hasHeightForWidth())
self.pushButton.setSizePolicy(sizePolicy)
self.pushButton.setStyleSheet("QPushButton::!pressed\n"
" {\n"
" font: 75 10pt \"Microsoft YaHei UI\";\n"
" font-weight: bold;\n"
" color: rgb(255, 255, 255);\n"
" border-style: solid;\n"
" border-radius:21px;\n"
" background:gray;\n"
" }\n"
" QPushButton::pressed\n"
" {\n"
" font: 75 10pt \"Microsoft YaHei UI\";\n"
" font-weight: bold;\n"
" color: rgb(255, 255, 255);\n"
" border-style: solid;\n"
" border-radius:21px;\n"
" background-color : red;\n"
" }")
self.pushButton.setObjectName("pushButton")
self.verticalLayout.addWidget(self.pushButton)
self.horizontalScrollBar = QtWidgets.QScrollBar(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.horizontalScrollBar.sizePolicy().hasHeightForWidth())
self.horizontalScrollBar.setSizePolicy(sizePolicy)
self.horizontalScrollBar.setMaximumSize(QtCore.QSize(16777215, 50))
self.horizontalScrollBar.setStyleSheet("QScrollBar:horizontal {\n"
" border: 2px solid grey;\n"
" background: #32CC99;\n"
" height: 15px;\n"
" margin: 0px 20px 0 20px;\n"
"}\n"
"QScrollBar::handle:horizontal {\n"
" background: white;\n"
" min-width: 20px;\n"
"}\n"
"QScrollBar::add-line:horizontal {\n"
" border: 2px solid grey;\n"
" background: #32CC99;\n"
" width: 20px;\n"
" subcontrol-position: right;\n"
" subcontrol-origin: margin;\n"
"}\n"
"\n"
"QScrollBar::sub-line:horizontal {\n"
" border: 2px solid grey;\n"
" background: #32CC99;\n"
" width: 20px;\n"
" subcontrol-position: left;\n"
" subcontrol-origin: margin;\n"
"}\n"
"\n"
"QScrollBar:left-arrow:horizontal, QScrollBar::right-arrow:horizontal {\n"
" border: 2px solid grey;\n"
" width: 3px;\n"
" height: 3px;\n"
" background: white;\n"
"}\n"
"\n"
"QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal {\n"
" background: none;\n"
"}")
self.horizontalScrollBar.setOrientation(QtCore.Qt.Horizontal)
self.horizontalScrollBar.setObjectName("horizontalScrollBar")
self.verticalLayout.addWidget(self.horizontalScrollBar)
self.horizontalSlider = QtWidgets.QSlider(self.centralwidget)
self.horizontalSlider.setStyleSheet("QSlider::groove:horizontal {\n"
" background: red;\n"
" position: absolute; /* absolutely position 4px from the left and right of the widget. setting margins on the widget should work too... */\n"
" left: 4px; right: 4px;\n"
"}\n"
"\n"
"QSlider::handle:horizontal {\n"
" height: 10px;\n"
" background: green;\n"
" margin: 0 -4px; /* expand outside the groove */\n"
"}\n"
"\n"
"QSlider::add-page:horizontal {\n"
" background: white;\n"
"}\n"
"\n"
"QSlider::sub-page:horizontal {\n"
" background: pink;\n"
"}")
self.horizontalSlider.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider.setObjectName("horizontalSlider")
self.verticalLayout.addWidget(self.horizontalSlider)
        self.setCentralWidget(self.centralwidget)  # attach the widget tree to the main window; without this nothing is displayed
        self.retranslateUi()
        QtCore.QMetaObject.connectSlotsByName(self)
def retranslateUi(self):
_translate = QtCore.QCoreApplication.translate
self.button_ok.setText(_translate("MainWindow", "OK"))
self.button_1.setText(_translate("MainWindow", "1"))
self.button_2.setText(_translate("MainWindow", "2"))
self.button_3.setText(_translate("MainWindow", "3"))
self.label_2.setText(_translate("MainWindow", "Binary Clock"))
self.pushButton.setText(_translate("MainWindow", "PushButton"))
# import binary_clock_icons_rc
# Execute app
#
if __name__ == "__main__":
app = QApplication(sys.argv)
    window = MainWindow()
    window.show()  # the window must be shown before entering the event loop
    sys.exit(app.exec_())
else:
print(__name__, "hh")
|
[
"tun05036@temple.edu"
] |
tun05036@temple.edu
|
15bafbcebb7a5ee33f644bfcac1ff16f634e54b6
|
c4ee7adb7113c855b54c4c4b752d6e82758a735d
|
/ecommerce/ecommerce/settings.py
|
664987da3f61cb452fe63d12f80ef5d52d5772a7
|
[] |
no_license
|
devblocs/django-snippets
|
c000f73bb9bbe0bb638b2bf0574466de6cfe1bec
|
a2c06c64cc13b6459e2a7506dca81544d6e756ad
|
refs/heads/master
| 2022-12-17T01:34:32.598922
| 2020-09-14T16:51:46
| 2020-09-14T16:51:46
| 295,435,843
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,080
|
py
|
"""
Django settings for ecommerce project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ixv8z7yaw6we9znx=oiehc)xy&85we4s@+y6og)au-@6_sak7x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ecommerce.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ecommerce.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"venkatesh.nadar@neosofttech.com"
] |
venkatesh.nadar@neosofttech.com
|
ec47bc066bc69f6cf12e1ef76fe29f8be677394c
|
5667cc877342204b7d54b6c3cc5a9f4854f08829
|
/.history/apppersona/views_20201101174230.py
|
3ca271ad58977d9585b9c4096dc875f160abb1d5
|
[] |
no_license
|
Nyckhos/TestCommit
|
d62e3f6fefb04ab5647475cc7ead0d72cbd89efa
|
9aa8e2e35280b7862960cc8a864e9c02ac7f4796
|
refs/heads/main
| 2023-01-05T05:57:59.223641
| 2020-11-02T02:08:18
| 2020-11-02T02:08:18
| 309,237,224
| 2
| 0
| null | 2020-11-02T02:30:43
| 2020-11-02T02:30:43
| null |
UTF-8
|
Python
| false
| false
| 3,787
|
py
|
from django.http import request
from django.shortcuts import redirect, render
from django.http import HttpResponse
from .models import *
from .forms import *
from django.contrib.auth.models import User
from django.contrib.auth import *
from django.urls import reverse
from django.contrib.auth import login
from django.contrib.auth.decorators import *
from django.contrib.admin.views.decorators import *
from django.core.mail import send_mail, BadHeaderError
from django.contrib.auth.forms import PasswordResetForm
from django.template.loader import render_to_string
from django.db.models.query_utils import Q
from django.utils.http import urlsafe_base64_encode
from django.contrib.auth.tokens import default_token_generator
from django.utils.encoding import force_bytes
# Create your views here.
#@login_required
#def index(request):
# return render(request,'appersona/index.html')
def lista_personas(request):
lista = User.objects.all() # Todas las personas
return render(request, 'apppersona/lista_personas.html', {'lista': lista})
def lista_tarjetas(request):
tarjetas = TarjetaJunaeb.objects.all()
return render(request, 'apppersona/lista_tarjetas.html', {'listaTarjetas': tarjetas})
def tarjetas_con_plata(request):
tarjetas = TarjetaJunaeb.objects.filter(montoDisponible__gte=1)
return render(request, 'apppersona/lista_tarjetas.html', {'listaTarjetas': tarjetas})
def index(request):
return render(request, 'apppersona/index.html')
def contacto(request):
return render(request, 'apppersona/contacto.html')
def nosotros(request):
return render(request, 'apppersona/nosotros.html')
def register(request):
if request.method == "POST":
form = ExtendedUserCreationForm(request.POST)
profile_form = FormularioPersona(request.POST)
if form.is_valid() and profile_form.is_valid():
user = form.save()
profile = profile_form.save(commit=False)
profile.user = user
profile.save()
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password1')
user = authenticate(username=username, password=password)
login(request, user)
return redirect('index')
else:
form = ExtendedUserCreationForm()
profile_form = FormularioPersona()
context = {'form': form, 'profile_form': profile_form}
return render(request, "apppersona/registro.html", context)
def password_reset_request(request):
if request.method == "POST":
password_reset_form = PasswordResetForm(request.POST)
if password_reset_form.is_valid():
data = password_reset_form.cleaned_data['email']
associated_users = User.objects.filter(Q(email=data))
if associated_users.exists():
for user in associated_users:
subject = "Password Reset Requested"
email_template_name = "main/password/password_reset_email.txt"
c = {
"email":user.email,
'domain':'127.0.0.1:8000',
'site_name': 'Website',
"uid": urlsafe_base64_encode(force_bytes(user.pk)),
"user": user,
'token': default_token_generator.make_token(user),
'protocol': 'http',
}
email = render_to_string(email_template_name, c)
try:
send_mail(subject, email, 'admin@example.com' , [user.email], fail_silently=False)
except BadHeaderError:
return HttpResponse('Invalid header found.')
return redirect ("/password_reset/done/")
password_reset_form = PasswordResetForm()
return render(request=request, template_name="main/password/password_reset.html", context={"password_reset_form":password_reset_form})
|
[
"fernandox_240997@live.com"
] |
fernandox_240997@live.com
|
90254a2e8ba7c81e196fd637cbd4598c1fdaa717
|
3e1b5d7cb529be1529ae45fa062a423f8328d6d2
|
/Edgar-new-codes/Getting-10-K- and-10-Q doc links.py
|
54aab244b45e5b116a35eee0099a5ad3de1aba53
|
[] |
no_license
|
abhigupta4/Finrsch
|
260687b3b53d3c94a03dc2b9e640952718033486
|
ca0b7f1631fbbe109b81403b9ffc36c67c759d23
|
refs/heads/master
| 2021-01-19T10:54:19.705213
| 2017-07-06T12:22:37
| 2017-07-06T12:22:37
| 87,913,197
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,806
|
py
|
import requests
from bs4 import BeautifulSoup
import urlparse
def get_file(cur,val):
base = 'https://www.sec.gov'
r = requests.get(cur)
document = BeautifulSoup(r.content,"lxml")
links = document.find_all('a')
for link in links:
if 'Archives' in link.get("href"):
print 'Document link'
print base+link.get("href")
break
def take_second_link(cur,cik,val):
begin = 'https://www.sec.gov'
r = requests.get(cur)
document = BeautifulSoup(r.content,"lxml")
links = document.find_all('a')
for link in links:
temp = link.get("href")
if 'index' in temp and 'headers' not in temp and cik in temp:
print
print 'Company link'
if val:
print "Type 10-K"
else:
print "Type 10-Q"
print begin+temp
get_file(begin + temp,val)
def find_link1(entire,val):
begin = 'https://www.sec.gov/Archives/edgar/data/'
for part in entire:
if 'data' in part:
temp = part.split('/')
last = ''
for ele in temp[-1]:
if ele.isdigit():
last += ele
new = begin + temp[-2] + '/' + last
take_second_link(new,temp[-2],val)
def inside_index(link1):
r = requests.get(main_link+link1)
document = BeautifulSoup(r.content,"lxml")
soup = document.get_text()
lines = soup.split("\n")
flag = 1
for line in lines:
temp = line.split(" ")
for i in xrange(len(temp)):
if temp[i] == '10-Q' and temp[i-1] == '' and temp[i+1] == '':
find_link1(temp,0)
break
if temp[i] == '10-K' and temp[i-1] == '' and temp[i+1] == '':
find_link1(temp,1)
break
main_link = 'https://www.sec.gov/Archives/edgar/daily-index/2017/QTR2/'
r = requests.get(main_link)
document = BeautifulSoup(r.content,"lxml")
links = document.find_all('a')
for link in links:
if 'company' in link.get("href") and '.idx' in link.get("href"):
inside_index(link.get("href"))
# break
|
[
"abhinavgupta6245@gmail.com"
] |
abhinavgupta6245@gmail.com
|
97c689ed7738f6c1cdd66ac8e2e6f724392842c8
|
99ee9124c12c889821f407fe5f29316ea276131d
|
/1.py
|
b76abe519ef86ead1537c66e88e3bea758ae3c83
|
[] |
no_license
|
17818909875/pythontianqi
|
3b7a17cd3d9e7be0b115549d246221e7479ee646
|
7c20fde9ec6dd2cf9dd9ab3a4e5ca46a14ac114a
|
refs/heads/master
| 2020-06-01T12:16:53.326667
| 2019-06-07T16:58:28
| 2019-06-07T16:58:28
| 190,777,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
import requests
r=requests.get('http://www.baidu.com',params=None)  # like the POST case this crawls the page; params (a dict or byte stream, None here) builds the Request, and r is the Response object
r.encoding='utf-8'  # r.apparent_encoding can guess the encoding; if the response header (note: header, not head) carries no charset, Chinese text cannot be decoded - Baidu's page is such a case
print(r.text)  # the response body as a string
# r.content is the binary form of the HTTP response body, which is what you use for things like images
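# Illustrative follow-up (not in the original snippet): how r.content would be used to
# save a binary response to disk. The image URL below is only a placeholder.
# img = requests.get('http://www.baidu.com/favicon.ico')
# with open('favicon.ico', 'wb') as f:
#     f.write(img.content)  # raw bytes, no text decoding involved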
|
[
"2534907211@qq.com"
] |
2534907211@qq.com
|
cdae847f9f3fce2eddb4d6dd44e2910e92a82277
|
f222202da3ad7d9f897608363c4f6ad18ccb6238
|
/srm685/multiplication_table2_easy.py
|
37383f97b427629bf3d3b8b2f4eb57e61388afac
|
[] |
no_license
|
antontomusiak/TopCoder
|
fbc510b53bd2f201443f3846a7143db752116094
|
124d8426c9faeedc787ce0fa9643a6e80828126e
|
refs/heads/master
| 2020-08-22T11:31:23.436037
| 2019-10-20T16:08:42
| 2019-10-20T16:08:42
| 216,384,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
from math import sqrt
class MultiplicationTable2Easy:
def isGoodSet(self, table, t):
n = int(sqrt(len(table)))
for i in t:
for j in t:
if table[i * n + j] not in t: return 'Not Good'
return 'Good'
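# Illustrative usage (not part of the original solution): `table` is an n*n product
# table flattened row-major and `t` is the candidate index set. With the 2x2 table
# below, {0} is closed under the table lookup, so the answer is 'Good'.
if __name__ == "__main__":
    demo_table = [0, 1,
                  1, 0]  # row-major 2x2 table
    print(MultiplicationTable2Easy().isGoodSet(demo_table, [0]))  # -> Good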
|
[
"noreply@github.com"
] |
antontomusiak.noreply@github.com
|
752c22f97ab80a626fa28eba09160396d731d400
|
a93977614af9f4461a8f39b66b37456c7de10b92
|
/jws_beta/logon.py
|
4e2fcecb1ab4005f4d93dc571cb99235a133dd34
|
[] |
no_license
|
gikieng/jws
|
90039d8fc7cbc551bde5612cb2ec2ff9c3453618
|
81ec2f6e632390f95d8e6b36aeaae3104cf6de6a
|
refs/heads/master
| 2020-05-17T12:14:02.642195
| 2014-04-22T23:21:23
| 2014-04-22T23:21:23
| 18,941,316
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,005
|
py
|
# -*- coding:utf-8 -*-
import gtk, urllib2, urllib,sys, re
class LoginPage(gtk.Window):
url = 'http://jw.hrbeu.edu.cn/ACTIONLOGON.APPPROCESS?mode=4'
def __init__(self, opener, headers):
super(LoginPage, self).__init__()
self.set_title("哈工程学生成绩查询系统")
self.set_size_request(420, 270)
self.set_resizable(False)
self.set_position(gtk.WIN_POS_CENTER)
        # set up the connection info
self.opener = opener
self.headers = headers
        # set up the window layout
self.fixed = gtk.Fixed()
self.pw_text = ""
self.user_text = ""
self.agonm_text = ''
self.user_entry = gtk.Entry()
self.user_label = gtk.Label("学号")
self.pword_entry = gtk.Entry()
self.pword_entry.set_visibility(False)
self.pword_label = gtk.Label("密码")
self.agnom_label = gtk.Label("验证码")
self.response_label = gtk.Label("欢迎使用!")
self.agnom_image = gtk.Image()
self.agnom_image.set_size_request(60, 30)
self.agnom_btn = gtk.Button()
self.agnom_btn.set_size_request(64, 34)
self.agnom_entry = gtk.Entry()
self.agnom_entry.set_size_request(60, 30)
self.login_button = gtk.Button("登录")
self.login_button.set_border_width(0)
self.login_button.set_size_request(80, 50)
self.agnom_btn.add(self.agnom_image)
self.fixed.put(self.login_button, 300, 100)
self.fixed.put(self.user_label, 60, 40)
self.fixed.put(self.user_entry, 120, 40)
self.fixed.put(self.pword_label, 60, 82)
self.fixed.put(self.pword_entry, 120, 82)
self.fixed.put(self.agnom_label, 54, 124)
self.fixed.put(self.agnom_entry, 120, 124)
self.fixed.put(self.agnom_btn, 190, 120)
self.fixed.put(self.response_label, 160, 180)
        # load the captcha image
self.changeImage('')
        # bind the window events
self.agnom_btn.connect("clicked", self.changeImage)
self.user_entry.connect("key_release_event", self.user_gettext)
self.pword_entry.connect("key_release_event", self.pass_gettext)
self.agnom_entry.connect("key_release_event", self.agnom_gettext)
self.login_button.connect("clicked", self.login)
self.connect("destroy", gtk.main_quit)
self.add(self.fixed)
self.show_all()
gtk.main()
    # log in
def login(self,event):
data={'submit.x': '0', 'WebUserNO':self.user_text, 'Agnomen': self.agnom_text,'Password': self.pw_text, 'submit.y': '0'}
request = urllib2.Request(url = self.url, data = urllib.urlencode(data),headers = self.headers)
response = self.opener.open(request)
self.getResponseInfo(response.read())
    # entry change events
def user_gettext(self, widget, event):
self.user_text = widget.get_text()
def agnom_gettext(self, widget, event):
self.agnom_text = widget.get_text()
def pass_gettext(self, widget, event):
self.pw_text = widget.get_text()
    # refresh the captcha image
def changeImage(self, event):
path = r"./tmp/agnomen.jpg"
f=file(path,'wb')
imageurl = 'http://jw.hrbeu.edu.cn/ACTIONVALIDATERANDOMPICTURE.APPPROCESS'
imrequest = urllib2.Request(url = imageurl,headers = self.headers)
f.write(self.opener.open(imrequest).read())
f.close()
self.agnom_image.set_from_file("./tmp/agnomen.jpg")
def getResponseInfo(self,strbuff):
agnomen = re.compile(r'\xc7\xeb\xca\xe4\xc8\xeb\xd5\xfd\xc8\xb7\xb5\xc4\xb8\xbd\xbc\xd3\xc2\xeb')
if agnomen.search(strbuff):
self.response_label.set_label("验证码错误!")
return
user = re.compile(r'\xb4\xed\xce\xf3\xb5\xc4\xd3\xc3\xbb\xa7\xc3\xfb\xbb\xf2\xd5\xdf\xc3\xdc\xc2\xeb\\n')
if user.search(strbuff):
self.response_label.set_label("用户名或密码错误!")
return
gtk.main_quit()
|
[
"gikieng@gmail.com"
] |
gikieng@gmail.com
|
5fc5736b6ff1ad4da80432c6975910ee3332d969
|
3c328704f965a7e7024e67ef672689f339c049ca
|
/features/daysSinceRegistration.py
|
39b93e352142a40add930e26fda44ec222606591
|
[] |
no_license
|
valkheim/url-classifier
|
1152cd54d418cec452a538b1b70371a89fb998cc
|
7f431b6fcc996c8c94062a5d6d5468815857c587
|
refs/heads/master
| 2020-05-01T18:06:21.593033
| 2019-04-01T08:35:49
| 2019-04-01T08:35:49
| 177,616,846
| 1
| 0
| null | 2020-01-25T14:57:42
| 2019-03-25T15:45:15
|
TeX
|
UTF-8
|
Python
| false
| false
| 640
|
py
|
#!/usr/bin/env python
import whois
import datetime
import numpy as np
def get(address):
if address is None:
return np.nan
try:
creation = whois.whois(address).creation_date
except BaseException: # non existent domain raises an exception
return np.nan
if creation is None:
return np.nan
today = datetime.date.today()
try:
if type(creation) == list:
diff = today - creation[-1].date()
else:
diff = today - creation.date()
except BaseException: #AttributeError for amazon.co.uk : before Aug-1996
return np.nan
return diff.days
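# Illustrative usage (not part of the original feature module): the lookup needs
# network access and the domain below is only an example.
if __name__ == "__main__":
    print(get("example.com"))  # domain age in days, or nan if the WHOIS lookup fails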
|
[
"charles.paulet@epitech.eu"
] |
charles.paulet@epitech.eu
|
042eed307db1e9a2af3b5a50786088f3bb73b6df
|
e2e283294d6c9ed6708fef38f7627d0b27e85779
|
/pymixconsole/processors/panner.py
|
cf240c517fb00467ad3af3dc28678f6139198303
|
[
"MIT"
] |
permissive
|
JaeDukSeo/pymixconsole
|
3f8315b7c409cbf9d5d979e1dedf00dc39734b7c
|
49d86aeddfdd0b59c18830b02f63c98f721caf9d
|
refs/heads/master
| 2023-05-06T04:41:57.434512
| 2021-04-13T16:40:22
| 2021-04-13T16:40:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,478
|
py
|
from numba import jit, float64
import numpy as np
from ..parameter import Parameter
from ..processor import Processor
from ..parameter_list import ParameterList
@jit(nopython=True)
def n_process(data, L, R):
""" Apply panning gains based on chosen pan law.
Params
-------
    data : ndarray
Input audio data. (samples, channels)
currently only support max of 2 channels
Returns
-------
output_buffer : ndarray
Panned input audio. (samples, channels)
"""
if data.ndim < 2:
# apply the channel gains
output_buffer_L = L * data
output_buffer_R = R * data
else:
# apply the channel gains
output_buffer_L = L * data[:,0]
output_buffer_R = R * data[:,1]
return output_buffer_L, output_buffer_R
class Panner(Processor):
""" Simple stereo panner.
For a mono input this will produce a stereo buffer.
For a stereo input this will produce a stereo buffer.
Supported pan laws: ["linear", "constant_power", "-4.5dB"]
"""
def __init__(self, name="Panner", block_size=512, sample_rate=44100):
# default processor class constructor
super().__init__(name, None, block_size, sample_rate)
self.parameters = ParameterList()
self.parameters.add(Parameter("pan", 0.5, "float", processor=self, minimum=0.0, maximum=1.0))
#self.parameters.add(Parameter("outputs", 2, "int", processor=self, minimum=2, maximum=2))
self.parameters.add(Parameter("pan_law", "-4.5dB", "string", processor=self, options=["-4.5dB"]))
# setup the coefficents based on default params
self.update(None)
# buffer to hold
self._output_buffer = np.empty([self.block_size, 2])
def _calculate_pan_coefficents(self):
""" Based on the set pan law deteremine the gain value
to apply for the left and right channel to achieve panning effect.
This operates on the assumption that the input channel is mono.
The output data will be stereo at the moment, but could be expnanded
to a higher channel count format.
The panning value is in the range [0, 1], where
0 means the signal is panned completely to the left, and
1 means the signal is apanned copletely to the right.
"""
# first scale the linear [0, 1] to [0, pi/2]
theta = self.parameters.pan.value * (np.pi/2)
if self.parameters.pan_law.value == "linear":
self._L = ((np.pi/2) - theta) * (2/np.pi)
self._R = theta * (2/np.pi)
elif self.parameters.pan_law.value == "constant_power":
self._L = np.cos(theta)
self._R = np.sin(theta)
elif self.parameters.pan_law.value == "-4.5dB":
self._L = np.sqrt(((np.pi/2) - theta) * (2/np.pi) * np.cos(theta))
self._R = np.sqrt(theta * (2/np.pi) * np.sin(theta))
else:
raise ValueError(f"Invalid pan_law {self.parameters.pan_law.value}.")
def process(self, data):
L, R = n_process(data, self._L, self._R)
return np.stack((L, R), axis=1)
def update(self, parameter_name):
self._calculate_pan_coefficents()
@property
def block_size(self):
return self._block_size
@block_size.setter
def block_size(self, block_size):
self._block_size = block_size
self._output_buffer = np.empty([block_size, 2])
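# Illustrative usage from client code (not part of this module, and only a sketch:
# the relative imports above mean the file cannot run as a standalone script, and the
# import path below is inferred from the package layout):
#
#   from pymixconsole.processors.panner import Panner
#   panner = Panner()                       # default -4.5dB pan law, pan = 0.5 (centre)
#   stereo = panner.process(np.ones(512))   # mono block in -> (512, 2) stereo block out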
|
[
"csteinmetz1@gmail.com"
] |
csteinmetz1@gmail.com
|
c86eb97e7e33e0eb1daa5c7af3fdb94195670482
|
38b87c9bede63d0d26d3fb56e453d9f2b92e49ed
|
/theme/migrations/0061_auto_20160529_1935.py
|
2ef31fb0bab366a335497b43f82c082d9b73f281
|
[] |
no_license
|
webcommittee/cfwebsite
|
f1614bcc49b600918fd6c85e6cb7023c9702bc8d
|
d7bf23db8609655a8af53d9548a3ab02c1440a1b
|
refs/heads/master
| 2021-01-01T15:35:08.140308
| 2017-08-10T00:34:42
| 2017-08-10T00:34:42
| 97,653,307
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 805
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-29 23:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('theme', '0060_auto_20160529_1429'),
]
operations = [
migrations.RemoveField(
model_name='companyprofile',
name='public_phone_number',
),
migrations.AddField(
model_name='companyprofile',
name='phone_number',
field=models.CharField(blank=True, max_length=15),
),
migrations.AlterField(
model_name='studentprofile',
name='open_to_relocation',
field=models.BooleanField(default=False, help_text='Open to relocation?'),
),
]
|
[
"webcommittee.rpicareerfair@gmail.com"
] |
webcommittee.rpicareerfair@gmail.com
|
3b670b3ba9034ce06aecedf5d367f6512e9bc61b
|
ce002bac9a46694bbd0227b895da4ca02832311c
|
/tests/app_test.py
|
7297f123212e800aa77873ea51c6b0b8514f5366
|
[] |
no_license
|
Pranav143/ECE444-F2021-Lab6
|
787cb8f12da0679821585a5417ed5693ea8e5ef2
|
5e766fd36f9c3fdc7e0dadf34c3b28ec92e0be2d
|
refs/heads/main
| 2023-09-04T05:29:19.787720
| 2021-11-01T03:40:09
| 2021-11-01T03:40:09
| 423,250,181
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,500
|
py
|
import pytest
import os
from pathlib import Path
import json
from project.app import app, db
TEST_DB = "test.db"
@pytest.fixture
def client():
BASE_DIR = Path(__file__).resolve().parent.parent
app.config["TESTING"] = True
app.config["DATABASE"] = BASE_DIR.joinpath(TEST_DB)
app.config["SQLALCHEMY_DATABASE_URI"] = f"sqlite:///{BASE_DIR.joinpath(TEST_DB)}"
db.create_all() # setup
yield app.test_client() # tests run here
db.drop_all() # teardown
def login(client, username, password):
"""Login helper function"""
return client.post(
"/login",
data=dict(username=username, password=password),
follow_redirects=True,
)
def logout(client):
"""Logout helper function"""
return client.get("/logout", follow_redirects=True)
def test_index(client):
response = client.get("/", content_type="html/text")
assert response.status_code == 200
def test_database(client):
"""initial test. ensure that the database exists"""
tester = Path("test.db").is_file()
assert tester
def test_empty_db(client):
"""Ensure database is blank"""
rv = client.get("/")
assert b"No entries yet. Add some!" in rv.data
def test_login_logout(client):
"""Test login and logout using helper functions"""
rv = login(client, app.config["USERNAME"], app.config["PASSWORD"])
assert b"You were logged in" in rv.data
rv = logout(client)
assert b"You were logged out" in rv.data
rv = login(client, app.config["USERNAME"] + "x", app.config["PASSWORD"])
assert b"Invalid username" in rv.data
rv = login(client, app.config["USERNAME"], app.config["PASSWORD"] + "x")
assert b"Invalid password" in rv.data
def test_messages(client):
"""Ensure that user can post messages"""
login(client, app.config["USERNAME"], app.config["PASSWORD"])
rv = client.post(
"/add",
data=dict(title="<Hello>", text="<strong>HTML</strong> allowed here"),
follow_redirects=True,
)
assert b"No entries here so far" not in rv.data
assert b"<Hello>" in rv.data
assert b"<strong>HTML</strong> allowed here" in rv.data
def test_delete_message(client):
"""Ensure the messages are being deleted"""
rv = client.get("/delete/1")
data = json.loads(rv.data)
assert data["status"] == 0
login(client, app.config["USERNAME"], app.config["PASSWORD"])
rv = client.get("/delete/1")
data = json.loads(rv.data)
assert data["status"] == 1
|
[
"pranavagnihotri143@gmail.com"
] |
pranavagnihotri143@gmail.com
|
69fe2635469cacf0543c8bdc6588c35e1ff15509
|
aa1972e6978d5f983c48578bdf3b51e311cb4396
|
/nitro-python-1.0/nssrc/com/citrix/netscaler/nitro/resource/config/network/vrid6_interface_binding.py
|
514e8bd208b3024680ba6c2b0c5d4530d8b2a8a3
|
[
"Python-2.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
MayankTahil/nitro-ide
|
3d7ddfd13ff6510d6709bdeaef37c187b9f22f38
|
50054929214a35a7bb19ed10c4905fffa37c3451
|
refs/heads/master
| 2020-12-03T02:27:03.672953
| 2017-07-05T18:09:09
| 2017-07-05T18:09:09
| 95,933,896
| 2
| 5
| null | 2017-07-05T16:51:29
| 2017-07-01T01:03:20
|
HTML
|
UTF-8
|
Python
| false
| false
| 6,678
|
py
|
#
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vrid6_interface_binding(base_resource) :
""" Binding class showing the interface that can be bound to vrid6.
"""
def __init__(self) :
self._ifnum = None
self._vlan = None
self._flags = None
self._id = None
self.___count = 0
@property
def id(self) :
r"""Integer value that uniquely identifies a VMAC6 address.<br/>Minimum value = 1<br/>Maximum value = 255.
"""
try :
return self._id
except Exception as e:
raise e
@id.setter
def id(self, id) :
r"""Integer value that uniquely identifies a VMAC6 address.<br/>Minimum value = 1<br/>Maximum value = 255
"""
try :
self._id = id
except Exception as e:
raise e
@property
def ifnum(self) :
r"""Interfaces to bind to the VMAC6, specified in (slot/port) notation (for example, 1/2).Use spaces to separate multiple entries.
"""
try :
return self._ifnum
except Exception as e:
raise e
@ifnum.setter
def ifnum(self, ifnum) :
r"""Interfaces to bind to the VMAC6, specified in (slot/port) notation (for example, 1/2).Use spaces to separate multiple entries.
"""
try :
self._ifnum = ifnum
except Exception as e:
raise e
@property
def flags(self) :
r"""Flags.
"""
try :
return self._flags
except Exception as e:
raise e
@property
def vlan(self) :
r"""The VLAN in which this VRID resides.
"""
try :
return self._vlan
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(vrid6_interface_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.vrid6_interface_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.id is not None :
return str(self.id)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = vrid6_interface_binding()
updateresource.id = resource.id
updateresource.ifnum = resource.ifnum
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [vrid6_interface_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].id = resource[i].id
updateresources[i].ifnum = resource[i].ifnum
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = vrid6_interface_binding()
deleteresource.id = resource.id
deleteresource.ifnum = resource.ifnum
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [vrid6_interface_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].id = resource[i].id
deleteresources[i].ifnum = resource[i].ifnum
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, id="", option_="") :
r""" Use this API to fetch vrid6_interface_binding resources.
"""
try :
if not id :
obj = vrid6_interface_binding()
response = obj.get_resources(service, option_)
else :
obj = vrid6_interface_binding()
obj.id = id
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, id, filter_) :
r""" Use this API to fetch filtered set of vrid6_interface_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = vrid6_interface_binding()
obj.id = id
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, id) :
r""" Use this API to count vrid6_interface_binding resources configued on NetScaler.
"""
try :
obj = vrid6_interface_binding()
obj.id = id
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, id, filter_) :
r""" Use this API to count the filtered set of vrid6_interface_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = vrid6_interface_binding()
obj.id = id
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class vrid6_interface_binding_response(base_response) :
def __init__(self, length=1) :
self.vrid6_interface_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.vrid6_interface_binding = [vrid6_interface_binding() for _ in range(length)]
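# Illustrative usage of the binding class above (not part of the generated SDK file);
# `client` is assumed to be an authenticated nitro_service session:
#
#   bindings = vrid6_interface_binding.get(client, id="1")
#   filtered = vrid6_interface_binding.get_filtered(client, "1", "ifnum:1/2")
#   total    = vrid6_interface_binding.count(client, "1")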
|
[
"Mayank@Mandelbrot.local"
] |
Mayank@Mandelbrot.local
|
827e3bcf2fca26a7b7abc2fc74da531da077f856
|
f6078890ba792d5734d289d7a0b1d429d945a03a
|
/extra/oconnorcollin_24162_1340359_Collin_O'Connor_1607318_ExtraCredit_week7.py
|
768973e38dd5ce426badbd6677369e038d6aa08c
|
[] |
no_license
|
huazhige/EART119_Lab
|
1c3d0b986a0f59727ee4ce11ded1bc7a87f5b7c0
|
47931d6f6a2c7bc053cd15cef662eb2f2027712c
|
refs/heads/master
| 2020-05-04T23:40:53.709217
| 2019-06-11T18:30:45
| 2019-06-11T18:30:45
| 179,552,067
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,750
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 13 08:26:50 2019
@author: collin O'Connor
"""
import numpy as np
import integrate_utils as int_utils
#==============================================================================
#Question 1
#==============================================================================
"""
Numerical integration of definite integrals:
ex: f(t) = 3t**2 * exp(t**3)
    F(t) = exp(t**3)
between a and b, with F'(t) = f(t)
"""
#==============================================================================
#fn defs
#==============================================================================
def fct_f(t):
return 3*t**2 * np.exp(t**3)
def fct_F(t):
return np.exp(t**3)
###########integration fn##########
def trapezoidal(fct_x, x0, xn, N):
"""
composite trapezoidal method
    implementation of eq. 3.17, p. 60 in Linge & Langtangen
params:
            fct_x = function whose integral is computed
x0, xn = integration bounds
N = number of trapezoids
return:
value of definite integral of fct_x
between x0 and xn
"""
dx = float(xn-x0)/N
    # write the sum as a for loop
f_Integ = 0.5*(fct_x(x0) + fct_x(xn))
for i in range(1, N):
f_Integ += fct_x(x0 + i*dx)
## write sum in vectorized form
#f_Integ = 0.5*(fct_x(x0) + fct_x(xn)) + (fct_x(x0 + dx*np.arange(1, N, 1))).sum()
return dx*f_Integ
def midpoint( fct_x, x0, xn, N):
"""
Composite Midpoint method, eq. 3.21 page 66 in Linge & Langtangen
:param fct_x: - function whose integral is in question
    :param x0: - lower integration bound
    :param xn: - upper integration bound
    :param N: - number of subintervals; choose a high number for high accuracy
:return: - integral of fct_x between x0 and xn
"""
dx = float( xn-x0)/N
a_xi = x0 + 0.5*dx + np.arange( N)*dx
f_int = dx*( fct_x(a_xi)).sum()
return f_int
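# Quick illustrative sanity check (not part of the assignment): both rules should
# reproduce a simple polynomial integral almost exactly, e.g.
#   trapezoidal(lambda x: 2*x, 0, 1, 1000)  ->  ~1.0
#   midpoint(lambda x: 2*x, 0, 1, 1000)     ->  ~1.0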
#==============================================================================
#parameters
#==============================================================================
xmin, xmax = 0, 1
N = 1000
#==============================================================================
#num integration and plotting
#==============================================================================
#exact solution
f_IntExact = fct_F(xmax) - fct_F(xmin)
#Trapazoidal method numerical approximation
f_IntNum = trapezoidal(fct_f, xmin, xmax, N)
#Midpoint method numerical approximation
f_mid=midpoint(fct_f, xmin, xmax, N)
#compare exact and Numerical
print "Question 1:"
print 'exact integral: ', f_IntExact
print 'Trapazoidal Method Numerical approx.: ', f_IntNum
print 'Midpoint Method Numerical approx.: ', f_mid
print
#==============================================================================
#Question 2
#==============================================================================
"""Compute mean value of fns
and compare to the definite integral:
f(x)=sin(x)
g(x)=2x*exp(x**2)
"""
def fx(x):
return np.sin(x)
def gx(x):
return 2*x * np.exp(x**2)
def mean_val(integral_fx, xmax, xmin):
return (1/(xmax - xmin)) * integral_fx
print "Question 2:"
print 'mean value of f(x): ', round(mean_val(trapezoidal(fx,0, np.pi, 1000), np.pi, 0), 3)
print 'integral of f(x): ', round(trapezoidal(fx,0, np.pi, 1000), 3)
print 'mean value of g(x): ', round(mean_val(trapezoidal(gx, 0, 1, 1000), 1, 0), 3)
print 'Integral of g(x): ', round(trapezoidal(gx, 0, 1, 1000), 3)
print
#==============================================================================
#Question 3
#==============================================================================
#================================================
# fct definition
#================================================
def fct2_xy( x, y):
return (x**2 + y**2)**0.5
def fct_xy( x, y):
return x*(y**2)
def fct_gxy( x, y):
"""
- rectangular domain
return: -1 for points outside
"""
f_retVal = -1
if x >= xmin and x <= xmax and y >= ymin and y <= ymax:
f_retVal = 1
return f_retVal
def fct_Fxy_exact(x, y):
return (0.5*(x**2))+ ((1./3)*(y**3))
def fct_exact(r, theta):
return theta*((r**3)/3.)
#================================================
# parameters
#================================================
xmin, xmax = 0, 2
ymin, ymax = 0, 1.5
rmin, rmax = 0, 2
theta_min, theta_max = 0, 2*np.pi
#================================================
# compute integral
#================================================
#compute definite integral
print "Question 3:"
print ('exact solution part a: ', round(fct_exact(rmax, theta_max) - fct_exact(rmin, theta_min), 3))
print 'monte Carlo solution part a: '
for n in np.arange(100, 1200, 200):
gInt = int_utils.monteCarlo(fct2_xy, fct_gxy, rmin, rmax, theta_min, theta_max, n)
    #in int_utils the MonteCarlo method result was supposed to be squared, but it never was.
gInt = gInt**2
print 'no. random points', n, 'number integral', round(gInt, 3)
print
print('exac. sol part b: ', round(fct_Fxy_exact(xmax, ymax) - fct_Fxy_exact(xmin, ymin), 3))
print 'monte Carlo solution part b: '
for n in np.arange(100, 1200, 200):
fInt=int_utils.monteCarlo(fct_xy, fct_gxy, xmin+1, xmax+1, ymin, ymax, n )
    #in int_utils the MonteCarlo method result was supposed to be squared, but it never was.
fInt = (fInt**2)
print 'no. random points', n, 'number integral', round(fInt, 3)
|
[
"hge2@ucsc.edu"
] |
hge2@ucsc.edu
|
b1321be198118b1993a81931de6a882a23c86dbd
|
9f4b9df74d87dbb493661fb341c12779a3108873
|
/DownLoad_Files/CrawlNet.py
|
3ccf53fe66defceb6d7c1c46d6c77a8f5ab64982
|
[] |
no_license
|
yangfanjx/SearchEngine
|
8559f70bec067bd4217c1ef94fd03f19be7ea292
|
626194108b7e87602438cb6ced31748dd4287592
|
refs/heads/master
| 2022-07-31T13:42:54.683605
| 2020-05-26T13:43:03
| 2020-05-26T13:43:03
| 266,956,043
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 40,442
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests  # import the requests package
from bs4 import BeautifulSoup
import csv
from SearchEngine.DownLoad_Files import Handle_Excel
from SearchEngine.DownLoad_Files.Handle_Word import Word_Table
import re
down_folder = "./download_file/"
# CSV writer
class CSV_Class(object):
def __init__(self, file_path):
        self.csvFile = open(file_path, "w", newline='')  # create the csv file (an encoding='utf-8' argument could be added here)
        self.writer = csv.writer(self.csvFile)  # create the writer object
def write_msg(self, title_list):
self.writer.writerow(title_list)
def stop_write(self):
self.csvFile.close()
# web crawling helpers
class CrawlClass(object):
    # fetch page content with a GET request
@staticmethod
def get_html(_url, use_type='GBK'):
# headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
# 'Accept-Encoding': 'gzip,deflate,br',
# 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
# 'Cache-Control': 'max-age=0',
# 'Connection': 'keep-alive',
# 'Cookie': 'JSESSIONID=5AA69E9392EE1DAE363949D193346BA4',
# 'Host': 'www.ahzsks.cn',
# 'Upgrade-Insecure-Requests': '1',
# "user-agent": "Mozilla/5.0(Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36"}
        strhtml = requests.get(_url)  # fetch the page data via GET
strhtml.encoding = use_type
return strhtml
    # select elements from the html
@staticmethod
def get_element(thle_text, select_item):
soup = BeautifulSoup(thle_text, 'lxml')
data = soup.select(select_item)
return data
    # download a file
@staticmethod
def download_file(_url, file_path):
r = requests.get(_url)
with open(file_path, "wb") as f:
f.write(r.content)
f.close()
    # write text into a txt file
@staticmethod
def write_txt(_txt, file_path):
with open(file_path, "w") as f:
_txt = _txt.replace(u'\xa0', u'')
f.write(str(_txt))
f.close()
# Beijing
class BeiJing(object):
    '''
    Beijing: after entering the second-level page, collect the information from the
    siblings and children of the <h1> tag
    '''
def __init__(self):
self.start_url = 'https://www.bjeea.cn/html/selfstudy/'
self.father_url = 'https://www.bjeea.cn'
        # full-year exam schedule
# self.QNKSAP = "div.main>div:nth-child(2)>div>div:nth-child(2)>div.top2>div:nth-child(1)>div>div> ul >li:nth-child(1)>a"
        # key dates for the self-study exams
# self.ZKZYRCAP = "div.main>div:nth-child(2)>div>div:nth-child(2)>div.top2>div:nth-child(1)>div>div> ul >li:nth-child(2)>a"
        # links inside the "quick access" panel
self.all_links = "div.main>div:nth-child(2)>div>div:nth-child(2)>div.top2>div:nth-child(1)>div>div>ul>li>a"
'''
def get_msg(self):
html_obj = CrawlClass.get_html(self.start_url)
get_obj = CrawlClass.get_element(html_obj.text, self.QNKSAP)
del html_obj
if get_obj:
sub_url = self.father_url + get_obj[0].get('href')
sub_html = CrawlClass.get_html(sub_url)
h1_obj = CrawlClass.get_element(sub_html.text, "h1")
if h1_obj:
parent_obj = h1_obj[0].parent
# 将页面中的文字写入txt文件
CrawlClass.write_txt(parent_obj.text, "./download_file/text.txt")
# 遍历所有链接,将使用带有<u>的下载文件
for i in parent_obj.select("div>p.MsoNormal>a"):
# all_span = i.find_all("span")
# if all_span:
# for _s in all_span:
# if _s.find_all("href"):
# print(_s)
# elif _s.text.split():
# continue
# print("".join(_s.text.split()))
children = i.find('u')
if children:
file_name = "./download_file/" + children.text.split(".")[1] + "." + i.get("href").split(".")[1]
CrawlClass.download_file(self.father_url + i.get("href"), file_name)
print("download file:{}".format(file_name))
def test(self):
html_obj = CrawlClass.get_html(self.start_url)
get_obj = CrawlClass.get_element(html_obj.text, self.ZKZYRCAP)
del html_obj
if get_obj:
sub_url = self.father_url + get_obj[0].get('href')
sub_html = CrawlClass.get_html(sub_url)
h1_obj = CrawlClass.get_element(sub_html.text, "h1")[0]
table_obj = CrawlClass.get_element(sub_html.text, "table")[0]
csv_obj = CSV_Class("./download_file/{}.csv".format(h1_obj.text))
for _item in table_obj.select("tr"):
write_list = []
for _i in _item.select("td"):
write_list.append(str(_i.text.replace(u'\xa0', u'')))
csv_obj.write_msg(write_list)
csv_obj.stop_write()
'''
def handle_msg(self, text_obj):
h1_obj = CrawlClass.get_element(text_obj, "h1")
if h1_obj:
            # if the page has a table, keep only the table data
if CrawlClass.get_element(text_obj, "table"):
table_obj = CrawlClass.get_element(text_obj, "table")[0]
csv_obj = CSV_Class("./download_file/{}.csv".format(h1_obj[0].text))
max_len = 0
for _index, _item in enumerate(table_obj.select("tr")):
write_list = []
for _i in _item.select("td"):
write_list.append(str(_i.text.replace(u'\xa0', u'')))
if _index and len(write_list) < max_len:
for _ in range(max_len - len(write_list)):
write_list.insert(0, "")
csv_obj.write_msg(write_list)
if _index == 0:
max_len = len(write_list)
csv_obj.stop_write()
else:
parent_obj = h1_obj[0].parent
                # write the page text into a txt file
CrawlClass.write_txt(parent_obj.text, "{}/{}.txt".format(down_folder, h1_obj[0].text))
                # walk all the links and download the files whose anchors contain a <u> tag
for i in parent_obj.select("div>p.MsoNormal>a"):
children = i.find('u')
if children:
file_name = down_folder + children.text.split(".")[1] + "." + i.get("href").split(".")[1]
CrawlClass.download_file(self.father_url + i.get("href"), file_name)
print("download file:{}".format(file_name))
def all_page(self):
html_obj = CrawlClass.get_html(self.start_url)
get_obj = CrawlClass.get_element(html_obj.text, self.all_links)
for _index, i in enumerate(get_obj):
if _index == 4:
break
sub_url = self.father_url + i.get("href")
sub_html = CrawlClass.get_html(sub_url)
self.handle_msg(sub_html.text)
# Shanghai
class ShangHai(object):
def __init__(self):
self.start_url = "http://www.shzkw.org/"
        # exam schedule page
self.ksap = "http://www.shzkw.org/shksap/"
self.all_links = "body > div.main.list-page > div.col-left > ul > li > a"
    # scrape the Shanghai "self-study exam course schedule" page and save its table as an Excel file
def handle_msg(self, text_obj):
h1_obj = CrawlClass.get_element(text_obj, "h1")
if h1_obj:
            # if the page has a table, keep only the table data
if CrawlClass.get_element(text_obj, "table"):
table_obj = CrawlClass.get_element(text_obj, "table")[0]
                # intermediate step: path of the Excel file the table is converted into
xls_obj = Handle_Excel.Html_to_Excel("./download_file/{}.xls".format(h1_obj[0].text))
try:
                    # starting row for the write loop
current_row = 0
                    # starting column for the write loop
current_col = 0
first_college = {}
for row_index, _item in enumerate(table_obj.select("tr")):
current_row = row_index - 1
                        # The first row only holds the title and is not needed.
if row_index == 0:
continue
                        # The first two rows need special handling
elif row_index == 1:
                            # The first row gets the column titles and the date information
                            # The source table has no "college" title column, so add one
xls_obj.write_msg(0, 1, 0, 0, "院校")
                            # "Major" is a fixed column, add it directly
xls_obj.write_msg(0, 1, 1, 1, "专业")
current_col = 2
for col_index, _i in enumerate(_item.select("td")):
if col_index == 0:
first_college["name"] = _i.text
first_college["col"] = int(_i.get("rowspan", 0)) - 2
continue
elif col_index < 2:
continue
xls_obj.write_msg(current_row, current_row + int(_i.get("rowspan", 0)), current_col,
current_col + int(_i.get("colspan", 0)) - 1, _i.text)
current_col += int(_i.get("colspan", 1))
continue
elif row_index == 2:
                            # The second row only holds time information, and writing starts from the second column
current_col = 2
for col_index, _i in enumerate(_item.select("td")):
xls_obj.write_msg(current_row, current_row + int(_i.get("rowspan", 0)), current_col,
current_col + int(_i.get("colspan", 0)), _i.text)
current_col += int(_i.get("colspan", 1))
continue
elif row_index == 3:
                            # From the third row on, the first college name has to be written into the first column
xls_obj.write_msg(2, 2 + first_college["col"] - 1, 0, 0, first_college["name"])
current_col = 1
for col_index, _i in enumerate(_item.select("td")):
                                # The college column has a width of 72; use this to detect college-name cells
if _i.get("width") == "72":
current_col = 0
get_row = int(_i.get("rowspan", 0))
                                    # When a college has only one row of data, do not subtract 1
xls_obj.write_msg(current_row, current_row + (get_row - 1 if get_row else 0),
current_col,
current_col + int(_i.get("colspan", 0)), _i.text)
current_col += int(_i.get("colspan", 1))
continue
xls_obj.write_msg(current_row, current_row + int(_i.get("rowspan", 0)), current_col,
current_col + int(_i.get("colspan", 0)), _i.text)
current_col += int(_i.get("colspan", 1))
except:
print(current_row, current_row + int(_i.get("rowspan", 0)), current_col,
current_col + int(_i.get("colspan", 0)), _i.text)
finally:
xls_obj.write_finish()
# write_list.append(str(_i.text.replace(u'\xa0', u'')))
def all_page(self):
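        # Only the first list entry is examined (the loop breaks at _index == 1); it is processed when its text mentions 2020 and does not contain 【.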
html_obj = CrawlClass.get_html(self.ksap)
get_obj = CrawlClass.get_element(html_obj.text, self.all_links)
for _index, i in enumerate(get_obj):
if _index == 1:
break
if "【" not in i.text and "2020年" in i.text:
sub_url = i.get("href")
sub_html = CrawlClass.get_html(sub_url)
self.handle_msg(sub_html.text)
# Guangdong
class GuangDong(object):
def __init__(self):
self.start_url = "http://eea.gd.gov.cn/zxks/index.html"
self.all_links = "body > div.main > div.content > ul > li > a"
def all_page(self):
html_obj = CrawlClass.get_html(self.start_url)
get_obj = CrawlClass.get_element(html_obj.text, self.all_links)
for _index, i in enumerate(get_obj):
if "2020" in i.text and "考试时间" in i.text:
_href = i.get('href')
print(_href)
sub_html = CrawlClass.get_html(_href)
h1_obj = CrawlClass.get_element(sub_html.text, "h3")
if h1_obj:
parent_obj = h1_obj[0].parent
p_list = parent_obj.select("p > a")
for i in p_list:
if i.get("href") and "时间安排" in i.text:
# _name = i.text.replace(".doc",".docx")
CrawlClass.download_file(i.get("href"), down_folder + i.text)
Word_Table.GuangDong_table("./download_file/" + i.text)
# Anhui
class AnHui(object):
def __init__(self):
self.start_url = "https://www.ahzsks.cn/gdjyzxks/"
self.parent = "https://www.ahzsks.cn"
self.all_links = "body > div.zk-main > div.container > div > div > div > h3"
def all_page(self):
html_obj = CrawlClass.get_html(self.start_url)
get_obj = CrawlClass.get_element(html_obj.text, self.all_links)
for _index, i in enumerate(get_obj):
if i.select("font")[0].text == "考试安排":
continue
parent_obj = i.parent
a_list = parent_obj.select("ul > li > a")
for _i in a_list:
_title = _i.get("title")
if "2020" in _title and "自学考试" in _title and "安排" in _title:
_href = self.start_url + _i.get("href")
sub_html = CrawlClass.get_html(_href)
h1_obj = CrawlClass.get_element(sub_html.text, "h3")
if h1_obj:
parent_obj = h1_obj[0].parent
# print(parent_obj)
p_list = parent_obj.select("div > p > span > a")
for _a in p_list:
if "2020" in _a.text and "自学考试" in _a.text and "安排" in _a.text:
down_link = self.parent + _a.get("href")
print("Download url:{},local path:{}".format(down_link, down_folder + _a.text))
# CrawlClass.download_file(down_link, down_folder + _a.text)
print("Download file finish")
gd_obj = Handle_Excel.GuangDong_Excel(down_folder + _a.text)
gd_obj.handle()
elif i.select("font")[0].text == "自考动态":
parent_obj = i.parent
a_list = parent_obj.select("ul > li > a")
for _i in a_list:
_title = _i.get("title")
if "2020" in _title and "自学考试" in _title:
_href = self.start_url + _i.get("href")
sub_html = CrawlClass.get_html(_href)
h1_obj = CrawlClass.get_element(sub_html.text, "h3")
if h1_obj:
parent_obj = h1_obj[0].parent
# print(parent_obj)
p_list = parent_obj.select("div.content")
print(p_list[0].text.replace(u"\n", "").replace(" ", ""))
# Guangxi
class GuangXi(object):
def __init__(self):
self.start_url = "https://www.gxeea.cn/zxks/tzgg.htm"
self.parent = "https://www.gxeea.cn"
self.all_links = "body > div.g-doc.m-artcle-list > div.right-list.fr > div > ul > li > a"
self.sub_links = "body > div.g-doc.m-detail > div.artcle-detail > p > a"
def all_page(self):
html_obj = CrawlClass.get_html(self.start_url)
get_obj = CrawlClass.get_element(html_obj.text, self.all_links)
for _index, i in enumerate(get_obj):
if "2020" in i.text and "自学考试" in i.text and "安排" in i.text:
_href = self.parent + str(i.get("href")).replace("..", "")
sub_html = CrawlClass.get_html(_href)
links_obj = CrawlClass.get_element(sub_html.text, self.sub_links)
if links_obj:
for _i in links_obj:
if "2020" in _i.text and "课程考试" in _i.text and "安排" in _i.text:
print(_i.get("href"))
file_path = down_folder + _i.text + "." + _i.get("href").split(".")[-1]
CrawlClass.download_file(_i.get("href"), file_path)
gc_obj = Handle_Excel.GuangXi_Excel(file_path)
gc_obj.handle()
# Hebei
class HeBei(object):
def __init__(self):
# body > table > tbody > tr > td > div > div.sub_right
self.start_url = "http://www.hebeea.edu.cn/html/zxks/list.html"
self.parent = "http://www.hebeea.edu.cn"
self.all_links = "body>div>ul>li>div>a"
self.sub_links = "body>div.sub_main2>table.con_content>tr>td>div>p>span>a"
def all_page(self):
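        # Find the 2020 self-study enrollment brochure, download the linked theory-course schedule, and convert it with Handle_Excel.HeBei_Excel.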
html_obj = CrawlClass.get_html(self.start_url, "utf-8")
# print(html_obj.text)
get_obj = CrawlClass.get_element(html_obj.text, self.all_links)
# print(get_obj)
for _index, i in enumerate(get_obj):
if "2020" in i.text and "自学考试" in i.text and "报考简章" in i.text:
_href = self.parent + i.get("href")
sub_html = CrawlClass.get_html(_href, "utf-8")
links_obj = CrawlClass.get_element(sub_html.text, self.sub_links)
if links_obj:
for _i in links_obj:
if _i.get("href"):
if "2020" in _i.text and "专业理论" in _i.text:
print(_i.get("href"))
file_path = down_folder + _i.text + "." + _i.get("href").split(".")[-1]
CrawlClass.download_file(_i.get("href"), file_path)
hb_obj = Handle_Excel.HeBei_Excel(file_path)
hb_obj.handle()
# Hubei
class HuBei(object):
def __init__(self):
self.start_url = "http://www.hbea.edu.cn/html/zxks/index.shtml"
self.parent = "http://www.hbea.edu.cn"
self.all_links = "#c01>table>table>tr>td>li>a"
self.sub_links_10 = "#news>ul>div>div:nth-child(3) > p > a"
self.sub_links_4 = "#news>ul>div>div:nth-child(3) > p > strong >a"
def all_page(self):
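        # Pick the 2020 registration announcements, choose the selector that matches the April or October page layout, and download the linked schedule files.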
html_obj = CrawlClass.get_html(self.start_url, "utf-8")
get_obj = CrawlClass.get_element(html_obj.text, self.all_links)
for _index, i in enumerate(get_obj):
if "2020" in i.text and "自学考试" in i.text and "报考" in i.text:
_href = i.get("href")
print(_href, i.get("title"))
sub_html = CrawlClass.get_html(_href, "utf-8")
if "4月" in i.text:
links_obj = CrawlClass.get_element(sub_html.text, self.sub_links_4)
if "10月" in i.text:
links_obj = CrawlClass.get_element(sub_html.text, self.sub_links_10)
if links_obj:
for _i in links_obj:
if "2020" in _i.text and "自学考试" in _i.text and "考试安排" in _i.text:
_href = self.parent + _i.get("href")
print(_i)
elif "请点击此处下载" in _i.text:
_href = self.parent + _i.get("href")
print(_i)
else:
continue
_name = down_folder + _href.split("/")[-1]
CrawlClass.download_file(_href, _name)
# Jilin
class JiLin(object):
def __init__(self):
self.start_url = "http://www.jleea.com.cn/zxks/"
self.parent = "http://www.jleea.com.cn/"
self.all_links = "body>div:nth-child(6)>div:nth-child(4)>div.border1.mbottom>div.main>ul>table >tr>td>a"
self.sub_links = "#rightbox>div>span>a"
def all_page(self):
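        # Follow the 2020 course-schedule announcement and feed the downloaded document (renamed from .doc to .docx) to Word_Table.JiLin_table; the download call itself is currently commented out.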
html_obj = CrawlClass.get_html(self.start_url, "GBK")
get_obj = CrawlClass.get_element(html_obj.text, self.all_links)
for _index, i in enumerate(get_obj):
if "2020" in i.text and "自学考试" in i.text and "课程安排" in i.text:
_href = self.parent + i.get("href")
sub_html = CrawlClass.get_html(_href, "GBK")
links_obj = CrawlClass.get_element(sub_html.text, self.sub_links)
if links_obj:
for _i in links_obj:
if _i.get("href"):
if "2020" in _i.text and "自学考试" in _i.text and "课程安排" in _i.text:
_href = self.parent + _i.get("href")
print(_i)
_name = down_folder + _i.text
# CrawlClass.download_file(_href, _name)
                                # The doc file needs to be converted to docx
_name = _name.replace(".doc", ".docx")
Word_Table.JiLin_table(_name)
else:
continue
# Jiangxi
class JiangXi(object):
def __init__(self):
self.start_url = "http://www.jxeea.cn/index/zxks/ksap.htm"
self.parent = "http://www.jxeea.cn/"
self.all_links = "#line152488_3 > td > a"
self.sub_links_list = ["#vsb_content > table:nth-child(22)", "#vsb_content > table:nth-child(25)"]
def all_page(self):
html_obj = CrawlClass.get_html(self.start_url, "utf-8")
get_obj = CrawlClass.get_element(html_obj.text, self.all_links)
for _index, i in enumerate(get_obj):
if "2020" in i.text and "自学考试" in i.text and "课程安排" in i.text:
_href = self.parent + i.get("href").replace("../", "")
sub_html = CrawlClass.get_html(_href, "utf-8")
for table_index, _table in enumerate(self.sub_links_list):
table_obj = CrawlClass.get_element(sub_html.text, _table)[0]
                    # Intermediate step: path of the Excel file the table is converted into
xls_obj = Handle_Excel.Html_to_Excel(
"./download_file/{}.xls".format("JiangXi" + str(table_index)))
try:
                        # Starting row for the loop that writes the data
current_row = 0
                        # Starting column for the loop that writes the data
current_col = 0
for row_index, _item in enumerate(table_obj.select("tr")):
                            # The first row holds the dates
if row_index == 0:
for col_index, _i in enumerate(_item.select("td")):
if col_index == 0:
xls_obj.write_msg(row_index, row_index, col_index, col_index, _i.text)
current_col += 1
else:
_weight = int(_i.get("colspan")) - 1 if int(_i.get("colspan")) else 0
xls_obj.write_msg(0, 0, current_col, current_col + _weight, _i.text)
current_col += _weight + 1
elif row_index == 1:
for col_index, _i in enumerate(_item.select("td")):
xls_obj.write_msg(row_index, row_index, col_index, col_index, _i.text)
current_col += 1
elif row_index == 2:
current_row = 2
current_col = 0
continue
else:
use_col = 0
for col_index, _i in enumerate(_item.select("td")):
_height = int(_i.get("rowspan", 0))
if col_index == 0 and _height > 2:
use_col = -1
continue
elif col_index == 0:
use_col = 0
current_col = col_index + use_col
_height = 1 if _height == 2 else 0
print(current_row, current_row + _height, current_col, current_col)
xls_obj.write_msg(current_row, current_row + _height, current_col, current_col,
_i.text)
current_row += 1
'''
# 前两行特殊处理
elif row_index == 1:
# 第一行添加title与日期信息
# 表格没有划分“院校”title,添加
xls_obj.write_msg(0, 1, 0, 0, "院校")
# “专业”为固定项,直接添加
xls_obj.write_msg(0, 1, 1, 1, "专业")
current_col = 2
for col_index, _i in enumerate(_item.select("td")):
if col_index == 0:
first_college["name"] = _i.text
first_college["col"] = int(_i.get("rowspan", 0)) - 2
continue
elif col_index < 2:
continue
xls_obj.write_msg(current_row,
current_row + int(_i.get("rowspan", 0)),
current_col,
current_col + int(_i.get("colspan", 0)) - 1,
_i.text)
current_col += int(_i.get("colspan", 1))
continue
elif row_index == 2:
# 第二行只有时间信息,切从第二列开始添加信息
current_col = 2
for col_index, _i in enumerate(_item.select("td")):
xls_obj.write_msg(current_row,
current_row + int(_i.get("rowspan", 0)),
current_col,
current_col + int(_i.get("colspan", 0)), _i.text)
current_col += int(_i.get("colspan", 1))
continue
elif row_index == 3:
# 第三行开始时,需要将第一个学校名称填入表格第一列
xls_obj.write_msg(2, 2 + first_college["col"] - 1, 0, 0,
first_college["name"])
current_col = 1
for col_index, _i in enumerate(_item.select("td")):
# 学校列宽度为72,使用此条件过滤学校名称列
if _i.get("width") == "72":
current_col = 0
get_row = int(_i.get("rowspan", 0))
# 当个别学校只有一行信息时,不能减1
xls_obj.write_msg(current_row,
current_row + (get_row - 1 if get_row else 0),
current_col,
current_col + int(_i.get("colspan", 0)), _i.text)
current_col += int(_i.get("colspan", 1))
continue
xls_obj.write_msg(current_row, current_row + int(_i.get("rowspan", 0)),
current_col,
current_col + int(_i.get("colspan", 0)), _i.text)
current_col += int(_i.get("colspan", 1))
'''
except:
print("ERROR:", current_row, current_row + int(_i.get("rowspan", 0)), current_col,
current_col + int(_i.get("colspan", 0)), _i.text)
finally:
xls_obj.write_finish()
# write_list.append(str(_i.text.replace(u'\xa0', u'')))
Handle_Excel.JiangXi_Excel(xls_obj)
# Liaoning
class LiaoNing(object):
def __init__(self):
self.start_url = "http://www.lnzsks.com/listinfo/zxks_1.html"
self.parent = "http://www.lnzsks.com/"
self.all_links = "body>div.main.clearfix>ul>li>a"
self.sub_links = "body>div.main.clearfix>div.info>div.content>p>a"
self.table_header = "body>div>div>table>thead>tr>td"
self.table_body = "body > div > div > table > tr"
def all_page(self):
def handle_special(m_msg):
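            # Split a cell that starts with a "ddd/dddddd" specialty code into [specialty code, specialty name].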
_code = re.findall(r"\d{3}/\d{6}", m_msg)[0]
_name = re.sub(r"\d{3}/\d{6}", "", m_msg)
return [_code, _name]
def handle_calss(m_msg_list):
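            # Break a run of concatenated 5-digit course codes and course names into a list of {"calss_code", "class_name"} dicts.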
return_list = []
for i in zip(re.findall(r"\d{5}", m_msg_list), [i for i in re.split(r"\d{5}", m_msg_list) if i]):
return_list.append({"calss_code": i[0], "class_name": i[1]})
return return_list
html_obj = CrawlClass.get_html(self.start_url, "utf-8")
get_obj = CrawlClass.get_element(html_obj.text, self.all_links)
for _index, i in enumerate(get_obj):
if "2020" in i.text and "自学考试" in i.text and "报考简章" in i.text:
_href = self.parent + i.get("href").replace("../", "")
# print(_href)
sub_html = CrawlClass.get_html(_href, "utf-8")
links_obj = CrawlClass.get_element(sub_html.text, self.sub_links)
if links_obj:
for _i in links_obj:
if _i.get("href"):
if "2020" in _i.text and "月考试课程安排表" in _i.text:
_href = _i.get("href")
table_html = CrawlClass.get_html(_href, "GBK")
heater_obj = CrawlClass.get_element(table_html.text, self.table_header)
date_time_list = []
for _msg in heater_obj:
date_time_list.append(
[i.replace(" ", "").replace(u"\r", "") for i in _msg.text.split(u"\n") if i])
print(date_time_list)
structured_dict = {}
_special_code = ""
body_obj = CrawlClass.get_element(table_html.text, self.table_body)
for _tr in body_obj:
msg_list = []
for _index, _td in enumerate(_tr.select("td")):
if _td.text:
if _index == 0:
special_msg = _td.text.replace(u"\n", "")
_special_code, _special_name = handle_special(special_msg)
else:
dict_list = handle_calss(
_td.text.replace(" ", "").replace(u"\r", "").replace(u"\n", ""))
for __dict in dict_list:
__dict["date"] = date_time_list[_index][0]
__dict["time"] = date_time_list[_index][1]
__dict["special_code"] = _special_code
__dict["special_name"] = _special_name
msg_list.append(__dict)
else:
continue
structured_dict[_special_code] = msg_list
print(structured_dict)
else:
continue
# Shanxi
class ShanXi(object):
def __init__(self):
self.start_url = "http://www.sxkszx.cn/ksfwpt.html"
self.parent = "http://www.sxkszx.cn/"
self.all_links = "#nav > ul > li:nth-child(4) > div > div > dl:nth-child(2) > dd > a"
self.sub_links = ["#newsbody_class > div:nth-child(4) > table", "#newsbody_class > div:nth-child(8) > table"]
def all_page(self):
def handle_special(m_msg):
_code = re.findall(r"\d{3}/\d{6}", m_msg)[0]
_name = re.sub(r"\d{3}/\d{6}", "", m_msg)
return [_code, _name]
def handle_calss(m_msg_list):
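            # Same idea as the LiaoNing helper, except that here each 5-digit course code is wrapped in parentheses.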
print(m_msg_list)
return_list = []
for i in zip(re.findall(r"[(](\d{5})[)]", m_msg_list),
[i for i in re.split(r"[(](\d{5})[)]", m_msg_list) if i]):
return_list.append({"calss_code": i[0], "class_name": i[1]})
return return_list
html_obj = CrawlClass.get_html(self.start_url, "utf-8")
get_obj = CrawlClass.get_element(html_obj.text, self.all_links)
for _index, i in enumerate(get_obj):
if "2020" in i.text and "考试课程" in i.text and "时间" in i.text:
_href = self.parent + i.get("href").replace("../", "")
print(_href)
sub_html = CrawlClass.get_html(_href, "utf-8")
h1_obj = CrawlClass.get_element(sub_html.text, "#newsbody_class>div>table>tbody")
date_list = []
time_list = []
max_col = 7
for table_obj in h1_obj:
for _num, _tr in enumerate(table_obj.select("tr")):
if _num == 0:
continue
if _num == 1:
for _td in _tr.select("td"):
add_msg = _td.text.replace(u"\n", "")
if add_msg in date_list:
continue
else:
date_list.append(add_msg)
elif _num == 2:
for _td in _tr.select("td"):
add_msg = _td.text.replace(u"\n", "")
if add_msg in time_list:
continue
else:
time_list.append(add_msg)
else:
special_code = ""
special_name = ""
for _ind, _td in enumerate(_tr.select("td")):
if _ind in (0, max_col):
continue
elif _ind == 1:
special_code = _td.text.replace(u"\n", "")
elif _ind == 2:
special_name = _td.text.replace(u"\n", "")
else:
print(_td)
print(CrawlClass.get_element(_td.text, "div>span"))
# print(_td.select("span"))
# print(handle_calss(_td.select("div>span")))
# print(_tr)
# print(tabls_obj)
# for _table in self.sub_links:
# links_obj = CrawlClass.get_element(sub_html.text, _table)
# print(links_obj)
# # for _tr in body_obj:
# # msg_list = []
# # for _index, _td in enumerate(_tr.select("td")):
# # if _td.text:
# # if _index == 0:
# # special_msg = _td.text.replace(u"\n", "")
# # _special_code, _special_name = handle_special(special_msg)
# # else:
# # dict_list = handle_calss(
# # _td.text.replace(" ", "").replace(u"\r", "").replace(u"\n", ""))
# # for __dict in dict_list:
# # __dict["date"] = date_time_list[_index][0]
# # __dict["time"] = date_time_list[_index][1]
# # __dict["special_code"] = _special_code
# # __dict["special_name"] = _special_name
# # msg_list.append(__dict)
# # else:
# # continue
# # structured_dict[_special_code] = msg_list
# # print(structured_dict)
break
# Zhejiang: PDF, Yunnan: images, Xinjiang: images
if __name__ == "__main__":
# bj = BeiJing()
# bj.all_page()
test = AnHui()
test.all_page()
|
[
"40845650+yangfanjx@users.noreply.github.com"
] |
40845650+yangfanjx@users.noreply.github.com
|
3b357c8230a89675c8166c429994a142c867bce6
|
8eca4bbf07e2cad4dbc4fa685d2b34b0cdc4508e
|
/build/usb_cam/catkin_generated/pkg.installspace.context.pc.py
|
37ff0319fff6a308e509f32f7168d0b64ab31053
|
[] |
no_license
|
marcfroberts/NASASwarmathon2018
|
a4555d432a0ab23cc96bc36c1f8e2ba417f49478
|
e2b1f62eb0d255fe10ab6b5e699bfa71a5e02ffd
|
refs/heads/master
| 2021-05-15T09:04:55.268856
| 2018-03-27T18:41:42
| 2018-03-27T18:41:42
| 108,076,212
| 0
| 0
| null | 2018-03-27T18:41:43
| 2017-10-24T04:21:17
|
Makefile
|
UTF-8
|
Python
| false
| false
| 492
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/swarmie/SwarmBaseCode-ROS/install/include".split(';') if "/home/swarmie/SwarmBaseCode-ROS/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lusb_cam".split(';') if "-lusb_cam" != "" else []
PROJECT_NAME = "usb_cam"
PROJECT_SPACE_DIR = "/home/swarmie/SwarmBaseCode-ROS/install"
PROJECT_VERSION = "0.3.6"
|
[
"matthew@fricke.co.uk"
] |
matthew@fricke.co.uk
|
7d755fc576080c119f8c871344e8aedfbbb39a79
|
350864228c8979440115cfc394757598214d4f73
|
/Music4U/app.py
|
ce56bcc49665f228384c36b5852bf202da888422
|
[] |
no_license
|
yunjujo/homework
|
c207689d64e9ef9aaa3af2d8d914f4b3a522752d
|
59dff1607e6e596d0e738d9ecf4e883237843725
|
refs/heads/master
| 2022-12-30T13:41:25.125410
| 2020-10-27T23:00:32
| 2020-10-27T23:00:32
| 304,310,939
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,965
|
py
|
from flask import Flask, render_template, jsonify, request
import requests
from bs4 import BeautifulSoup
from pymongo import MongoClient  # Import pymongo (the package has to be installed first)
app = Flask(__name__)
client = MongoClient('localhost', 27017)  # MongoDB runs on port 27017.
db = client.dbsparta  # Create or use the database named 'dbsparta'.
@app.route('/')
def home():
return render_template('index.html')
@app.route('/memo', methods=['POST'])
def post_article():
    # 1. Receive the data from the client
url_receive = request.form['url_give']
comment_receive = request.form['comment_give']
lyrics_receive = request.form['lyrics_give']
    # 2. Scrape the meta tags
    # Fetch the HTML from the given URL,
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}
data = requests.get(url_receive, headers=headers)
    # parse the HTML with the BeautifulSoup library so it is easy to search
soup = BeautifulSoup(data.text, 'html.parser')
    # use select_one to pull out the og:title and og:image meta tags
title = soup.select_one('meta[property="og:title"]')['content']
image = soup.select_one('meta[property="og:image"]')['content']
doc = {
'title' : title,
'image' : image,
'desc' : lyrics_receive,
'url' : url_receive,
'comment' : comment_receive,
'like' : 0
}
db.music.insert_one(doc)
    # 3. Insert the data into MongoDB
return jsonify({'result': 'success', 'msg': '저장이 완료되었습니다!'})
@app.route('/memo', methods=['GET'])
def read_articles():
    # 1. Read all documents from MongoDB, excluding the _id field
musics = list(db.music.find({}, {'_id': False}).sort('like', -1))
    # 2. Send the results back under the 'musics' key
return jsonify({'result': 'success', 'musics': musics})
@app.route('/memo/like', methods=['POST'])
def like_star():
    # 1. Store the title_give value sent by the client in the title_receive variable.
title_receive = request.form['title_give']
    # 2. Use find_one to look up the music document whose title matches title_receive.
music_title = db.music.find_one({'title' : title_receive})
    # 3. Create new_like by adding 1 to the current like count of music_title.
new_like = music_title['like'] + 1
    # 4. Update the like field of the document whose title is title_receive to new_like.
db.music.update_one({'title' : title_receive}, {'$set':{'like':new_like}})
    # Note: use the '$set' operator!
    # 5. Return a success message when the update is done.
return jsonify({'result': 'success', 'msg': '좋아요 완료!'})
if __name__ == '__main__':
app.run('0.0.0.0', port=5000, debug=True)
|
[
"35657441+yunjujo@users.noreply.github.com"
] |
35657441+yunjujo@users.noreply.github.com
|
7f06032dba7dbccf331c8ae2b298cd060ce22cac
|
16aabd7d01d327815b64fe1ee39e1c42e07acdfa
|
/pipeline/table_figure_scripts/TALONClass.py
|
a6e320389960515565580f61f089deb026399519
|
[
"MIT"
] |
permissive
|
dewyman/TALON-paper-2019
|
b17e1139afabd350c72fcc9d7078f49de746c046
|
8644b34573d6a5924e8d84a234fd0fcbf010c233
|
refs/heads/master
| 2020-04-28T06:31:33.907456
| 2019-11-04T20:13:34
| 2019-11-04T20:13:34
| 175,061,699
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,733
|
py
|
import sys
# this is the "class" code for creating Transcript objects.
# all objects belong to a class, and this is the definition of it
# there will be 1 object for each Talon Transcript
# each column name is added to the object as an attribute
# we could have written this in a separate file, then imported it using import
class Transcript:
    # this is a list of column names shared between all objects
fieldNames = []
# this is the function that is called when we create an object
def __init__(self, inline):
#we read the line and split it into a list of fields. .strip() removes the newline
fields = inline.strip().split()
self.transcript_ID = ''
# we go through the columns in order using a numeric index
for index in range(len(self.fieldNames)):
attribute = self.fieldNames[index]
value = fields[index]
# this is the command that adds the attribute to the object for each field
setattr(self, attribute, value)
# This will change with the new TALON rewrite
if attribute == 'transcript_novelty':
self.transcriptType = value
def __len__(self):
return int(self.length)
def getCounts(self,dataset):
return int(getattr(self,dataset))
def __str__(self):
return "instance of Gene for " + self.transcript_ID + "\n" + str(self.__dict__)
def __repr__(self):
return "Talon transcript " + str(self.transcript_ID)
class Gene:
def __init__(self,gID,gAnnot,gTalonID,gStatus):
self.geneID = gID
self.status = gStatus
self.geneAnnot = gAnnot
self.geneTalonID = gTalonID
self.transcriptDict = {}
def setTypes(self,typeList):
for aType in typeList:
self.transcriptDict[aType]=[]
def addTranscript(self,aTranscript):
transcriptType = aTranscript.transcriptType
self.transcriptDict[transcriptType].append(aTranscript)
def getTranscripts(self,aType=''):
if aType in self.transcriptDict:
return self.transcriptDict[aType]
results=[]
for oneType in self.transcriptDict:
results += self.transcriptDict[oneType]
return results
def __str__(self):
return "instance of Gene for " + self.geneID + "\n" + str(self.__dict__)
class talonResults:
    # These are the different types of transcripts, in the order they will be recorded in the output files
transcriptTypes = ['Known','NIC','NNC','ISM','Antisense','Intergenic','Genomic']
def __init__(self, abundanceFile, datasetList):
self.infilename = abundanceFile
self.geneDict = {}
self.nameDict={}
infile = open(abundanceFile)
# the first line of the TALON abundance file has all of the column names
firstLine = infile.readline()
# this is where we define the column names for all of the Transcript objects
Transcript.fieldNames = firstLine.strip().split()
# We are going to read each line of the input file, create a transcript object, and record it in the dictionaries
for line in infile:
# create an object for this line
thisTranscript = Transcript(line)
# this is the object's TALON gene_ID
currentGID = thisTranscript.gene_ID
# initialize geneDict for the gene using a new Gene object if necessary
if currentGID not in self.geneDict:
self.geneDict[currentGID] = Gene( thisTranscript.annot_gene_id, thisTranscript.annot_gene_name,currentGID, thisTranscript.gene_novelty)
#self.nameDict[currentGID]=thisTranscript.geneAnnot
self.nameDict[thisTranscript.annot_gene_name.lower()] = currentGID
self.geneDict[currentGID].setTypes(talonResults.transcriptTypes)
# we now add the transcriptObject in the geneDict, which will track also its novelty type
self.geneDict[currentGID].addTranscript(thisTranscript)
infile.close()
def __str__(self):
return "instance of talonResults for " + self.infilename + " with " + str(len(self.geneDict)) + " genes"
def getGenes(self):
return self.geneDict
def getTranscriptTypes(self):
return self.transcriptTypes
def getGene(self,geneAnnot):
thisGID = self.nameDict[geneAnnot.lower()]
return self.geneDict[thisGID]
# just a helper function for writing a file given a name, a header line, and a list of lines
def writeOutfile(outfilename, header, lineList):
outfile = open(outfilename,'w')
outfile.write(header)
outfile.writelines(lineList)
outfile.close()
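# Minimal usage sketch (the abundance file path and gene name below are hypothetical examples):
#   results = talonResults("talon_abundance.tsv", [])
#   gene = results.getGene("GAPDH")
#   print(gene.getTranscripts("Known"))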
|
[
"noreply@github.com"
] |
dewyman.noreply@github.com
|
61bc8a3a202bc70ca7a6d6c6a4c970e5e87ea59c
|
06b25df867b9a4741b4ca803eceb254aa50758e9
|
/editor_api/rest/lum.py
|
acdf87c0a73ffc1bdb30ca30bc06f1dcb3063474
|
[
"MIT"
] |
permissive
|
jphuart/swatplus-automatic-workflow
|
e5ceaa745096926176d9fc45042f836e628d0504
|
dd2eeb7f882eb2d4ab7e1e5265c10b9beb93ddc4
|
refs/heads/master
| 2023-08-15T02:47:40.742352
| 2021-10-05T14:57:19
| 2021-10-05T14:57:19
| 282,170,706
| 0
| 0
| null | 2020-07-24T08:52:25
| 2020-07-24T08:52:24
| null |
UTF-8
|
Python
| false
| false
| 19,887
|
py
|
from flask_restful import Resource, reqparse, abort
from playhouse.shortcuts import model_to_dict
from peewee import *
from .base import BaseRestModel
from database.project import base
from database.project.setup import SetupProjectDatabase
from database.project.lum import Landuse_lum, Management_sch, Cntable_lum, Cons_prac_lum, Ovn_table_lum, Management_sch_auto, Management_sch_op
from database.project.structural import Tiledrain_str, Septic_str, Filterstrip_str, Grassedww_str, Bmpuser_str
from database.project.hru_parm_db import Urban_urb
from database.project.init import Plant_ini
from database.project.decision_table import D_table_dtl
from database.datasets.setup import SetupDatasetsDatabase
from database.datasets import lum as ds_lum
from database import lib
from helpers import utils
invalid_name_msg = 'Invalid name {name}. Please ensure the value exists in your database.'
def get_landuse_args(get_selected_ids=False):
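	# Parse the land use request body; when get_selected_ids is True, expect a list of selected_ids for bulk updates instead of a single id.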
parser = reqparse.RequestParser()
if get_selected_ids:
parser.add_argument('selected_ids', type=int, action='append', required=False, location='json')
else:
parser.add_argument('id', type=int, required=False, location='json')
parser.add_argument('name', type=str, required=True, location='json')
parser.add_argument('description', type=str, required=False, location='json')
parser.add_argument('cal_group', type=str, required=False, location='json')
parser.add_argument('urb_ro', type=str, required=False, location='json')
parser.add_argument('plnt_com_name', type=str, required=False, location='json')
parser.add_argument('mgt_name', type=str, required=False, location='json')
parser.add_argument('cn2_name', type=str, required=False, location='json')
parser.add_argument('cons_prac_name', type=str, required=False, location='json')
parser.add_argument('urban_name', type=str, required=False, location='json')
parser.add_argument('ov_mann_name', type=str, required=False, location='json')
parser.add_argument('tile_name', type=str, required=False, location='json')
parser.add_argument('sep_name', type=str, required=False, location='json')
parser.add_argument('vfs_name', type=str, required=False, location='json')
parser.add_argument('grww_name', type=str, required=False, location='json')
parser.add_argument('bmp_name', type=str, required=False, location='json')
args = parser.parse_args(strict=True)
return args
def save_landuse_args(self, m, args):
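	# Copy the parsed request values onto the Landuse_lum model, resolving each referenced record (plant community, management schedule, curve number table, and so on) from its name to its id.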
m.name = args['name']
m.description = args['description']
m.cal_group = utils.remove_space(args['cal_group'])
m.urb_ro = args['urb_ro']
m.plnt_com_id = self.get_id_from_name(Plant_ini, args['plnt_com_name'])
m.mgt_id = self.get_id_from_name(Management_sch, args['mgt_name'])
m.cn2_id = self.get_id_from_name(Cntable_lum, args['cn2_name'])
m.cons_prac_id = self.get_id_from_name(Cons_prac_lum, args['cons_prac_name'])
m.urban_id = self.get_id_from_name(Urban_urb, args['urban_name'])
m.ov_mann_id = self.get_id_from_name(Ovn_table_lum, args['ov_mann_name'])
m.tile_id = self.get_id_from_name(Tiledrain_str, args['tile_name'])
m.sep_id = self.get_id_from_name(Septic_str, args['sep_name'])
m.vfs_id = self.get_id_from_name(Filterstrip_str, args['vfs_name'])
m.grww_id = self.get_id_from_name(Grassedww_str, args['grww_name'])
m.bmp_id = self.get_id_from_name(Bmpuser_str, args['bmp_name'])
return m.save()
class LanduseLumListApi(BaseRestModel):
def get(self, project_db, sort, reverse, page, items_per_page):
table = Landuse_lum
list_name = 'landuse'
return self.base_paged_list(project_db, sort, reverse, page, items_per_page, table, list_name, True)
class LanduseLumApi(BaseRestModel):
def get(self, project_db, id):
return self.base_get(project_db, id, Landuse_lum, 'Landuse', True)
def delete(self, project_db, id):
return self.base_delete(project_db, id, Landuse_lum, 'Landuse')
def put(self, project_db, id):
args = get_landuse_args()
try:
SetupProjectDatabase.init(project_db)
m = Landuse_lum.get(Landuse_lum.id == id)
result = save_landuse_args(self, m, args)
if result > 0:
return 200
abort(400, message='Unable to update land use properties {id}.'.format(id=id))
except IntegrityError as e:
abort(400, message='Land use name must be unique.')
except Landuse_lum.DoesNotExist:
abort(404, message='Land use properties {id} does not exist'.format(id=id))
except Plant_ini.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['plnt_com_name']))
except Management_sch.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['mgt_name']))
except Cntable_lum.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['cn2_name']))
except Cons_prac_lum.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['cons_prac_name']))
except Urban_urb.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['urban_name']))
except Ovn_table_lum.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['ov_mann_name']))
except Tiledrain_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['tile_name']))
except Septic_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['sep_name']))
except Filterstrip_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['vfs_name']))
except Grassedww_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['grww_name']))
except Bmpuser_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['bmp_name']))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
class LanduseLumPostApi(BaseRestModel):
def post(self, project_db):
args = get_landuse_args()
try:
SetupProjectDatabase.init(project_db)
m = Landuse_lum()
result = save_landuse_args(self, m, args)
if result > 0:
return 200
			abort(400, message='Unable to create land use properties.')
except IntegrityError as e:
			abort(400, message='Land use name must be unique.')
except Plant_ini.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['plnt_com_name']))
except Management_sch.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['mgt_name']))
except Cntable_lum.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['cn2_name']))
except Cons_prac_lum.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['cons_prac_name']))
except Urban_urb.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['urban_name']))
except Ovn_table_lum.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['ov_mann_name']))
except Tiledrain_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['tile_name']))
except Septic_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['sep_name']))
except Filterstrip_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['vfs_name']))
except Grassedww_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['grww_name']))
except Bmpuser_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['bmp_name']))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
class LanduseLumUpdateManyApi(BaseRestModel):
def get(self, project_db):
return self.base_name_id_list(project_db, Landuse_lum)
def put(self, project_db):
SetupProjectDatabase.init(project_db)
args = get_landuse_args(True)
try:
param_dict = {}
if args['cal_group'] is not None:
param_dict['cal_group'] = utils.remove_space(args['cal_group'])
if args['urb_ro'] is not None:
param_dict['urb_ro'] = args['urb_ro']
if args['plnt_com_name'] is not None:
param_dict['plnt_com_id'] = self.get_id_from_name(Plant_ini, args['plnt_com_name'])
if args['mgt_name'] is not None:
param_dict['mgt_id'] = self.get_id_from_name(Management_sch, args['mgt_name'])
if args['cn2_name'] is not None:
param_dict['cn2_id'] = self.get_id_from_name(Cntable_lum, args['cn2_name'])
if args['cons_prac_name'] is not None:
param_dict['cons_prac_id'] = self.get_id_from_name(Cons_prac_lum, args['cons_prac_name'])
if args['urban_name'] is not None:
param_dict['urban_id'] = self.get_id_from_name(Urban_urb, args['urban_name'])
if args['ov_mann_name'] is not None:
param_dict['ov_mann_id'] = self.get_id_from_name(Ovn_table_lum, args['ov_mann_name'])
if args['tile_name'] is not None:
param_dict['tile_id'] = self.get_id_from_name(Tiledrain_str, args['tile_name'])
if args['sep_name'] is not None:
param_dict['sep_id'] = self.get_id_from_name(Septic_str, args['sep_name'])
if args['vfs_name'] is not None:
param_dict['vfs_id'] = self.get_id_from_name(Filterstrip_str, args['vfs_name'])
if args['grww_name'] is not None:
param_dict['grww_id'] = self.get_id_from_name(Grassedww_str, args['grww_name'])
if args['bmp_name'] is not None:
param_dict['bmp_id'] = self.get_id_from_name(Bmpuser_str, args['bmp_name'])
query = Landuse_lum.update(param_dict).where(Landuse_lum.id.in_(args['selected_ids']))
result = query.execute()
if result > 0:
return 200
			abort(400, message='Unable to update land use properties.')
except Plant_ini.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['plnt_com_name']))
except Management_sch.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['mgt_name']))
except Cntable_lum.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['cn2_name']))
except Cons_prac_lum.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['cons_prac_name']))
except Urban_urb.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['urban_name']))
except Ovn_table_lum.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['ov_mann_name']))
except Tiledrain_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['tile_name']))
except Septic_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['sep_name']))
except Filterstrip_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['vfs_name']))
except Grassedww_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['grww_name']))
except Bmpuser_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['bmp_name']))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
def save_cntable_lum(m, args):
m.name = args['name']
m.description = utils.remove_space(args['description'])
m.cn_a = args['cn_a']
m.cn_b = args['cn_b']
m.cn_c = args['cn_c']
m.cn_d = args['cn_d']
m.treat = utils.remove_space(args['treat'])
m.cond_cov = utils.remove_space(args['cond_cov'])
return m.save()
class CntableLumListApi(BaseRestModel):
def get(self, project_db, sort, reverse, page, items_per_page):
table = Cntable_lum
list_name = 'cntable'
return self.base_paged_list(project_db, sort, reverse, page, items_per_page, table, list_name)
class CntableLumApi(BaseRestModel):
def get(self, project_db, id):
return self.base_get(project_db, id, Cntable_lum, 'Curve Number')
def delete(self, project_db, id):
return self.base_delete(project_db, id, Cntable_lum, 'Curve Number')
def put(self, project_db, id):
try:
SetupProjectDatabase.init(project_db)
args = self.get_args('cntable_lum', project_db)
m = Cntable_lum.get(Cntable_lum.id == id)
result = save_cntable_lum(m, args)
if result > 0:
return 200
abort(400, message='Unable to update curve number table {id}.'.format(id=id))
except IntegrityError as e:
abort(400, message='Curve number table name must be unique.')
except Cntable_lum.DoesNotExist:
abort(404, message='Curve number table {id} does not exist'.format(id=id))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
class CntableLumUpdateManyApi(BaseRestModel):
def get(self, project_db):
return self.base_name_id_list(project_db, Cntable_lum)
def put(self, project_db):
try:
SetupProjectDatabase.init(project_db)
args = self.get_args('cntable_lum', project_db, True)
remove_spaces = ['description', 'treat', 'cond_cov']
param_dict = {}
for key in args.keys():
if args[key] is not None and key != 'selected_ids':
param_dict[key] = utils.remove_space(args[key]) if key in remove_spaces else args[key]
query = Cntable_lum.update(param_dict).where(Cntable_lum.id.in_(args['selected_ids']))
result = query.execute()
if result > 0:
return 200
abort(400, message='Unable to update curve number tables.')
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
class CntableLumPostApi(BaseRestModel):
def post(self, project_db):
try:
SetupProjectDatabase.init(project_db)
args = self.get_args('cntable_lum', project_db)
m = Cntable_lum()
result = save_cntable_lum(m, args)
if result > 0:
return model_to_dict(m), 201
abort(400, message='Unable to update curve number table {id}.'.format(id=id))
except IntegrityError as e:
abort(400, message='Curve number table name must be unique.')
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
class CntableLumDatasetsApi(BaseRestModel):
def get(self, datasets_db, name):
return self.base_get_datasets_name(datasets_db, name, ds_lum.Cntable_lum, 'Curve number table')
class OvntableLumListApi(BaseRestModel):
def get(self, project_db, sort, reverse, page, items_per_page):
table = Ovn_table_lum
list_name = 'ovntable'
return self.base_paged_list(project_db, sort, reverse, page, items_per_page, table, list_name)
class OvntableLumApi(BaseRestModel):
def get(self, project_db, id):
return self.base_get(project_db, id, Ovn_table_lum, 'Mannings n')
def delete(self, project_db, id):
return self.base_delete(project_db, id, Ovn_table_lum, 'Mannings n')
def put(self, project_db, id):
return self.base_put(project_db, id, Ovn_table_lum, 'Mannings n')
class OvntableLumUpdateManyApi(BaseRestModel):
def get(self, project_db):
return self.base_name_id_list(project_db, Ovn_table_lum)
def put(self, project_db):
return self.base_put_many(project_db, Ovn_table_lum, 'Mannings n')
class OvntableLumPostApi(BaseRestModel):
def post(self, project_db):
return self.base_post(project_db, Ovn_table_lum, 'Mannings n')
class OvntableLumDatasetsApi(BaseRestModel):
def get(self, datasets_db, name):
return self.base_get_datasets_name(datasets_db, name, ds_lum.Ovn_table_lum, 'Mannings n table')
class ConsPracLumListApi(BaseRestModel):
def get(self, project_db, sort, reverse, page, items_per_page):
table = Cons_prac_lum
list_name = 'cons_prac'
return self.base_paged_list(project_db, sort, reverse, page, items_per_page, table, list_name)
class ConsPracLumApi(BaseRestModel):
def get(self, project_db, id):
return self.base_get(project_db, id, Cons_prac_lum, 'Conservation practice')
def delete(self, project_db, id):
return self.base_delete(project_db, id, Cons_prac_lum, 'Conservation practice')
def put(self, project_db, id):
return self.base_put(project_db, id, Cons_prac_lum, 'Conservation practice')
class ConsPracLumUpdateManyApi(BaseRestModel):
def get(self, project_db):
return self.base_name_id_list(project_db, Cons_prac_lum)
def put(self, project_db):
return self.base_put_many(project_db, Cons_prac_lum, 'Conservation practice')
class ConsPracLumPostApi(BaseRestModel):
def post(self, project_db):
return self.base_post(project_db, Cons_prac_lum, 'Conservation practice')
class ConsPracLumDatasetsApi(BaseRestModel):
def get(self, datasets_db, name):
return self.base_get_datasets_name(datasets_db, name, ds_lum.Cons_prac_lum, 'Conservation practices')
def get_mgt_args():
parser = reqparse.RequestParser()
parser.add_argument('id', type=int, required=False, location='json')
parser.add_argument('name', type=str, required=True, location='json')
parser.add_argument('auto_ops', type=list, required=False, location='json')
parser.add_argument('operations', type=list, required=False, location='json')
args = parser.parse_args(strict=True)
return args
class ManagementSchListApi(BaseRestModel):
def get(self, project_db, sort, reverse, page, items_per_page):
table = Management_sch
list_name = 'mgt_sch'
SetupProjectDatabase.init(project_db)
total = table.select().count()
sort_val = SQL(sort)
if reverse == 'true':
sort_val = SQL(sort).desc()
m = table.select().order_by(sort_val).paginate(int(page), int(items_per_page))
ml = [{'id': v.id, 'name': v.name, 'num_ops': len(v.operations), 'num_auto': len(v.auto_ops)} for v in m]
return {'total': total, list_name: ml}
class ManagementSchApi(BaseRestModel):
def get(self, project_db, id):
return self.base_get(project_db, id, Management_sch, 'Management schedule', back_refs=True, max_depth=2)
def delete(self, project_db, id):
return self.base_delete(project_db, id, Management_sch, 'Management schedule')
def put(self, project_db, id):
try:
SetupProjectDatabase.init(project_db)
args = get_mgt_args()
m = Management_sch.get(Management_sch.id == id)
m.name = args['name']
m.save()
new_auto = []
for a in args['auto_ops']:
try:
dt = D_table_dtl.get((D_table_dtl.file_name == 'lum.dtl') & (D_table_dtl.name == a))
new_auto.append({'management_sch_id': m.id, 'd_table_id': dt.id})
except D_table_dtl.DoesNotExist:
abort(404, message='Decision table {name} does not exist'.format(name=a))
new_ops = []
order = 1
for o in args['operations']:
new_ops.append({
'management_sch_id': m.id,
'op_typ': o['op_typ'],
'mon': o['mon'],
'day': o['day'],
'op_data1': o['op_data1'],
'op_data2': o['op_data2'],
'op_data3': o['op_data3'],
'order': o['order'],
'hu_sch': o['hu_sch']
})
order += 1
Management_sch_auto.delete().where(Management_sch_auto.management_sch_id == m.id).execute()
lib.bulk_insert(base.db, Management_sch_auto, new_auto)
Management_sch_op.delete().where(Management_sch_op.management_sch_id == m.id).execute()
lib.bulk_insert(base.db, Management_sch_op, new_ops)
return 200
except IntegrityError as e:
abort(400, message='Management schedule name must be unique.')
except Cons_prac_lum.DoesNotExist:
abort(404, message='Management schedule {id} does not exist'.format(id=id))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
class ManagementSchPostApi(BaseRestModel):
def post(self, project_db):
try:
args = get_mgt_args()
m = Management_sch()
m.name = args['name']
m.save()
new_auto = []
for a in args['auto_ops']:
try:
dt = D_table_dtl.get((D_table_dtl.file_name == 'lum.dtl') & (D_table_dtl.name == a))
new_auto.append({'management_sch_id': m.id, 'd_table_id': dt.id})
except D_table_dtl.DoesNotExist:
abort(404, message='Decision table {name} does not exist'.format(name=a))
new_ops = []
order = 1
for o in args['operations']:
new_ops.append({
'management_sch_id': m.id,
'op_typ': o['op_typ'],
'mon': o['mon'],
'day': o['day'],
'op_data1': o['op_data1'],
'op_data2': o['op_data2'],
'op_data3': o['op_data3'],
'order': o['order'],
'hu_sch': o['hu_sch']
})
order += 1
lib.bulk_insert(base.db, Management_sch_auto, new_auto)
lib.bulk_insert(base.db, Management_sch_op, new_ops)
return 201
except IntegrityError as e:
abort(400, message='Management schedule name must be unique.')
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
|
[
"celray.chawanda@outlook.com"
] |
celray.chawanda@outlook.com
|
0f5deea79f8da3d2ff9acc9d892c0b222e273d5d
|
a41d53564a56210851c784422a430cf31bf2bfd6
|
/examples/coffee_bar/cash_register/models/handlers/coffee_served_handler.py
|
7c5c44532ce019c132b5638bc3ad0503ff51bbf6
|
[
"MIT"
] |
permissive
|
ohemelaar/tonga
|
3227bbf9e516aa0e48a0097cc86081ca37269660
|
d98f56df0400e1fdfc8f2df4b650c9c0addd0bc1
|
refs/heads/master
| 2020-05-31T17:57:58.089966
| 2019-06-05T12:35:49
| 2019-06-05T12:35:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,426
|
py
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
from aiokafka import TopicPartition
from typing import Optional
# Import BaseEvent
from tonga.models.events.event import BaseEvent
# Import BaseEventHandler
from tonga.models.handlers.event.event_handler import BaseEventHandler
# Import StoreBuilderBase
from tonga.stores.store_builder.base import BaseStoreBuilder
# Import BaseProducer
from tonga.services.producer.base import BaseProducer
# Import Coffee Model
from examples.coffee_bar.cash_register.models.events.bill_paid import BillPaid
from examples.coffee_bar.cash_register.models.bill import Bill
class CoffeeServedHandler(BaseEventHandler):
_store_builder: BaseStoreBuilder
_transactional_producer: BaseProducer
def __init__(self, store_builder: BaseStoreBuilder, transactional_producer: BaseProducer, **kwargs):
super().__init__(**kwargs)
self._store_builder = store_builder
self._transactional_producer = transactional_producer
async def handle(self, event: BaseEvent, tp: TopicPartition, group_id: str, offset: int) -> Optional[str]:
if not self._transactional_producer.is_running():
await self._transactional_producer.start_producer()
async with self._transactional_producer.init_transaction():
# Creates commit_offsets dict
commit_offsets = {tp: offset + 1}
# Gets bill in local store
bill = Bill.__from_bytes_dict__(await self._store_builder.get_from_local_store(event.context['bill_uuid']))
# Updates bill
bill.set_is_paid(True)
bill.set_context(event.context)
# Sets updated bill on cash register local store
await self._store_builder.set_from_local_store(bill.uuid, bill.__to_bytes_dict__())
# Creates BillPaid event
bill_paid = BillPaid(uuid=bill.uuid, coffee_uuid=bill.coffee_uuid, amount=bill.amount,
context=bill.context)
# Sends BillCreated event
await self._transactional_producer.send_and_await(bill_paid, 'cash-register-events')
# End transaction
await self._transactional_producer.end_transaction(commit_offsets, group_id)
# TODO raise an exception
return 'transaction'
@classmethod
def handler_name(cls) -> str:
return 'tonga.waiter.event.CoffeeServed'
|
[
"theo.dangla@epitech.eu"
] |
theo.dangla@epitech.eu
|
29c8d5b661e0845d95e719b70c945412bef5640f
|
b28cce9b6b2f30c7ebce8433de8c53cf6ec75a84
|
/retinanet/model.py
|
6cc4b58baa238a862562d5ec22fb5d2a81aa8fde
|
[
"Apache-2.0"
] |
permissive
|
ccl-1/retinanet_pytorch
|
7c2fbd0128891e3b89e9ba09dee89f2aea71abf0
|
493cf4a942639df63839e575a280c56e3abe065d
|
refs/heads/master
| 2023-04-02T00:48:30.180338
| 2021-03-22T08:53:48
| 2021-03-22T08:53:48
| 201,150,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,253
|
py
|
import torch.nn as nn
import torch
import math
import torch.utils.model_zoo as model_zoo
from torchvision.ops import nms
from retinanet.utils import BasicBlock, Bottleneck, BBoxTransform, ClipBoxes
from retinanet.anchors import Anchors
from retinanet import losses
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
# FPN
class PyramidFeatures(nn.Module):
def __init__(self, C3_size, C4_size, C5_size, feature_size=256):
super(PyramidFeatures, self).__init__()
# upsample C5 to get P5 from the FPN paper
self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
# add P5 elementwise to C4
self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
# add P4 elementwise to C3
self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
# "P6 is obtained via a 3x3 stride-2 conv on C5"
self.P6 = nn.Conv2d(C5_size, feature_size, kernel_size=3, stride=2, padding=1)
# "P7 is computed by applying ReLU followed by a 3x3 stride-2 conv on P6"
self.P7_1 = nn.ReLU()
self.P7_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=2, padding=1)
def forward(self, inputs):
C3, C4, C5 = inputs
P5_x = self.P5_1(C5)
P5_upsampled_x = self.P5_upsampled(P5_x)
P5_x = self.P5_2(P5_x)
P4_x = self.P4_1(C4)
P4_x = P5_upsampled_x + P4_x
P4_upsampled_x = self.P4_upsampled(P4_x)
P4_x = self.P4_2(P4_x)
P3_x = self.P3_1(C3)
P3_x = P3_x + P4_upsampled_x
P3_x = self.P3_2(P3_x)
P6_x = self.P6(C5)
P7_x = self.P7_1(P6_x)
P7_x = self.P7_2(P7_x)
return [P3_x, P4_x, P5_x, P6_x, P7_x]
# Regression head for the 4 box coordinates (x1, y1, x2, y2); no activation is applied because this is a regression output.
class RegressionModel(nn.Module):
def __init__(self, num_features_in, num_anchors=9, feature_size=256):
super(RegressionModel, self).__init__()
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchors * 4, kernel_size=3, padding=1)
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.conv3(out)
out = self.act3(out)
out = self.conv4(out)
out = self.act4(out)
out = self.output(out)
# out is B x C x W x H, with C = 4*num_anchors
out = out.permute(0, 2, 3, 1)
return out.contiguous().view(out.shape[0], -1, 4) # (B,-1,4)
# Classification head. The activation used here is sigmoid(); to use a softmax() output instead, one extra class has to be added.
class ClassificationModel(nn.Module):
def __init__(self, num_features_in, num_anchors=9, num_classes=80, prior=0.01, feature_size=256):
super(ClassificationModel, self).__init__()
self.num_classes = num_classes
self.num_anchors = num_anchors
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchors * num_classes, kernel_size=3, padding=1)
self.output_act = nn.Sigmoid()
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.conv3(out)
out = self.act3(out)
out = self.conv4(out)
out = self.act4(out)
out = self.output(out)
out = self.output_act(out)
        # out is B x C x W x H, with C = n_classes * n_anchors
out1 = out.permute(0, 2, 3, 1)
batch_size, width, height, channels = out1.shape
out2 = out1.view(batch_size, width, height, self.num_anchors, self.num_classes)
return out2.contiguous().view(x.shape[0], -1, self.num_classes)
class ResNet(nn.Module):
def __init__(self, num_classes, block, layers):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
if block == BasicBlock:
fpn_sizes = [self.layer2[layers[1] - 1].conv2.out_channels, self.layer3[layers[2] - 1].conv2.out_channels,
self.layer4[layers[3] - 1].conv2.out_channels]
elif block == Bottleneck:
fpn_sizes = [self.layer2[layers[1] - 1].conv3.out_channels, self.layer3[layers[2] - 1].conv3.out_channels,
self.layer4[layers[3] - 1].conv3.out_channels]
else:
raise ValueError(f"Block type {block} not understood")
self.fpn = PyramidFeatures(fpn_sizes[0], fpn_sizes[1], fpn_sizes[2])
self.regressionModel = RegressionModel(256)
self.classificationModel = ClassificationModel(256, num_classes=num_classes)
self.anchors = Anchors()
self.regressBoxes = BBoxTransform()
self.clipBoxes = ClipBoxes()
self.focalLoss = losses.FocalLoss()
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
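        # Focal-loss style initialization: bias the classification output so that every anchor initially predicts foreground with probability equal to prior.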
prior = 0.01
self.classificationModel.output.weight.data.fill_(0)
self.classificationModel.output.bias.data.fill_(-math.log((1.0 - prior) / prior))
self.regressionModel.output.weight.data.fill_(0)
self.regressionModel.output.bias.data.fill_(0)
self.freeze_bn()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = [block(self.inplanes, planes, stride, downsample)]
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def freeze_bn(self):
'''Freeze BatchNorm layers.'''
for layer in self.modules():
if isinstance(layer, nn.BatchNorm2d):
layer.eval()
def forward(self, inputs):
if self.training:
img_batch, annotations = inputs
else:
img_batch = inputs
x = self.conv1(img_batch)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x1 = self.layer1(x)
x2 = self.layer2(x1)
x3 = self.layer3(x2)
x4 = self.layer4(x3)
features = self.fpn([x2, x3, x4])
regression = torch.cat([self.regressionModel(feature) for feature in features], dim=1)
classification = torch.cat([self.classificationModel(feature) for feature in features], dim=1)
anchors = self.anchors(img_batch)
if self.training:
return self.focalLoss(classification, regression, anchors, annotations)
else:
transformed_anchors = self.regressBoxes(anchors, regression)
transformed_anchors = self.clipBoxes(transformed_anchors, img_batch)
finalResult = [[], [], []]
finalScores = torch.Tensor([])
finalAnchorBoxesIndexes = torch.Tensor([]).long()
finalAnchorBoxesCoordinates = torch.Tensor([])
if torch.cuda.is_available():
finalScores = finalScores.cuda()
finalAnchorBoxesIndexes = finalAnchorBoxesIndexes.cuda()
finalAnchorBoxesCoordinates = finalAnchorBoxesCoordinates.cuda()
for i in range(classification.shape[2]):
scores = torch.squeeze(classification[:, :, i])
scores_over_thresh = (scores > 0.05)
if scores_over_thresh.sum() == 0:
# no boxes to NMS, just continue
continue
scores = scores[scores_over_thresh]
anchorBoxes = torch.squeeze(transformed_anchors)
anchorBoxes = anchorBoxes[scores_over_thresh]
anchors_nms_idx = nms(anchorBoxes, scores, 0.5)
finalResult[0].extend(scores[anchors_nms_idx])
finalResult[1].extend(torch.tensor([i] * anchors_nms_idx.shape[0]))
finalResult[2].extend(anchorBoxes[anchors_nms_idx])
finalScores = torch.cat((finalScores, scores[anchors_nms_idx]))
finalAnchorBoxesIndexesValue = torch.tensor([i] * anchors_nms_idx.shape[0])
if torch.cuda.is_available():
finalAnchorBoxesIndexesValue = finalAnchorBoxesIndexesValue.cuda()
finalAnchorBoxesIndexes = torch.cat((finalAnchorBoxesIndexes, finalAnchorBoxesIndexesValue))
finalAnchorBoxesCoordinates = torch.cat((finalAnchorBoxesCoordinates, anchorBoxes[anchors_nms_idx]))
return [finalScores, finalAnchorBoxesIndexes, finalAnchorBoxesCoordinates]
def resnet18(num_classes, pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)
return model
def resnet34(num_classes, pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(num_classes, BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34'], model_dir='.'), strict=False)
return model
def resnet50(num_classes, pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(num_classes, Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50'], model_dir='.'), strict=False)
return model
def resnet101(num_classes, pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(num_classes, Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101'], model_dir='.'), strict=False)
return model
def resnet152(num_classes, pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(num_classes, Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152'], model_dir='.'), strict=False)
return model
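# --- Illustrative usage sketch (added; not part of the original upstream file) ---
# A minimal eval-mode smoke test, assuming this module is run directly and that
# the helpers imported above (Anchors, BBoxTransform, ClipBoxes, nms, losses) are
# available. Weights are random, so the returned detection lists may be empty.
if __name__ == '__main__':
    detector = resnet50(num_classes=80, pretrained=False)
    detector.eval()
    with torch.no_grad():
        dummy_batch = torch.randn(1, 3, 512, 512)  # a single RGB image
        scores, labels, boxes = detector(dummy_batch)  # eval mode returns detections
    print(scores.shape, labels.shape, boxes.shape)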
|
[
"Roronoa_Zoro_1@163.com"
] |
Roronoa_Zoro_1@163.com
|
457d4c6a1076ae536700f2daa77aa5ad82d61938
|
462ebf4ae330bb2b0738ed9193ca57c91d44f036
|
/utils/ops.py
|
03325cfd11d12f5b01e84d3d8b8c4d3102f926e6
|
[
"CC-BY-SA-4.0",
"MIT"
] |
permissive
|
sekilab/RoadDamageDetector
|
51aabfb0025d79d11b345f4e05fe981d484dd0eb
|
a7e867cc11d6895e7ea37acd43a9daf28ad16174
|
refs/heads/master
| 2022-10-20T13:21:14.983887
| 2022-09-30T02:00:53
| 2022-09-30T02:00:53
| 116,769,587
| 663
| 254
|
MIT
| 2022-09-30T02:00:56
| 2018-01-09T05:21:06
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 31,229
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module for helper tensorflow ops."""
import math
import numpy as np
import six
import tensorflow as tf
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import standard_fields as fields
from object_detection.utils import static_shape
def expanded_shape(orig_shape, start_dim, num_dims):
"""Inserts multiple ones into a shape vector.
Inserts an all-1 vector of length num_dims at position start_dim into a shape.
Can be combined with tf.reshape to generalize tf.expand_dims.
Args:
orig_shape: the shape into which the all-1 vector is added (int32 vector)
start_dim: insertion position (int scalar)
num_dims: length of the inserted all-1 vector (int scalar)
Returns:
An int32 vector of length tf.size(orig_shape) + num_dims.
"""
with tf.name_scope('ExpandedShape'):
start_dim = tf.expand_dims(start_dim, 0) # scalar to rank-1
before = tf.slice(orig_shape, [0], start_dim)
add_shape = tf.ones(tf.reshape(num_dims, [1]), dtype=tf.int32)
after = tf.slice(orig_shape, start_dim, [-1])
new_shape = tf.concat([before, add_shape, after], 0)
return new_shape
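# Illustrative example (added): building a shape for a generalized expand_dims,
# assuming a TF1.x graph/session runtime as used by the rest of this module.
def _expanded_shape_example():
  shape = expanded_shape(tf.constant([2, 3]), start_dim=1, num_dims=2)
  with tf.Session() as sess:
    return sess.run(shape)  # expected: [2, 1, 1, 3]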
def normalized_to_image_coordinates(normalized_boxes, image_shape,
parallel_iterations=32):
"""Converts a batch of boxes from normal to image coordinates.
Args:
normalized_boxes: a float32 tensor of shape [None, num_boxes, 4] in
normalized coordinates.
image_shape: a float32 tensor of shape [4] containing the image shape.
parallel_iterations: parallelism for the map_fn op.
Returns:
    absolute_boxes: a float32 tensor of shape [None, num_boxes, 4] containing the
boxes in image coordinates.
"""
def _to_absolute_coordinates(normalized_boxes):
return box_list_ops.to_absolute_coordinates(
box_list.BoxList(normalized_boxes),
image_shape[1], image_shape[2], check_range=False).get()
absolute_boxes = tf.map_fn(
_to_absolute_coordinates,
elems=(normalized_boxes),
dtype=tf.float32,
parallel_iterations=parallel_iterations,
back_prop=True)
return absolute_boxes
def meshgrid(x, y):
"""Tiles the contents of x and y into a pair of grids.
Multidimensional analog of numpy.meshgrid, giving the same behavior if x and y
are vectors. Generally, this will give:
xgrid(i1, ..., i_m, j_1, ..., j_n) = x(j_1, ..., j_n)
ygrid(i1, ..., i_m, j_1, ..., j_n) = y(i_1, ..., i_m)
Keep in mind that the order of the arguments and outputs is reverse relative
to the order of the indices they go into, done for compatibility with numpy.
The output tensors have the same shapes. Specifically:
xgrid.get_shape() = y.get_shape().concatenate(x.get_shape())
ygrid.get_shape() = y.get_shape().concatenate(x.get_shape())
Args:
x: A tensor of arbitrary shape and rank. xgrid will contain these values
varying in its last dimensions.
y: A tensor of arbitrary shape and rank. ygrid will contain these values
varying in its first dimensions.
Returns:
A tuple of tensors (xgrid, ygrid).
"""
with tf.name_scope('Meshgrid'):
x = tf.convert_to_tensor(x)
y = tf.convert_to_tensor(y)
x_exp_shape = expanded_shape(tf.shape(x), 0, tf.rank(y))
y_exp_shape = expanded_shape(tf.shape(y), tf.rank(y), tf.rank(x))
xgrid = tf.tile(tf.reshape(x, x_exp_shape), y_exp_shape)
ygrid = tf.tile(tf.reshape(y, y_exp_shape), x_exp_shape)
new_shape = y.get_shape().concatenate(x.get_shape())
xgrid.set_shape(new_shape)
ygrid.set_shape(new_shape)
return xgrid, ygrid
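# Illustrative example (added): with rank-1 inputs this matches numpy.meshgrid,
# assuming a TF1.x session runtime.
def _meshgrid_example():
  xgrid, ygrid = meshgrid(tf.constant([1, 2, 3]), tf.constant([10, 20]))
  with tf.Session() as sess:
    x_vals, y_vals = sess.run([xgrid, ygrid])
  # expected: x_vals == [[1, 2, 3], [1, 2, 3]]
  #           y_vals == [[10, 10, 10], [20, 20, 20]]
  return x_vals, y_vals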
def pad_to_multiple(tensor, multiple):
"""Returns the tensor zero padded to the specified multiple.
Appends 0s to the end of the first and second dimension (height and width) of
the tensor until both dimensions are a multiple of the input argument
'multiple'. E.g. given an input tensor of shape [1, 3, 5, 1] and an input
multiple of 4, PadToMultiple will append 0s so that the resulting tensor will
be of shape [1, 4, 8, 1].
Args:
tensor: rank 4 float32 tensor, where
tensor -> [batch_size, height, width, channels].
multiple: the multiple to pad to.
Returns:
padded_tensor: the tensor zero padded to the specified multiple.
"""
tensor_shape = tensor.get_shape()
batch_size = static_shape.get_batch_size(tensor_shape)
tensor_height = static_shape.get_height(tensor_shape)
tensor_width = static_shape.get_width(tensor_shape)
tensor_depth = static_shape.get_depth(tensor_shape)
if batch_size is None:
batch_size = tf.shape(tensor)[0]
if tensor_height is None:
tensor_height = tf.shape(tensor)[1]
padded_tensor_height = tf.to_int32(
tf.ceil(tf.to_float(tensor_height) / tf.to_float(multiple))) * multiple
else:
padded_tensor_height = int(
math.ceil(float(tensor_height) / multiple) * multiple)
if tensor_width is None:
tensor_width = tf.shape(tensor)[2]
padded_tensor_width = tf.to_int32(
tf.ceil(tf.to_float(tensor_width) / tf.to_float(multiple))) * multiple
else:
padded_tensor_width = int(
math.ceil(float(tensor_width) / multiple) * multiple)
if (padded_tensor_height == tensor_height and
padded_tensor_width == tensor_width):
return tensor
if tensor_depth is None:
tensor_depth = tf.shape(tensor)[3]
# Use tf.concat instead of tf.pad to preserve static shape
height_pad = tf.zeros([
batch_size, padded_tensor_height - tensor_height, tensor_width,
tensor_depth
])
padded_tensor = tf.concat([tensor, height_pad], 1)
width_pad = tf.zeros([
batch_size, padded_tensor_height, padded_tensor_width - tensor_width,
tensor_depth
])
padded_tensor = tf.concat([padded_tensor, width_pad], 2)
return padded_tensor
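# Illustrative example (added): mirrors the docstring case above, assuming a
# TF1.x session runtime.
def _pad_to_multiple_example():
  feature_map = tf.zeros([1, 3, 5, 1], dtype=tf.float32)
  padded = pad_to_multiple(feature_map, multiple=4)
  with tf.Session() as sess:
    return sess.run(padded).shape  # expected: (1, 4, 8, 1)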
def padded_one_hot_encoding(indices, depth, left_pad):
"""Returns a zero padded one-hot tensor.
This function converts a sparse representation of indices (e.g., [4]) to a
zero padded one-hot representation (e.g., [0, 0, 0, 0, 1] with depth = 4 and
left_pad = 1). If `indices` is empty, the result will simply be a tensor of
shape (0, depth + left_pad). If depth = 0, then this function just returns
`None`.
Args:
indices: an integer tensor of shape [num_indices].
depth: depth for the one-hot tensor (integer).
left_pad: number of zeros to left pad the one-hot tensor with (integer).
Returns:
padded_onehot: a tensor with shape (num_indices, depth + left_pad). Returns
`None` if the depth is zero.
Raises:
    ValueError: if `indices` does not have rank 1 or if `left_pad` or `depth` are
either negative or non-integers.
TODO: add runtime checks for depth and indices.
"""
if depth < 0 or not isinstance(depth, (int, long) if six.PY2 else int):
raise ValueError('`depth` must be a non-negative integer.')
if left_pad < 0 or not isinstance(left_pad, (int, long) if six.PY2 else int):
raise ValueError('`left_pad` must be a non-negative integer.')
if depth == 0:
return None
if len(indices.get_shape().as_list()) != 1:
raise ValueError('`indices` must have rank 1')
def one_hot_and_pad():
one_hot = tf.cast(tf.one_hot(tf.cast(indices, tf.int64), depth,
on_value=1, off_value=0), tf.float32)
return tf.pad(one_hot, [[0, 0], [left_pad, 0]], mode='CONSTANT')
result = tf.cond(tf.greater(tf.size(indices), 0), one_hot_and_pad,
lambda: tf.zeros((depth + left_pad, 0)))
return tf.reshape(result, [-1, depth + left_pad])
def dense_to_sparse_boxes(dense_locations, dense_num_boxes, num_classes):
"""Converts bounding boxes from dense to sparse form.
Args:
dense_locations: a [max_num_boxes, 4] tensor in which only the first k rows
are valid bounding box location coordinates, where k is the sum of
elements in dense_num_boxes.
dense_num_boxes: a [max_num_classes] tensor indicating the counts of
various bounding box classes e.g. [1, 0, 0, 2] means that the first
bounding box is of class 0 and the second and third bounding boxes are
of class 3. The sum of elements in this tensor is the number of valid
bounding boxes.
num_classes: number of classes
Returns:
box_locations: a [num_boxes, 4] tensor containing only valid bounding
boxes (i.e. the first num_boxes rows of dense_locations)
box_classes: a [num_boxes] tensor containing the classes of each bounding
      box (e.g. dense_num_boxes = [1, 0, 0, 2] => box_classes = [0, 3, 3])
"""
num_valid_boxes = tf.reduce_sum(dense_num_boxes)
box_locations = tf.slice(dense_locations,
tf.constant([0, 0]), tf.stack([num_valid_boxes, 4]))
tiled_classes = [tf.tile([i], tf.expand_dims(dense_num_boxes[i], 0))
for i in range(num_classes)]
box_classes = tf.concat(tiled_classes, 0)
box_locations.set_shape([None, 4])
return box_locations, box_classes
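# Illustrative example (added): recovers the sparse form from the docstring,
# assuming a TF1.x session runtime.
def _dense_to_sparse_boxes_example():
  dense_locations = tf.constant([[0., 0., 1., 1.],
                                 [.2, .2, .8, .8],
                                 [.1, .1, .5, .5],
                                 [0., 0., 0., 0.]])
  dense_num_boxes = tf.constant([1, 0, 0, 2], dtype=tf.int32)
  boxes, classes = dense_to_sparse_boxes(dense_locations, dense_num_boxes,
                                         num_classes=4)
  with tf.Session() as sess:
    return sess.run([boxes, classes])  # expected classes: [0, 3, 3]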
def indices_to_dense_vector(indices,
size,
indices_value=1.,
default_value=0,
dtype=tf.float32):
"""Creates dense vector with indices set to specific value and rest to zeros.
This function exists because it is unclear if it is safe to use
tf.sparse_to_dense(indices, [size], 1, validate_indices=False)
with indices which are not ordered.
This function accepts a dynamic size (e.g. tf.shape(tensor)[0])
Args:
indices: 1d Tensor with integer indices which are to be set to
indices_values.
size: scalar with size (integer) of output Tensor.
indices_value: values of elements specified by indices in the output vector
default_value: values of other elements in the output vector.
dtype: data type.
Returns:
dense 1D Tensor of shape [size] with indices set to indices_values and the
rest set to default_value.
"""
size = tf.to_int32(size)
zeros = tf.ones([size], dtype=dtype) * default_value
values = tf.ones_like(indices, dtype=dtype) * indices_value
return tf.dynamic_stitch([tf.range(size), tf.to_int32(indices)],
[zeros, values])
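# Illustrative example (added): scatters ones into a fixed-size vector,
# assuming a TF1.x session runtime.
def _indices_to_dense_vector_example():
  dense = indices_to_dense_vector(tf.constant([0, 3]), size=5)
  with tf.Session() as sess:
    return sess.run(dense)  # expected: [1., 0., 0., 1., 0.]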
def retain_groundtruth(tensor_dict, valid_indices):
"""Retains groundtruth by valid indices.
Args:
tensor_dict: a dictionary of following groundtruth tensors -
fields.InputDataFields.groundtruth_boxes
fields.InputDataFields.groundtruth_instance_masks
fields.InputDataFields.groundtruth_classes
fields.InputDataFields.groundtruth_is_crowd
fields.InputDataFields.groundtruth_area
fields.InputDataFields.groundtruth_label_types
fields.InputDataFields.groundtruth_difficult
valid_indices: a tensor with valid indices for the box-level groundtruth.
Returns:
a dictionary of tensors containing only the groundtruth for valid_indices.
Raises:
ValueError: If the shape of valid_indices is invalid.
ValueError: field fields.InputDataFields.groundtruth_boxes is
not present in tensor_dict.
"""
input_shape = valid_indices.get_shape().as_list()
if not (len(input_shape) == 1 or
(len(input_shape) == 2 and input_shape[1] == 1)):
raise ValueError('The shape of valid_indices is invalid.')
valid_indices = tf.reshape(valid_indices, [-1])
valid_dict = {}
if fields.InputDataFields.groundtruth_boxes in tensor_dict:
# Prevents reshape failure when num_boxes is 0.
num_boxes = tf.maximum(tf.shape(
tensor_dict[fields.InputDataFields.groundtruth_boxes])[0], 1)
for key in tensor_dict:
if key in [fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_instance_masks]:
valid_dict[key] = tf.gather(tensor_dict[key], valid_indices)
# Input decoder returns empty tensor when these fields are not provided.
# Needs to reshape into [num_boxes, -1] for tf.gather() to work.
elif key in [fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_difficult,
fields.InputDataFields.groundtruth_label_types]:
valid_dict[key] = tf.reshape(
tf.gather(tf.reshape(tensor_dict[key], [num_boxes, -1]),
valid_indices), [-1])
# Fields that are not associated with boxes.
else:
valid_dict[key] = tensor_dict[key]
else:
raise ValueError('%s not present in input tensor dict.' % (
fields.InputDataFields.groundtruth_boxes))
return valid_dict
def retain_groundtruth_with_positive_classes(tensor_dict):
"""Retains only groundtruth with positive class ids.
Args:
tensor_dict: a dictionary of following groundtruth tensors -
fields.InputDataFields.groundtruth_boxes
fields.InputDataFields.groundtruth_classes
fields.InputDataFields.groundtruth_is_crowd
fields.InputDataFields.groundtruth_area
fields.InputDataFields.groundtruth_label_types
fields.InputDataFields.groundtruth_difficult
Returns:
a dictionary of tensors containing only the groundtruth with positive
classes.
Raises:
ValueError: If groundtruth_classes tensor is not in tensor_dict.
"""
if fields.InputDataFields.groundtruth_classes not in tensor_dict:
raise ValueError('`groundtruth classes` not in tensor_dict.')
keep_indices = tf.where(tf.greater(
tensor_dict[fields.InputDataFields.groundtruth_classes], 0))
return retain_groundtruth(tensor_dict, keep_indices)
def replace_nan_groundtruth_label_scores_with_ones(label_scores):
"""Replaces nan label scores with 1.0.
Args:
    label_scores: a tensor containing object annotation label scores.
Returns:
a tensor where NaN label scores have been replaced by ones.
"""
return tf.where(
tf.is_nan(label_scores), tf.ones(tf.shape(label_scores)), label_scores)
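# Illustrative example (added): NaN scores (e.g. boxes without annotated scores)
# become 1.0, assuming a TF1.x session runtime.
def _replace_nan_label_scores_example():
  scores = tf.constant([0.3, float('nan'), 0.9])
  fixed = replace_nan_groundtruth_label_scores_with_ones(scores)
  with tf.Session() as sess:
    return sess.run(fixed)  # expected: [0.3, 1.0, 0.9]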
def filter_groundtruth_with_crowd_boxes(tensor_dict):
"""Filters out groundtruth with boxes corresponding to crowd.
Args:
tensor_dict: a dictionary of following groundtruth tensors -
fields.InputDataFields.groundtruth_boxes
fields.InputDataFields.groundtruth_classes
fields.InputDataFields.groundtruth_is_crowd
fields.InputDataFields.groundtruth_area
fields.InputDataFields.groundtruth_label_types
Returns:
a dictionary of tensors containing only the groundtruth that have bounding
boxes.
"""
if fields.InputDataFields.groundtruth_is_crowd in tensor_dict:
is_crowd = tensor_dict[fields.InputDataFields.groundtruth_is_crowd]
is_not_crowd = tf.logical_not(is_crowd)
is_not_crowd_indices = tf.where(is_not_crowd)
tensor_dict = retain_groundtruth(tensor_dict, is_not_crowd_indices)
return tensor_dict
def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
"""Filters out groundtruth with no bounding boxes.
Args:
tensor_dict: a dictionary of following groundtruth tensors -
fields.InputDataFields.groundtruth_boxes
fields.InputDataFields.groundtruth_instance_masks
fields.InputDataFields.groundtruth_classes
fields.InputDataFields.groundtruth_is_crowd
fields.InputDataFields.groundtruth_area
fields.InputDataFields.groundtruth_label_types
Returns:
a dictionary of tensors containing only the groundtruth that have bounding
boxes.
"""
groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
nan_indicator_vector = tf.greater(tf.reduce_sum(tf.to_int32(
tf.is_nan(groundtruth_boxes)), reduction_indices=[1]), 0)
valid_indicator_vector = tf.logical_not(nan_indicator_vector)
valid_indices = tf.where(valid_indicator_vector)
return retain_groundtruth(tensor_dict, valid_indices)
def normalize_to_target(inputs,
target_norm_value,
dim,
epsilon=1e-7,
trainable=True,
scope='NormalizeToTarget',
summarize=True):
"""L2 normalizes the inputs across the specified dimension to a target norm.
This op implements the L2 Normalization layer introduced in
Liu, Wei, et al. "SSD: Single Shot MultiBox Detector."
and Liu, Wei, Andrew Rabinovich, and Alexander C. Berg.
"Parsenet: Looking wider to see better." and is useful for bringing
activations from multiple layers in a convnet to a standard scale.
Note that the rank of `inputs` must be known and the dimension to which
normalization is to be applied should be statically defined.
TODO: Add option to scale by L2 norm of the entire input.
Args:
inputs: A `Tensor` of arbitrary size.
target_norm_value: A float value that specifies an initial target norm or
a list of floats (whose length must be equal to the depth along the
dimension to be normalized) specifying a per-dimension multiplier
after normalization.
dim: The dimension along which the input is normalized.
epsilon: A small value to add to the inputs to avoid dividing by zero.
trainable: Whether the norm is trainable or not
scope: Optional scope for variable_scope.
summarize: Whether or not to add a tensorflow summary for the op.
Returns:
The input tensor normalized to the specified target norm.
Raises:
ValueError: If dim is smaller than the number of dimensions in 'inputs'.
ValueError: If target_norm_value is not a float or a list of floats with
length equal to the depth along the dimension to be normalized.
"""
with tf.variable_scope(scope, 'NormalizeToTarget', [inputs]):
if not inputs.get_shape():
raise ValueError('The input rank must be known.')
input_shape = inputs.get_shape().as_list()
input_rank = len(input_shape)
if dim < 0 or dim >= input_rank:
raise ValueError(
'dim must be non-negative but smaller than the input rank.')
if not input_shape[dim]:
raise ValueError('input shape should be statically defined along '
'the specified dimension.')
depth = input_shape[dim]
if not (isinstance(target_norm_value, float) or
(isinstance(target_norm_value, list) and
len(target_norm_value) == depth) and
all([isinstance(val, float) for val in target_norm_value])):
raise ValueError('target_norm_value must be a float or a list of floats '
'with length equal to the depth along the dimension to '
'be normalized.')
if isinstance(target_norm_value, float):
initial_norm = depth * [target_norm_value]
else:
initial_norm = target_norm_value
target_norm = tf.contrib.framework.model_variable(
name='weights', dtype=tf.float32,
initializer=tf.constant(initial_norm, dtype=tf.float32),
trainable=trainable)
if summarize:
mean = tf.reduce_mean(target_norm)
mean = tf.Print(mean, ['NormalizeToTarget:', mean])
tf.summary.scalar(tf.get_variable_scope().name, mean)
lengths = epsilon + tf.sqrt(tf.reduce_sum(tf.square(inputs), dim, True))
mult_shape = input_rank*[1]
mult_shape[dim] = depth
return tf.reshape(target_norm, mult_shape) * tf.truediv(inputs, lengths)
def position_sensitive_crop_regions(image,
boxes,
box_ind,
crop_size,
num_spatial_bins,
global_pool,
extrapolation_value=None):
"""Position-sensitive crop and pool rectangular regions from a feature grid.
The output crops are split into `spatial_bins_y` vertical bins
and `spatial_bins_x` horizontal bins. For each intersection of a vertical
and a horizontal bin the output values are gathered by performing
  `tf.image.crop_and_resize` (bilinear resampling) on a separate subset of
channels of the image. This reduces `depth` by a factor of
`(spatial_bins_y * spatial_bins_x)`.
When global_pool is True, this function implements a differentiable version
of position-sensitive RoI pooling used in
[R-FCN detection system](https://arxiv.org/abs/1605.06409).
When global_pool is False, this function implements a differentiable version
of position-sensitive assembling operation used in
[instance FCN](https://arxiv.org/abs/1603.08678).
Args:
image: A `Tensor`. Must be one of the following types: `uint8`, `int8`,
`int16`, `int32`, `int64`, `half`, `float32`, `float64`.
A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
Both `image_height` and `image_width` need to be positive.
boxes: A `Tensor` of type `float32`.
A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
specifies the coordinates of a box in the `box_ind[i]` image and is
specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized
coordinate value of `y` is mapped to the image coordinate at
`y * (image_height - 1)`, so as the `[0, 1]` interval of normalized image
      height is mapped to `[0, image_height - 1]` in image height coordinates.
We do allow y1 > y2, in which case the sampled crop is an up-down flipped
version of the original image. The width dimension is treated similarly.
Normalized coordinates outside the `[0, 1]` range are allowed, in which
case we use `extrapolation_value` to extrapolate the input image values.
box_ind: A `Tensor` of type `int32`.
A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
The value of `box_ind[i]` specifies the image that the `i`-th box refers
to.
crop_size: A list of two integers `[crop_height, crop_width]`. All
cropped image patches are resized to this size. The aspect ratio of the
image content is not preserved. Both `crop_height` and `crop_width` need
to be positive.
num_spatial_bins: A list of two integers `[spatial_bins_y, spatial_bins_x]`.
Represents the number of position-sensitive bins in y and x directions.
Both values should be >= 1. `crop_height` should be divisible by
`spatial_bins_y`, and similarly for width.
The number of image channels should be divisible by
(spatial_bins_y * spatial_bins_x).
Suggested value from R-FCN paper: [3, 3].
global_pool: A boolean variable.
If True, we perform average global pooling on the features assembled from
the position-sensitive score maps.
If False, we keep the position-pooled features without global pooling
over the spatial coordinates.
Note that using global_pool=True is equivalent to but more efficient than
running the function with global_pool=False and then performing global
average pooling.
extrapolation_value: An optional `float`. Defaults to `0`.
Value used for extrapolation, when applicable.
Returns:
position_sensitive_features: A 4-D tensor of shape
`[num_boxes, K, K, crop_channels]`,
where `crop_channels = depth / (spatial_bins_y * spatial_bins_x)`,
where K = 1 when global_pool is True (Average-pooled cropped regions),
and K = crop_size when global_pool is False.
Raises:
ValueError: Raised in four situations:
`num_spatial_bins` is not >= 1;
`num_spatial_bins` does not divide `crop_size`;
`(spatial_bins_y*spatial_bins_x)` does not divide `depth`;
`bin_crop_size` is not square when global_pool=False due to the
constraint in function space_to_depth.
"""
total_bins = 1
bin_crop_size = []
for (num_bins, crop_dim) in zip(num_spatial_bins, crop_size):
if num_bins < 1:
raise ValueError('num_spatial_bins should be >= 1')
if crop_dim % num_bins != 0:
raise ValueError('crop_size should be divisible by num_spatial_bins')
total_bins *= num_bins
bin_crop_size.append(crop_dim // num_bins)
if not global_pool and bin_crop_size[0] != bin_crop_size[1]:
raise ValueError('Only support square bin crop size for now.')
ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=1)
spatial_bins_y, spatial_bins_x = num_spatial_bins
# Split each box into spatial_bins_y * spatial_bins_x bins.
position_sensitive_boxes = []
for bin_y in range(spatial_bins_y):
step_y = (ymax - ymin) / spatial_bins_y
for bin_x in range(spatial_bins_x):
step_x = (xmax - xmin) / spatial_bins_x
box_coordinates = [ymin + bin_y * step_y,
xmin + bin_x * step_x,
ymin + (bin_y + 1) * step_y,
xmin + (bin_x + 1) * step_x,
]
position_sensitive_boxes.append(tf.stack(box_coordinates, axis=1))
image_splits = tf.split(value=image, num_or_size_splits=total_bins, axis=3)
image_crops = []
for (split, box) in zip(image_splits, position_sensitive_boxes):
crop = tf.image.crop_and_resize(split, box, box_ind, bin_crop_size,
extrapolation_value=extrapolation_value)
image_crops.append(crop)
if global_pool:
# Average over all bins.
position_sensitive_features = tf.add_n(image_crops) / len(image_crops)
# Then average over spatial positions within the bins.
position_sensitive_features = tf.reduce_mean(
position_sensitive_features, [1, 2], keep_dims=True)
else:
# Reorder height/width to depth channel.
block_size = bin_crop_size[0]
if block_size >= 2:
image_crops = [tf.space_to_depth(
crop, block_size=block_size) for crop in image_crops]
    # Pack image_crops so that the first dimension is for position-sensitive boxes.
position_sensitive_features = tf.stack(image_crops, axis=0)
# Unroll the position-sensitive boxes to spatial positions.
position_sensitive_features = tf.squeeze(
tf.batch_to_space_nd(position_sensitive_features,
block_shape=[1] + num_spatial_bins,
crops=tf.zeros((3, 2), dtype=tf.int32)),
squeeze_dims=[0])
# Reorder back the depth channel.
if block_size >= 2:
position_sensitive_features = tf.depth_to_space(
position_sensitive_features, block_size=block_size)
return position_sensitive_features
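# Illustrative example (added): R-FCN style pooling of one box over a 4-channel
# feature map, assuming a TF1.x session runtime. With num_spatial_bins=[2, 2]
# the 4 channels are consumed one per bin, so crop_channels = 4 / (2 * 2) = 1.
def _position_sensitive_crop_example():
  features = tf.random_normal([1, 8, 8, 4])
  crops = position_sensitive_crop_regions(
      features,
      boxes=tf.constant([[0., 0., 1., 1.]]),
      box_ind=tf.constant([0], dtype=tf.int32),
      crop_size=[2, 2],
      num_spatial_bins=[2, 2],
      global_pool=True,
      extrapolation_value=0.0)
  with tf.Session() as sess:
    return sess.run(crops).shape  # expected: (1, 1, 1, 1)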
def reframe_box_masks_to_image_masks(box_masks, boxes, image_height,
image_width):
"""Transforms the box masks back to full image masks.
Embeds masks in bounding boxes of larger masks whose shapes correspond to
image shape.
Args:
box_masks: A tf.float32 tensor of size [num_masks, mask_height, mask_width].
boxes: A tf.float32 tensor of size [num_masks, 4] containing the box
corners. Row i contains [ymin, xmin, ymax, xmax] of the box
corresponding to mask i. Note that the box corners are in
normalized coordinates.
image_height: Image height. The output mask will have the same height as
the image height.
image_width: Image width. The output mask will have the same width as the
image width.
Returns:
A tf.float32 tensor of size [num_masks, image_height, image_width].
"""
# TODO: Make this a public function.
def transform_boxes_relative_to_boxes(boxes, reference_boxes):
boxes = tf.reshape(boxes, [-1, 2, 2])
min_corner = tf.expand_dims(reference_boxes[:, 0:2], 1)
max_corner = tf.expand_dims(reference_boxes[:, 2:4], 1)
transformed_boxes = (boxes - min_corner) / (max_corner - min_corner)
return tf.reshape(transformed_boxes, [-1, 4])
box_masks = tf.expand_dims(box_masks, axis=3)
num_boxes = tf.shape(box_masks)[0]
unit_boxes = tf.concat(
[tf.zeros([num_boxes, 2]), tf.ones([num_boxes, 2])], axis=1)
reverse_boxes = transform_boxes_relative_to_boxes(unit_boxes, boxes)
image_masks = tf.image.crop_and_resize(image=box_masks,
boxes=reverse_boxes,
box_ind=tf.range(num_boxes),
crop_size=[image_height, image_width],
extrapolation_value=0.0)
return tf.squeeze(image_masks, axis=3)
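# Illustrative example (added): pasting a 2x2 box mask back onto a 4x4 image
# canvas, assuming a TF1.x session runtime.
def _reframe_box_masks_example():
  box_masks = tf.ones([1, 2, 2], dtype=tf.float32)
  boxes = tf.constant([[0.25, 0.25, 0.75, 0.75]])  # box centered in the image
  image_masks = reframe_box_masks_to_image_masks(box_masks, boxes,
                                                 image_height=4, image_width=4)
  with tf.Session() as sess:
    # expected: values near 1 in the central region, falling to 0 at the borders
    return sess.run(image_masks)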
def merge_boxes_with_multiple_labels(boxes, classes, num_classes):
"""Merges boxes with same coordinates and returns K-hot encoded classes.
Args:
boxes: A tf.float32 tensor with shape [N, 4] holding N boxes.
classes: A tf.int32 tensor with shape [N] holding class indices.
The class index starts at 0.
num_classes: total number of classes to use for K-hot encoding.
Returns:
merged_boxes: A tf.float32 tensor with shape [N', 4] holding boxes,
where N' <= N.
class_encodings: A tf.int32 tensor with shape [N', num_classes] holding
k-hot encodings for the merged boxes.
merged_box_indices: A tf.int32 tensor with shape [N'] holding original
indices of the boxes.
"""
def merge_numpy_boxes(boxes, classes, num_classes):
"""Python function to merge numpy boxes."""
if boxes.size < 1:
return (np.zeros([0, 4], dtype=np.float32),
np.zeros([0, num_classes], dtype=np.int32),
np.zeros([0], dtype=np.int32))
box_to_class_indices = {}
for box_index in range(boxes.shape[0]):
box = tuple(boxes[box_index, :].tolist())
class_index = classes[box_index]
if box not in box_to_class_indices:
box_to_class_indices[box] = [box_index, np.zeros([num_classes])]
box_to_class_indices[box][1][class_index] = 1
merged_boxes = np.vstack(box_to_class_indices.keys()).astype(np.float32)
class_encodings = [item[1] for item in box_to_class_indices.values()]
class_encodings = np.vstack(class_encodings).astype(np.int32)
merged_box_indices = [item[0] for item in box_to_class_indices.values()]
merged_box_indices = np.array(merged_box_indices).astype(np.int32)
return merged_boxes, class_encodings, merged_box_indices
merged_boxes, class_encodings, merged_box_indices = tf.py_func(
merge_numpy_boxes, [boxes, classes, num_classes],
[tf.float32, tf.int32, tf.int32])
merged_boxes = tf.reshape(merged_boxes, [-1, 4])
class_encodings = tf.reshape(class_encodings, [-1, num_classes])
merged_box_indices = tf.reshape(merged_box_indices, [-1])
return merged_boxes, class_encodings, merged_box_indices
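# Illustrative example (added): two identical boxes with different labels are
# merged into a single K-hot row, assuming a TF1.x session runtime (and Python
# dict insertion order inside the py_func).
def _merge_boxes_example():
  boxes = tf.constant([[0., 0., 1., 1.],
                       [0., 0., 1., 1.],
                       [.5, .5, 1., 1.]])
  classes = tf.constant([0, 2, 1], dtype=tf.int32)
  merged, encodings, indices = merge_boxes_with_multiple_labels(
      boxes, classes, num_classes=3)
  with tf.Session() as sess:
    # expected: two merged boxes; the first row is k-hot encoded as [1, 0, 1]
    return sess.run([merged, encodings, indices])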
|
[
"n.o.id.z.m@gmail.com"
] |
n.o.id.z.m@gmail.com
|
9d6be18ab0536568627e74f04f6d5fac3aacefbc
|
4ac330ae846ac3d593dca27ae34a5712a6de3a6c
|
/04_函数/hm_08_打印分割线.py
|
8f07d4fa0d6f3120716baedd355900f642e50bb5
|
[] |
no_license
|
gaoshang18/python
|
5999c420be0bf020759fa0bb0bc2251532d5c779
|
e73aa15e71eea54699215f0ff6279ad9b8417be2
|
refs/heads/master
| 2020-05-01T03:35:32.480839
| 2019-08-26T02:02:55
| 2019-08-26T02:02:55
| 177,248,778
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 115
|
py
|
# The return value of a function: return
def print_line(char, times):
print(char * times)
print_line("hi", 4)
|
[
"noreply@github.com"
] |
gaoshang18.noreply@github.com
|
d3bdbc5461a26a5c7fe0183620159be6662d508c
|
a9a2f66671fadf765d7feb511a4a5d9b9f4ef362
|
/test/agent/server/local_elastic_agent_test.py
|
d86d3dc297c43d5bceef33af548cc125f0b9dd84
|
[
"BSD-3-Clause"
] |
permissive
|
BobLiu20/elastic
|
64885d164d485976ea8740672c454c212bab4ff8
|
e371fe57672aea91d2466f5e04884028d8dca649
|
refs/heads/master
| 2022-11-11T10:22:20.181835
| 2020-07-01T23:30:44
| 2020-07-01T23:32:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,667
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing
import os
import time
import unittest
import uuid
from unittest.mock import patch
import torch
import torch.distributed as dist
import torch.distributed.rpc as rpc
import torchelastic.rendezvous.etcd_rendezvous # noqa: F401
from test_utils import is_tsan
from torch.distributed.rpc.backend_registry import BackendType
from torchelastic.agent.server.api import (
WorkerGroupFailureException,
WorkerSpec,
WorkerState,
)
from torchelastic.agent.server.local_elastic_agent import LocalElasticAgent
from torchelastic.rendezvous.etcd_server import EtcdServer
def _happy_function():
return
def _sad_function():
raise RuntimeError("sad because i throw")
def _bipolar_function():
rank = int(os.environ["RANK"])
if rank % 2 == 0:
_happy_function()
else:
_sad_function()
def _distributed_sum(wait):
rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
dist.init_process_group(backend="gloo")
t = torch.tensor(rank)
time.sleep(wait)
dist.all_reduce(t, op=dist.reduce_op.SUM)
expected_sum = sum(range(world_size))
actual = t.item()
if expected_sum != actual:
raise RuntimeError(f"Expected rank sum {expected_sum}, got {actual}")
def _simulate_work(wait):
time.sleep(wait)
rank = int(os.environ["RANK"])
return rank
def _check_rank_assignment():
group_rank = int(os.environ["GROUP_RANK"])
rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
role_rank = int(os.environ["ROLE_RANK"])
role_world_size = int(os.environ["ROLE_WORLD_SIZE"])
return (group_rank, rank, world_size, role_rank, role_world_size)
def echo(msg):
return msg
def _return_rank_times(a):
return int(os.environ["RANK"]) * a
def _check_env_function():
# just check these env vars exist, os.environ[...] will naturally throw
# if the variable does not exist
os.environ["RANK"]
os.environ["LOCAL_RANK"]
os.environ["ROLE_RANK"]
os.environ["GROUP_RANK"]
os.environ["LOCAL_WORLD_SIZE"]
os.environ["ROLE_WORLD_SIZE"]
os.environ["WORLD_SIZE"]
os.environ["MASTER_ADDR"]
os.environ["MASTER_PORT"]
os.environ["TORCHELASTIC_RESTART_COUNT"]
os.environ["TORCHELASTIC_MAX_RESTARTS"]
os.environ["TORCHELASTIC_RUN_ID"]
def _run_agent(
run_id,
etcd_host,
etcd_port,
min_size,
max_size,
func_to_run,
args,
local_world_size=8,
role="test_trainer",
output_dict=None,
agent_barrier_timeout=300,
):
rdzv_handler = dist.rendezvous(
f"etcd://{etcd_host}:{etcd_port}/{run_id}"
f"?min_workers={min_size}"
f"&max_workers={max_size}"
)
spec = WorkerSpec(
role=role,
local_world_size=local_world_size,
fn=func_to_run,
args=args,
rdzv_handler=rdzv_handler,
max_restarts=2,
monitor_interval=1,
)
agent = LocalElasticAgent(
spec, start_method="fork", exit_barrier_timeout=agent_barrier_timeout
)
res = agent.run()
if output_dict is not None:
key = str(uuid.uuid4().int)
output_dict[key] = (role, res)
class LocalElasticAgentTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# start a standalone, single process etcd server to use for all tests
cls._etcd_server = EtcdServer()
cls._etcd_server.start()
@classmethod
def tearDownClass(cls):
# stop the standalone etcd server
cls._etcd_server.stop()
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_happy_function(self):
spec = self._get_worker_spec(fn=_happy_function)
agent = LocalElasticAgent(spec, start_method="fork")
agent.run()
def _get_worker_spec(
self,
fn,
args=(),
max_restarts=1,
num_agents=1,
monitor_interval=0.1,
local_world_size=8,
):
run_id = str(uuid.uuid4().int)
rdzv_handler = dist.rendezvous(
f"etcd://{self._etcd_server.get_endpoint()}/{run_id}"
f"?min_workers={num_agents}"
f"&max_workers={num_agents}"
)
spec = WorkerSpec(
role="test_trainer",
local_world_size=local_world_size,
fn=fn,
args=args,
rdzv_handler=rdzv_handler,
max_restarts=max_restarts,
monitor_interval=monitor_interval,
)
return spec
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_distributed_sum(self):
spec = self._get_worker_spec(fn=_distributed_sum, args=(0,))
agent = LocalElasticAgent(spec, start_method="fork")
agent.run()
class RoleConfig:
__slots__ = ["role", "workers", "num_agents", "workers_num", "role_size"]
def __init__(
self, role: str, workers=None, num_agents: int = 0, workers_num: int = 0
):
self.role = role
self.workers = workers
if workers_num != 0 and num_agents != 0:
self.workers = [workers_num] * num_agents
self.role_size = sum(self.workers)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_correct_rank_assignment_heterogeneous(self):
roles_config = [
self.RoleConfig("trainer", workers=[1, 2, 3, 4]),
self.RoleConfig("ps", workers=[5, 2]),
# split configuration to run the last one on the main process
self.RoleConfig("master", workers=[8]),
]
self.run_configuration(roles_config, 25)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_correct_rank_assignment_homogeneous(self):
num_workers = 4
roles_config = [
self.RoleConfig("trainer", num_agents=4, workers_num=num_workers),
self.RoleConfig("ps", num_agents=2, workers_num=num_workers),
# split configuration to run the last one on the main process
self.RoleConfig("master", num_agents=1, workers_num=num_workers),
]
self.run_configuration(roles_config, 28)
def run_configuration(self, roles_config, expected_world_size):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
nnodes = sum(len(cfg.workers) for cfg in roles_config)
run_id = str(uuid.uuid4().int)
procs = []
manager = multiprocessing.Manager()
return_dict = manager.dict()
default_args = (run_id, host, port, nnodes, nnodes, _check_rank_assignment, ())
for ind in range(len(roles_config) - 1):
config = roles_config[ind]
for num_workers in config.workers:
p = multiprocessing.Process(
target=_run_agent,
args=(*default_args, num_workers, config.role, return_dict),
)
procs.append(p)
p.start()
# run one on the main process for debugging
config = roles_config[len(roles_config) - 1]
_run_agent(*default_args, config.workers[0], config.role, return_dict)
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
role_info_dict = {role_info.role: role_info for role_info in roles_config}
self.verify_rank_consistency(return_dict, role_info_dict, expected_world_size)
def verify_rank_consistency(self, return_dict, role_info_dict, expected_world_size):
role_ranks = {}
global_ranks = []
grouped_ranks = {}
for role, res in return_dict.values():
for (
group_rank,
rank,
world_size,
role_rank,
role_world_size,
) in res.values():
role_info_config = role_info_dict[role]
self.assertEqual(expected_world_size, world_size)
self.assertEqual(role_info_config.role_size, role_world_size)
if group_rank not in grouped_ranks:
grouped_ranks[group_rank] = []
grouped_ranks[group_rank].append((rank, role_rank))
global_ranks.append(rank)
if role not in role_ranks:
role_ranks[role] = []
role_ranks[role].append(role_rank)
global_ranks = sorted(global_ranks)
self.assertEqual(list(range(0, expected_world_size)), global_ranks)
for role, role_config_info in role_info_dict.items():
self.assertEqual(
list(range(0, role_config_info.role_size)), sorted(role_ranks[role])
)
        # Make sure that each agent assigns consecutive ranks to its workers.
# The first argument is the global_rank and the second argument
# is role_rank
for ranks_lst in grouped_ranks.values():
self.verify_ranks_sequential(ranks_lst, 0)
self.verify_ranks_sequential(ranks_lst, 1)
def verify_ranks_sequential(self, ranks_pairs, rank_idx):
ranks = sorted(rank_pair[rank_idx] for rank_pair in ranks_pairs)
start_rank, end_rank = ranks[0], ranks[-1]
self.assertEqual(list(range(start_rank, end_rank + 1)), ranks)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_distributed_sum_heterogenous(self):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
nnodes = 4
run_id = str(uuid.uuid4().int)
procs = []
default_args = (run_id, host, port, nnodes, nnodes, _distributed_sum, (0,))
for ind in range(nnodes - 1):
p = multiprocessing.Process(
target=_run_agent, args=(*default_args, ind + 1)
)
procs.append(p)
p.start()
# run one on the main process for debugging
_run_agent(*default_args, 8)
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_sad_function(self):
spec = self._get_worker_spec(fn=_sad_function, max_restarts=2)
agent = LocalElasticAgent(spec, start_method="fork")
with self.assertRaises(WorkerGroupFailureException) as cm:
agent.run()
excs = cm.exception.get_worker_exceptions()
for i in range(spec.local_world_size):
self.assertTrue(isinstance(excs[i], Exception))
self.assertEqual(WorkerState.FAILED, agent.get_worker_group().state)
self.assertEqual(0, agent._remaining_restarts)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_bipolar_function(self):
spec = self._get_worker_spec(fn=_bipolar_function, max_restarts=2)
agent = LocalElasticAgent(spec, start_method="fork")
with self.assertRaises(Exception):
agent.run()
self.assertEqual(WorkerState.FAILED, agent.get_worker_group().state)
self.assertEqual(0, agent._remaining_restarts)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_check_env_function(self):
spec = self._get_worker_spec(fn=_check_env_function, max_restarts=2)
agent = LocalElasticAgent(spec, start_method="fork")
agent.run()
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_check_run_id(self):
def return_run_id():
return os.environ["TORCHELASTIC_RUN_ID"]
spec = self._get_worker_spec(fn=return_run_id, max_restarts=0)
agent = LocalElasticAgent(spec, start_method="fork")
ret = agent.run()
for i in range(spec.local_world_size):
self.assertEqual(spec.rdzv_handler.get_run_id(), ret[i])
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_get_worker_return_values(self):
spec = self._get_worker_spec(fn=_return_rank_times, args=(2,))
agent = LocalElasticAgent(spec, start_method="fork")
ret_vals = agent.run()
self.assertEqual(spec.local_world_size, len(ret_vals))
for i in range(spec.local_world_size):
self.assertEqual(i * 2, ret_vals[i])
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_double_agent_happy(self):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
nnodes = 2
run_id = str(uuid.uuid4().int)
procs = []
for _ in range(nnodes - 1):
p = multiprocessing.Process(
target=_run_agent,
args=(run_id, host, port, nnodes, nnodes, _distributed_sum, (0,)),
)
procs.append(p)
p.start()
# run one on the main process for debugging
_run_agent(run_id, host, port, nnodes, nnodes, _distributed_sum, (0,))
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_double_agent_fault_tolerance(self):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
nnodes = 2
run_id = str(uuid.uuid4().int)
procs = []
for _ in range(nnodes):
p = multiprocessing.Process(
target=_run_agent,
args=(run_id, host, port, nnodes, nnodes, _distributed_sum, (0,)),
)
procs.append(p)
p.start()
# restart odd agents
for i in range(nnodes):
if i % 2 != 0:
procs[i].kill()
p = multiprocessing.Process(
target=_run_agent,
args=(run_id, host, port, nnodes, nnodes, _distributed_sum, (0,)),
)
procs[i] = p
p.start()
for i in range(nnodes):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_double_agent_elastic(self):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
min_size = 1
max_size = 2
run_id = str(uuid.uuid4().int)
procs = []
for _ in range(max_size):
p = multiprocessing.Process(
target=_run_agent,
args=(run_id, host, port, min_size, max_size, _distributed_sum, (0,)),
)
procs.append(p)
p.start()
# kill odd agents
for i in range(max_size):
if i % 2 != 0:
procs[i].kill()
for i in range(max_size):
if i % 2 == 0:
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_torch_rpc(self):
"""
Simple torch rpc example with torchelastic.
        Creates two agents (to simulate a two-node job);
each agent runs a single worker. worker0 calls an rpc_sync on
worker1.
"""
# TODO upstream this to torch.distributed.rpc so that users do not have
# to redundantly set rank as part of name (e.g. worker0) AND also pass
# it explicitly as an argument to rpc.init_rpc
def init_rpc(name_prefix, backend):
rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
rpc.init_rpc(
name=f"{name_prefix}{rank}",
backend=backend,
rank=rank,
world_size=world_size,
)
def worker_0(queue, msg):
init_rpc("worker", BackendType.PROCESS_GROUP)
ret = rpc.rpc_sync(to="worker1", func=echo, args=(msg,))
queue.put(ret)
rpc.shutdown()
def worker_1():
init_rpc("worker", BackendType.PROCESS_GROUP)
rpc.shutdown()
def run_agent(
run_id, etcd_host, etcd_port, start_method, worker_fn, worker_args=()
):
rdzv_handler = dist.rendezvous(
f"etcd://{etcd_host}:{etcd_port}/{run_id}"
f"?min_workers=2"
f"&max_workers=2"
)
spec = WorkerSpec(
role="test_trainer",
local_world_size=1,
fn=worker_fn,
args=worker_args,
rdzv_handler=rdzv_handler,
max_restarts=3,
monitor_interval=1,
)
agent = LocalElasticAgent(spec, start_method)
agent.run()
run_id = str(uuid.uuid4().int)
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
start_method = "fork"
msg = "hello world"
mp_queue = multiprocessing.get_context(start_method).Queue()
agent0 = multiprocessing.Process(
target=run_agent,
args=(run_id, host, port, start_method, worker_0, (mp_queue, msg)),
)
agent1 = multiprocessing.Process(
target=run_agent, args=(run_id, host, port, start_method, worker_1, ())
)
agent0.start()
agent1.start()
agent0.join()
agent1.join()
self.assertEqual(msg, mp_queue.get())
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_workers_drift_success(self):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
nnodes = 2
run_id = str(uuid.uuid4().int)
procs = []
default_args = (run_id, host, port, nnodes, nnodes, _simulate_work)
for _ in range(nnodes - 1):
p = multiprocessing.Process(
target=_run_agent,
args=(*default_args, (10,), 2, "test_trainer", {}, 30),
)
procs.append(p)
p.start()
_run_agent(*default_args, (1,), 2, "test_trainer", {}, 30)
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
@patch("torchelastic.utils.store.barrier")
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_workers_drift_fail(self, barrier_mock):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
nnodes = 2
run_id = str(uuid.uuid4().int)
procs = []
default_args = (run_id, host, port, nnodes, nnodes, _simulate_work)
for _ in range(nnodes - 1):
p = multiprocessing.Process(
target=_run_agent,
args=(*default_args, (60,), 2, "test_trainer", {}, 10),
)
procs.append(p)
p.start()
_run_agent(*default_args, (1,), 2, "test_trainer", {}, 10)
barrier_mock.assert_called_once()
@patch("torchelastic.utils.store.barrier")
def test_barrier_failed(self, barrier_mock):
barrier_mock.side_effect = RuntimeError("test error")
spec = self._get_worker_spec(fn=_happy_function)
agent = LocalElasticAgent(spec, start_method="fork")
agent.run()
barrier_mock.assert_called_once()
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
a5c4df71ef52999cba5ee63c82c678407d13a844
|
3bb74bbfca7ca25eb82358181cf94d456811fdbd
|
/HW-04/main.py
|
4acd1a7618c182c57c3b34fb1adc79cda73df475
|
[] |
no_license
|
WNick-main/GB_Python
|
3b4a67ea55cfe69fb672a7d3417883e48dda3b06
|
5f6f80e108e69ac998196e3d390971c4e15caebe
|
refs/heads/main
| 2023-07-28T03:39:37.714168
| 2021-09-12T17:26:52
| 2021-09-12T17:26:52
| 398,079,598
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,556
|
py
|
"""
1. Implement a script that contains a function for calculating an employee's salary.
The calculation must use the formula: (hours worked * hourly rate) + bonus.
To run the calculation for specific values, the script must be launched with command-line arguments.
"""
import sys
import ex1_lib
try:
file, hours, rate, bonus = sys.argv
except ValueError:
print("Invalid args")
exit()
print('--- Ex 1 ---')
print(ex1_lib.calculate2(int(hours), int(rate), int(bonus)))
assert ex1_lib.calculate(10) == 8.7, "ex1_lib.calculate(10)"
assert ex1_lib.calculate2(8, 1000, 2000) == 10000, "ex1_lib.calculate2(8,1000,2000)"
# --------------------------------------------------------------------------
"""
2. Given a list of numbers, print the elements of the original list whose values are greater than the previous element.
Hint: collect the elements that satisfy the condition into a list. Use a comprehension to build the list.
Example source list: [300, 2, 12, 44, 1, 1, 4, 10, 7, 1, 78, 123, 55].
Result: [12, 44, 4, 10, 78, 123].
"""
import random
print('--- Ex 2 ---')
f_list = list(random.randint(1, 300) for x in range(0, 15))
ed_list = [x for c, x in enumerate(f_list) if c > 0 and x > f_list[c - 1]]
print(f_list)
print(ed_list)
# --------------------------------------------------------------------------
"""
3. For the numbers from 20 to 240, find those divisible by 20 or 21. The task must be solved in a single line.
Hint: use the range() function and a comprehension.
"""
print('--- Ex 3 ---')
f_list = list(x for x in range(20, 241) if x % 20 == 0 or x % 21 == 0)
print(f_list)
# --------------------------------------------------------------------------
"""
4. Given a list of numbers, determine the elements that have no duplicates.
Build the resulting array from the numbers that satisfy this requirement.
Output the elements in the order they appear in the original list.
A comprehension must be used for this task.
Example source list: [2, 2, 2, 7, 23, 1, 44, 44, 3, 2, 10, 7, 4, 11].
Result: [23, 1, 3, 10, 4, 11]
"""
print('--- Ex 4 ---')
f_list = list(random.randint(1, 20) for x in range(0, 15))
ed_list = [x for x in f_list if f_list.count(x) == 1]
print(f_list)
print(ed_list)
# --------------------------------------------------------------------------
"""
5. Build a list using the range() function and a comprehension.
The list must contain the even numbers from 100 to 1000 (inclusive).
Compute the product of all elements of the list.
Hint: use the reduce() function.
"""
from functools import reduce
print('--- Ex 5 ---')
f_list = list(x for x in range(100, 1001) if x % 2 == 0)
total_amount = reduce(lambda total, amount: total * amount, f_list)
print(f_list)
print(total_amount)
# --------------------------------------------------------------------------
"""
6. Implement two small scripts:
a) an iterator that generates integers starting from a given number,
b) an iterator that repeats the elements of a list defined in advance.
Hint: use the count() and cycle() functions of the itertools module.
Note that the resulting loop must not be infinite; a termination condition must be provided.
For example, in the first task we print integers starting from 3 and stop the loop once 10 is reached.
The second task must also include a condition that stops the repetition of the list elements.
"""
from itertools import count, cycle
print('--- Ex 6 ---')
def f_gen(start):
for num in count(start, 1):
yield num
for i, num in enumerate(f_gen(5)):
print(num)
if i > 3:
break
print('--- ---')
# --------------------------------------------------------------------------
def s_gen(list):
for item in cycle(list):
yield item
my_list = ['1st', '2nd', '3rd']
for i, item in enumerate(s_gen(my_list)):
print(item)
if i > 3:
break
# --------------------------------------------------------------------------
"""
7. Implement a generator using a function with the yield keyword that produces the next value.
Calling the function must create a generator object. The function must be called as follows: for el in fact(n).
The function computes the factorial, and the loop must print only the first n values, from 1! up to n!.
Hint: the factorial of n is the product of the numbers from 1 to n. For example, 4! = 1 * 2 * 3 * 4 = 24.
"""
print('--- Ex 7 ---')
def fact(end):
for item in range(1, end+1):
yield reduce(lambda total, amount: total * amount, range(1, item+1))
n = 4
for el in fact(n):
print(el)
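# Illustrative check (added): for n = 4 the generator yields 1!, 2!, 3!, 4!.
assert list(fact(4)) == [1, 2, 6, 24]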
|
[
"wnick@bk.ru"
] |
wnick@bk.ru
|
058213adb8930b755a1e1a227d1503946eb6a378
|
ea064463fd3e7f341a8d1a902db85b859a184838
|
/pySAM/tests/test_utils.py
|
113918f9557da77882e0400b2e0a50f87718797b
|
[] |
no_license
|
sososquirrel/SAM_project
|
66660c83831ff95465393d13181369412e91cda7
|
6fa411e2a8e1a6966df44c6ed2a30e0d919887ed
|
refs/heads/main
| 2023-04-12T16:10:43.590668
| 2022-10-19T13:41:50
| 2022-10-19T13:41:50
| 322,825,960
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
import time
import numpy as np
import pySAM
import pytest
from pySAM.utils import make_parallel
def test_make_parallel():
def yoo(a, b):
for i in range(1000000):
a + b
return 1
list_iterate = np.array(
[1, 3, 5, 8, 10, 3, 5, 4, 12, 23, 45, 56, 76, 89, 98, 89, 87, 65, 45, 34]
)
constant_params = 5
parallel_yoo = make_parallel(yoo, nprocesses=8)
a = time.time()
parallel_yoo(list_iterate, constant_params)
print("TIME WITH PARALLEL ", time.time() - a)
a = time.time()
for i in list_iterate:
yoo(i, 7)
print("TIME WITHOUT PARALLEL", time.time() - a)
|
[
"sophie.abramian@gmail.com"
] |
sophie.abramian@gmail.com
|
a626adc7102a1a6b908acacffa6e1e0b37933a75
|
8e75c045b657680591b787bf9a1506dd3aed2415
|
/address_plate.py
|
8fd3b10fe9450a242099fca07b72db71b257c271
|
[] |
no_license
|
SergeyFromOdessa/address_plate_modern
|
87c97bc3431700ced539ece56149264c89a8a61d
|
08ccaa98050b247fe38fae9e7b167ca6a388debc
|
refs/heads/master
| 2022-11-10T22:17:44.826529
| 2020-07-06T08:38:09
| 2020-07-06T08:38:09
| 277,486,928
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,542
|
py
|
from argparse import ArgumentParser
import io
import pickle
import re
from reportlab.pdfgen import canvas
from reportlab.lib.colors import PCMYKColor
import sys
import textwrap
def pt(mm: float) -> float:
""" mm to pt
"""
return mm*2.834645669 # 72/25.4
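# Illustrative check (added): one inch (25.4 mm) is 72 pt.
assert abs(pt(25.4) - 72.0) < 1e-6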
def _load_path():
with open('paths' + '.pkl', 'rb') as f:
return pickle.load(f)
THIN = 'thin'
WIDE = 'wide'
COLOR_WHITE = PCMYKColor(0, 0, 0, 0)
COLOR_DARK_BLUE = PCMYKColor(75, 65, 0, 75)
# this will do for now; I will change it if the colors do not fit
HOUSE_NUMBER_RE_TUPLE = (
re.compile(r'^(?P<lvl1>[1-9]\d*(-[1-9]\d*)?)$'),
re.compile(r'^(?P<lvl1>[1-9]\d*(-[1-9]\d*)?)(?P<lvl2c>[А-Я]+)$'),
re.compile(r'^(?P<lvl1>[1-9]\d*(-[1-9]\d*)?)(?P<lvl2_slash>/)(?P<lvl2s>[1-9]\d*)(?P<lvl3>[А-Я]*)$'),
re.compile(r'^(?P<lvl1>[1-9]\d*(-[1-9]\d*)?)(?P<lvl2c> к[1-9]\d*)$'),
)
HOUSE_NUMBER_ARROW_RE_TUPLE = (
re.compile(r'(?P<lvl_a1>^[1-9]\d*(-[1-9]\d*)?)(?P<lvl_a2c>[А-Я]+)?$'),
)
LVL1 = 'lvl1' # (main number)
SLASH = 'lvl2_slash'
LVL2C = 'lvl2c' # (letter suffix)
LVL2S = 'lvl2s' # (number after the slash)
LVL3 = 'lvl3' # (letter of the number after the slash)
LVL_A1 = 'lvl_a1'
LVL_A2C = 'lvl_a2c'
SIZES_PT = {
'thin_round_radius': pt(15.0),
'wide_round_radius': pt(22.5),
'thin_margin': pt(40),
'thin_height': pt(215),
'wide_margin': pt(60),
'wide_height': pt(320),
'thin_street_type_font': {'face': 'regular', 'size': 90.0},
'thin_street_type_bl': pt(215-173),
'thin_street_name_font': {'face': 'semi-bold', 'size': 220.0},
'thin_street_name_bl': pt(215-94),
'thin_street_line_width': 4.0,
'thin_street_line_bl': pt(215-62),
'thin_street_translit_font': {'face': 'regular', 'size': 90.0},
'thin_street_translit_bl': pt(215-24),
'wide_street_type_font': {'face': 'regular', 'size': 135.0},
'wide_street_type_bl': pt(320-260),
'wide_street_name_font': {'face': 'semi-bold', 'size': 330.0},
'wide_street_name_bl': pt(320-140),
'wide_street_line_width': 6.0,
'wide_street_line_bl': pt(320-92),
'wide_street_translit_font': {'face': 'regular', 'size': 135.0},
'wide_street_translit_bl': pt(320-36),
'thin_house_number_width': (pt(215), pt(280), pt(380), pt(440), pt(520)),
'thin_house_number_bl': pt(215-50),
'thin_house_number_font_lvl1': {'face': 'semi-bold', 'size': 480.0},
'thin_house_number_font_lvl2c': {'face': 'bold', 'size': 300.0},
'thin_house_number_font_lvl2s': {'face': 'semi-bold', 'size': 300.0},
'thin_house_number_font_lvl3': {'face': 'semi-bold', 'size': 220.0},
'thin_house_number_slash_size': {'face': 'slash', 'size': 80},
# 'thin_house_number_slash_size': (pt(12), 75, pt(80), pt(6)),
'wide_house_number_width': (pt(320), pt(420), pt(565), pt(640), pt(720)),
'wide_house_number_bl': pt(320-75),
'wide_house_number_font_lvl1': {'face': 'semi-bold', 'size': 720.0},
'wide_house_number_font_lvl2c': {'face': 'bold', 'size': 450.0},
'wide_house_number_font_lvl2s': {'face': 'bold', 'size': 450.0},
'wide_house_number_font_lvl3': {'face': 'bold', 'size': 330.0},
'wide_house_number_slash_size': {'face': 'slash', 'size': 125},
# 'wide_house_number_slash_size': (pt(20), 75, pt(125), pt(9)),
'thin_house_number_arrow_width': (pt(215), pt(215), pt(280), pt(340), pt(440)),
'thin_house_number_arrow_bl': pt(215 - 90),
'thin_house_number_arrow_font_lvl1': {'face': 'semi-bold', 'size': 380.0},
'thin_house_number_arrow_font_lvl2c': {'face': 'bold', 'size': 240.0},
'thin_house_number_arrow_font_lvl2s': {'face': 'bold', 'size': 240.0},
'thin_house_number_arrow_font_lvl3': {'face': 'semi-bold', 'size': 140.0},
'thin_house_number_arrow_slash_size': {'face': 'slash', 'size': 65},
# 'thin_house_number_arrow_slash_size': (pt(10), 75, pt(65), pt(5)),
'thin_house_number_arrow_arrow_bl': pt(215-62),
'thin_house_number_arrow_arrow_size': {'line_width': 4, 'length': pt(9.8),
'half_height': pt(8.5/2), 'half_space': pt(15/2)},
'thin_house_number_arrow_number_bl': pt(215-24),
'thin_house_number_arrow_number_font_lvl_a1': {'face': 'regular', 'size': 90.0},
'thin_house_number_arrow_number_font_lvl_a2c': {'face': 'semi-bold', 'size': 50.0},
'wide_house_number_arrow_width': (pt(320), pt(320), pt(420), pt(510), pt(660)),
'wide_house_number_arrow_bl': pt(320 - 135),
'wide_house_number_arrow_font_lvl1': {'face': 'semi-bold', 'size': 570.0},
'wide_house_number_arrow_font_lvl2c': {'face': 'bold', 'size': 330.0},
'wide_house_number_arrow_font_lvl2s': {'face': 'bold', 'size': 330.0},
'wide_house_number_arrow_font_lvl3': {'face': 'bold', 'size': 210.0},
'wide_house_number_arrow_slash_size': {'face': 'slash', 'size': 98},
# 'wide_house_number_arrow_slash_size': (pt(12), 75, pt(98), pt(7.5)),
'wide_house_number_arrow_arrow_bl': pt(320-94),
'wide_house_number_arrow_arrow_size': {'line_width': 6, 'length': pt(18.8),
'half_height': pt(14.8/2), 'half_space': pt(22.5/2)},
'wide_house_number_arrow_number_bl': pt(320-37),
'wide_house_number_arrow_number_font_lvl_a1': {'face': 'regular', 'size': 135.0},
'wide_house_number_arrow_number_font_lvl_a2c': {'face': 'semi-bold', 'size': 75.0},
'thin_vertical_width': pt(360),
'thin_vertical_height': pt(480),
'thin_vertical_margin': pt(36),
'thin_vertical_street_type_font': {'face': 'regular', 'size': 65.0},
'thin_vertical_street_type_bl': pt(36.0) + 32.625,
'thin_vertical_street_name_font': {'face': 'semi-bold', 'size': 110.0, 'leading': 120.0},
'thin_vertical_street_name_translate': pt(18) + 77.984375,
'thin_vertical_street_line_width': pt(2.0),
'thin_vertical_street_line_translate': pt(24.0),
'thin_vertical_street_name_max_char': 15,
'thin_vertical_street_translit_font': {'face': 'regular', 'size': 65.0, 'leading': 78.0},
'thin_vertical_street_translit_translate': pt(24) + 32.625,
'thin_vertical_street_translit_max_char': 30,
'thin_vertical_house_number_bl': pt(480 - 48),
'thin_vertical_house_number_font_lvl1': {'face': 'semi-bold', 'size': 540.0},
'thin_vertical_house_number_font_lvl2c': {'face': 'bold', 'size': 312.0},
'thin_vertical_house_number_font_lvl2s': {'face': 'bold', 'size': 312.0},
'thin_vertical_house_number_font_lvl3': {'face': 'bold', 'size': 220.0},
'thin_vertical_house_number_slash_size': {'face': 'slash', 'size': 80},
# 'thin_vertical_house_number_slash_size': (pt(12), 75, pt(80), pt(6)),
'wide_vertical_width': pt(540),
'wide_vertical_height': pt(720),
'wide_vertical_margin': pt(54),
'wide_vertical_street_type_font': {'face': 'regular', 'size': 100.0},
'wide_vertical_street_type_bl': pt(54.0) + 50.203125,
'wide_vertical_street_name_font': {'face': 'semi-bold', 'size': 165.0, 'leading': 180.0},
'wide_vertical_street_name_translate': pt(18) + 116.984375,
'wide_vertical_street_name_max_char': 15,
'wide_vertical_street_line_width': pt(3.0),
'wide_vertical_street_line_translate': pt(36.0),
'wide_vertical_street_translit_font': {'face': 'regular', 'size': 100.0, 'leading': 120.0},
'wide_vertical_street_translit_translate': pt(57.0),
'wide_vertical_street_translit_max_char': 30,
'wide_vertical_house_number_bl': pt(720 - 72),
'wide_vertical_house_number_font_lvl1': {'face': 'semi-bold', 'size': 810.0},
'wide_vertical_house_number_font_lvl2c': {'face': 'bold', 'size': 470.0},
'wide_vertical_house_number_font_lvl2s': {'face': 'bold', 'size': 470.0},
'wide_vertical_house_number_font_lvl3': {'face': 'bold', 'size': 330.0},
'wide_vertical_house_number_slash_size': {'face': 'slash', 'size': 80},
# 'wide_vertical_house_number_slash_size': (pt(12), 75, pt(80), pt(6)),
}
def main():
parser = ArgumentParser()
parser.add_argument('--wide', help='Wide street', action='store_true')
sub_parser = parser.add_subparsers(title='Address plate', description='Address plate description')
street_name_parser = sub_parser.add_parser('name', help='Street name')
street_name_parser.add_argument('--street_type', help='Street type', type=str, required=True)
street_name_parser.add_argument('--street_name', help='Street name', type=str, required=True)
street_name_parser.add_argument('--street_translit', help='Street translit', type=str, required=True)
street_name_parser.set_defaults(func=StreetName)
street_number_parser = sub_parser.add_parser('number', help='House number')
street_number_parser.add_argument('--house_num', help='House number', type=str, required=True)
street_number_parser.add_argument('--left_num', help='Left arrow', type=str)
street_number_parser.add_argument('--right_num', help='Right arrow', type=str)
street_number_parser.set_defaults(func=StreetNumber)
vertical_parser = sub_parser.add_parser('vertical', help='Vertical')
vertical_parser.add_argument('--street_type', help='Street type', type=str, required=True)
vertical_parser.add_argument('--street_name', help='Street name', type=str, required=True)
vertical_parser.add_argument('--street_translit', help='Street translit', type=str, required=True)
vertical_parser.add_argument('--house_num', help='House number', type=str, required=True)
vertical_parser.set_defaults(func=Vertical)
args = parser.parse_args()
func_args = dict(vars(args))
func_args['wide'] = WIDE if args.wide else THIN
del func_args['func']
plate = args.func(**func_args)
pdf = plate.pdf()
if True:
sys.stdout = sys.stdout.detach()
sys.stdout.write(pdf.read())
else:
with open('test.pdf', 'wb') as out:
out.write(pdf.read())
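# Illustrative invocations (the street names and numbers below are hypothetical); the PDF
# is written to stdout, so redirect it to a file. A pre-built paths.pkl with the glyph
# outlines must be present in the working directory (it is loaded when TextPaths is defined).
#   python address_plate.py name --street_type "вул." --street_name "Приклад" --street_translit "Pryklad st." > name.pdf
#   python address_plate.py --wide number --house_num "12/3А" --left_num 10 --right_num 14 > number.pdf
#   python address_plate.py vertical --street_type "вул." --street_name "Приклад" --street_translit "Pryklad st." --house_num 5 > vertical.pdf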
class BasePlate:
def __init__(self):
self.margin = self.width = self.height = self.radius = 0
self.canvas = None
self._init_margin()
self._init_width()
self._init_height()
self._init_radius()
self.width_without_margin = self.width - self.margin * 2
def _init_margin(self):
""" must be override in children class
"""
pass
def _init_width(self):
""" must be override in children class
"""
pass
def _init_height(self):
""" must be override in children class
"""
pass
def _init_radius(self):
""" must be override in children class
"""
pass
def _draw_face(self):
""" must be override in children class
"""
pass
def _draw_background(self):
self.canvas.roundRect(0, 0, self.width, self.height, self.radius, stroke=0, fill=1)
@staticmethod
def parse_house_number(house_num_str, regex_tuple):
for r in regex_tuple:
match_res = r.match(house_num_str)
if match_res:
return match_res.groupdict()
return None
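    # Illustrative result of parse_house_number above:
    #   parse_house_number("12/3Б", HOUSE_NUMBER_RE_TUPLE)
    #   -> {'lvl1': '12', 'lvl2_slash': '/', 'lvl2s': '3', 'lvl3': 'Б'}
    # and None is returned when no pattern matches.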
def pdf(self):
pdf = io.BytesIO()
self.canvas = canvas.Canvas(pdf, (self.width, self.height), bottomup=0)
self.canvas.setFillColor(COLOR_DARK_BLUE)
self._draw_background()
self.canvas.setFillColor(COLOR_WHITE)
self.canvas.setStrokeColor(COLOR_WHITE)
self.canvas.translate(self.margin, 0)
self._draw_face()
self.canvas.showPage()
self.canvas.save()
self.canvas = None
pdf.seek(0)
return pdf
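# The three plate classes below (StreetName, StreetNumber, Vertical) specialise BasePlate:
# each one sets its dimensions via the _init_* hooks and implements _draw_face(), while
# the shared pdf() above paints the rounded dark-blue background, switches to white,
# shifts the origin right by the margin and returns the finished page as a BytesIO buffer.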
class StreetName(BasePlate):
def __init__(self, street_type: str, street_name: str, street_translit: str, wide: str = THIN):
self.street_type_text_path = TextPaths(text=street_type, font=SIZES_PT[f'{wide}_street_type_font'])
self.street_name_text_path = TextPaths(text=street_name, font=SIZES_PT[f'{wide}_street_name_font'])
self.street_translit_text_path = TextPaths(text=street_translit, font=SIZES_PT[f'{wide}_street_translit_font'])
self.wide = wide
super().__init__()
def _init_margin(self):
self.margin = SIZES_PT[f'{self.wide}_margin']
def _init_width(self):
# plate width: widest of the three text lines, padded and snapped to a whole multiple of the margin
self.width = ((max([text_path.get_path_extents()[2] for text_path in [
self.street_type_text_path, self.street_name_text_path, self.street_translit_text_path
]])+self.margin*0.7)//self.margin+2)*self.margin
def _init_height(self):
self.height = SIZES_PT[f'{self.wide}_height']
def _init_radius(self):
self.radius = SIZES_PT[f'{self.wide}_round_radius']
def _draw_face(self):
self._draw_street_type()
self._draw_street_name()
self._draw_line()
self._draw_street_translit()
def _draw_line(self):
self.canvas.saveState()
self.canvas.setLineWidth(SIZES_PT[f'{self.wide}_street_line_width'])
self.canvas.line(0, SIZES_PT[f'{self.wide}_street_line_bl'],
self.width - SIZES_PT[f'{self.wide}_margin'] * 2, SIZES_PT[f'{self.wide}_street_line_bl'])
self.canvas.restoreState()
def _draw_street_type(self):
self.canvas.saveState()
self.canvas.translate(0, SIZES_PT[f'{self.wide}_street_type_bl'])
self.street_type_text_path.draw(self.canvas)
self.canvas.restoreState()
def _draw_street_name(self):
self.canvas.saveState()
self.canvas.translate(0, SIZES_PT[f'{self.wide}_street_name_bl'])
self.street_name_text_path.draw(self.canvas)
self.canvas.restoreState()
def _draw_street_translit(self):
self.canvas.saveState()
self.canvas.translate(0, SIZES_PT[f'{self.wide}_street_translit_bl'])
self.street_translit_text_path.draw(self.canvas)
self.canvas.restoreState()
class StreetNumber(BasePlate):
def __init__(self, house_num: str, left_num: str = None, right_num: str = None, wide: str = THIN):
self.house_num = house_num
self.left_num = left_num
self.right_num = right_num
self.wide = wide
self.arrow = '_arrow' if self.left_num or self.right_num else ''
super().__init__()
def _init_margin(self):
self.margin = SIZES_PT[f'{self.wide}_margin']
def _init_width(self):
self.width = SIZES_PT[f'{self.wide}_house_number{self.arrow}_width'][min(len(self.house_num) - 1, 4)]
def _init_height(self):
self.height = SIZES_PT[f'{self.wide}_height']
def _init_radius(self):
self.radius = SIZES_PT[f'{self.wide}_round_radius']
def _draw_face(self):
self._draw_number()
if self.left_num or self.right_num:
self._draw_arrows()
if self.left_num:
self._draw_left_num()
if self.right_num:
self._draw_right_num()
def _draw_number(self):
house_number_dict = self.parse_house_number(self.house_num, HOUSE_NUMBER_RE_TUPLE)
text_paths = {}
house_number_width = 0
for key in sorted(house_number_dict.keys()):
font = SIZES_PT[f'{self.wide}_house_number{self.arrow}_font_{key}'] if key != SLASH else \
SIZES_PT[f'{self.wide}_house_number_slash_size']
text_paths[key] = TextPaths(house_number_dict[key], font)
house_number_width += text_paths[key].get_current_point()[0]
width = self.width_without_margin
translate_x = (width - house_number_width)/2
self.canvas.saveState()
self.canvas.translate(0, SIZES_PT[f'{self.wide}_house_number{self.arrow}_bl'])
if translate_x >= 0:
self.canvas.translate(translate_x, 0)
else:
scale = width/house_number_width
# work_canvas.translate(SIZES_PT[f'{wide}_margin']*(1-scale), 0)
self.canvas.scale(scale, scale)
after_slash = False
for key in text_paths.keys():
if after_slash:
self.canvas.translate(-text_paths[key].get_path_extents()[0], 0)
after_slash = False
text_paths[key].draw(self.canvas)
self.canvas.translate(text_paths[key].get_current_point()[0], 0)
if key == SLASH:
after_slash = True
self.canvas.restoreState()
def _draw_arrow(self, x: float, y: float, k: int, length: float, half_height: float):
""" k= -1 or 1
"""
p = self.canvas.beginPath()
p.moveTo(x, y)
p.lineTo(x + k * length, y + half_height)
p.lineTo(x + k * length, y - half_height)
p.close()
self.canvas.drawPath(p, fill=1, stroke=0)
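    # _draw_arrows below draws the horizontal rule with an arrowhead on each side that
    # has a neighbouring number; when both --left_num and --right_num are given, a gap
    # of 2*half_space is left in the middle of the line.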
def _draw_arrows(self):
arrow_size = SIZES_PT[f'{self.wide}_house_number_arrow_arrow_size']
base_line = SIZES_PT[f'{self.wide}_house_number_arrow_arrow_bl']
width = self.width_without_margin
self.canvas.saveState()
self.canvas.setLineWidth(arrow_size['line_width'])
if self.left_num:
self._draw_arrow(0, base_line, 1, arrow_size['length'], arrow_size['half_height'])
self.canvas.line(arrow_size['length'], base_line, width/2-arrow_size['half_space'], base_line)
else:
self.canvas.line(0, base_line, width/2+arrow_size['half_space'], base_line)
if self.right_num:
self._draw_arrow(width, base_line, -1, arrow_size['length'], arrow_size['half_height'])
self.canvas.line(width/2 + arrow_size['half_space'], base_line, width - arrow_size['length'], base_line)
else:
self.canvas.line(width/2 - arrow_size['half_space'], base_line, width, base_line)
self.canvas.restoreState()
def _draw_left_num(self):
left_num_dict = self.parse_house_number(self.left_num, HOUSE_NUMBER_ARROW_RE_TUPLE)
self.canvas.saveState()
self.canvas.translate(0, SIZES_PT[f'{self.wide}_house_number_arrow_number_bl'])
lvl_a1_path = TextPaths(left_num_dict[LVL_A1], SIZES_PT[f'{self.wide}_house_number_arrow_number_font_{LVL_A1}'])
lvl_a1_path.draw(self.canvas)
if left_num_dict[LVL_A2C]:
lvl_a2c_path = TextPaths(left_num_dict[LVL_A2C],
SIZES_PT[f'{self.wide}_house_number_arrow_number_font_{LVL_A2C}'])
self.canvas.translate(lvl_a1_path.get_current_point()[0], 0)
lvl_a2c_path.draw(self.canvas)
self.canvas.restoreState()
def _draw_right_num(self):
right_num_dict = self.parse_house_number(self.right_num, HOUSE_NUMBER_ARROW_RE_TUPLE)
self.canvas.saveState()
self.canvas.translate(self.width_without_margin, SIZES_PT[f'{self.wide}_house_number_arrow_number_bl'])
if right_num_dict[LVL_A2C]:
lvl_a2c_path = TextPaths(right_num_dict[LVL_A2C],
SIZES_PT[f'{self.wide}_house_number_arrow_number_font_{LVL_A2C}'])
self.canvas.translate(-lvl_a2c_path.get_current_point()[0], 0)
lvl_a2c_path.draw(self.canvas)
lvl_a1_path = TextPaths(right_num_dict[LVL_A1],
SIZES_PT[f'{self.wide}_house_number_arrow_number_font_{LVL_A1}'])
self.canvas.translate(-lvl_a1_path.get_current_point()[0], 0)
lvl_a1_path.draw(self.canvas)
self.canvas.restoreState()
class Vertical(BasePlate):
def __init__(self, street_type: str, street_name: str, street_translit: str, house_num: str, wide: str = THIN):
self.street_type = street_type
self.street_name = street_name
self.street_translit = street_translit
self.house_num = house_num
self.wide = wide
super().__init__()
def _init_margin(self):
self.margin = SIZES_PT[f'{self.wide}_vertical_margin']
def _init_width(self):
self.width = SIZES_PT[f'{self.wide}_vertical_width']
def _init_height(self):
self.height = SIZES_PT[f'{self.wide}_vertical_height']
def _init_radius(self):
self.radius = SIZES_PT[f'{self.wide}_round_radius']
def _draw_face(self):
self._draw_street_type()
self._draw_street_name()
self._draw_line()
self._draw_translit()
self._draw_number()
def _draw_street_type(self):
self.canvas.saveState()
self.canvas.translate(0, SIZES_PT[f'{self.wide}_vertical_street_type_bl'])
street_type_text_path = TextPaths(text=self.street_type,
font=SIZES_PT[f'{self.wide}_vertical_street_type_font'])
street_type_text_path.draw(self.canvas)
def _draw_street_name(self):
street_name_text_path = TextPaths(text=self.street_name,
font=SIZES_PT[f'{self.wide}_vertical_street_name_font'])
self.canvas.translate(0, SIZES_PT[f'{self.wide}_vertical_street_name_translate'])
if street_name_text_path.get_path_extents()[2] < self.width_without_margin:
street_name_text_path.draw(self.canvas)
else:
str_list = textwrap.wrap(self.street_name, width=SIZES_PT[f'{self.wide}_vertical_street_name_max_char'],
break_long_words=False)
str_path_list = [TextPaths(text=s,
font=SIZES_PT[f'{self.wide}_vertical_street_name_font']) for s in str_list]
scale = min(1, self.width_without_margin / max([path.get_path_extents()[2] for path in str_path_list]))
self.canvas.scale(scale, scale)
for path in str_path_list:
path.draw(self.canvas)
if path != str_path_list[-1]:
self.canvas.translate(0, SIZES_PT[f'{self.wide}_vertical_street_name_font']['leading'])
self.canvas.scale(1, 1)
def _draw_line(self):
self.canvas.translate(0, SIZES_PT[f'{self.wide}_vertical_street_line_translate'])
self.canvas.setLineWidth(SIZES_PT[f'{self.wide}_vertical_street_line_width'])
self.canvas.line(0, 0, self.width_without_margin, 0)
def _draw_translit(self):
street_translit_text_path = TextPaths(text=self.street_translit,
font=SIZES_PT[f'{self.wide}_vertical_street_translit_font'])
self.canvas.translate(0, SIZES_PT[f'{self.wide}_vertical_street_translit_translate'])
if street_translit_text_path.get_path_extents()[2] < self.width_without_margin:
street_translit_text_path.draw(self.canvas)
else:
str_list = textwrap.wrap(self.street_translit,
width=SIZES_PT[f'{self.wide}_vertical_street_translit_max_char'],
break_long_words=False)
str_path_list = [TextPaths(text=s,
font=SIZES_PT[f'{self.wide}_vertical_street_translit_font']) for s in str_list]
scale = min(1, self.width_without_margin / max([path.get_path_extents()[2] for path in str_path_list]))
self.canvas.scale(scale, scale)
for path in str_path_list:
path.draw(self.canvas)
if path != str_path_list[-1]:
self.canvas.translate(0, SIZES_PT[f'{self.wide}_vertical_street_translit_font']['leading'])
self.canvas.scale(1, 1)
self.canvas.restoreState()
def _draw_number(self):
house_number_dict = self.parse_house_number(self.house_num, HOUSE_NUMBER_RE_TUPLE)
text_paths = {}
house_number_width = 0
for key in sorted(house_number_dict.keys()):
font = SIZES_PT[f'{self.wide}_vertical_house_number_font_{key}'] if key != SLASH else \
SIZES_PT[f'{self.wide}_vertical_house_number_slash_size']
text_paths[key] = TextPaths(house_number_dict[key], font)
house_number_width += text_paths[key].get_current_point()[0]
self.canvas.saveState()
self.canvas.translate(0, SIZES_PT[f'{self.wide}_vertical_house_number_bl'])
if house_number_width > self.width_without_margin:
scale = self.width_without_margin / house_number_width
self.canvas.scale(scale, scale)
after_slash = False
for key in sorted(house_number_dict.keys()):
if after_slash:
self.canvas.translate(-text_paths[key].get_path_extents()[0], 0)
after_slash = False
text_paths[key].draw(self.canvas)
self.canvas.translate(text_paths[key].get_current_point()[0], 0)
if key == SLASH:
after_slash = True
self.canvas.restoreState()
class TextPaths:
"""
{
'face_size_char': ([(operation_type, (points))], (current_point), (path_extents))
'slash_size_/': ([(operation_type, (points))], (current_point), (path_extents))
}
"""
path_dict = _load_path()
def __init__(self, text: str, font: dict):
self.text = text
self.face = font['face']
self.size = font['size']
self.operations = []
self.current_point = (0, 0)
self.path_extents = (0, 0, 0, 0)
self._init_path()
def _init_path(self):
for char in self.text:
self._append_char(char)
def _append_char(self, char: str):
char_operations, char_current_point, char_path_extends = self.path_dict[f"{self.face}_{self.size}_{char}"]
self._appends_operations(char_operations)
self._calc_path_extends(char_path_extends)
self._calc_current_point(char_current_point)
def _appends_operations(self, char_operations):
for type_op, points in char_operations:
self.operations.append((type_op, self._sum_points(self.current_point, points)))
def _calc_current_point(self, char_current_point):
self.current_point = self._sum_points(self.current_point, char_current_point)
def _calc_path_extends(self, char_path_extends):
path_extends = self._sum_points(self.current_point, char_path_extends)
# maybe the min/max selection should be changed?
self.path_extents = (
min(self.path_extents[0], path_extends[0]),
min(self.path_extents[1], path_extends[1]),
max(self.path_extents[2], path_extends[2]),
max(self.path_extents[3], path_extends[3]),
)
@staticmethod
def _sum_points(point: tuple, points: tuple) -> tuple:
"""
point (x, y)
points (x1, y1, ..., xn, yn)
"""
return tuple(x + y for x, y in zip(point*(len(points)//2), points))
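    # Illustrative: _sum_points((10, 5), (1, 2, 3, 4)) -> (11, 7, 13, 9);
    # the point is tiled to the length of `points` and added element-wise.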
def draw(self, work_canvas: canvas.Canvas):
p = work_canvas.beginPath()
for type_op, points in self.operations:
if type_op == 'moveTo':
p.moveTo(*points)
elif type_op == 'lineTo':
p.lineTo(*points)
elif type_op == 'curveTo':
p.curveTo(*points)
elif type_op == 'close':
p.close()
work_canvas.drawPath(p, fill=1, stroke=0)
def get_path_extents(self):
"""
x1: left of the resulting extents
y1: top of the resulting extents
x2: right of the resulting extents
y2: bottom of the resulting extents
:return: (x1, y1, x2, y2), all float
"""
return self.path_extents
def get_current_point(self):
"""
:return: (x, y), both float
"""
return self.current_point
if __name__ == '__main__':
main()
|
[
"sergey@example.net"
] |
sergey@example.net
|