blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
77ac69ff892baccedaf8082796e6192fe6a242dd
|
0a5f0ab9d8962d0a1c0e0f585baec7a3ab8321d1
|
/basic/forms.py
|
e42c34d2a248b38958dd429206634d76aa68f315
|
[] |
no_license
|
saap1tech/courses_web
|
d98405535c336d84296aa96e81095b10959a0b75
|
0aa4ae97a7a5ac62e82dc395e3bb7401c4af7a44
|
refs/heads/master
| 2023-06-02T10:46:02.653514
| 2021-06-14T12:19:24
| 2021-06-14T12:19:24
| 350,124,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
from django import forms
from . import models
class AddVideo(forms.ModelForm):
    """Upload form for the Videos model, exposing only the file field."""

    class Meta:
        model = models.Videos
        fields = ['video']

    def __init__(self, *args, **kwargs):
        # Tag the upload widget with a fixed DOM id so templates/JS can
        # target it directly.
        super().__init__(*args, **kwargs)
        video_widget = self.fields['video'].widget
        video_widget.attrs.update({'id': 'video'})
|
[
"saap1426@gmail.com"
] |
saap1426@gmail.com
|
548053fb510f44628c5bba5b2b7d3b962e5a86e1
|
b0b87924d07101e25fa56754ceaa2f22edc10208
|
/workspace/python_study/python_gspark/15-2.py
|
88ec8fdb1e8e43f9901bf9017a64fa128a312bad
|
[] |
no_license
|
SoheeKwak/Python
|
2295dd03e5f235315d07355cbe72998f8b86c147
|
e1a5f0ecf31e926f2320c5df0e3416306b8ce316
|
refs/heads/master
| 2020-04-02T13:49:58.367361
| 2018-11-23T09:33:23
| 2018-11-23T09:33:23
| 154,499,204
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,204
|
py
|
# NumPy / pandas practice script: indexing, slicing, reshaping, resizing,
# stacking, splitting and basic linear algebra.  Prints intermediate
# results with "=" separators throughout.
import numpy as np
a1 = np.arange(24)
a2 = np.arange(24).reshape((4,6))
a3 = np.arange(24).reshape((2,4,3))
a1[5]=1000
a2[0,1]=1000
a3[1,0,1]=1000 # element [1,0,1]: 2nd block, 1st row, 2nd column
print(a1)
print(a2)
print(a2[1:3,1:5])
print(a2[1:-1,1:-1])
print(a2[:,1:3])
a2[:,1:3]=99
print(a2)
a1 = np.arange(1,25).reshape(4,6)
even_a = a1%2==0 # boolean mask of the even entries
print(a1[even_a])
print("="*50)
import pandas as pd
# Requires seattle.csv (Seattle rainfall data) in the working directory.
rain = pd.read_csv("seattle.csv")
print(rain)
print("="*50)
rain_r = rain['PRCP']
print(rain_r)
print(type(rain_r)) #<class 'pandas.core.series.Series'>
print("="*50)
rain_r = rain['PRCP'].values
print(rain_r)
print(type(rain_r)) #<class 'numpy.ndarray'>
print("데이터 크기:",len(rain_r))
days_a = np.arange(0,365)
con_jan = days_a < 31 # True: 31 entries, False: 334 entries
print(con_jan[:40]) # mask for the first 40 days starting Jan 1
print("="*50)
print(con_jan) # mask selecting January (31 days) of rainfall data
print(np.sum(rain_r[con_jan])) # total January rainfall
print(np.mean(rain_r[con_jan])) # average January rainfall
a = np.arange(1,25).reshape((4,6))
# Fancy indexing: pass index arrays to pick out individual elements.
print(a)
print(a[0,0],a[1,1],a[2,2],a[3,3])
print(a[[0,1,2,3],[0,1,2,3]])
print(a[:,[1,2]]) # a bare index list (no colon range) selects just those columns
print(a[:,[1,3]])
print("="*50)
# ravel (flatten the array to 1-D)
a = np.random.randint(1,10,(2,3))
print(a)
print(a.ravel())
# resize: change array size (element count may change); reshape: change shape only
print(a.shape)
a.resize((2,2))
print(a)
print("="*50)
a = np.random.randint(1,10,(2,6))
print(a)
a.resize((2,10)) # when enlarged, the new elements are zero-filled
print(a)
a.resize((3,3)) # when shrunk, elements are kept in order and the rest dropped
print(a)
print("="*50)
a = np.arange(1,10).reshape(3,3)
b = np.arange(10,19).reshape(3,3)
res = np.append(a,b)
print(res) # printed as a 1-D array
print(a)
print(b)
print("="*50)
res = np.append(a,b, axis=0) # row direction, 2-D array
print(res)
print("="*50)
a = np.arange(1,10).reshape(3,3)
res = np.arange(10,20).reshape(2,5)
b = np.arange(10,19).reshape(3,3)
# np.append(a,res,axis=0) # append raises if shapes differ along the axis
# print(res)
print(a)
res = np.append(a,b,axis=1) # column direction, 2-D array
print(res)
print(b)
res = np.append(a,b,axis=0) # row direction, 2-D array
print(res)
# x = np.arange(10,20).reshape(2,5)
# np.append(res,x,axis=1) # error: shapes differ
a = np.arange(1,10).reshape(3,3)
print(a)
a = np.insert(a,3,99) # flattens to 1-D and inserts 99 at position 3
print(a)
a = np.arange(1,10).reshape(3,3)
a = np.insert(a,2,99, axis=0) # insert a row of 99s at row index 2
print(a)
a = np.arange(1,10).reshape(3,3)
a = np.insert(a,1,99, axis=1) # insert a column of 99s at column index 1
print(a)
print("="*50)
a = np.arange(1,10).reshape(3,3)
print(a)
print(np.delete(a,3)) # flattens to 1-D and removes the element at position 3
# Print a with row index 1 removed.
print(np.delete(a,1,axis=0))
# Print a with column index 1 removed.
print(np.delete(a,1,axis=1))
print("="*50)
# Joining arrays (concatenate, vstack, hstack)
a = np.arange(1,7).reshape(2,3)
print(a)
b = np.arange(7,13).reshape(2,3)
print(b)
res = np.concatenate((a,b))
print(res)
print("="*50)
a = np.arange(1,7).reshape(2,3)
b = np.arange(7,13).reshape(2,3)
print(np.vstack((a,b)))
print(np.vstack((a,b,a,b))) # vertical: stacked top-to-bottom
print("="*50)
a = np.arange(1,7).reshape(2,3)
b = np.arange(7,13).reshape(2,3)
print(np.hstack((a,b))) # horizontal: stacked side-by-side
print(np.hstack((a,b,a,b,a,b)))
print("="*50)
a = np.arange(1,25).reshape(4,6)
print(a)
res = np.hsplit(a,2) # split a into two groups, left/right
print(res)
res = np.hsplit(a,3)
print(res)
res = np.vsplit(a,2) # split a into two groups, top/bottom
print(res)
#
print("="*50)
x = np.array([1,2])
print(x)
print(x.dtype)
x = np.array([1.,2.])
print(x.dtype)
x = np.array([1,2],dtype=np.int64)
print(x.dtype)
x = np.array([[1,2],[3,4]])
y = np.array([[5,6],[7,8]])
v = np.array([9,10])
w = np.array([11,12])
# Vector dot product.
print(np.dot(v,w)) #9*11+10*12=219
print(v.dot(w))
# Matrix-vector product.
print(x.dot(v)) #[1,2]*[9,10]+[3,4]*[9,10]=[29,67]
# Matrix product.
print(x)
print(y)
print(np.dot(x,y)) #1*5+2*7, 1*6+2*8, 3*5+4*7, 3*6+4*8
x = np.array([[1,2],[3,4]])
print(x)
print(x.T) # transpose: rows and columns swapped
print("="*50)
x = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])
print(x)
v = np.array([1,0,1])
y = np.empty_like(x) # uninitialized array with the same shape as x
print(y)
print("="*50)
for i in range(4):
    y[i,:] = x[i,:]+v #[2,2,4]=[1,2,3]+[1,0,1]
print(y)
print("="*50)
x = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])
v = np.array([1,0,1])
vv = np.tile(v,(4,1)) # stack 4 copies of v as rows
print(vv)
vv = np.tile(v,(4,2))
print(vv)
vv = np.tile(v,(4,5))
print(vv)
a = np.array([[1,2],[4,5]])
s = np.prod(a) # product over all elements
print(s)
s = np.prod(a,axis=0)
print(s)
s = np.prod(a,axis=1)
print(s)
s = np.max(np.prod(a,axis=1))
print(s)
|
[
"soheekwak728@gmail.com"
] |
soheekwak728@gmail.com
|
20d61a63e03337d309b237ee3953113c24a5f720
|
f14ca4895dfd375d87b98101cf0d5a61c84a6f25
|
/app/core/serializers/bank3_serializer.py
|
a2719709a35177f67017b38a97b49f474c24d34c
|
[] |
no_license
|
rozhaev/bank_parser
|
f56e1ef86c4fbb62a9f585d51cbf0e3c371a8478
|
7b2727b8b1545207f518aa52a128d388a709cd81
|
refs/heads/main
| 2023-06-16T16:16:09.505628
| 2021-07-08T05:12:49
| 2021-07-08T05:12:49
| 384,005,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
from decimal import Decimal
from typing import List
from .base_serializer import BaseSerializer
class Bank3Serializer(BaseSerializer):
    """Serializer for bank #3 statements (amount split into euro/cent columns)."""

    header = ["date_readable", "type", "euro", "cents", "to", "from"]

    @staticmethod
    def _get_amount(data: List) -> Decimal:
        """Combine the separate euro and cent columns into one Decimal amount."""
        euros = Decimal(data[2])
        cents = Decimal(data[3])
        return euros + cents / 100

    @staticmethod
    def _get_field_from(data: List) -> str:
        """Return the counterparty ('from') column."""
        return data[5]

    @staticmethod
    def _get_date_pattern() -> str:
        """strftime pattern of this bank's dates, e.g. '08 Jul 2021'."""
        return "%d %b %Y"
|
[
"dmitry.rozhaev@gmail.com"
] |
dmitry.rozhaev@gmail.com
|
5a3d6c50cb8da0cc9244a7fe21ab684b3a0ed899
|
3af9361ad31a35d13ae7be20900861a2c2e486d8
|
/For_next_page.py
|
88cc9d11e4065317017b93afe8f8c11d15636958
|
[] |
no_license
|
NikhilLamba24/Web-Scraping
|
b9b93f74b8479721611ce5f5250bfb71831c423d
|
70964e2bbfc9e36979e294cf2232e797bd8191ae
|
refs/heads/master
| 2023-04-10T00:24:51.151797
| 2021-04-21T16:40:39
| 2021-04-21T16:40:39
| 272,225,874
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,392
|
py
|
from bs4 import BeautifulSoup
import requests

# Scrape Craigslist Boston non-profit job postings, following the
# "next page" link until the final results page.
url = "https://boston.craigslist.org/search/npo"
job_no = 0
while True:
    response = requests.get(url)
    data = response.text
    soup = BeautifulSoup(data, 'html.parser')
    jobs = soup.find_all('p', {'class': 'result-info'})
    for job in jobs:
        title = job.find('a', {'class': 'result-title'}).text
        location_tag = job.find('span', {'class': 'result-hood'})
        location = location_tag.text[2:-1] if location_tag else "N/A"
        date = job.find('time', {'class': 'result-date'}).text
        link = job.find('a', {'class': 'result-title'}).get('href')
        # Fetch the posting page itself for the description and attributes.
        job_response = requests.get(link)
        job_data = job_response.text
        job_soup = BeautifulSoup(job_data, 'html.parser')
        job_description = job_soup.find('section', {'id': 'postingbody'}).text
        job_attributes_tag = job_soup.find('p', {'class': 'attrgroup'})
        job_attributes = job_attributes_tag.text if job_attributes_tag else "N/A"
        job_no += 1
        print('Job Title:', title, '\nLocation:', location, '\nDate:', date, '\nLink:', link, "\n", job_attributes, '\nJob Description:', job_description, '\n---')
    url_tag = soup.find('a', {'title': 'next page'})
    # BUG FIX: soup.find returns None when there is no "next page" link
    # (i.e. on the last page), so url_tag.get('href') raised
    # AttributeError instead of ending the loop cleanly.
    if url_tag is not None and url_tag.get('href'):
        url = 'https://boston.craigslist.org' + url_tag.get('href')
        print(url)
    else:
        break
print("Total Jobs:", job_no)
|
[
"lamba.nikhil24@gmail.com"
] |
lamba.nikhil24@gmail.com
|
a7e3abcba7aa13ffeb4b8a1700c45db001e2c394
|
6f691b2472b56c9aac31237ce6cf96908963b4e4
|
/my_project/my_app/views.py
|
4894812f5cb3ecc9f2986ea0b1a94bdf9d665a19
|
[] |
no_license
|
Vivek-100/Python-Django-
|
303a96d38cb951d77d4e659a983200482cf33ff3
|
14dfecc142b418c17a295e6ea4ad24f9447a0a6a
|
refs/heads/master
| 2021-01-10T15:23:05.245481
| 2016-01-04T20:25:57
| 2016-01-04T20:25:57
| 49,020,911
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,113
|
py
|
from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from my_app.serializers import UserSerializer, GroupSerializer, TestSerializer, OrganizationSerializer,\
ProjectMemberSerializer, ProjectSerializer, ProjectMemberRoleSerializer, ProjectSupplementSerializer
from my_app.models import Testtable, Organization,ProjectMember, Project, ProjectMemberRole, ProjectSupplement
class UserViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.
    """
    # Newest accounts first.
    queryset = User.objects.all().order_by('-date_joined')
    serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows groups to be viewed or edited.
    """
    queryset = Group.objects.all()
    serializer_class = GroupSerializer
class TestViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Testtable records to be viewed or edited.
    """
    queryset = Testtable.objects.all()
    serializer_class = TestSerializer
class OrganizationViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows organizations to be viewed or edited.
    """
    queryset = Organization.objects.all()
    serializer_class = OrganizationSerializer
class ProjectMemberViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows project members to be viewed or edited.
    """
    queryset = ProjectMember.objects.all()
    serializer_class = ProjectMemberSerializer
class ProjectViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows projects to be viewed or edited.
    """
    queryset = Project.objects.all()
    serializer_class = ProjectSerializer
class ProjectMemberRoleViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows project member roles to be viewed or edited.
    """
    queryset = ProjectMemberRole.objects.all()
    serializer_class = ProjectMemberRoleSerializer
class ProjectSupplementViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows project supplements to be viewed or edited.
    """
    queryset = ProjectSupplement.objects.all()
    serializer_class = ProjectSupplementSerializer
|
[
"Vivek.200786@gmail.com"
] |
Vivek.200786@gmail.com
|
3513c399877852d2b68054d474a187e373787f47
|
f10506a518f3f1b383e0d74771b7a841464ad5f7
|
/src/realtweetornotbot/utils/urlutils.py
|
c06e053bad901ac22724019c413dbb60356201cf
|
[
"MIT"
] |
permissive
|
giulionf/realtweetornotbot
|
65017926744a627af971a7a846e73aa20df9cd00
|
4c6765a6992201b4b1d404cfec08d7193c52d750
|
refs/heads/master
| 2022-02-18T05:04:34.452064
| 2022-01-25T11:39:40
| 2022-01-25T11:39:40
| 162,637,686
| 89
| 7
|
MIT
| 2022-01-25T11:40:09
| 2018-12-20T22:27:13
|
Python
|
UTF-8
|
Python
| false
| false
| 655
|
py
|
import requests
# Content-Type values treated as directly linked images (see UrlUtils.is_image_url).
IMAGE_FORMATS = ("image/png", "image/jpeg", "image/jpg", "image/webp")
class UrlUtils:
    """ Helper class for URLs """

    @staticmethod
    def is_imgur_url(url):
        """ Returns true, if an image url is an IMGUR image or album """
        return "imgur.com" in url

    @staticmethod
    def is_image_url(url):
        """ Returns true if the url is to an image file """
        try:
            # HEAD avoids downloading the body; a timeout keeps the bot
            # from hanging on unresponsive hosts (was missing before).
            r = requests.head(url, timeout=10)
            if r.headers.get("content-type") in IMAGE_FORMATS:
                return True
        except requests.exceptions.RequestException as e:
            # FIX: previously only MissingSchema was caught, so connection
            # errors / timeouts crashed the caller. RequestException covers
            # MissingSchema and all other transport failures.
            print("Request failed: %s" % e)
        return False
|
[
"giulio.nf@googlemail.com"
] |
giulio.nf@googlemail.com
|
2c9587a9d350295ea691599e0cf1b8f8e1d52223
|
7558e719e2ca69404f099c90cd93f3ddda8d0614
|
/wind_repower_usa/config.py
|
a9b7e3ac6aec162a510a72b5a93c0a4ac2fd338d
|
[
"MIT"
] |
permissive
|
inwe-boku/wind-repowering-usa
|
d082699283d0a7184ec8e8be267dfb99df11f486
|
2b85dc290a80fdd60c065ebfd84def7b43466449
|
refs/heads/master
| 2021-06-18T22:18:57.234103
| 2021-02-12T16:20:42
| 2021-02-12T16:20:42
| 180,792,842
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 612
|
py
|
import pathlib

# Worker-process count used for downloading, time-series calculation etc.
NUM_PROCESSES = 8

# Temporal coverage of the dataset.
YEARS = range(2000, 2019)
MONTHS = range(1, 13)

DISTANCE_FACTORS = 2, 3, 4, 6

# Repository layout: this module lives two levels below the repo root,
# which holds the data/ and figures/ directories.
_REPO_ROOT = pathlib.Path(__file__).parent.parent
LOG_FILE = _REPO_ROOT / 'data' / 'logfile.log'
INTERIM_DIR = _REPO_ROOT / 'data' / 'interim'
EXTERNAL_DIR = _REPO_ROOT / 'data' / 'external'
FIGURES_DIR = _REPO_ROOT / 'figures'

# Default matplotlib figure size (inches).
FIGSIZE = (12, 7.5)

# Are computations for constant distance factors obsolete? If yes, they
# could be completely removed.
COMPUTE_CONSTANT_DISTANCE_FACTORS = False
|
[
"lumbric@gmail.com"
] |
lumbric@gmail.com
|
7abcfbf6270ec44fffdbbd024bf266c3138059db
|
8a642f6b7f7555b5d022674f0dfcc546873554cc
|
/cf_users/models.py
|
b00af2862014cdde141663e6a0ceabc424c7628c
|
[] |
no_license
|
robertwhaskell/CodeFellowsUser
|
cecb90630681d1636c21aa82aa676ca5e0938f2f
|
b2160c9ed22b62a1c1948f3786a86c2c5a9b1cc0
|
refs/heads/master
| 2021-03-12T21:34:07.891495
| 2014-11-18T17:51:08
| 2014-11-18T17:51:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
from django.db import models
class CFUser(models.Model):
    """A Code Fellows user: name plus contact e-mail."""

    first_name = models.CharField(max_length=200)
    last_name = models.CharField(max_length=200)
    user_email = models.CharField(max_length=200)

    def __str__(self):
        # Render as "First Last".
        return "{} {}".format(self.first_name, self.last_name)
|
[
"robertwhaskell@gmail.com"
] |
robertwhaskell@gmail.com
|
ad5e9d5c0758fadfff36a115114fadf5e4cf2117
|
d150557a870155e6b13803ccbf6a00baeeaa2938
|
/neural_nets_dsr/optim/gradient_descent_momentum.py
|
e180d353b7418adacfc3013eec1643064f75efa4
|
[] |
no_license
|
dstilesr/neural-nets-dsr
|
7b07dd88d46d8c4fe55bb1a7524d0549578b08fc
|
43880f8626fbfe4229932dca9a3dc8feab3f3256
|
refs/heads/master
| 2023-01-19T16:20:53.435702
| 2020-11-03T15:02:08
| 2020-11-03T15:02:08
| 297,650,650
| 3
| 0
| null | 2020-11-03T15:02:09
| 2020-09-22T13:03:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,802
|
py
|
import numpy as np
from ..network import NeuralNet
from typing import Union, List, Tuple
from ..cost_functions import CostFunction
from ..utils import ExpAvgAccumulator as ExpAvg
from .regularized_gradient_descent import GradientDescentL2
class GradientDescentWithMomentum(GradientDescentL2):
    """
    Mini batch gradient descent with momentum.

    Keeps one exponential moving average (ExpAvg) per layer for the weight
    and bias gradients, and steps in the direction of the averaged
    gradient. L2 regularization is inherited from GradientDescentL2.
    """
    def __init__(
            self, cost_func: Union[str, CostFunction],
            epochs: int = 600,
            learning_rate: float = 0.1,
            l2_param: float = 0.025,
            batch_size: int = 512,
            beta: float = 0.9,
            axis: int = 1,
            verbose: bool = False):
        """
        :param cost_func: Cost function to optimize.
        :param epochs: Number of full train set passes to perform.
        :param learning_rate: Step size for each parameter update.
        :param l2_param: Parameter for L2 regularization.
        :param batch_size: Minibatch size.
        :param beta: Meta parameter for momentum term.
        :param axis: Axis along which training examples are laid out.
        :param verbose: Print cost every 100 epochs.
        """
        assert 0. < beta < 1., "Invalid beta parameter! Must satisfy 0 < beta < 1."
        super().__init__(
            cost_func,
            epochs,
            batch_size=batch_size,
            learning_rate=learning_rate,
            l2_param=l2_param,
            axis=axis,
            verbose=verbose
        )
        # NOTE(review): batch_size is already handed to the base class
        # above; this extra copy looks redundant — confirm nothing reads it.
        self._batch_size = batch_size
        self._beta = beta
        # Per-layer exponential moving averages of bias / weight gradients,
        # filled in by __call__.
        self._mom_b: List[ExpAvg] = []
        self._mom_w: List[ExpAvg] = []

    @property
    def beta(self) -> float:
        # Momentum decay coefficient (read-only).
        return self._beta

    def get_updates(
            self,
            w: np.ndarray,
            b: np.ndarray,
            dw: np.ndarray,
            db: np.ndarray,
            lyr_index: int = -1) -> Tuple[np.ndarray, np.ndarray]:
        """
        Compute the momentum-smoothed update for one layer.

        :param w: Current layer weights.
        :param b: Current layer biases.
        :param dw: Gradient of the cost w.r.t. the weights.
        :param db: Gradient of the cost w.r.t. the biases.
        :param lyr_index: Index of this layer's accumulators.
        :return: Tuple (new weights, new biases).
        """
        # Fold the fresh gradients into the running averages first.
        self._mom_w[lyr_index].update_value(dw)
        self._mom_b[lyr_index].update_value(db)
        wreg = self.l2_param * w  # gradient of the L2 penalty
        wnew = w - self.learning_rate * (self._mom_w[lyr_index].value + wreg)
        bnew = b - self.learning_rate * self._mom_b[lyr_index].value
        return wnew, bnew

    def __call__(
            self,
            network: NeuralNet,
            x: np.ndarray,
            y: np.ndarray) -> NeuralNet:
        """
        Fit `network` to (x, y) using momentum gradient descent.

        NOTE(review): the accumulator lists are appended to on every call,
        so invoking the same optimizer instance twice leaves stale entries
        at the indexed positions — confirm single-use is intended.

        :param network: Network whose layers will be trained.
        :param x: Input data.
        :param y: Target values.
        :return: The trained network.
        """
        for lyr in network.layers:
            self._mom_w.append(
                ExpAvg.create(lyr.weights.shape, self.beta)
            )
            self._mom_b.append(
                ExpAvg.create(lyr.biases.shape, self.beta)
            )
        return super().__call__(network, x, y)
|
[
"d.stiles.r@hotmail.com"
] |
d.stiles.r@hotmail.com
|
7168ef1d67ba0aec51dcd92359bfc7c437414145
|
b00a2c1e97bb52cdab1fa217797ae1bb42fea759
|
/Endpoint.py
|
025ce7e9d95801700620322ff1ce0df35ed7e65d
|
[] |
no_license
|
nunu2021/DijkstraPathFinding
|
365ccfa5d3dc10db9758f6c8b6d559dadded8cdb
|
fc116bc6e7f8e2ee2f839635dfe548f9d7223433
|
refs/heads/master
| 2023-06-06T03:42:55.686814
| 2021-07-03T17:19:57
| 2021-07-03T17:19:57
| 382,671,921
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
class Endpoint:
    """A colored start/end marker with an (x, y) grid position."""

    def __init__(self, which_endpoint, color, x, y, complete_bool):
        """
        :param which_endpoint: identifier for which endpoint this is
        :param color: display color of the endpoint
        :param x: initial x coordinate
        :param y: initial y coordinate
        :param complete_bool: whether this endpoint's path is complete
        """
        self.whichendpoint = which_endpoint
        self.color = color
        # BUG FIX: tuple(x, y) raises TypeError (tuple() takes a single
        # iterable argument); a tuple literal was intended.
        self.position = (x, y)
        self.complete = complete_bool

    def movePosition(self, x, y):
        """Translate the endpoint's position by (x, y)."""
        px, py = self.position
        self.position = (px + x, py + y)
|
[
"labdhijain753@gmail.com"
] |
labdhijain753@gmail.com
|
c067f5c56b4b21b157bbfbec677d387146de15be
|
8b52e3aec569de2ead2ab25136ae40f579bfd1a6
|
/simpleResult.py
|
db872d3978959084cd03b871dbc27eb072d803b6
|
[] |
no_license
|
k-simons/fantasy
|
4dc0201245222e59f8d52788c740e1f82f20cfee
|
f8b0bb9cf19b551c67ba260305f5a48f1d83835b
|
refs/heads/master
| 2021-09-01T00:03:11.031647
| 2017-12-23T16:50:57
| 2017-12-23T16:50:57
| 115,209,122
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
class SimpleResult:
    """Score line for one matchup: my points versus an opponent's."""

    def __init__(self, myPoints, opponentPoints, opponentId):
        self.myPoints = myPoints
        self.opponentPoints = opponentPoints
        self.opponentId = opponentId

    def __str__(self):
        # Same rendering as the original string concatenation, expressed
        # as a single format string.
        return "myPoints: {}, opponentPoints: {}, opponentId: {}".format(
            self.myPoints, self.opponentPoints, self.opponentId)
|
[
"ksimons@palantir.com"
] |
ksimons@palantir.com
|
2a267560a422f7c6eff4da4d5177892beb9c99f9
|
abeec076f89231c4dd589e84def8301e653d6e20
|
/orders/views.DEP.py
|
9ac624bc2133c17490ffaf2dc25abdf9178452e3
|
[] |
no_license
|
gibil5/pcm_restaurant
|
1cde6ee2780d3aa39dbc26dd9583f8465a1ff13a
|
a56ec01c533ed2b6e198de9813f9518a3eca2d14
|
refs/heads/master
| 2020-08-29T20:10:13.606229
| 2019-12-01T19:48:47
| 2019-12-01T19:48:47
| 218,160,478
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 982
|
py
|
def add_order(request, employee_id):
    """
    Create an Order for the cook identified by employee_id.

    GET renders a blank NewOrderForm pre-populated with the cook and the
    first table; POST validates the submitted form, saves the order and
    redirects to the thanks page.

    NOTE(review): names used here (get_object_or_404, Employee, Table,
    Order, lib, render, HttpResponse, HttpResponseRedirect) come from
    imports elsewhere in this file, not visible in this chunk.
    """
    print()
    print('Add order')
    title = 'Add Order'
    cook = get_object_or_404(Employee, pk=employee_id)  # Get Object
    print(cook)
    table = Table.objects.first()
    # Create and populate
    if request.method == 'POST':
        print('Create and populate')
        form = lib.NewOrderForm(request.POST)
        if form.is_valid():
            # NOTE(review): a second form is built from the same POST data
            # instead of reusing `form`, and cook_id is set on the form
            # object rather than on the saved model instance — it likely
            # has no effect on the stored order. Verify intent.
            form_instance = lib.NewOrderForm(request.POST)
            form_instance.cook_id = 1
            new_order = form_instance.save()
            return HttpResponseRedirect('/orders/thanks/')
        # If the form is invalid we fall through and re-render it below.
    # Create a blank form
    else:
        order = Order()
        #order = Order.objects.create(cook=cook)
        #order.save()
        #form = lib.NewOrderForm(instance=order)
        form = lib.NewOrderForm(
            instance=order,
            initial={
                'cook': cook,
                'table': table,
            },
        )
        #form.cook = cook
    ctx = {
        'title': title,
        'form': form,
    }
    output = render(request, 'orders/add.html', ctx)
    return HttpResponse(output)
|
[
"jrevilla55@gmail.com"
] |
jrevilla55@gmail.com
|
3057eac6538051f09b7e1a9b13902ca179d67bc2
|
6fe54e2c73fab48becf7d3d0f974c801a15429c1
|
/ControleFinanceiro/sistema/models.py
|
5c77148fd52e05aa076778c6126be5d36ed4d763
|
[] |
no_license
|
fczanardo/django
|
be134bd7ca9b98af2dc7b0a20d5c134b8fd2b2de
|
9bddaa45e010cc589cc9fa011083e137182dc9c9
|
refs/heads/master
| 2021-01-10T12:12:39.284682
| 2015-10-17T13:25:15
| 2015-10-17T13:25:15
| 44,437,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
class TipoDespesa(models.Model):
    """An expense category (e.g. food, transport)."""
    # Human-readable category name.
    nome = models.CharField(max_length=100)

    def __str__(self):
        # BUG FIX: under Python 3 `nome` is already str and has no
        # .decode(), so the old `self.nome.decode("utf-8")` raised
        # AttributeError whenever the object was rendered.
        return self.nome
class Despesa(models.Model):
    """A single expense entry: category, date and amount."""
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0;
    # newer Django requires on_delete explicitly.
    tipoDespesa = models.ForeignKey(TipoDespesa)
    data = models.DateField()
    valor = models.FloatField()
class Ganho(models.Model):
    """An income entry: free-text type, date and amount."""
    tipo = models.CharField(max_length=100)
    data = models.DateField()
    valor = models.FloatField()
class GeraRelatorio(models.Model):
    """Marker record for a generated report; stores only the report date."""
    data = models.DateField()
|
[
"fczanardo@gmail.com"
] |
fczanardo@gmail.com
|
79062eb15e440d5eabf1579ae5b439589bb6db1b
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/statsmodels/2015/8/ar_model.py
|
087a9e037a3426c2ccc54e0b8158c690ff99e06c
|
[] |
no_license
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005
| 2021-01-28T19:40:51
| 2021-01-28T19:40:51
| 306,497,459
| 1
| 1
| null | 2020-11-24T20:56:18
| 2020-10-23T01:18:07
| null |
UTF-8
|
Python
| false
| false
| 34,034
|
py
|
from __future__ import division
from statsmodels.compat.python import iteritems, range, string_types, lmap
import numpy as np
from numpy import dot, identity
from numpy.linalg import inv, slogdet
from scipy.stats import norm
from statsmodels.regression.linear_model import OLS
from statsmodels.tsa.tsatools import (lagmat, add_trend,
_ar_transparams, _ar_invtransparams)
import statsmodels.tsa.base.tsa_model as tsbase
import statsmodels.base.model as base
from statsmodels.tools.decorators import (resettable_cache,
cache_readonly, cache_writable)
from statsmodels.tools.numdiff import approx_fprime, approx_hess
from statsmodels.tsa.kalmanf.kalmanfilter import KalmanFilter
import statsmodels.base.wrapper as wrap
from statsmodels.tsa.vector_ar import util
from statsmodels.tsa.base.datetools import _index_date
__all__ = ['AR']
def sumofsq(x, axis=0):
    """Helper function to calculate sum of squares along first axis"""
    # BUG FIX: the axis argument was accepted but ignored (axis=0 was
    # hard-coded in the np.sum call); honor it so callers can reduce
    # along other axes. The default behavior is unchanged.
    return np.sum(x**2, axis=axis)
def _check_ar_start(start, k_ar, method, dynamic):
if (method == 'cmle' or dynamic) and start < k_ar:
raise ValueError("Start must be >= k_ar for conditional MLE "
"or dynamic forecast. Got %d" % start)
def _validate(start, k_ar, dates, method):
    """
    Checks the date and then returns an integer

    Resolves a string/datetime `start` to its integer position in `dates`
    and enforces that non-'mle' methods do not start inside the presample.
    """
    from datetime import datetime
    if isinstance(start, (string_types, datetime)):
        start_date = start
        start = _index_date(start, dates)
        # NOTE(review): start_date is only bound on the date branch; if an
        # integer start < k_ar ever reached the check below with a
        # non-'mle' method, the error path would raise NameError instead —
        # confirm integer starts are pre-validated by callers.
        if 'mle' not in method and start < k_ar:
            raise ValueError("Start must be >= k_ar for conditional MLE or "
                             "dynamic forecast. Got %s" % start_date)
    return start
def _ar_predict_out_of_sample(y, params, p, k_trend, steps, start=0):
mu = params[:k_trend] or 0 # only have to worry about constant
arparams = params[k_trend:][::-1] # reverse for dot
# dynamic endogenous variable
endog = np.zeros(p + steps) # this is one too big but doesn't matter
if start:
endog[:p] = y[start-p:start]
else:
endog[:p] = y[-p:]
forecast = np.zeros(steps)
for i in range(steps):
fcast = mu + np.dot(arparams, endog[i:i+p])
forecast[i] = fcast
endog[i + p] = fcast
return forecast
class AR(tsbase.TimeSeriesModel):
__doc__ = tsbase._tsa_doc % {"model" : "Autoregressive AR(p) model",
"params" : """endog : array-like
1-d endogenous response variable. The independent variable.""",
"extra_params" : base._missing_param_doc,
"extra_sections" : ""}
    def __init__(self, endog, dates=None, freq=None, missing='none'):
        """Set up the AR model, coercing `endog` to an (nobs, 1) column."""
        super(AR, self).__init__(endog, None, dates, freq, missing=missing)
        endog = self.endog  # original might not have been an ndarray
        if endog.ndim == 1:
            endog = endog[:, None]
            self.endog = endog  # to get shapes right
        elif endog.ndim > 1 and endog.shape[1] != 1:
            raise ValueError("Only the univariate case is implemented")
    def initialize(self):
        # Nothing to do beyond base-class setup; present to satisfy the
        # model interface.
        pass
    def _transparams(self, params):
        """
        Transforms params to induce stationarity/invertability.

        Only the k_ar lag coefficients (after the k_trend leading terms)
        are transformed; trend parameters pass through unchanged.

        Reference
        ---------
        Jones(1980)
        """
        p = self.k_ar
        k = self.k_trend
        newparams = params.copy()
        newparams[k:k+p] = _ar_transparams(params[k:k+p].copy())
        return newparams
    def _invtransparams(self, start_params):
        """
        Inverse of the Jones reparameterization

        Maps constrained (stationary) lag coefficients back to the
        unconstrained optimizer space.
        """
        p = self.k_ar
        k = self.k_trend
        newparams = start_params.copy()
        newparams[k:k+p] = _ar_invtransparams(start_params[k:k+p].copy())
        return newparams
    def _presample_fit(self, params, start, p, end, y, predictedvalues):
        """
        Return the pre-sample predicted values using the Kalman Filter

        Writes the filtered one-step-ahead predictions for the demeaned
        presample observations into `predictedvalues` in place.

        Notes
        -----
        See predict method for how to use start and p.
        """
        k = self.k_trend
        # build system matrices
        T_mat = KalmanFilter.T(params, p, k, p)
        R_mat = KalmanFilter.R(params, p, k, 0, p)
        # Initial State mean and variance
        alpha = np.zeros((p, 1))
        # Unconditional state covariance solved from the discrete Lyapunov
        # equation via the vec/Kronecker identity.
        Q_0 = dot(inv(identity(p**2)-np.kron(T_mat, T_mat)),
                  dot(R_mat, R_mat.T).ravel('F'))
        Q_0 = Q_0.reshape(p, p, order='F')  # TODO: order might need to be p+k
        P = Q_0
        Z_mat = KalmanFilter.Z(p)
        for i in range(end):  # iterate p-1 times to fit presample
            v_mat = y[i] - dot(Z_mat, alpha)  # innovation
            F_mat = dot(dot(Z_mat, P), Z_mat.T)  # innovation variance
            Finv = 1./F_mat  # inv. always scalar
            K = dot(dot(dot(T_mat, P), Z_mat.T), Finv)  # Kalman gain
            # update state
            alpha = dot(T_mat, alpha) + dot(K, v_mat)
            L = T_mat - dot(K, Z_mat)
            P = dot(dot(T_mat, P), L.T) + dot(R_mat, R_mat.T)
            #P[0,0] += 1 # for MA part, R_mat.R_mat.T above
            if i >= start - 1:  # only record if we ask for it
                predictedvalues[i + 1 - start] = dot(Z_mat, alpha)
    def _get_predict_start(self, start, dynamic):
        """Resolve `start` (None, int, or date) to a validated integer index."""
        method = getattr(self, 'method', 'mle')
        k_ar = getattr(self, 'k_ar', 0)
        if start is None:
            if method == 'mle' and not dynamic:
                start = 0
            else:  # can't do presample fit for cmle or dynamic
                start = k_ar
        elif isinstance(start, int):
            start = super(AR, self)._get_predict_start(start)
        else:  # should be a date
            start = _validate(start, k_ar, self.data.dates, method)
            start = super(AR, self)._get_predict_start(start)
        _check_ar_start(start, k_ar, method, dynamic)
        self._set_predict_start_date(start)
        return start
    def predict(self, params, start=None, end=None, dynamic=False):
        """
        Returns in-sample and out-of-sample prediction.

        Parameters
        ----------
        params : array
            The fitted model parameters.
        start : int, str, or datetime
            Zero-indexed observation number at which to start forecasting, ie.,
            the first forecast is start. Can also be a date string to
            parse or a datetime type.
        end : int, str, or datetime
            Zero-indexed observation number at which to end forecasting, ie.,
            the first forecast is start. Can also be a date string to
            parse or a datetime type.
        dynamic : bool
            The `dynamic` keyword affects in-sample prediction. If dynamic
            is False, then the in-sample lagged values are used for
            prediction. If `dynamic` is True, then in-sample forecasts are
            used in place of lagged dependent variables. The first forecasted
            value is `start`.

        Returns
        -------
        predicted values : array

        Notes
        -----
        The linear Gaussian Kalman filter is used to return pre-sample fitted
        values. The exact initial Kalman Filter is used. See Durbin and Koopman
        in the references for more information.
        """
        # will return an index of a date
        start = self._get_predict_start(start, dynamic)
        end, out_of_sample = self._get_predict_end(end)
        # NOTE(review): this only rejects end < start - 1; end == start - 1
        # (an empty range) slips through — confirm that is intentional.
        if start - end > 1:
            raise ValueError("end is before start")
        k_ar = self.k_ar
        k_trend = self.k_trend
        method = self.method
        endog = self.endog.squeeze()
        if dynamic:
            # Dynamic prediction is a pure out-of-sample recursion seeded
            # at `start`.
            out_of_sample += end - start + 1
            return _ar_predict_out_of_sample(endog, params, k_ar,
                                             k_trend, out_of_sample, start)
        predictedvalues = np.zeros(end + 1 - start)
        # fit pre-sample
        if method == 'mle':  # use Kalman Filter to get initial values
            if k_trend:
                # Unconditional mean of the AR process.
                # NOTE(review): mu stays unbound when k_trend == 0 but is
                # used below — confirm mle always fits with a trend term.
                mu = params[0]/(1-np.sum(params[k_trend:]))
            # modifies predictedvalues in place
            if start < k_ar:
                self._presample_fit(params, start, k_ar, min(k_ar-1, end),
                                    endog[:k_ar] - mu, predictedvalues)
                predictedvalues[:k_ar-start] += mu
                if end < k_ar:
                    return predictedvalues
        # just do the whole thing and truncate
        fittedvalues = dot(self.X, params)
        pv_start = max(k_ar - start, 0)
        fv_start = max(start - k_ar, 0)
        fv_end = min(len(fittedvalues), end-k_ar+1)
        predictedvalues[pv_start:] = fittedvalues[fv_start:fv_end]
        if out_of_sample:
            forecastvalues = _ar_predict_out_of_sample(endog, params,
                                                       k_ar, k_trend,
                                                       out_of_sample)
            predictedvalues = np.r_[predictedvalues, forecastvalues]
        return predictedvalues
    def _presample_varcov(self, params):
        """
        Returns the inverse of the presample variance-covariance.

        Notes
        -----
        See Hamilton p. 125
        """
        k = self.k_trend
        p = self.k_ar
        p1 = p+1
        # get inv(Vp) Hamilton 5.3.7
        params0 = np.r_[-1, params[k:]]
        Vpinv = np.zeros((p, p), dtype=params.dtype)
        for i in range(1, p1):
            # NOTE(review): np.correlate's `old_behavior` keyword was
            # removed in later NumPy releases — this code requires the
            # NumPy of its era; verify before running on modern NumPy.
            Vpinv[i-1, i-1:] = np.correlate(params0, params0[:i],
                                            old_behavior=False)[:-1]
            Vpinv[i-1, i-1:] -= np.correlate(params0[-i:], params0,
                                             old_behavior=False)[:-1]
        # Fill in the symmetric lower triangle from the upper one.
        Vpinv = Vpinv + Vpinv.T - np.diag(Vpinv.diagonal())
        return Vpinv
    def _loglike_css(self, params):
        """
        Loglikelihood of AR(p) process using conditional sum of squares
        """
        nobs = self.nobs
        Y = self.Y
        X = self.X
        # Conditional sum of squared residuals of the lagged regression.
        ssr = sumofsq(Y.squeeze() - np.dot(X, params))
        sigma2 = ssr/nobs  # concentrated variance estimate
        return (-nobs/2 * (np.log(2 * np.pi) + np.log(sigma2)) -
                ssr/(2 * sigma2))
    def _loglike_mle(self, params):
        """
        Loglikelihood of AR(p) process using exact maximum likelihood
        """
        nobs = self.nobs
        X = self.X
        endog = self.endog
        k_ar = self.k_ar
        k_trend = self.k_trend
        # reparameterize according to Jones (1980) like in ARMA/Kalman Filter
        if self.transparams:
            params = self._transparams(params)
        # get mean and variance for pre-sample lags
        yp = endog[:k_ar].copy()
        if k_trend:
            c = [params[0]] * k_ar
        else:
            c = [0]
        mup = np.asarray(c / (1 - np.sum(params[k_trend:])))
        diffp = yp - mup[:, None]
        # get inv(Vp) Hamilton 5.3.7
        Vpinv = self._presample_varcov(params)
        # Quadratic form of the presample deviations, reduced to a scalar.
        diffpVpinv = np.dot(np.dot(diffp.T, Vpinv), diffp).item()
        ssr = sumofsq(endog[k_ar:].squeeze() - np.dot(X, params))
        # concentrating the likelihood means that sigma2 is given by
        sigma2 = 1./nobs * (diffpVpinv + ssr)
        self.sigma2 = sigma2
        logdet = slogdet(Vpinv)[1]  # TODO: add check for singularity
        loglike = -1/2. * (nobs * (np.log(2 * np.pi) + np.log(sigma2)) -
                           logdet + diffpVpinv / sigma2 + ssr / sigma2)
        return loglike
    def loglike(self, params):
        """
        The loglikelihood of an AR(p) process

        Parameters
        ----------
        params : array
            The fitted parameters of the AR model

        Returns
        -------
        llf : float
            The loglikelihood evaluated at `params`

        Notes
        -----
        Contains constant term. If the model is fit by OLS then this returns
        the conditonal maximum likelihood.

        .. math:: \\frac{\\left(n-p\\right)}{2}\\left(\\log\\left(2\\pi\\right)+\\log\\left(\\sigma^{2}\\right)\\right)-\\frac{1}{\\sigma^{2}}\\sum_{i}\\epsilon_{i}^{2}

        If it is fit by MLE then the (exact) unconditional maximum likelihood
        is returned.

        .. math:: -\\frac{n}{2}log\\left(2\\pi\\right)-\\frac{n}{2}\\log\\left(\\sigma^{2}\\right)+\\frac{1}{2}\\left|V_{p}^{-1}\\right|-\\frac{1}{2\\sigma^{2}}\\left(y_{p}-\\mu_{p}\\right)^{\\prime}V_{p}^{-1}\\left(y_{p}-\\mu_{p}\\right)-\\frac{1}{2\\sigma^{2}}\\sum_{t=p+1}^{n}\\epsilon_{i}^{2}

        where

        :math:`\\mu_{p}` is a (`p` x 1) vector with each element equal to the
        mean of the AR process and :math:`\\sigma^{2}V_{p}` is the (`p` x `p`)
        variance-covariance matrix of the first `p` observations.
        """
        #TODO: Math is on Hamilton ~pp 124-5
        # Dispatch on the estimation method selected in fit().
        if self.method == "cmle":
            return self._loglike_css(params)
        else:
            return self._loglike_mle(params)
def score(self, params):
"""
Return the gradient of the loglikelihood at params.
Parameters
----------
params : array-like
The parameter values at which to evaluate the score function.
Notes
-----
Returns numerical gradient.
"""
loglike = self.loglike
return approx_fprime(params, loglike, epsilon=1e-8)
    def information(self, params):
        """
        Not Implemented Yet
        """
        # Placeholder: the Fisher information matrix has not been implemented;
        # callers receive None.
        return
def hessian(self, params):
"""
Returns numerical hessian for now.
"""
loglike = self.loglike
return approx_hess(params, loglike)
def _stackX(self, k_ar, trend):
"""
Private method to build the RHS matrix for estimation.
Columns are trend terms then lags.
"""
endog = self.endog
X = lagmat(endog, maxlag=k_ar, trim='both')
k_trend = util.get_trendorder(trend)
if k_trend:
X = add_trend(X, prepend=True, trend=trend)
self.k_trend = k_trend
return X
def select_order(self, maxlag, ic, trend='c', method='mle'):
"""
Select the lag order according to the information criterion.
Parameters
----------
maxlag : int
The highest lag length tried. See `AR.fit`.
ic : str {'aic','bic','hqic','t-stat'}
Criterion used for selecting the optimal lag length.
See `AR.fit`.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' - include constant.
'nc' - no constant.
Returns
-------
bestlag : int
Best lag according to IC.
"""
endog = self.endog
# make Y and X with same nobs to compare ICs
Y = endog[maxlag:]
self.Y = Y # attach to get correct fit stats
X = self._stackX(maxlag, trend) # sets k_trend
self.X = X
k = self.k_trend # k_trend set in _stackX
k = max(1, k) # handle if startlag is 0
results = {}
if ic != 't-stat':
for lag in range(k, maxlag+1):
# have to reinstantiate the model to keep comparable models
endog_tmp = endog[maxlag-lag:]
fit = AR(endog_tmp).fit(maxlag=lag, method=method,
full_output=0, trend=trend,
maxiter=100, disp=0)
results[lag] = eval('fit.'+ic)
bestic, bestlag = min((res, k) for k, res in iteritems(results))
else: # choose by last t-stat.
stop = 1.6448536269514722 # for t-stat, norm.ppf(.95)
for lag in range(maxlag, k - 1, -1):
# have to reinstantiate the model to keep comparable models
endog_tmp = endog[maxlag - lag:]
fit = AR(endog_tmp).fit(maxlag=lag, method=method,
full_output=0, trend=trend,
maxiter=35, disp=-1)
if np.abs(fit.tvalues[-1]) >= stop:
bestlag = lag
break
return bestlag
    def fit(self, maxlag=None, method='cmle', ic=None, trend='c',
            transparams=True, start_params=None, solver='lbfgs', maxiter=35,
            full_output=1, disp=1, callback=None, **kwargs):
        """
        Fit the unconditional maximum likelihood of an AR(p) process.

        Parameters
        ----------
        maxlag : int
            If `ic` is None, then maxlag is the lag length used in fit.  If
            `ic` is specified then maxlag is the highest lag order used to
            select the correct lag order.  If maxlag is None, the default is
            round(12*(nobs/100.)**(1/4.))
        method : str {'cmle', 'mle'}, optional
            cmle - Conditional maximum likelihood using OLS
            mle - Unconditional (exact) maximum likelihood.  See `solver`
            and the Notes.
        ic : str {'aic','bic','hic','t-stat'}
            Criterion used for selecting the optimal lag length.
            aic - Akaike Information Criterion
            bic - Bayes Information Criterion
            t-stat - Based on last lag
            hqic - Hannan-Quinn Information Criterion
            If any of the information criteria are selected, the lag length
            which results in the lowest value is selected.  If t-stat, the
            model starts with maxlag and drops a lag until the highest lag
            has a t-stat that is significant at the 95 % level.
        trend : str {'c','nc'}
            Whether to include a constant or not. 'c' - include constant.
            'nc' - no constant.

        The below can be specified if method is 'mle'

        transparams : bool, optional
            Whether or not to transform the parameters to ensure stationarity.
            Uses the transformation suggested in Jones (1980).
        start_params : array-like, optional
            A first guess on the parameters.  Default is cmle estimates.
        solver : str or None, optional
            Solver to be used if method is 'mle'.  The default is 'lbfgs'
            (limited memory Broyden-Fletcher-Goldfarb-Shanno).  Other choices
            are 'bfgs', 'newton' (Newton-Raphson), 'nm' (Nelder-Mead),
            'cg' - (conjugate gradient), 'ncg' (non-conjugate gradient),
            and 'powell'.
        maxiter : int, optional
            The maximum number of function evaluations. Default is 35.
        tol : float
            The convergence tolerance.  Default is 1e-08.
        full_output : bool, optional
            If True, all output from solver will be available in
            the Results object's mle_retvals attribute.  Output is dependent
            on the solver.  See Notes for more information.
        disp : bool, optional
            If True, convergence information is output.
        callback : function, optional
            Called after each iteration as callback(xk) where xk is the current
            parameter vector.
        kwargs
            See Notes for keyword arguments that can be passed to fit.

        References
        ----------
        Jones, R.H. 1980 "Maximum likelihood fitting of ARMA models to time
            series with missing observations."  `Technometrics`.  22.3.
            389-95.

        See also
        --------
        statsmodels.base.model.LikelihoodModel.fit
        """
        # Validate and record the estimation options on the model instance;
        # downstream methods (loglike, predict, Results) read them from self.
        method = method.lower()
        if method not in ['cmle', 'yw', 'mle']:
            raise ValueError("Method %s not recognized" % method)
        self.method = method
        self.trend = trend
        self.transparams = transparams
        nobs = len(self.endog)  # overwritten if method is 'cmle'
        endog = self.endog

        if maxlag is None:
            maxlag = int(round(12*(nobs/100.)**(1/4.)))
        k_ar = maxlag  # stays this if ic is None

        # select lag length
        if ic is not None:
            ic = ic.lower()
            if ic not in ['aic', 'bic', 'hqic', 't-stat']:
                raise ValueError("ic option %s not understood" % ic)
            k_ar = self.select_order(k_ar, ic, trend, method)

        self.k_ar = k_ar  # change to what was chosen by ic

        # redo estimation for best lag
        # make LHS
        Y = endog[k_ar:, :]
        # make lagged RHS
        X = self._stackX(k_ar, trend)  # sets self.k_trend
        k_trend = self.k_trend
        self.exog_names = util.make_lag_names(self.endog_names, k_ar, k_trend)
        self.Y = Y
        self.X = X

        if method == "cmle":  # do OLS
            arfit = OLS(Y, X).fit()
            params = arfit.params
            self.nobs = nobs - k_ar
            self.sigma2 = arfit.ssr/arfit.nobs  # needed for predict fcasterr
        elif method == "mle":
            solver = solver.lower()
            self.nobs = nobs
            if start_params is None:
                start_params = OLS(Y, X).fit().params
            else:
                if len(start_params) != k_trend + k_ar:
                    raise ValueError("Length of start params is %d. There"
                                     " are %d parameters." %
                                     (len(start_params), k_trend + k_ar))
            # Map start values into the unconstrained space used by the
            # optimizer (inverse of the Jones (1980) transform).
            start_params = self._invtransparams(start_params)
            if solver == 'lbfgs':  # l_bfgs_b-specific defaults
                kwargs.setdefault('pgtol', 1e-8)
                kwargs.setdefault('factr', 1e2)
                kwargs.setdefault('m', 12)
                kwargs.setdefault('approx_grad', True)
            mlefit = super(AR, self).fit(start_params=start_params,
                                         method=solver, maxiter=maxiter,
                                         full_output=full_output, disp=disp,
                                         callback=callback, **kwargs)

            params = mlefit.params
            if self.transparams:
                # Map back to the constrained (stationary) parameter space.
                params = self._transparams(params)
                self.transparams = False  # turn off now for other results

        # don't use yw, because we can't estimate the constant
        #elif method == "yw":
        #    params, omega = yule_walker(endog, order=maxlag,
        #                                method="mle", demean=False)
        #    # how to handle inference after Yule-Walker?
        #    self.params = params #TODO: don't attach here
        #    self.omega = omega

        pinv_exog = np.linalg.pinv(X)
        normalized_cov_params = np.dot(pinv_exog, pinv_exog.T)
        arfit = ARResults(self, params, normalized_cov_params)
        if method == 'mle' and full_output:
            arfit.mle_retvals = mlefit.mle_retvals
            arfit.mle_settings = mlefit.mle_settings
        return ARResultsWrapper(arfit)
class ARResults(tsbase.TimeSeriesModelResults):
    """
    Class to hold results from fitting an AR model.

    Parameters
    ----------
    model : AR Model instance
        Reference to the model that is fit.
    params : array
        The fitted parameters from the AR Model.
    normalized_cov_params : array
        inv(dot(X.T,X)) where X is the lagged values.
    scale : float, optional
        An estimate of the scale of the model.

    Returns
    -------
    **Attributes**

    aic : float
        Akaike Information Criterion using Lutkephol's definition.
        :math:`log(sigma) + 2*(1 + k_ar + k_trend)/nobs`
    bic : float
        Bayes Information Criterion
        :math:`\\log(\\sigma) + (1 + k_ar + k_trend)*\\log(nobs)/nobs`
    bse : array
        The standard errors of the estimated parameters. If `method` is 'cmle',
        then the standard errors that are returned are the OLS standard errors
        of the coefficients. If the `method` is 'mle' then they are computed
        using the numerical Hessian.
    fittedvalues : array
        The in-sample predicted values of the fitted AR model. The `k_ar`
        initial values are computed via the Kalman Filter if the model is
        fit by `mle`.
    fpe : float
        Final prediction error using Lutkepohl's definition
        ((n_totobs+k_trend)/(n_totobs-k_ar-k_trend))*sigma
    hqic : float
        Hannan-Quinn Information Criterion.
    k_ar : float
        Lag length. Sometimes used as `p` in the docs.
    k_trend : float
        The number of trend terms included. 'nc'=0, 'c'=1.
    llf : float
        The loglikelihood of the model evaluated at `params`. See `AR.loglike`
    model : AR model instance
        A reference to the fitted AR model.
    nobs : float
        The number of available observations `nobs` - `k_ar`
    n_totobs : float
        The number of total observations in `endog`. Sometimes `n` in the docs.
    params : array
        The fitted parameters of the model.
    pvalues : array
        The p values associated with the standard errors.
    resid : array
        The residuals of the model. If the model is fit by 'mle' then the
        pre-sample residuals are calculated using fittedvalues from the Kalman
        Filter.
    roots : array
        The roots of the AR process are the solution to
        (1 - arparams[0]*z - arparams[1]*z**2 -...- arparams[p-1]*z**k_ar) = 0
        Stability requires that the roots in modulus lie outside the unit
        circle.
    scale : float
        Same as sigma2
    sigma2 : float
        The variance of the innovations (residuals).
    trendorder : int
        The polynomial order of the trend. 'nc' = None, 'c' or 't' = 0,
        'ct' = 1, etc.
    tvalues : array
        The t-values associated with `params`.
    """

    _cache = {}  # for scale setter

    def __init__(self, model, params, normalized_cov_params=None, scale=1.):
        super(ARResults, self).__init__(model, params, normalized_cov_params,
                                        scale)
        self._cache = resettable_cache()
        # Copy the dimensions/design matrices off the model so the results
        # object remains self-describing after the model is refit.
        self.nobs = model.nobs
        n_totobs = len(model.endog)
        self.n_totobs = n_totobs
        self.X = model.X  # copy?
        self.Y = model.Y
        k_ar = model.k_ar
        self.k_ar = k_ar
        k_trend = model.k_trend
        self.k_trend = k_trend
        trendorder = None
        if k_trend > 0:
            trendorder = k_trend - 1
        self.trendorder = trendorder
        #TODO: cmle vs mle?
        self.df_model = k_ar + k_trend
        self.df_resid = self.model.df_resid = n_totobs - self.df_model

    @cache_writable()
    def sigma2(self):
        # Innovation variance; cmle applies its own normalization here,
        # mle stores sigma2 on the model during likelihood evaluation.
        model = self.model
        if model.method == "cmle":  # do DOF correction
            return 1. / self.nobs * sumofsq(self.resid)
        else:
            return self.model.sigma2

    @cache_writable()  # for compatability with RegressionResults
    def scale(self):
        return self.sigma2

    @cache_readonly
    def bse(self):  # allow user to specify?
        if self.model.method == "cmle":  # uses different scale/sigma def.
            resid = self.resid
            ssr = np.dot(resid, resid)
            ols_scale = ssr / (self.nobs - self.k_ar - self.k_trend)
            return np.sqrt(np.diag(self.cov_params(scale=ols_scale)))
        else:
            # mle: standard errors from the numerical Hessian at the optimum
            hess = approx_hess(self.params, self.model.loglike)
            return np.sqrt(np.diag(-np.linalg.inv(hess)))

    @cache_readonly
    def pvalues(self):
        # Two-sided p-values from the asymptotic normal distribution.
        return norm.sf(np.abs(self.tvalues))*2

    @cache_readonly
    def aic(self):
        #JP: this is based on loglike with dropped constant terms ?
        # Lutkepohl
        #return np.log(self.sigma2) + 1./self.model.nobs * self.k_ar
        # Include constant as estimated free parameter and double the loss
        return np.log(self.sigma2) + 2 * (1 + self.df_model)/self.nobs
        # Stata defintion
        #nobs = self.nobs
        #return -2 * self.llf/nobs + 2 * (self.k_ar+self.k_trend)/nobs

    @cache_readonly
    def hqic(self):
        nobs = self.nobs
        # Lutkepohl
        # return np.log(self.sigma2)+ 2 * np.log(np.log(nobs))/nobs * self.k_ar
        # R uses all estimated parameters rather than just lags
        return (np.log(self.sigma2) + 2 * np.log(np.log(nobs))/nobs *
                (1 + self.df_model))
        # Stata
        #nobs = self.nobs
        #return -2 * self.llf/nobs + 2 * np.log(np.log(nobs))/nobs * \
        #        (self.k_ar + self.k_trend)

    @cache_readonly
    def fpe(self):
        nobs = self.nobs
        df_model = self.df_model
        #Lutkepohl
        return ((nobs+df_model)/(nobs-df_model))*self.sigma2

    @cache_readonly
    def bic(self):
        nobs = self.nobs
        # Lutkepohl
        #return np.log(self.sigma2) + np.log(nobs)/nobs * self.k_ar
        # Include constant as est. free parameter
        return np.log(self.sigma2) + (1 + self.df_model) * np.log(nobs)/nobs
        # Stata
        # return -2 * self.llf/nobs + np.log(nobs)/nobs * (self.k_ar + \
        #       self.k_trend)

    @cache_readonly
    def resid(self):
        #NOTE: uses fittedvalues because it calculate presample values for mle
        model = self.model
        endog = model.endog.squeeze()
        if model.method == "cmle":  # elimate pre-sample
            return endog[self.k_ar:] - self.fittedvalues
        else:
            return model.endog.squeeze() - self.fittedvalues

    #def ssr(self):
    #    resid = self.resid
    #    return np.dot(resid, resid)

    @cache_readonly
    def roots(self):
        # Inverted roots of the AR lag polynomial; |root| > 1 <=> stationary.
        k = self.k_trend
        return np.roots(np.r_[1, -self.params[k:]]) ** -1

    @cache_readonly
    def fittedvalues(self):
        return self.model.predict(self.params)

    def predict(self, start=None, end=None, dynamic=False):
        params = self.params
        predictedvalues = self.model.predict(params, start, end, dynamic)
        return predictedvalues

        #start = self.model._get_predict_start(start)
        #end, out_of_sample = self.model._get_predict_end(end)

        ##TODO: return forecast errors and confidence intervals
        #from statsmodels.tsa.arima_process import arma2ma
        #ma_rep = arma2ma(np.r_[1,-params[::-1]], [1], out_of_sample)
        #fcasterr = np.sqrt(self.sigma2 * np.cumsum(ma_rep**2))

    # Splice the extra `confint` documentation into AR.predict's docstring
    # so the Results.predict docs stay in sync with the model method.
    preddoc = AR.predict.__doc__.split('\n')
    extra_doc = (""" confint : bool, float
        Whether to return confidence intervals.  If `confint` == True,
        95 % confidence intervals are returned.  Else if `confint` is a
        float, then it is assumed to be the alpha value of the confidence
        interval.  That is confint == .05 returns a 95% confidence
        interval, and .10 would return a 90% confidence interval."""
                 ).split('\n')
    #ret_doc = """
    #        fcasterr : array-like
    #        confint : array-like
    #"""
    predict.__doc__ = '\n'.join(preddoc[:5] + preddoc[7:20] + extra_doc +
                                preddoc[20:])
class ARResultsWrapper(wrap.ResultsWrapper):
    # Wraps ARResults so array outputs are converted back to the caller's
    # data type (e.g. pandas); no extra attrs/methods need special wrapping.
    _attrs = {}
    _wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
                                   _attrs)
    _methods = {}
    _wrap_methods = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_methods,
                                     _methods)
wrap.populate_wrapper(ARResultsWrapper, ARResults)
if __name__ == "__main__":
    # Ad-hoc demo/scratch code, not part of the library API.
    # NOTE(review): the latter half relies on long-removed third-party APIs
    # (scikits.timeseries, pandas.DateRange / pandas.TimeSeries) and will
    # not run on modern environments — verify before relying on it.
    import statsmodels.api as sm
    sunspots = sm.datasets.sunspots.load()
    # Why does R demean the data by defaut?

    ar_ols = AR(sunspots.endog)
    res_ols = ar_ols.fit(maxlag=9)

    ar_mle = AR(sunspots.endog)
    res_mle_bfgs = ar_mle.fit(maxlag=9, method="mle", solver="bfgs",
                              maxiter=500, gtol=1e-10)
#    res_mle2 = ar_mle.fit(maxlag=1, method="mle", maxiter=500, penalty=True,
#                          tol=1e-13)

#    ar_yw = AR(sunspots.endog)
#    res_yw = ar_yw.fit(maxlag=4, method="yw")

#    # Timings versus talkbox
#    from timeit import default_timer as timer
#    print "Time AR fit vs. talkbox"
#    # generate a long series of AR(2) data
#
#    nobs = 1000000
#    y = np.empty(nobs)
#    y[0:2] = 0
#    for i in range(2,nobs):
#        y[i] = .25 * y[i-1] - .75 * y[i-2] + np.random.rand()
#
#    mod_sm = AR(y)
#    t = timer()
#    res_sm = mod_sm.fit(method="yw", trend="nc", demean=False, maxlag=2)
#    t_end = timer()
#    print str(t_end - t) + " seconds for sm.AR with yule-walker, 2 lags"
#    try:
#        import scikits.talkbox as tb
#    except:
#        raise ImportError("You need scikits.talkbox installed for timings")
#    t = timer()
#    mod_tb = tb.lpc(y, 2)
#    t_end = timer()
#    print str(t_end - t) + " seconds for talkbox.lpc"
#    print """For higher lag lengths ours quickly fills up memory and starts
#thrashing the swap.  Should we include talkbox C code or Cythonize the
#Levinson recursion algorithm?"""

    ## Try with a pandas series
    import pandas
    import scikits.timeseries as ts
    d1 = ts.Date(year=1700, freq='A')
    #NOTE: have to have yearBegin offset for annual data until parser rewrite
    #should this be up to the user, or should it be done in TSM init?
    #NOTE: not anymore, it's end of year now
    ts_dr = ts.date_array(start_date=d1, length=len(sunspots.endog))
    pandas_dr = pandas.DateRange(start=d1.datetime,
                                 periods=len(sunspots.endog), timeRule='A@DEC')
    #pandas_dr = pandas_dr.shift(-1, pandas.datetools.yearBegin)

    dates = np.arange(1700, 1700 + len(sunspots.endog))
    dates = ts.date_array(dates, freq='A')
    #sunspots = pandas.TimeSeries(sunspots.endog, index=dates)

    #NOTE: pandas only does business days for dates it looks like
    import datetime
    dt_dates = np.asarray(lmap(datetime.datetime.fromordinal,
                               ts_dr.toordinal().astype(int)))
    sunspots = pandas.TimeSeries(sunspots.endog, index=dt_dates)

    #NOTE: pandas can't handle pre-1900 dates
    mod = AR(sunspots, freq='A')
    res = mod.fit(method='mle', maxlag=9)

    # some data for an example in Box Jenkins
    IBM = np.asarray([460, 457, 452, 459, 462, 459, 463, 479, 493, 490.])
    w = np.diff(IBM)
    theta = .5
|
[
"rodrigosoaresilva@gmail.com"
] |
rodrigosoaresilva@gmail.com
|
b0b6434f95799d95378d985546b2c841c783c9f2
|
ddeab2d71d438f58745d3e7e75ab71fa8fba1abd
|
/pcdet/models/dense_heads/point_head_template.py
|
baa9bd9ee64ded6363b9ffdb7cdd952a9fcabda5
|
[
"Apache-2.0"
] |
permissive
|
penghao1990/SGNet
|
3de6ec203b3d56f39ec88f48aae14e4ba2264c33
|
99b9126a4b8ae85ba258a4150cc756ac73a7b7fb
|
refs/heads/main
| 2023-04-09T21:12:12.381568
| 2021-12-09T11:41:58
| 2021-12-09T11:41:58
| 427,582,195
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,097
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import common_utils, loss_utils
class PointHeadTemplate(nn.Module):
    # Base class for point-wise prediction heads: registers the loss
    # functions, assigns per-point classification/box/part targets from the
    # ground-truth boxes, and computes the individual loss terms.
    def __init__(self, model_cfg, num_class):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.build_losses(self.model_cfg.LOSS_CONFIG)
        # Populated by subclasses during forward(); consumed by the
        # get_*_layer_loss methods below.
        self.forward_ret_dict = None

    def build_losses(self, losses_cfg):
        """Register the classification and regression losses from config."""
        self.add_module(
            'cls_loss_func',
            loss_utils.SigmoidFocalClassificationLoss(alpha=0.25, gamma=2.0)
        )
        reg_loss_type = losses_cfg.get('LOSS_REG', None)
        if reg_loss_type == 'smooth-l1':
            self.reg_loss_func = F.smooth_l1_loss
        elif reg_loss_type == 'l1':
            self.reg_loss_func = F.l1_loss
        elif reg_loss_type == 'WeightedSmoothL1Loss':
            self.reg_loss_func = loss_utils.WeightedSmoothL1Loss(
                code_weights=losses_cfg.LOSS_WEIGHTS.get('code_weights', None)
            )
        else:
            # Fallback when LOSS_REG is missing or unrecognized.
            self.reg_loss_func = F.smooth_l1_loss

    @staticmethod
    def make_fc_layers(fc_cfg, input_channels, output_channels, end_bn_active=False):
        """Build an MLP of (Linear, BN, ReLU) blocks plus a final Linear.

        When ``end_bn_active`` the output layer also gets BN + ReLU.
        """
        fc_layers = []
        c_in = input_channels
        for k in range(0, fc_cfg.__len__()):
            fc_layers.extend([
                nn.Linear(c_in, fc_cfg[k], bias=False),
                nn.BatchNorm1d(fc_cfg[k]),
                nn.ReLU(),
            ])
            c_in = fc_cfg[k]
        if end_bn_active:
            # NOTE(review): the trailing comma below turns this statement into
            # a 1-tuple expression — harmless, but looks unintentional.
            fc_layers.append(nn.Linear(c_in, output_channels, bias=False)),
            fc_layers.append(nn.BatchNorm1d(output_channels, eps=1e-5, momentum=0.01))
            fc_layers.append(nn.ReLU())
        else:
            fc_layers.append(nn.Linear(c_in, output_channels, bias=True))
        return nn.Sequential(*fc_layers)

    def assign_stack_targets(self, points, gt_boxes, extend_gt_boxes=None,
                             ret_box_labels=False, ret_part_labels=False,
                             set_ignore_flag=True, use_ball_constraint=False, central_radius=2.0):
        """
        Args:
            points: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
            gt_boxes: (B, M, 8)
            extend_gt_boxes: [B, M, 8]
            ret_box_labels:
            ret_part_labels:
            set_ignore_flag:
            use_ball_constraint:
            central_radius:

        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
            point_box_labels: (N1 + N2 + N3 + ..., code_size)
        """
        assert len(points.shape) == 2 and points.shape[1] == 4, 'points.shape=%s' % str(points.shape)
        assert len(gt_boxes.shape) == 3 and gt_boxes.shape[2] == 8, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
        assert extend_gt_boxes is None or len(extend_gt_boxes.shape) == 3 and extend_gt_boxes.shape[2] == 8, \
            'extend_gt_boxes.shape=%s' % str(extend_gt_boxes.shape)
        assert set_ignore_flag != use_ball_constraint, 'Choose one only!'
        batch_size = gt_boxes.shape[0]
        bs_idx = points[:, 0]
        point_cls_labels = points.new_zeros(points.shape[0]).long()
        point_box_labels = gt_boxes.new_zeros((points.shape[0], 8)) if ret_box_labels else None
        point_part_labels = gt_boxes.new_zeros((points.shape[0], 3)) if ret_part_labels else None
        #############
        # assert len(end_points.shape) == 2 and end_points.shape[1] == 4, 'end_points.shape=%s' % str(end_points.shape)
        #############
        # Targets are assigned per batch sample: points are matched to the GT
        # box containing them, then labeled foreground/background/ignored.
        for k in range(batch_size):
            bs_mask = (bs_idx == k)
            points_single = points[bs_mask][:, 1:4]
            point_cls_labels_single = point_cls_labels.new_zeros(bs_mask.sum())
            box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                points_single.unsqueeze(dim=0), gt_boxes[k:k + 1, :, 0:7].contiguous()
            ).long().squeeze(dim=0)
            box_fg_flag = (box_idxs_of_pts >= 0)
            if set_ignore_flag:
                extend_box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                    points_single.unsqueeze(dim=0), extend_gt_boxes[k:k+1, :, 0:7].contiguous()
                ).long().squeeze(dim=0)
                fg_flag = box_fg_flag
                # In the enlarged box but not the real box -> ignored (-1).
                ignore_flag = fg_flag ^ (extend_box_idxs_of_pts >= 0)
                point_cls_labels_single[ignore_flag] = -1
            elif use_ball_constraint:
                box_centers = gt_boxes[k][box_idxs_of_pts][:, 0:3].clone()
                box_centers[:, 2] += gt_boxes[k][box_idxs_of_pts][:, 5] / 2
                ball_flag = ((box_centers - points_single).norm(dim=1) < central_radius)
                fg_flag = box_fg_flag & ball_flag
            else:
                raise NotImplementedError

            gt_box_of_fg_points = gt_boxes[k][box_idxs_of_pts[fg_flag]]
            point_cls_labels_single[fg_flag] = 1 if self.num_class == 1 else gt_box_of_fg_points[:, -1].long()
            point_cls_labels[bs_mask] = point_cls_labels_single

            if ret_box_labels and gt_box_of_fg_points.shape[0] > 0:
                point_box_labels_single = point_box_labels.new_zeros((bs_mask.sum(), 8))
                fg_point_box_labels = self.box_coder.encode_torch(
                    gt_boxes=gt_box_of_fg_points[:, :-1], points=points_single[fg_flag],
                    gt_classes=gt_box_of_fg_points[:, -1].long()
                )
                point_box_labels_single[fg_flag] = fg_point_box_labels
                point_box_labels[bs_mask] = point_box_labels_single
            # end_points_single = end_points[bs_mask][:, 1:4]
            # box_idxs_of_end_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
            #     end_points_single.unsqueeze(dim=0), gt_boxes[k:k + 1, :, 0:7].contiguous()
            # ).long().squeeze(dim=0)
            # end_flag = (box_idxs_of_end_pts >= 0)
            # gt_box_of_end_fg_points = gt_boxes[k][box_idxs_of_end_pts[end_flag]]
            if ret_part_labels:
                point_part_labels_single = point_part_labels.new_zeros((bs_mask.sum(), 3))
                transformed_points = points_single[fg_flag] - gt_box_of_fg_points[:, 0:3]
                transformed_points = common_utils.rotate_points_along_z(
                    transformed_points.view(-1, 1, 3), -gt_box_of_fg_points[:, 6]
                ).view(-1, 3)
                # offset = torch.tensor([0.5, 0.5, 0.5]).view(1, 3).type_as(transformed_points)
                # point_part_labels_single[fg_flag] = (transformed_points / gt_box_of_fg_points[:, 3:6]) + offset
                # Piecewise remap of normalized in-box offsets: saturate near
                # the box faces (>0.75 -> 1, <0.25 -> 0), linear in between.
                temp = torch.abs(((torch.abs(transformed_points / gt_box_of_fg_points[:, 3:6])*2)-0.5)*2)
                hot_up_mask = temp > 0.75
                temp[hot_up_mask] = 1
                hot_down_mask = temp < 0.25
                temp[hot_down_mask] = 0
                interval_mask = ~(hot_up_mask | hot_down_mask)
                temp[interval_mask] = temp[interval_mask]*2-0.5
                # temp = torch.clamp(temp, min=0.15)
                point_part_labels_single[fg_flag] = temp
                point_part_labels[bs_mask] = point_part_labels_single

        targets_dict = {
            'point_cls_labels': point_cls_labels,
            'point_box_labels': point_box_labels,
            'point_part_labels': point_part_labels
        }
        return targets_dict

    def get_cls_layer_loss(self, tb_dict=None):
        """Focal classification loss over all points, normalized by #positives."""
        point_cls_labels = self.forward_ret_dict['point_cls_labels'].view(-1)
        point_cls_preds = self.forward_ret_dict['point_cls_preds'].view(-1, self.num_class)

        positives = (point_cls_labels > 0)
        negative_cls_weights = (point_cls_labels == 0) * 1.0
        # Ignored points (label -1) get zero weight by construction.
        cls_weights = (negative_cls_weights + 1.0 * positives).float()
        pos_normalizer = positives.sum(dim=0).float()
        cls_weights /= torch.clamp(pos_normalizer, min=1.0)

        # One-hot over [background, class_1..class_C]; drop the background col.
        one_hot_targets = point_cls_preds.new_zeros(*list(point_cls_labels.shape), self.num_class + 1)
        one_hot_targets.scatter_(-1, (point_cls_labels * (point_cls_labels >= 0).long()).unsqueeze(dim=-1).long(), 1.0)
        one_hot_targets = one_hot_targets[..., 1:]
        cls_loss_src = self.cls_loss_func(point_cls_preds, one_hot_targets, weights=cls_weights)
        point_loss_cls = cls_loss_src.sum()

        loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        point_loss_cls = point_loss_cls * loss_weights_dict['point_cls_weight']
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({
            'p_cls': point_loss_cls.item(),
            'p_pos_num': pos_normalizer.item()
        })
        return point_loss_cls, tb_dict

    def get_part_layer_loss(self, tb_dict=None):
        """BCE loss on the 3-dim intra-box part labels, foreground only."""
        pos_mask = self.forward_ret_dict['point_cls_labels'] > 0
        pos_normalizer = max(1, (pos_mask > 0).sum().item())
        point_part_labels = self.forward_ret_dict['point_part_labels']
        point_part_preds = self.forward_ret_dict['point_part_preds']
        point_loss_part = F.binary_cross_entropy(torch.sigmoid(point_part_preds), point_part_labels, reduction='none')
        point_loss_part = (point_loss_part.sum(dim=-1) * pos_mask.float()).sum() / (3 * pos_normalizer)

        loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        point_loss_part = point_loss_part * loss_weights_dict['point_part_weight']
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({'p_part': point_loss_part.item()})
        return point_loss_part, tb_dict

    def get_box_layer_loss(self, tb_dict=None):
        """Regression loss on encoded box targets, foreground points only."""
        pos_mask = self.forward_ret_dict['point_cls_labels'] > 0
        point_box_labels = self.forward_ret_dict['point_box_labels']
        point_box_preds = self.forward_ret_dict['point_box_preds']

        reg_weights = pos_mask.float()
        pos_normalizer = pos_mask.sum().float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)

        point_loss_box_src = self.reg_loss_func(
            point_box_preds[None, ...], point_box_labels[None, ...], weights=reg_weights[None, ...]
        )
        point_loss_box = point_loss_box_src.sum()

        loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        point_loss_box = point_loss_box * loss_weights_dict['point_box_weight']
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({'p_box': point_loss_box.item()})
        return point_loss_box, tb_dict

    def generate_predicted_boxes(self, points, point_cls_preds, point_box_preds):
        """
        Args:
            points: (N, 3)
            point_cls_preds: (N, num_class)
            point_box_preds: (N, box_code_size)

        Returns:
            point_cls_preds: (N, num_class)
            point_box_preds: (N, box_code_size)
        """
        # +1 because class indices in the coder are 1-based (0 = background).
        _, pred_classes = point_cls_preds.max(dim=-1)
        point_box_preds = self.box_coder.decode_torch(point_box_preds, points, pred_classes + 1)

        return point_cls_preds, point_box_preds

    def forward(self, **kwargs):
        # Subclasses must implement the actual head forward pass.
        raise NotImplementedError
|
[
"noreply@github.com"
] |
noreply@github.com
|
cc33fe1f9e4951a96748c625f6e5551b73f5a8c7
|
ac7419a4d5d25ee9c6ce821081c9d9134702f3c7
|
/bin/classes_ex.py
|
34c145cb7e2383e2be8a12244603909f9f0cc817
|
[] |
no_license
|
rajatgoel789/Python_basics
|
a82215feb2b333d14e42883439bd097250a98ab4
|
22650ecb785c1debcb614cf60f1f18e3e30133c3
|
refs/heads/master
| 2020-07-31T07:30:53.358059
| 2019-09-27T08:23:00
| 2019-09-27T08:23:00
| 210,530,937
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,132
|
py
|
# multiple Objects
# Inheritance
# Operator Overloading

# Integer addition is sugar for the __add__ dunder, demonstrated below.
x = 10
y = 20
# print(x + y)
# print(x.__add__(y))
class Account1:
    """Savings-bank account demo: class attributes, instance attributes,
    classmethods and staticmethods."""

    bankname = "ICICI"  # shared by every instance (class attribute)

    def __init__(self):
        # Runs for every new savings account.
        print("SB LOGIC HERE ")

    def adduser(self, n):
        """Attach a holder name to this instance (creates `self.name`)."""
        self.name = n

    def viewuser(self):
        """Return the holder name previously stored via adduser()."""
        return self.name

    @classmethod
    def bankrules(cls, area):
        """Rule text for a branch area — bound to the class, not an instance."""
        return "B Rules" + area

    @staticmethod
    def bankinfo():
        """Static helper: needs neither the class nor an instance."""
        return "Bank Info"
# Two independent instances: adduser stores a per-instance attribute.
acc = Account1()
acc1 = Account1()
acc.adduser("Rajat")  # adduser(acc,"Rajat")
# It will create a instance variable
acc1.adduser("Goel")
print(acc.viewuser(), acc1.viewuser(), acc.bankname, Account1.bankname, Account1.adduser, sep="\n")
# classmethods/staticmethods are callable from instance or class alike.
print(acc1.bankrules("BLR"))
print(Account1.bankrules("BLR"))
print(Account1.bankinfo())
print(acc1.bankinfo())
class Account2(Account1):
    """Current-account subclass: adds Aadhaar handling and overrides
    viewuser() with a greeting."""

    def __init__(self):
        # Chain up so the base savings logic still runs first.
        super().__init__()
        # Account1.__init__(self)
        print("CA LOGIC TO BE HERE")

    def addAdhar(self, a):
        """Store the Aadhaar identifier on this instance."""
        self.adhar = a

    def viewAdhar(self):
        """Return the previously stored Aadhaar identifier."""
        return self.adhar

    def viewuser(self):
        """Override: prefix the inherited holder name with a greeting."""
        return "Welcome" + self.name
print("---------------------------------")
# Subclass instance: inherits adduser from Account1, adds Aadhaar methods.
cus3 = Account2()
cus3.adduser("Raj")
cus3.addAdhar("w737237823")
print(cus3.viewAdhar(), cus3.viewuser(), sep="\n")
class RBI:
    """Regulator mixin providing its own rule text."""

    def viewrules(self):
        return "RBI RULES"
class Account3(Account1, RBI):
    # Multiple inheritance; MRO is Account3 -> Account1 -> RBI, so
    # viewrules() resolves to RBI's implementation.
    pass
class GOV:
    """Government mixin with a competing viewrules() implementation."""

    def viewrules(self):
        return "GOVT RULES"
class Account4(Account3, GOV):
    # MRO puts Account3's bases (Account1, RBI) before GOV, so
    # viewrules() still resolves to RBI's version, not GOV's.
    pass
print("*******************************")
cus4 = Account3();
print(cus4.viewrules())
print("*******************************")
cus5 = Account4()
# MRO still picks RBI's viewrules; GOV's version must be called explicitly.
print(cus5.viewrules())
print(GOV.viewrules(cus5))
#############
class Account5(Account3):
    def __init__(self):
        # Stores the GOV *class object itself* (not an instance) — composition
        # by reference; note super().__init__() is deliberately not chained.
        self.gov = GOV
cus6 = Account5()
# Inherited via Account3's MRO, so this prints RBI's rules.
print(cus6.viewrules())
# print(cus6.gov.viewrules())
class Account6:
    """Demonstrates operator overloading via dunder protocols:
    +, str(), len(), and iteration over the holder name."""

    def __init__(self, a):
        self.name = a

    def __add__(self, x):
        # `acct1 + acct2` greets both holder names.
        return "Hello" + self.name + x.name

    def __str__(self):
        return self.name

    def __len__(self):
        return len(self.name)

    def __iter__(self):
        # The instance acts as its own (single-pass) iterator.
        self.count = 0
        return self

    def __next__(self):
        idx = self.count
        if idx >= len(self.name):
            raise StopIteration
        self.count = idx + 1
        return self.name[idx]
print("+++++++++++++++++++++++++++++++++++++++++++++++++++")
cus7 = Account6("c7")
cus8 = Account6("c8")
# `+` dispatches to Account6.__add__.
result = cus7 + cus8
print("result=", result)
# print() uses __str__; len() uses __len__; for-loop uses __iter__/__next__.
print("cust7 = ", cus7)
print("len of cust7 = ", len(cus7))
for i in cus7:
    print("i=", i)
# Generic Class
from abc import ABC, abstractmethod
class Account(ABC):
    """Abstract account: cannot be instantiated directly; concrete
    subclasses must implement viewuser()."""

    @abstractmethod
    def viewuser(self):
        pass

    def adduser(self, a):
        """Concrete behavior shared by all subclasses."""
        self.name = a
class MyAccount(Account):
    """Concrete account that fulfils the abstract viewuser() contract."""

    def viewuser(self):
        return self.name
# Instantiating the concrete subclass works; Account() itself would raise.
a = MyAccount()
a.adduser("Raj")
print(a.viewuser())
|
[
"noreply@github.com"
] |
noreply@github.com
|
4f5bb79b857664488c0166556416293328d76651
|
b45230162af7ea65416f61cbbbcf1011a422692b
|
/tests/test_pygrade.py
|
3508d0f3021904959e65229d569c26efa43bef25
|
[
"ISC"
] |
permissive
|
Joaron4/pygrade
|
47a12ce4e8925d20e0d4384f4f39a102bf149f97
|
68416ba92afd3ef634a83560935941d03265df8f
|
refs/heads/master
| 2023-03-16T18:47:48.576434
| 2020-12-01T02:52:15
| 2020-12-01T02:52:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_pygrade
----------------------------------
Tests for `pygrade` module.
"""
import unittest
from pygrade import pygrade
class TestPygrade(unittest.TestCase):
    """Placeholder test case scaffold for the `pygrade` module."""

    def setUp(self):
        """No fixtures required yet."""

    def tearDown(self):
        """No cleanup required yet."""

    def test_000_something(self):
        """Placeholder test; intentionally asserts nothing."""
if __name__ == '__main__':
    import sys
    # Exit with unittest's status code so shells/CI see pass or fail.
    sys.exit(unittest.main())
|
[
"aronwc@gmail.com"
] |
aronwc@gmail.com
|
b8e22c9f0854b5dda5191d086ca45baaa3e98d35
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/twelve-days/5d8ab06a7a6b4acdb6be11d098786e90.py
|
8c8955c509d158350c07858c3b2a1c0d850b89cb
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 2,893
|
py
|
#twelve-days
def verse(day):
    """Return one cumulative verse of "The Twelve Days of Christmas".

    Parameters
    ----------
    day : int
        Verse number, 1..12. Any value outside that range yields the
        twelfth verse, matching the original if/elif chain whose final
        ``else`` caught everything past day 11.

    Returns
    -------
    str
        The full verse, ending in ".\\n".
    """
    ordinals = ["first", "second", "third", "fourth", "fifth", "sixth",
                "seventh", "eighth", "ninth", "tenth", "eleventh", "twelfth"]
    gifts = ["a Partridge in a Pear Tree", "two Turtle Doves",
             "three French Hens", "four Calling Birds", "five Gold Rings",
             "six Geese-a-Laying", "seven Swans-a-Swimming",
             "eight Maids-a-Milking", "nine Ladies Dancing",
             "ten Lords-a-Leaping", "eleven Pipers Piping",
             "twelve Drummers Drumming"]
    # Clamp out-of-range days to 12, exactly as the original else branch did.
    if not 1 <= day <= 12:
        day = 12
    if day == 1:
        received = gifts[0]
    else:
        # Gifts accumulate newest-first and end with "and a Partridge...".
        received = ", ".join(gifts[day - 1:0:-1]) + ", and " + gifts[0]
    return ("On the %s day of Christmas my true love gave to me, %s.\n"
            % (ordinals[day - 1], received))
def verses(start, end):
    """Return the verses for days *start* through *end* (inclusive),
    each verse followed by a blank separator line."""
    return ''.join(verse(day) + '\n' for day in range(start, end + 1))
def sing():
    """Return the whole song: the verses for days one through twelve."""
    first_day, last_day = 1, 12
    return verses(first_day, last_day)
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
a02d7df63661429a939a5c439eb0d240cd5fa637
|
593b4bee32a0041f441a9b8b899f7644c51db35d
|
/mandelbrot_set.py
|
01b16f22520eed42a133c1dd5989c6f732ebc862
|
[] |
no_license
|
fomindanny/Mandelbrot-Set
|
9ee26dd19de694a895875ff13edf7ce584ad3fa2
|
30771387972c92e580669359c55715e904c55596
|
refs/heads/master
| 2023-04-14T01:16:28.594646
| 2021-04-25T17:04:12
| 2021-04-25T17:04:12
| 360,883,267
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,047
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def iterations_until_diverge(complex_number: complex, threshold: int) -> int:
    """Return the iteration index at which z(n + 1) = z^2 + c diverges.

    The sequence is considered divergent once |z| > 4. If the sequence does
    not diverge within *threshold* iterations, the last iteration index
    (threshold - 1) is returned, matching the original behavior.

    Bug fix: the original read the loop variable after the loop, which
    raised UnboundLocalError for threshold <= 0; now 0 is returned instead.
    """
    z = complex(0, 0)
    for iteration in range(threshold):
        z = z**2 + complex_number
        if abs(z) > 4:
            return iteration
    # No divergence within the budget (or an empty budget).
    return max(threshold - 1, 0)
def mandelbrot_set(threshold: int, density: int):
    """Render the Mandelbrot set and save it as 'mandelbrot_set.png'.

    The image samples [-2, 1] on the real axis and [-1.5, 1.5] on the
    imaginary axis on a density x density grid; each cell holds the number
    of iterations until divergence.
    """
    reals = np.linspace(-2, 1, density)
    imags = np.linspace(-1.5, 1.5, density)
    matrix = np.empty((density, density))
    for row, re_part in enumerate(reals):
        for col, im_part in enumerate(imags):
            matrix[row, col] = iterations_until_diverge(complex(re_part, im_part), threshold)
    plt.imsave("mandelbrot_set.png", matrix.T, cmap="magma")
def mandelbrot_set_animation(density: int):
    """Render a 120-frame animation of the Mandelbrot set with an
    increasing iteration budget and save it as 'mandelbrot_set.gif'."""
    reals = np.linspace(-2, 1, density)
    imags = np.linspace(-1.5, 1.5, density)
    figure = plt.figure()
    figure.set_size_inches(10, 10)
    # Full-bleed axes: the image fills the whole figure.
    axes = plt.Axes(figure, [0, 0, 1, 1])
    figure.add_axes(axes)

    def render_frame(frame_index):
        # Frame i uses a divergence budget of i + 1 iterations.
        budget = frame_index + 1
        grid = np.empty((density, density))
        for row, re_part in enumerate(reals):
            for col, im_part in enumerate(imags):
                grid[row, col] = iterations_until_diverge(complex(re_part, im_part), budget)
        return [axes.imshow(grid.T, interpolation="bicubic", cmap="magma")]

    frames_animation = animation.FuncAnimation(figure, render_frame, frames=120, interval=40, blit=True)
    frames_animation.save("mandelbrot_set.gif", writer="imagemagick")
|
[
"dannyfomin@gmail.com"
] |
dannyfomin@gmail.com
|
d374e8946c982e03abe6e0ae97d1ad02333e3bc6
|
af082ba017c5963a966242235b6da037bf65b036
|
/Chapter08/chapter8_tutorials/fetch_ros/fetch_calibration/scripts/calibrate_robot
|
c13fb7bbf481a74f45de2639afbd206ad786d36e
|
[
"MIT"
] |
permissive
|
PacktPublishing/Robot-Operating-System-Cookbook
|
226f885b17cb92b169c09aed3b8c54974269b0a4
|
c5ee97aa8c2c3a3ee7c0feb8e3020e94dddefe0f
|
refs/heads/master
| 2023-01-29T19:37:29.163372
| 2023-01-18T10:21:21
| 2023-01-18T10:21:21
| 139,707,483
| 82
| 38
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,941
|
#!/usr/bin/env python
# Copyright (C) 2015 Fetch Robotics Inc
# Copyright (C) 2014 Unbounded Robotics Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: Michael Ferguson
import argparse
import os
import rospkg
import subprocess
import time
import yaml
from lxml import etree
from time import strftime
def stop_drivers(rosdistro):
    """Stop the robot drivers via the system service; True on success."""
    status = subprocess.call(["sudo", "service", "robot", "stop"])
    return status == 0
def start_drivers(rosdistro):
    """Start the robot drivers via the system service; True on success."""
    status = subprocess.call(["sudo", "service", "robot", "start"])
    return status == 0
def restart_drivers(rosdistro):
    """Stop then start the robot drivers; True only if both steps succeed.

    The start step is skipped when the stop failed (same short-circuit as
    the original `ok and start_drivers(...)`).
    """
    stopped = stop_drivers(rosdistro)
    time.sleep(1.0)  # nodelet unload/load needs time
    if not stopped:
        return False
    return start_drivers(rosdistro)
def copy_file(current, new):
    """Copy *current* to *new*; True on success.

    Destinations under /etc are root-owned, so the copy is escalated
    with sudo for those paths.
    """
    command = ["cp", current, new]
    if new.startswith("/etc"):
        command = ["sudo"] + command
    return subprocess.call(command) == 0
## @brief Updates the robot launch file: stamps the calibration date,
##        points robot_description and the camera info URLs at the freshly
##        installed files, and copies any matching calibration params.
## @param launch The current robot launch file
## @param calibration The calibration data (a dictionary)
## @param install_dir The directory to install to
def update_robot_launch(launch, calibration, install_dir):
    # Keep a backup of the current launch file so --restore can undo this.
    if not copy_file(launch, "%s.bk" % launch):
        print("Failed to backup %s" % launch)
    launch_xml = etree.parse(launch)
    for child in launch_xml.getroot():
        try:
            if child.tag == "param":
                if child.get("name") == "calibration_date":
                    # Stamp install time so --date can report it later.
                    child.set("value", strftime("%Y-%m-%d %H:%M:%S"))
                elif child.get("name") == "robot_description":
                    child.set("textfile", os.path.join(install_dir, calibration["robot_description"]))
                elif child.get("name") in calibration.keys():
                    # Any other param whose name matches a calibration key
                    # gets its value overwritten directly.
                    child.set("value", calibration[child.get("name")])
            if child.tag == "arg":
                if child.get("name") == "rgb_camera_info_url":
                    # NOTE(review): the *rgb* arg is set from the *depth*
                    # calibration entry -- looks like a copy-paste slip;
                    # confirm whether calibration["rgb_camera_info_url"]
                    # was intended here.
                    child.set("default", "file://%s" % os.path.join(install_dir, calibration["depth_camera_info_url"]))
                elif child.get("name") == "depth_camera_info_url":
                    child.set("default", "file://%s" % os.path.join(install_dir, calibration["depth_camera_info_url"]))
        except KeyError:
            # Calibration entries are optional; leave unmatched elements alone.
            pass
    # Generate output to a local temp file
    temp_launch = "/tmp/robot"+strftime("%Y_%m_%d_%H_%M_%S")+".launch"
    launch_xml.write(temp_launch)
    # Copy to system (using sudo if necessary)
    copy_file(temp_launch, os.path.join(install_dir, "robot.launch"))
def get_calibration_dict(directory, calibration=None):
    """Read the newest calibration_*.yaml in *directory* into a dict.

    Adds the head-camera depth offsets, the camera info URLs and the URDF
    file name to *calibration* (created if None). On any read or parse
    failure the dict is returned unchanged.
    """
    if calibration is None:
        calibration = dict()
    # Load file
    try:
        # Get name of latest file (timestamped names sort chronologically).
        files = [f for f in os.listdir(directory) if f.startswith("calibration_") and f.endswith(".yaml")]
        files.sort()
        # Open it
        calfile = open(os.path.join(directory, files[-1]))
    except (IndexError, IOError):
        # BUG FIX: the original "except IndexError, IOError:" bound the
        # IndexError to the name IOError and never actually caught IOError
        # (and is a SyntaxError on Python 3).
        print("Cannot open calibration.yaml")
        return calibration
    # Parse YAML
    try:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; consider yaml.safe_load for these files.
        calyaml = yaml.load(calfile)
    except Exception:
        # Narrowed from a bare except so Ctrl-C is not swallowed.
        print("Cannot parse calibration.yaml")
        return calibration
    # Get dictionary data; the camera offsets are optional.
    try:
        calibration["head_camera/driver/z_offset_mm"] = str(int(calyaml["camera_z_offset"]*1000.0))
    except KeyError:
        calibration["head_camera/driver/z_offset_mm"] = "0"
    try:
        calibration["head_camera/driver/z_scaling"] = str(1.0 + calyaml["camera_z_scaling"])
    except KeyError:
        calibration["head_camera/driver/z_scaling"] = "1.0"
    calibration["depth_camera_info_url"] = calyaml["depth_info"]
    calibration["rgb_camera_info_url"] = calyaml["rgb_info"]
    calibration["robot_description"] = calyaml["urdf"]
    return calibration
def get_base_calibration_dict(directory, calibration=None):
    """Read the newest base_calibration_*.yaml in *directory* into a dict.

    Adds the odometry track width and IMU gyro scale to *calibration*
    (created if None). On any read or parse failure the dict is returned
    unchanged.
    """
    if calibration is None:
        calibration = dict()
    # Load file
    try:
        # Get name of latest file (timestamped names sort chronologically).
        files = [f for f in os.listdir(directory) if f.startswith("base_calibration_") and f.endswith(".yaml")]
        files.sort()
        # Open it
        calfile = open(os.path.join(directory, files[-1]))
    except (IndexError, IOError):
        # BUG FIX: the original "except IndexError, IOError:" bound the
        # IndexError to the name IOError and never actually caught IOError.
        print("Cannot open base_calibration.yaml")
        return calibration
    # Parse YAML
    try:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; consider yaml.safe_load for these files.
        calyaml = yaml.load(calfile)
    except Exception:
        # Narrowed from a bare except so Ctrl-C is not swallowed.
        print("Cannot parse base_calibration.yaml")
        return calibration
    # Get dictionary data
    calibration["/base_controller/track_width"] = str(calyaml["odom"])
    calibration["/imu/gyro_scale"] = str(calyaml["imu"])
    return calibration
def move_calibration(current_directory, new_directory):
    """Install the newest calibration from *current_directory* into
    *new_directory*: copy the calibration files and rewrite robot.launch."""
    calibration = get_calibration_dict(current_directory)
    calibration = get_base_calibration_dict(current_directory, calibration)
    # install urdf, depth, rgb files
    for name in ["depth_camera_info_url", "rgb_camera_info_url", "robot_description"]:
        try:
            copy_file(os.path.join(current_directory, calibration[name]),
                      os.path.join(new_directory, calibration[name]))
        except (KeyError, OSError):
            # Narrowed from a bare except: a missing calibration entry
            # (KeyError) or a failed process spawn (OSError) keeps the
            # best-effort behavior; anything else should surface.
            print("Unable to copy %s" % name)
    # update the robot.launch
    update_robot_launch(os.path.join(new_directory, "robot.launch"), calibration, new_directory)
def get_calibration_date(rosdistro):
    """Return the calibration_date param recorded in the installed
    robot.launch, or an explanatory message when it is absent."""
    root = etree.parse("/etc/ros/%s/robot.launch" % rosdistro).getroot()
    for element in root:
        if element.tag == "param" and element.get("name") == "calibration_date":
            return element.get("value")
    return "Calibration date not found in robot.launch"
if __name__ == "__main__":
    # Parse the arguments
    parser = argparse.ArgumentParser(description="Calibrate the robot, update files in /etc/ros")
    parser.add_argument("--arm", help="Capture arm/head calibration data", action="store_true")
    # Typo fix in user-facing help: "Captyure" -> "Capture".
    parser.add_argument("--base", help="Capture base calibration data", action="store_true")
    parser.add_argument("--install", help="Install new calibration to /etc/ros (restarts drivers)", action="store_true")
    parser.add_argument("--reset", help="Reset the calibration to factory defaults (restarts drivers)", action="store_true")
    parser.add_argument("--restore", help="Restore the previous calibration", action="store_true")
    parser.add_argument("--date", help="Get the timestamp of the current calibration", action="store_true")
    parser.add_argument("--directory", help="Directory to load calibration from or backup to", default="/tmp")
    args = parser.parse_args()

    restart = False
    rosdistro = "indigo"
    etc_launch = "/etc/ros/%s/robot.launch" % rosdistro

    if args.date:
        print(get_calibration_date(rosdistro))
        exit(0)

    if args.restore:
        # Put back the .bk copy made by update_robot_launch().
        print("Calibration date: %s" % get_calibration_date(rosdistro))
        if subprocess.call(["sudo", "cp", "%s.bk" % etc_launch, etc_launch]) != 0:
            print("Failed to restore calibration")
            exit(-1)
        print("Restored calibration to %s" % get_calibration_date(rosdistro))
        restart = True

    if args.reset:
        # Can we get to fetch_bringup?
        try:
            rp = rospkg.RosPack()
            bringup = rp.get_path("fetch_bringup")
        except Exception:
            # Narrowed from a bare except so Ctrl-C still interrupts.
            print("\nCould not find fetch_bringup package, is your ROS path correct?\n")
            exit(-1)
        # Reset the robot.launch to factory defaults
        copy_file(os.path.join(bringup, "launch/fetch.launch"), etc_launch)
        restart = True

    if args.arm:
        try:
            rp = rospkg.RosPack()
            cal_pkg = rp.get_path("robot_calibration")
        except Exception:
            print("\nCould not find robot_calibration package, is your ROS path correct?")
            exit(-1)
        if subprocess.call(["roslaunch", "fetch_calibration", "capture.launch"]) != 0:
            print("Failed to run calibrate")
            exit(-1)

    if args.base:
        if subprocess.call(["rosrun", "robot_calibration", "calibrate_base"]) != 0:
            # Typo fix: message now names the actual command, calibrate_base.
            print("Failed to run calibrate_base")
            exit(-1)

    if args.install or args.arm or args.base:
        # Freshly captured or explicitly supplied data gets installed.
        move_calibration(args.directory, "/etc/ros/%s" % rosdistro)
        restart = True

    if restart:
        print("Restarting drivers so that calibration will take effect...")
        if not restart_drivers(rosdistro):
            print("\nWARNING: drivers may not have restarted\n")
        exit(0)

    # No action requested: show usage.
    parser.print_help()
|
[
"nileshs@packtpub.com"
] |
nileshs@packtpub.com
|
|
55d1af3af949c3e159d60b095ce259600e812de8
|
156f5362e7381b96f3b2839f94de8778b005274d
|
/tests/bindings/test_bindings.py
|
99e0ca3c3d74647d7e7e35d5cb0769064383656b
|
[
"MIT",
"CC-BY-3.0"
] |
permissive
|
sairam4123/godot-python
|
3f8bfcd989ae1b06ec6bf5e01462895b9f5f5fe0
|
a95ed14f6e53ae4eb59e6bd03efb0db90b070bc6
|
refs/heads/master
| 2021-05-20T20:43:18.764693
| 2020-03-02T13:49:06
| 2020-03-02T13:49:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,157
|
py
|
import pytest
import godot
from godot import Vector3, Object, Node, Node2D, PluginScript, OK
def test_free_node():
    """Explicitly freed nodes must not leak."""
    node = Node.new()
    node.free()
    # `check_memory_leak` auto fixture will do the bookkeeping


def test_expose_contains_constant():
    exported = dir(godot)
    assert "OK" in exported
    assert OK is not None


def test_expose_contains_class():
    exported = dir(godot)
    assert "Node" in exported
    assert Node is not None


def test_expose_contains_builtins():
    exported = dir(godot)
    assert "Vector3" in exported
    assert Vector3 is not None
def test_call_one_arg_short(current_node):
    with pytest.raises(TypeError) as excinfo:
        current_node.get_child()
    assert str(excinfo.value) == "get_child() takes exactly one argument (0 given)"


def test_call_too_few_args(current_node):
    with pytest.raises(TypeError) as excinfo:
        current_node.move_child()
    message = str(excinfo.value)
    assert message == "move_child() takes exactly 2 positional arguments (0 given)"


def test_call_with_defaults_and_too_few_args(current_node):
    with pytest.raises(TypeError) as excinfo:
        current_node.add_child()
    message = str(excinfo.value)
    assert message == "add_child() takes at least 1 positional argument (0 given)"


def test_call_none_in_base_type_args(current_node):
    # signature: def get_child(self, godot_int idx)
    with pytest.raises(TypeError) as excinfo:
        current_node.get_child(None)
    assert str(excinfo.value) == "an integer is required"


def test_call_none_in_builtin_args(current_node):
    # signature: def get_node(self, NodePath path not None)
    with pytest.raises(TypeError) as excinfo:
        current_node.get_node(None)
    assert str(excinfo.value) == "Invalid value None, must be str or NodePath"


def test_call_none_in_bindings_args(current_node):
    # signature: def get_path_to(self, Node node not None)
    with pytest.raises(TypeError) as excinfo:
        current_node.get_path_to(None)
    message = str(excinfo.value)
    assert message == "Argument 'node' has incorrect type (expected godot.bindings.Node, got NoneType)"


def test_call_too_many_args(current_node):
    with pytest.raises(TypeError) as excinfo:
        current_node.get_child(1, 2)
    assert str(excinfo.value) == "get_child() takes exactly one argument (2 given)"


def test_call_with_default_and_too_many_args(current_node):
    with pytest.raises(TypeError) as excinfo:
        current_node.add_child(1, 2, 3)
    message = str(excinfo.value)
    assert message == "add_child() takes at most 2 positional arguments (3 given)"
def test_call_with_defaults(generate_obj):
    # signature: void add_child(Node node, bool legible_unique_name=false)
    parent = generate_obj(Node)
    child = generate_obj(Node)
    parent.add_child(child)
    # legible_unique_name defaults to False: the generated name is not
    # human-readable.
    names = [str(c.name) for c in parent.get_children()]
    assert names == ["@@2"]


def test_call_with_kwargs(generate_obj):
    parent = generate_obj(Node)
    first_child = generate_obj(Node)
    second_child = generate_obj(Node)
    parent.add_child(first_child, legible_unique_name=True)
    # Check name is readable
    assert [str(c.name) for c in parent.get_children()] == ["Node"]
    # Kwargs are accepted out of order as well.
    parent.add_child_below_node(legible_unique_name=True, child_node=second_child, node=parent)
    # Check names are still readable
    assert [str(c.name) for c in parent.get_children()] == ["Node", "Node2"]
def test_inheritance(generate_obj):
    """Generated bindings must preserve the Godot class hierarchy."""
    node = generate_obj(Node)
    node2d = generate_obj(Node2D)
    # BUG FIX: the original called isinstance() without asserting the
    # result, so this test could never fail.
    assert isinstance(node, Object)
    assert isinstance(node2d, Object)
    assert isinstance(node2d, Node)
def test_call_with_refcounted_return_value(current_node):
    attached = current_node.get_script()
    assert isinstance(attached, PluginScript)


def test_call_with_refcounted_param_value(generate_obj):
    node = generate_obj(Node)
    plugin_script = PluginScript.new()
    node.set_script(plugin_script)


def test_create_refcounted_value(current_node):
    # Build two scripts, alias both, then drop one alias; the leak-check
    # fixture verifies the refcount bookkeeping.
    first_script = PluginScript.new()
    second_script = PluginScript.new()
    first_alias = first_script
    second_alias = second_script
    del first_script
|
[
"emmanuel.leblond@gmail.com"
] |
emmanuel.leblond@gmail.com
|
ce03108274b37dc8809c8883264cd853956d525c
|
17f918c06ca476f79d28d712abfa356b2dcfb6c7
|
/koishi/plugins/automation_touhou_feed/events.py
|
c0284afe3effa51079eda45d6079ea30d3d6ee10
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
HuyaneMatsu/Koishi
|
eb87693ad34da2483efe2b6bdaa4f3fae417e491
|
74f92b598e86606ea3a269311316cddd84a5215f
|
refs/heads/master
| 2023-08-23T22:54:37.006530
| 2023-08-23T20:26:49
| 2023-08-23T20:26:49
| 163,678,458
| 17
| 6
|
NOASSERTION
| 2023-06-14T14:18:27
| 2018-12-31T15:18:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,167
|
py
|
__all__ = ()
from ...bots import SLASH_CLIENT
from ..automation_core import get_touhou_feed_enabled
from .logic import (
reset_touhou_feeders, reset_channel, should_touhou_feed_in_channel, try_remove_channel, try_remove_guild,
try_update_channel, try_update_guild
)
@SLASH_CLIENT.events
async def channel_create(client, channel):
    """Start feeding a newly created channel when its guild has the feed on
    and the channel qualifies."""
    if not get_touhou_feed_enabled(channel.guild_id):
        return
    if should_touhou_feed_in_channel(client, channel):
        try_update_channel(channel)


@SLASH_CLIENT.events
async def channel_delete(client, channel):
    """Deleted channels are always dropped from the feed bookkeeping."""
    try_remove_channel(channel)


@SLASH_CLIENT.events
async def channel_edit(client, channel, old_parameters):
    """Re-evaluate an edited channel; edits may change feed eligibility."""
    if get_touhou_feed_enabled(channel.guild_id):
        reset_channel(client, channel)


@SLASH_CLIENT.events
async def guild_create(client, guild):
    """Register a newly joined / loaded guild if its feed is enabled."""
    if get_touhou_feed_enabled(guild.id):
        try_update_guild(client, guild)


@SLASH_CLIENT.events
async def guild_delete(client, guild, guild_profile):
    """Drop a left guild's feed state (only when the feed was enabled)."""
    if get_touhou_feed_enabled(guild.id):
        try_remove_guild(guild)


@SLASH_CLIENT.events
async def ready(client):
    """One-shot startup hook: unregister itself, then spin up the feeders."""
    client.events.remove(ready)
    reset_touhou_feeders(client)
|
[
"re.ism.tm@gmail.com"
] |
re.ism.tm@gmail.com
|
dc00c7b46443eedbd055a6a450758ba00349fc60
|
fd00c22c5abbf2f213ef0840a33b68fab7ad3ad0
|
/contact-tracing-demo/src/prox/place.py
|
293db88cab5848d7bdb0ad018782462001f0eb6e
|
[
"MIT"
] |
permissive
|
diffix/coronaVirus
|
c1591f916ae9bbe846d48d50727d3e030f5b768e
|
2ec2a845b02e6354d82fc086c06a07b5ad7c99b9
|
refs/heads/master
| 2022-07-15T16:49:22.237979
| 2022-06-08T15:11:34
| 2022-06-08T15:11:34
| 250,565,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29,284
|
py
|
import datetime
import json
import random
from common import WarningCounter
class PlaceInfo:
"""
"""
placeTypes = {
'home': {
'work': {
'minCapacity': 1,
'maxCapacity': 6,
'minArrive': None,
'maxArrive': None,
'minLengthMin': None,
'maxLengthMin': None,
'open': 1.0,
'happening': 1.0,
},
'visit': {
'minCapacity': 1,
'maxCapacity': 10,
'minArrive': '10:00:00',
'maxArrive': '21:00:00',
'minLengthMin': 5,
'maxLengthMin': 480,
'avgOccupancy': 1.0,
'lowFreqDay': 30,
'highFreqDay': 10,
'open': 1.0,
'happening': 1.0,
},
'encountersPerPairPerHour': 0.1,
'rank': 0
},
'school': {
'work': {
'minCapacity': 20,
'maxCapacity': 150,
'minArrive': '08:00:00',
'maxArrive': '09:00:00',
'minLengthMin': 360,
'maxLengthMin': 480,
'open': 1.0,
'happening': 1.0,
},
'visit': {
'minCapacity': 10,
'maxCapacity': 100,
'minArrive': '10:00:00',
'maxArrive': '14:00:00',
'minLengthMin': 5,
'maxLengthMin': 60,
'avgOccupancy': 1.0,
'lowFreqDay': 30,
'highFreqDay': 15,
'open': 1.0,
'happening': 1.0,
},
'encountersPerPairPerHour': 0.05,
'rank': 1
},
'office': {
'work': {
'minCapacity': 20,
'maxCapacity': 200,
'minArrive': '08:00:00',
'maxArrive': '10:00:00',
'minLengthMin': 360,
'maxLengthMin': 480,
'open': 1.0,
'happening': 1.0,
},
'visit': {
'minCapacity': 2,
'maxCapacity': 10,
'minArrive': '09:00:00',
'maxArrive': '16:00:00',
'minLengthMin': 5,
'maxLengthMin': 120,
'avgOccupancy': 1.0,
'lowFreqDay': 30,
'highFreqDay': 3,
'open': 1.0,
'happening': 1.0,
},
'encountersPerPairPerHour': 0.05,
'rank': 2
},
'sport': {
'work': {
'minCapacity': 3,
'maxCapacity': 6,
'minArrive': '08:00:00',
'maxArrive': '12:00:00',
'minLengthMin': 360,
'maxLengthMin': 480,
'open': 1.0,
'happening': 1.0,
},
'visit': {
'minCapacity': 10,
'maxCapacity': 50,
'minArrive': '09:00:00',
'maxArrive': '20:00:00',
'minLengthMin': 30,
'maxLengthMin': 60,
'avgOccupancy': 0.2,
'lowFreqDay': 5,
'highFreqDay': 1,
'open': 1.0,
'happening': 1.0,
},
'encountersPerPairPerHour': 0.1,
'rank': 3
},
'super': {
'work': {
'minCapacity': 10,
'maxCapacity': 40,
'minArrive': '08:00:00',
'maxArrive': '12:00:00',
'minLengthMin': 360,
'maxLengthMin': 480,
'open': 1.0,
'happening': 1.0,
},
'visit': {
'minCapacity': 50,
'maxCapacity': 550,
'minArrive': '09:00:00',
'maxArrive': '19:00:00',
'minLengthMin': 10,
'maxLengthMin': 90,
'avgOccupancy': 0.25,
'lowFreqDay': 7,
'highFreqDay': 1,
'open': 1.0,
'happening': 1.0,
},
'encountersPerPairPerHour': 0.1,
'rank': 4
},
'store': {
'work': {
'minCapacity': 2,
'maxCapacity': 10,
'minArrive': '08:00:00',
'maxArrive': '10:00:00',
'minLengthMin': 360,
'maxLengthMin': 480,
'open': 1.0,
'happening': 1.0,
},
'visit': {
'minCapacity': 8,
'maxCapacity': 80,
'minArrive': '10:00:00',
'maxArrive': '17:00:00',
'minLengthMin': 5,
'maxLengthMin': 90,
'avgOccupancy': 0.1,
'lowFreqDay': 7,
'highFreqDay': 1,
'open': 1.0,
'happening': 1.0,
},
'encountersPerPairPerHour': 0.1,
'rank': 5
},
'restaurant': {
'work': {
'minCapacity': 2,
'maxCapacity': 20,
'minArrive': '11:00:00',
'maxArrive': '12:00:00',
'minLengthMin': 360,
'maxLengthMin': 480,
'open': 1.0,
'happening': 1.0,
},
'visit': {
'minCapacity': 5,
'maxCapacity': 50,
'minArrive': '12:00:00',
'maxArrive': '20:00:00',
'minLengthMin': 30,
'maxLengthMin': 90,
'avgOccupancy': 0.15,
'lowFreqDay': 14,
'highFreqDay': 1,
'open': 1.0,
'happening': 1.0,
},
'encountersPerPairPerHour': 0.05,
'rank': 6
},
}
"""
Households in Kaiserslautern:
| total | single | couple no kids | couple with kids | single parents | others |
| ----- | ------ | -------------- | ---------------- | -------------- | ------ |
| 50816 | 23910 | 12187 | 8828 | 4040 | 1851 |
| 100 | 47 | 24 | 17.4 | 8 | 3.6 |
"""
homeTypes = {
(1, 0): 0.47, # single
(1, 1): 0.07, # single parents a
(1, 2): 0.01, # single parents b
(2, 0): 0.24, # couple no kids
(2, 1): 0.10, # couple with kids a
(2, 2): 0.05, # couple with kids b
(2, 3): 0.02, # couple with kids c
(2, 4): 0.004, # couple with kids d
(3, 0): 0.03, # others a
(3, 1): 0.005, # others b
(3, 2): 0.001, # others c
}
"""
Num apartments per building in Kaiserslautern:
| total | 1 apt | 2 apt | 3-6 apt | 7-12 apt | 13+ apt |
| ----- | ----- | ----- | ------- | -------- | ------- |
| 20302 | 11115 | 3456 | 3983 | 1336 | 412 |
| 100 | 54.8 | 17 | 19.6 | 6.6 | 2 |
We adjust the numbers as there is more addresses than houses, which means the same house has multiple addresses.
"""
houseTypes = {
1: 54.8 / 2,
2: 17 * 2,
3: 19.6 * 2,
7: 6.6 * 1.5,
13: 2 * 1.5,
}
storeTypes = {
'store': 283,
'clothes': 87,
'hairdresser': 51,
'bakery': 30,
'beauty': 28,
'kiosk': 27,
'car': 18,
'jewelry': 17,
'mobile_phone': 15,
'car_repair': 13,
'travel_agency': 13,
'furniture': 13,
'shoes': 13,
'optician': 13,
'convenience': 12,
'florist': 11,
'butcher': 10,
'electronics': 10,
'gift': 9,
'variety_store': 8,
'beverages': 7,
}
@staticmethod
def getPlaceNames():
""" Returns list of place types names
"""
pn = set(list(PlaceInfo.placeTypes.keys()))
for st in PlaceInfo.storeTypes.keys():
n = st
if st != 'store':
n = 'store_' + st
pn.add(n)
return list(pn)
@staticmethod
def getEncounterRate(placeName):
if placeName.startswith('store_'):
placeName = 'store'
return PlaceInfo.placeTypes[placeName]['encountersPerPairPerHour']
@staticmethod
def getWorkPlaceNames():
""" Returns list of work place types names
"""
workPlaces = PlaceInfo.getPlaceNames()
workPlaces.remove("home")
return workPlaces
    @staticmethod
    def getRankedPlaceNames():
        # Place type names ordered by their 'rank' field (home first,
        # restaurant last); 'store' is expanded into its subtypes, sorted
        # by descending store count.
        ranked = [k for k, v in sorted(PlaceInfo.placeTypes.items(), key=lambda item: item[1]['rank'])]
        placeNames = []
        for r in ranked:
            if r == 'store':
                # NOTE(review): this appends the raw subtype names
                # ('clothes', 'bakery', ...) WITHOUT the 'store_' prefix
                # that getPlaceNames()/sortedStoreTypes() use -- confirm
                # whether callers expect the unprefixed form here.
                for n in [t for t, c in sorted(list(PlaceInfo.storeTypes.items()), key=lambda e: e[1], reverse=True)]:
                    placeNames.append(n)
            else:
                placeNames.append(r)
        return placeNames
    @staticmethod
    def assignVisitFreq(placeName):
        # Draw a personal visit frequency (days between visits) for one
        # place type.
        # NOTE(review): the low/high bounds are always taken from the
        # 'store' entry, even for non-store place types -- confirm this is
        # intended rather than PlaceInfo.placeTypes[placeName].
        low = PlaceInfo.placeTypes['store']['visit']['lowFreqDay']
        high = PlaceInfo.placeTypes['store']['visit']['highFreqDay']
        visitFreq = random.randint(high, low)
        if placeName.startswith('store'):
            storeType = 'store'
            if placeName.startswith('store_'):
                storeType = placeName[6:]
            # Rarer store subtypes are visited proportionally less often:
            # scale by total store count over this subtype's count.
            visitFreq = visitFreq * sum(PlaceInfo.storeTypes.values()) / PlaceInfo.storeTypes[storeType]
        return visitFreq
@staticmethod
def assignVisitFreqs():
return {placeName: PlaceInfo.assignVisitFreq(placeName) for placeName in PlaceInfo.getPlaceNames()}
@staticmethod
def assignWorkCapacity(placeName):
if placeName.startswith('store_'):
placeName = 'store'
low = PlaceInfo.placeTypes[placeName]['work']['minCapacity']
high = PlaceInfo.placeTypes[placeName]['work']['maxCapacity']
return random.randint(low, high)
@staticmethod
def assignVisitCapacity(placeName):
if placeName.startswith('store_'):
placeName = 'store'
low = PlaceInfo.placeTypes[placeName]['visit']['minCapacity']
high = PlaceInfo.placeTypes[placeName]['visit']['maxCapacity']
return random.randint(low, high)
    @staticmethod
    def assignHomeTypes(numPeople):
        # Decide how many homes of each (adults, children) composition are
        # needed to house exactly numPeople people.
        # Convert the household-type weights (fractions of *households*)
        # into fractions of *people*: a type housing k people weighs k
        # times its household weight.
        percentagePeople = dict()
        weightTotal = 0.0
        for homeType, weight in PlaceInfo.homeTypes.items():
            newWeight = weight * (homeType[0] + homeType[1])
            percentagePeople[homeType] = newWeight
            weightTotal += newWeight
        for homeType, weight in percentagePeople.items():
            percentagePeople[homeType] = weight / weightTotal
        # Allocate homes largest-household-first, never overshooting
        # numPeople.
        homeTypes = dict()
        totalPeople = 0
        sortedHomeTypes = sorted(PlaceInfo.homeTypes.keys(), key=lambda h: h[0] + h[1], reverse=True)
        for homeType in sortedHomeTypes:
            numPeopleInHome = homeType[0] + homeType[1]
            numHomes = int((numPeople * percentagePeople[homeType]) / numPeopleInHome)
            while totalPeople + numHomes * numPeopleInHome > numPeople:
                numHomes -= 1
            homeTypes[homeType] = numHomes
            totalPeople += numHomes * numPeopleInHome
        # Top up with the smallest household type (last after the reverse
        # sort) until no more people fit, so the total matches numPeople.
        while totalPeople + sortedHomeTypes[len(sortedHomeTypes) - 1][0] + sortedHomeTypes[len(sortedHomeTypes) - 1][1]\
                <= numPeople:
            homeTypes[sortedHomeTypes[len(sortedHomeTypes) - 1]] += 1
            totalPeople += sortedHomeTypes[len(sortedHomeTypes) - 1][0] + sortedHomeTypes[len(sortedHomeTypes) - 1][1]
        return homeTypes
@staticmethod
def _assignTimes(placeName, date, timeType):
if placeName.startswith('store_'):
placeName = 'store'
timeLow = datetime.datetime.strptime(PlaceInfo.placeTypes[placeName][timeType]['minArrive'], "%H:%M:%S").time()
timeHigh = datetime.datetime.strptime(PlaceInfo.placeTypes[placeName][timeType]['maxArrive'], "%H:%M:%S").time()
dateLow = datetime.datetime.combine(date, timeLow)
dateHigh = datetime.datetime.combine(date, timeHigh)
begin = dateLow + datetime.timedelta(seconds=random.randint(0, int((dateHigh - dateLow).total_seconds())))
lengthLow = PlaceInfo.placeTypes[placeName][timeType]['minLengthMin']
lengthHigh = PlaceInfo.placeTypes[placeName][timeType]['maxLengthMin']
end = begin + datetime.timedelta(minutes=random.randint(lengthLow, lengthHigh))
return begin, end
    @staticmethod
    def assignWorkTimes(placeName, date):
        # Draw a random work-shift (begin, end) pair for placeName on date.
        return PlaceInfo._assignTimes(placeName, date, 'work')

    @staticmethod
    def assignVisitTimes(placeName, date):
        # Draw a random visit (begin, end) pair for placeName on date.
        return PlaceInfo._assignTimes(placeName, date, 'visit')
@staticmethod
def getAverageVisitMin(placeName):
if placeName.startswith('store_'):
placeName = 'store'
low = PlaceInfo.placeTypes[placeName]['visit']['minLengthMin']
high = PlaceInfo.placeTypes[placeName]['visit']['maxLengthMin']
return int((high - low) / 2.0)
@staticmethod
def getDailyVisitMin(placeName):
if placeName.startswith('store_'):
placeName = 'store'
start = datetime.datetime.strptime(PlaceInfo.placeTypes[placeName]['visit']['minArrive'], "%H:%M:%S")
end = datetime.datetime.strptime(PlaceInfo.placeTypes[placeName]['visit']['maxArrive'], "%H:%M:%S")
return int((end - start).total_seconds() / 60.0 * PlaceInfo.placeTypes[placeName]['visit']['avgOccupancy'])
@staticmethod
def defaultNormalPolicyData():
policy = dict()
for placeName, placeDict in PlaceInfo.placeTypes.items():
if placeName == 'store':
for pn in PlaceInfo.sortedStoreTypes():
policy[pn] = {
'work': {
'open': placeDict['work']['open'],
'happening': placeDict['work']['happening'],
},
'visit': {
'open': placeDict['visit']['open'],
'happening': placeDict['visit']['happening'],
},
}
else:
policy[placeName] = {
'work': {
'open': placeDict['work']['open'],
'happening': placeDict['work']['happening'],
},
'visit': {
'open': placeDict['visit']['open'],
'happening': placeDict['visit']['happening'],
},
}
return policy
@staticmethod
def assignNumApt():
r = random.choices(list(PlaceInfo.houseTypes.keys()), weights=PlaceInfo.houseTypes.values(), k=1)[0]
if r == 1 or r == 2:
return r
elif r == 3:
return random.randint(3, 6)
# return random.choices(range(3, 7), weights=[9, 5.6, 3, 2], k=1)[0]
elif r == 7:
return random.randint(7, 12)
# return random.choices(range(7, 13), weights=[2, 1, 1, 0.8, 0.6, 0.6], k=1)[0]
else:
return random.randint(13, 80)
# return random.choices(range(13, 21), weights=[0.4, 0.4, 0.3, 0.3, 0.2, 0.2, 0.1, 0.1], k=1)[0]
@staticmethod
def sortedStoreTypes():
return [f"store_{t}" if t != 'store' else 'store' for t, c in sorted(list(PlaceInfo.storeTypes.items()),
key=lambda e: e[1], reverse=True)]
class Place:
""" Represents a single place. A place has a name and two capacities, a capacity for workers and one for visitors.
The name of a place is formed as "placeName-number", e.g., "office-42" for the 43rd office created. Capacities are
assigned utilizing a PlaceInfo object.
"""
@staticmethod
def _dailyAmounts(people):
dailyWorkSchool = 0
dailyWorkNonSchool = 0
dailyVisits = dict()
for placeName in PlaceInfo.getWorkPlaceNames():
if placeName not in dailyVisits:
dailyVisits[placeName] = 0
for person in people:
if person.works:
if person.hasSchoolAge():
dailyWorkSchool = dailyWorkSchool + 1
else:
dailyWorkNonSchool = dailyWorkNonSchool + 1
if person.visits:
for placeName in PlaceInfo.getWorkPlaceNames():
if person.visitFreqs[placeName] > 0:
dailyVisits[placeName] = dailyVisits[placeName] + (1.0 / person.visitFreqs[placeName])
return dailyWorkSchool, dailyWorkNonSchool, dailyVisits
    @staticmethod
    def generateWorkPlaces(people, placeAddressTracker):
        """
        Returns dictionary with one list of places per place type. Takes a iterable of Person objects.
        Each Person object must have methods "int works(atPlaceName=None)" and "int visitFreqs(placeName)".
        This method assumes that place types 'school' and 'office' exist.
        :param people: Iterable of Person objects.
        :param placeAddressTracker: PlaceAddressTracker instance
        :return: Dictionary with one list of places per place type.
        """
        WarningCounter.reset()
        dailyWorkSchool, dailyWorkNonSchool, dailyVisits = Place._dailyAmounts(people)
        dailyWorkSchoolRemaining = dailyWorkSchool
        dailyWorkOfficeRemaining = dailyWorkNonSchool
        places = dict()
        for placeName in PlaceInfo.getWorkPlaceNames():
            places[placeName] = []
            dailyVisitMin = PlaceInfo.getDailyVisitMin(placeName)
            averageVisitMin = PlaceInfo.getAverageVisitMin(placeName)
            dailyVisitCapacity = 0
            # Add places of this type until their combined visitor-minutes
            # cover the expected daily visit demand.
            while dailyVisitCapacity * dailyVisitMin < dailyVisits[placeName] * averageVisitMin:
                lat, lon = placeAddressTracker.getAddress(placeName, force=True)
                place = Place(len(places[placeName]), placeName, lat, lon)
                places[placeName].append(place)
                dailyVisitCapacity = dailyVisitCapacity + place.visitCapacity
                # The work slots created as a side effect count against the
                # respective remaining-workers budget.
                if placeName == 'school':
                    dailyWorkSchoolRemaining = dailyWorkSchoolRemaining - place.workCapacity
                else:
                    dailyWorkOfficeRemaining = dailyWorkOfficeRemaining - place.workCapacity
        # Any still-unplaced school workers get extra schools ...
        while dailyWorkSchoolRemaining > 0:
            lat, lon = placeAddressTracker.getAddress('school', force=True)
            place = Place(len(places["school"]), "school", lat, lon)
            places["school"].append(place)
            dailyWorkSchoolRemaining = dailyWorkSchoolRemaining - place.workCapacity
        # ... and all other unplaced workers get extra offices.
        while dailyWorkOfficeRemaining > 0:
            lat, lon = placeAddressTracker.getAddress('office', force=True)
            place = Place(len(places["office"]), "office", lat, lon)
            places["office"].append(place)
            dailyWorkOfficeRemaining = dailyWorkOfficeRemaining - place.workCapacity
        WarningCounter.print()
        return places
@staticmethod
def generateHomes(numPeople, placeAddressTracker):
"""
Returns dictionary with one list of homes per type of homes. Takes a number of people.
Home types are tuples (a, c) where a is the number of adults and c the number of children in the home.
:param numPeople: Number of people.
:param placeAddressTracker: PlaceAddressTracker instance
:return: Dictionary with one list of homes per type of homes.
"""
WarningCounter.reset()
homeTypes = PlaceInfo.assignHomeTypes(numPeople)
homes = dict()
count = 0
for homeType, numHomes in homeTypes.items():
homes[homeType] = []
numMembers = homeType[0] + homeType[1]
for i in range(numHomes):
lat, lon = placeAddressTracker.getAddress('home', force=True)
homes[homeType].append(Place(count, 'home', lat, lon, workCapacity=numMembers))
count += 1
WarningCounter.print()
return homes
    @staticmethod
    def populateHomesDb(homes, cur, conn):
        """
        Creates a 'homes' table in the database and populates it with homes data
        """
        # Rebuild the table from scratch on every call.
        sql = '''DROP TABLE IF EXISTS homes;'''
        cur.execute(sql)
        sql = '''CREATE TABLE IF NOT EXISTS homes(
            name text,
            adults int,
            children int,
            people int,
            lat real,
            lon real,
            work_capacity int,
            visit_capacity int
        );'''
        cur.execute(sql)
        for homeType in homes:
            (adults, children) = homeType
            people = adults + children
            for home in homes[homeType]:
                # NOTE(review): values are interpolated directly into the
                # SQL string; acceptable for this generated data, but
                # parameterized queries would be safer if names could ever
                # contain quotes.
                sql = f'''
                INSERT INTO homes VALUES(
                    '{home.name}',
                    {adults},
                    {children},
                    {people},
                    {home.lat},
                    {home.lon},
                    {home.workCapacity},
                    {home.visitCapacity}
                );
                '''
                cur.execute(sql)
        conn.commit()
        return
@staticmethod
def populateWorkPlacesDb(workPlaces, cur, conn):
    """
    Create (or recreate) the 'workPlaces' table in the database and populate it.

    :param workPlaces: Dict mapping a place-type name to a list of Place objects.
    :param cur: Open DB-API cursor.
    :param conn: DB-API connection; committed once after all inserts.
    """
    cur.execute('''DROP TABLE IF EXISTS workPlaces;''')
    cur.execute('''CREATE TABLE IF NOT EXISTS workPlaces(
        name text,
        type text,
        lat real,
        lon real,
        work_capacity int,
        visit_capacity int
    );''')
    # Parameterized bulk insert: safe against SQL injection through place
    # names and faster than one string-formatted INSERT per row.
    rows = [
        (place.name, placeType, place.lat, place.lon,
         place.workCapacity, place.visitCapacity)
        for placeType, placeList in workPlaces.items()
        for place in placeList
    ]
    cur.executemany('INSERT INTO workPlaces VALUES(?, ?, ?, ?, ?, ?);', rows)
    conn.commit()
    return
def __init__(self, number, placeName, lat, lon, workCapacity=None, visitCapacity=None):
    """
    Create a place of the given type at (lat, lon).

    :param number: Running index; combined with placeName into a unique name.
    :param placeName: Place-type name (e.g. 'home', 'office', 'school').
    :param lat: Latitude of the place.
    :param lon: Longitude of the place.
    :param workCapacity: Number of work slots; drawn from PlaceInfo when None.
    :param visitCapacity: Number of visit slots; drawn from PlaceInfo when None.
    """
    self.name = f"{placeName}-{number}"
    self.placeName = placeName
    self.lat = lat
    self.lon = lon
    # Fall back to the PlaceInfo statistics for capacities not given explicitly.
    self.workCapacity = (PlaceInfo.assignWorkCapacity(placeName)
                         if workCapacity is None else workCapacity)
    self.visitCapacity = (PlaceInfo.assignVisitCapacity(placeName)
                          if visitCapacity is None else visitCapacity)
class PlaceCapacityTracker:
    """
    Tracks remaining work and visit capacities of places while people are
    assigned to them as workers or visitors.
    """

    def __init__(self):
        # place -> [remaining work slots, remaining visit minutes]
        self.places = dict()

    def _checkExists(self, place):
        # Lazily register a place the first time it is queried.
        if place not in self.places:
            dailyMinutes = place.visitCapacity * PlaceInfo.getDailyVisitMin(place.placeName)
            self.places[place] = [place.workCapacity, dailyMinutes]

    def remainingWorkCapacity(self, place):
        self._checkExists(place)
        return self.places[place][0]

    def remainingVisitCapacity(self, place):
        self._checkExists(place)
        return self.places[place][1]

    def hasWorkCapacity(self, place):
        return self.remainingWorkCapacity(place) > 0

    def hasVisitCapacity(self, place):
        return self.remainingVisitCapacity(place) > 0

    def hasVisitCapacityFor(self, place, requiredCapacity):
        # Capacity is tracked in visit-minutes: people * average visit length.
        needed = requiredCapacity * PlaceInfo.getAverageVisitMin(place.placeName)
        return self.remainingVisitCapacity(place) >= needed

    def useWorkCapacity(self, place):
        if not self.hasWorkCapacity(place):
            raise ValueError(f"Not enough work capacity in place {place.name}")
        self.places[place][0] -= 1

    def useVisitCapacity(self, place, requiredCapacity, force=False):
        # With force=True the capacity may go negative (overbooking allowed).
        if not force and not self.hasVisitCapacityFor(place, requiredCapacity):
            raise ValueError(f"Not enough visit capacity in place {place.name}")
        self.places[place][1] -= requiredCapacity * PlaceInfo.getAverageVisitMin(place.placeName)
class PlaceAddressTracker:
    """
    Helps assigning addresses to different types of places
    """

    # Fallback order: when the address pool for a place type is exhausted,
    # getAddress(force=True) tries these pools in sequence.
    order = {
        'home': ['home', 'other'],
        'school': ['school', 'store', 'other'],
        'office': ['office', 'store', 'other'],
        'sport': ['sport', 'school', 'other'],
        'super': ['store', 'other'],
        'store': ['store', 'other'],
        'restaurant': ['restaurant', 'other'],
    }

    # Fixed bounding box used to fabricate coordinates once every pool is empty.
    gps = {
        'minLat': 49.423043,
        'maxLat': 49.454434,
        'minLon': 7.726833,
        'maxLon': 7.808753,
    }

    @staticmethod
    def fakeGpsForPlace():
        # Uniformly sample a (lat, lon) pair inside the bounding box above.
        latRange = PlaceAddressTracker.gps['maxLat'] - PlaceAddressTracker.gps['minLat']
        lonRange = PlaceAddressTracker.gps['maxLon'] - PlaceAddressTracker.gps['minLon']
        lat = (random.random() * latRange) + PlaceAddressTracker.gps['minLat']
        lon = (random.random() * lonRange) + PlaceAddressTracker.gps['minLon']
        return lat, lon

    @staticmethod
    def loadAddresses(pathToAddresses):
        # Build a tracker from a JSON file mapping place type -> list of
        # address dicts (each with at least 'lat' and 'lon' keys).
        with open(pathToAddresses) as f:
            return PlaceAddressTracker(json.load(f))

    def __init__(self, addresses):
        # _statistics[placeName] = [distinct addresses, total after expansion]
        self._statistics = {pn: [len(lst), len(lst)] for pn, lst in addresses.items()}
        self._order = PlaceAddressTracker.order.copy()
        if 'home' in addresses:
            # Duplicate each home address once per apartment it contains
            # (apartment count drawn from PlaceInfo statistics).
            newHomes = []
            for home in addresses['home']:
                for _ in range(PlaceInfo.assignNumApt()):
                    newHomes.append(home)
            addresses['home'] = newHomes
            self._statistics['home'][1] = len(newHomes)
        if 'store' in addresses:
            # Split the generic 'store' pool into per-store-type pools.
            for pn, lst in self._splitStores(addresses['store']).items():
                addresses[pn] = lst
                self._statistics[pn] = [len(lst), len(lst)]
        # Shuffle each pool so addresses are handed out in random order.
        for pn, lst in addresses.items():
            random.shuffle(lst)
        self._addresses = addresses

    def _splitStores(self, stores):
        # Re-bucket store addresses by their 'shop'/'amenity' tags when these
        # match a known store type; unrecognized stores stay in the generic
        # 'store' bucket. A store carrying several known tags appears in
        # several buckets.
        newStores = dict()
        for store in stores:
            labels = list()
            if 'shop' in store:
                if store['shop'] in PlaceInfo.storeTypes:
                    labels.append(store['shop'])
            if 'amenity' in store:
                if store['amenity'] in PlaceInfo.storeTypes:
                    labels.append(store['amenity'])
            if not labels:
                labels.append('store')
            for label in labels:
                newLabel = 'store_' + label
                if label == 'store':
                    newLabel = 'store'
                if newLabel not in newStores:
                    newStores[newLabel] = list()
                if newLabel not in self._order:
                    # New store types fall back to generic stores, then 'other'.
                    self._order[newLabel] = [newLabel, 'store', 'other']
                newStores[newLabel].append(store)
        return newStores

    def hasAddress(self, placeName):
        # True iff the pool for placeName exists and is non-empty.
        if placeName not in self._addresses:
            return False
        if self._addresses[placeName]:
            return True
        return False

    def getAddress(self, placeName, force=False):
        # Pop the next address of the requested type. With force=True, fall
        # back along self._order and finally to a fabricated coordinate;
        # without force, raise when the pool is empty.
        if self.hasAddress(placeName):
            place = self._addresses[placeName].pop(0)
            return float(place['lat']), float(place['lon'])
        if not force:
            raise ValueError("No address available.")
        if placeName in self._order:
            for pn in self._order[placeName]:
                if self.hasAddress(pn):
                    place = self._addresses[pn].pop(0)
                    return float(place['lat']), float(place['lon'])
        WarningCounter.count(f"Failed to get address for place type {placeName}. Returned fake address.")
        return PlaceAddressTracker.fakeGpsForPlace()

    def printStats(self):
        # Report, per place type, how many addresses were consumed.
        print(f"| ADDRESS Statistics")
        for pn, lst in self._addresses.items():
            print(f"| {pn}: {self._statistics[pn][1] - len(lst)} "
                  f"({(self._statistics[pn][1] - len(lst)) / self._statistics[pn][1] * 100:.1f}%) used of "
                  f"{self._statistics[pn][1]} (originally, {self._statistics[pn][0]} distinct).")
if __name__ == '__main__':
    # Smoke test: print the known place names and sample visit frequencies.
    print(PlaceInfo.getPlaceNames())
    print("assignVisitFreq():")
    print(f" office: {PlaceInfo.assignVisitFreq('office')}")
    print(f" school: {PlaceInfo.assignVisitFreq('school')}")
|
[
"paul@francis.com"
] |
paul@francis.com
|
93d2a93c3766b10060f1163b0518cd03a037d4de
|
e2468c60810764971f2dae2b959650b553042810
|
/1859_sortingTheSentence.py
|
e35fc60b5dd3422f73069456f2c324e9ddef7fc4
|
[] |
no_license
|
awesome-liuxiao/leetcodesolution
|
9a01b6f36266149ae7fe00625785d1ada41f190a
|
3637cd1347b5153daeeb855ebc44cfea5649fc90
|
refs/heads/master
| 2023-06-08T13:42:14.653688
| 2023-06-01T08:39:35
| 2023-06-01T08:39:35
| 213,380,224
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 340
|
py
|
class Solution:
    def sortSentence(self, s: str) -> str:
        """Reorder a shuffled sentence whose words each end in a 1-based
        position digit, returning the sentence with digits removed."""
        tokens = s.split()
        ordered = [""] * len(tokens)
        for token in tokens:
            slot = int(token[-1]) - 1  # trailing digit is the 1-based slot
            ordered[slot] = token[:-1]  # word without its position digit
        return " ".join(ordered)
# Quick manual check against the two LeetCode examples.
X = Solution()
for sentence in ("is2 sentence4 This1 a3", "Myself2 Me1 I4 and3"):
    print(X.sortSentence(sentence))
|
[
"lio4072@hotmail.com"
] |
lio4072@hotmail.com
|
43d0b28b0b29cb0bb324df2d02c8001c4efe022f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_sting.py
|
bafade4da0ff73720d1509ad0c570e87d50fe446
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
# class header
class _STING():
    """Lexical entry for the verb 'sting' carrying its dictionary senses
    and taxonomy slots; run() returns the (currently empty) JSON payload."""

    def __init__(self):
        self.name = "STING"
        self.definitions = [u'If an insect, plant, or animal stings, it produces a small but painful injury, usually with a poison, by brushing against the skin or making a very small hole in the skin: ', u'to cause sharp but usually temporary pain: ', u"If someone's unkind remarks sting, they make you feel upset and annoyed: ", u'to charge someone a surprisingly large amount of money for something: ']
        self.parents = []
        # NOTE(review): 'childen' looks like a typo for 'children', but other
        # generated modules may read this attribute name — not renamed.
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.specie = 'verbs'

    def run(self, obj1=None, obj2=None):
        """Return the entry's JSON payload; obj1/obj2 are currently unused."""
        # Mutable-default-argument fix: None sentinels instead of shared [].
        if obj1 is None:
            obj1 = []
        if obj2 is None:
            obj2 = []
        return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
3f47139f79628be8532328c2add281c440ca5961
|
7f03e2dab9450d44ef82aa4a671705539d0923c9
|
/自动化工具/__init__.py
|
cc591a1ebeb27b4f0c741b550ba0545314ab3f74
|
[] |
no_license
|
A-fish-in-Lake-Baikal/python-exercise
|
61fc62549e6d1db1056f80e4d560cd56cd55af54
|
f4b8da074cd61e1579740163d93dc909da5780ac
|
refs/heads/master
| 2023-06-24T15:06:13.036918
| 2023-06-09T03:06:55
| 2023-06-09T03:06:55
| 132,322,783
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 89
|
py
|
# -- coding: utf-8 --
# @time :
# @author : 马维畅
# @file : .py
# @software: pycharm
|
[
"1627967707@qq.com"
] |
1627967707@qq.com
|
c6dcaaa1d65c21d6af0c71bf369ddeb0b511691a
|
0c5139b940c94b17cb60202ba84017c001208a08
|
/mysite/polls/admin.py
|
78baf719c3e0f7f26118abbe4690636368d8c916
|
[] |
no_license
|
joshuafierro/poll-app
|
8c4a6591a7b94e5f90ec35c7177dd3ef920e5734
|
1f27419584e13f80a9cfcc79f8a3e0a803f7e5c1
|
refs/heads/master
| 2020-04-09T03:25:55.054339
| 2018-12-03T02:01:03
| 2018-12-03T02:01:03
| 159,982,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 644
|
py
|
from django.contrib import admin
from .models import Question, Choice
# Register your models here.
class ChoiceInline(admin.TabularInline):
    # Inline editor so Choices can be edited directly on the Question page.
    model = Choice
    extra = 3  # number of empty choice forms shown by default
class QuestionAdmin(admin.ModelAdmin):
    """Admin configuration for Question: grouped edit fields, inline
    choices, changelist columns, date filter and text search."""
    fieldsets = [
        (None,
         {'fields': ['question_text']}),
        ('Date information', {'fields': ['pub_date']}),
    ]
    inlines = [ChoiceInline]
    # Columns shown on the changelist page.
    list_display = ('question_text', 'pub_date', 'was_published_recently')
    list_filter = ['pub_date']
    search_fields = ['question_text']
# Register both models; Question uses the customized admin class above.
admin.site.register(Question, QuestionAdmin)
admin.site.register(Choice)
|
[
"josh.fierrosga@gmail.com"
] |
josh.fierrosga@gmail.com
|
b39c21cee150b4a79bb52e15acbc4d34f7fd77a1
|
60509c5e1956cb64d0220e9fc4542db7ef18bbf9
|
/data_processor/tasks.py
|
57aaa7dceaa00bf6eb7e982b81a54c0837862e4d
|
[] |
no_license
|
Nemesisesq/streamsavvy_dataprocessing
|
18989378c8aa6c5a250449e74d93d64303874092
|
6c814818c0bac27c99ad4a5c8799648a5d9e637a
|
refs/heads/master
| 2021-06-15T02:38:32.982171
| 2017-01-17T16:38:49
| 2017-01-17T16:38:49
| 61,290,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,120
|
py
|
import requests
import scrapy
from celery.schedules import crontab
from celery.task import periodic_task
from celery.task import task
from celery.utils.log import get_task_logger
from datetime import datetime, time
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from college_football_scraper.spiders.college_football_spider import CollegeFootballSpider
from college_football_scraper.spiders.example import ExampleSpider
from college_football_scraper.spiders.pro_football_spider import ProFootballSpider
#
logger = get_task_logger(__name__)
#
#
# @task
# def scraper_example(a, b):
# print(a + b)
#
#
# # A periodic task that will run every minute (the symbol "*" means every)
# @periodic_task(serializer='json', run_every=(crontab(hour="*", minute="*", day_of_week="*")), name='helloworld',
# ignore_result=True)
# def scraper_runner():
# print('hello world')
# r = requests.get('http://localhost:8081')
# print(r.text )
# logger.info("Start task")
# now = datetime.now()
# result = scraper_example(now.day, now.minute)
# logger.info("Task finished: result = %i" % result)
#
#
# @periodic_task(serializer='json',
# run_every=(crontab(minute="0", hour="10", day_of_week="2,3,4", month_of_year="1, 2, 8, 9, 10, 11, 12")),
# name=' college football scraper', ignore_results=True)
# def ncaaf_scraper():
# # print('college football scraper is runninh' + time.strftime("%c"))
# process = CrawlerProcess(get_project_settings())
# process.crawl(CollegeFootballSpider)
# process.start()
#
# return
#
#
# #
#
# @periodic_task(serializer='json',
# run_every=(crontab(minute="0", hour="10", day_of_week="3,4,5", month_of_year="1, 2, 8, 9, 10, 11, 12")),
# name='NFL scraper', ignore_results=True)
# def nfl_scraper():
# # logger('pro football scraper start' + time.strftime("%c"))
# process = CrawlerProcess(get_project_settings())
# process.crawl(ProFootballSpider)
# process.start()
#
#
# def example_scraper_test():
# print('')
|
[
"nemesisesq@gmail.com"
] |
nemesisesq@gmail.com
|
6b0c2baf8952bfe46d3c9ac541be5644748044b9
|
e6a48a7d5ee2df232355f5d5488fa1cd3c53ce89
|
/tests.py
|
7f298b580cf0a2453c734408872a2479a954b2cd
|
[] |
no_license
|
charleycodes/testing-py
|
f1e07cb678d52e26cd1cdb6bc34dcf7a3c2b331f
|
963a50d0074083cf02a253ef77cef46db5c7ff7a
|
refs/heads/master
| 2021-06-05T11:10:35.950108
| 2016-10-14T19:57:53
| 2016-10-14T19:57:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,406
|
py
|
import unittest
import party
class PartyTests(unittest.TestCase):
    """Tests for my party site."""

    def setUp(self):
        # Flask test client for the party app; TESTING improves error output.
        self.client = party.app.test_client()
        party.app.config['TESTING'] = True

    def test_homepage(self):
        # The homepage must render the invitation text.
        result = self.client.get("/")
        self.assertIn("I'm having a party", result.data)

    def test_no_rsvp_yet(self):
        # Before RSVPing, the details are hidden and the RSVP form is shown.
        result = self.client.get("/")
        self.assertNotIn('<h2>Party Details</h2>', result.data)
        self.assertIn('<h2>Please RSVP</h2>', result.data)

    def test_rsvp(self):
        # A regular guest's RSVP reveals the party details.
        result = self.client.post("/rsvp",
                                  data={'name': "Jane",
                                        'email': "jane@jane.com"},
                                  follow_redirects=True)
        self.assertIn('<h2>Party Details</h2>', result.data)
        self.assertNotIn('<h2>Please RSVP</h2>', result.data)

    def test_rsvp_mel(self):
        # Mel's RSVP is expected to be rejected: details must stay hidden.
        result = self.client.post("/rsvp",
                                  data={'name': "Mel Melitpolski",
                                        'email': "mel@ubermelon.com"},
                                  follow_redirects=True)
        self.assertNotIn('<h2>Party Details</h2>', result.data)
        self.assertIn('<h2>Please RSVP</h2>', result.data)
if __name__ == "__main__":
    unittest.main()  # run the suite when invoked directly
|
[
"no-reply@hackbrightacademy.com"
] |
no-reply@hackbrightacademy.com
|
fc9a67b56288b8d36e5bd0d08e89b03d60e15396
|
17665a2daa763d499599d218492953a1cd9e6aad
|
/Honors Thesis Research/Code/Old Versions/data_redux_v2/spectrum.py~
|
404e55285dd359e5b7cb7b35d00618f31fdc0d7f
|
[] |
no_license
|
rfasullo15/Stellar-Classification
|
332267bdc1aa69244f832b4b579a135c133b51be
|
6f0dbb802ac0be6823b3f28f3d14cd5e0841220d
|
refs/heads/master
| 2022-09-14T13:51:36.383409
| 2019-05-27T15:23:10
| 2019-05-27T15:23:10
| 188,866,566
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,246
|
'''
The purpose of a spectrum object is to maintain the calculated values for a given spectrum.
History:
6/25/2018: Class created
'''
import numpy as np
import copy
class Spectrum:
    """Holds a spectrum's samples and the peak indices found in them.

    NOTE(review): findpeaks hard-codes 2048 samples and pickpeak compares
    with `self.waves == value`, so `waves` is presumably a length-2048
    numpy array — confirm with the caller.
    """

    def __init__(self, waves, name):
        # waves: sample values of the spectrum; name: identifier string.
        self.waves = waves
        self.name = name
        self.isSolved = False
        self.values = [0,0,0]
        # Window length used to detect monotonically descending runs.
        self.grpsize = 8
        self.peaks = self.findpeaks()
        # Keep a pristine copy so resetpeaks() can undo manual edits.
        self.origpeaks = copy.deepcopy(self.peaks)

    def findpeaks(self):
        """Scan the 2048 samples in grpsize windows; whenever a window is
        monotonically descending, follow the slope down to a local minimum
        and record it, then refine the candidates with pickpeak."""
        start = 0
        finish = start + self.grpsize
        peakindex = []
        pstarts = []
        while finish < 2048:
            section = np.array(self.waves[start:finish])
            # A window equal to its own descending sort is strictly non-increasing.
            downcheck = -np.sort(-section)
            if np.array_equal(section, downcheck):
                pstarts.append(section[0])
                # Descend from the window's end to the local minimum.
                val, index = self.recurse_search(finish)
                peakindex.append((val,index))
                finish = index
            # Advance to the next window (resuming after any detected minimum).
            start = finish
            finish = start + self.grpsize
        # Deduplicate (value, index) candidates.
        peakindex = list(set(peakindex))
        #print(pstarts)
        wantedindeces = [elem[1] for elem in peakindex]
        peaks = self.pickpeak(wantedindeces)
        return peaks

    def resetpeaks(self):
        # Restore the automatically detected peaks, discarding manual edits.
        self.peaks = copy.deepcopy(self.origpeaks)

    def removepeak(self, pek):
        # Remove the refined peak corresponding to the given raw index.
        self.peaks.remove(self.pickpeak([pek])[0])
        print(self.peaks)

    def addpeak(self, pek):
        # NOTE(review): only prints the refined peak; it is never appended
        # to self.peaks — confirm whether this is intentional.
        print(self.pickpeak([pek]))

    def pickpeak(self, wantedindeces):
        """For each candidate index, pick the index of the smallest sample
        within +/- buffer positions, resolving duplicate values by locating
        the occurrence inside the window."""
        peaks = []
        buffer = 4
        for index in wantedindeces:
            templist = self.waves[index-buffer : index+buffer]
            templist = np.sort(templist)
            if len(np.nonzero(self.waves == templist[0])[0]) == 1:
                # Unique minimum value: take its global index directly.
                peaks.append(np.nonzero(self.waves == templist[0])[0][0])
            else:
                # Duplicate values: keep only occurrences inside the window.
                multiples = np.nonzero(self.waves == templist[0])
                for idx in multiples[0]:
                    if idx < index+buffer and idx > index-buffer:
                        peaks.append(idx)
        return peaks

    def recurse_search(self, index):
        """Fall down the slope from `index`; if the following grpsize samples
        drop below the found value, recurse from there. Returns (value, index)
        of the local minimum."""
        val, idx = self.fall_down(index)
        tempsect = np.array(self.waves[idx:(idx+self.grpsize)])
        count = 0
        tempidx = idx
        while count<tempsect.__len__():
            if tempsect[count] < val:
                # A lower sample follows: keep descending from there.
                val,idx = self.recurse_search(tempidx)
                break
            count +=1
            tempidx +=1
        return (val, idx)

    def fall_down(self, start):
        # Walk forward while samples strictly decrease; return the first
        # sample that is not above its successor and its index.
        curr = self.waves[start]
        next = self.waves[start+1]
        count = start+1
        while curr>next:
            curr = next
            count+=1
            next = self.waves[count]
        return (curr, count)
|
[
"90rfasullo@gmail.com"
] |
90rfasullo@gmail.com
|
|
0302d39e78724531b2f09e38788aa5f669609958
|
82ca891008793f570668a7f2c760ae0f22d40494
|
/src/VAMPzero/Component/Wing/Aileron/Geometry/tipYLocation.py
|
078b55d940fb68c2cb44be5e6bcef8800cd0f839
|
[
"Apache-2.0"
] |
permissive
|
p-chambers/VAMPzero
|
22f20415e83140496b1c5702b6acbb76a5b7bf52
|
4b11d059b1c7a963ec7e7962fa12681825bc2f93
|
refs/heads/master
| 2021-01-19T10:49:06.393888
| 2015-06-24T10:33:41
| 2015-06-24T10:33:41
| 82,208,448
| 1
| 0
| null | 2017-02-16T17:42:55
| 2017-02-16T17:42:55
| null |
UTF-8
|
Python
| false
| false
| 2,598
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Copyright: Deutsches Zentrum fuer Luft- und Raumfahrt e.V., 2015 (c)
Contact: daniel.boehnke@dlr.de and jonas.jepsen@dlr.de
'''
from cmath import pi
from VAMPzero.Handler.Parameter import parameter
rad = pi / 180.
class tipYLocation(parameter):
    '''
    Calculates the spanwise tip location of the aileron, measured from the fuselage center line
    (note: only one aileron is considered).

    :Unit: [m]
    :Source:
    :Author: Lisanne van Veen
    '''

    def __init__(self, value=0., unit='m', parent='', cpacsPath=''):
        # Standard VAMPzero parameter initialization; the class docstring
        # doubles as the parameter's documentation.
        super(tipYLocation, self).__init__(value=value, unit=unit, doc=self.__doc__, status='init', parent=parent,
                                           cpacsPath=cpacsPath)

    def calc(self):
        '''
        The function is a statistical relation obtained by analyzing data of large passenger aircraft.

        The design space of this equation is:

        * refAreaWing 72.72 - 845 m2
        * spanWing 26 - 79.80 m

        :Source:
        '''
        refAreaWing = self.parent.wing.refArea.getValue()
        spanWing = self.parent.wing.span.getValue()

        # Regression fit over the design space above; coefficients come from
        # the statistical analysis referenced in the docstring.
        tipYLocationAileron = - 2.103872236 + 0.5286847608 * spanWing + 0.00004371791524 * (refAreaWing ** 2) \
                              - 0.0007899727342 * spanWing * refAreaWing + 0.002586029039 * (spanWing ** 2)

        # if the spanwise tip location of the aileron is larger than half of the wing span
        # set the location of the spanwise tip location equal to 95% of the half wing span
        if tipYLocationAileron > (spanWing / 2.):
            tipYLocationAileron = (spanWing / 2.) * 0.95

        return self.setValueCalc(tipYLocationAileron)
###################################################################################################
#EOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFE#
###################################################################################################
|
[
"daniel.boehnke@dlr.de"
] |
daniel.boehnke@dlr.de
|
6f3d7b1e2e7c737ef8c2cd6fd829fb2a589d34b0
|
8aa9ecfe421b196589b6c9fdc0e954d02d927feb
|
/examples/reference/models/multi_select_server.py
|
b34672b41c365a5a26a006ee7da904e5a58dc470
|
[
"BSD-3-Clause"
] |
permissive
|
hongyu9000/bokeh
|
b384484925c6c145e4eaf87460a3f776095e81ed
|
b19f2c5547024bdc288d02e73fdb65e65991df5f
|
refs/heads/master
| 2020-09-03T15:57:31.157443
| 2019-11-04T05:25:46
| 2019-11-04T05:25:46
| 219,503,733
| 1
| 0
|
BSD-3-Clause
| 2019-11-04T13:06:20
| 2019-11-04T13:06:19
| null |
UTF-8
|
Python
| false
| false
| 1,279
|
py
|
## Bokeh server for MultiSelect
import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import row
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import MultiSelect
from bokeh.plotting import figure

# Demo data: six points labelled either 'Red' or 'Orange'.
x=[3,4,6,12,10,1]
y=[7,1,3,4,1,6]
label=['Red', 'Orange', 'Red', 'Orange','Red', 'Orange']
df=pd.DataFrame({'x':x,'y':y,'label':label}) #create a dataframe for future use

# The plot reads from this source; the callback rewrites its contents.
source = ColumnDataSource(data=dict(x=x, y=y,label=label))

plot_figure = figure(title='Multi-Select',plot_height=450, plot_width=600,
                     tools="save,reset", toolbar_location="below")

plot_figure.scatter('x', 'y',color='label', source=source, size=10)

# Both options start selected, so every point is shown initially.
multi_select = MultiSelect(title="Filter Plot by color:", value=["Red", "Orange"],
                           options=[("Red", "Red"), ("Orange", "Orange")])

def multiselect_click(attr,old,new):
    # Filter the backing dataframe to the selected labels and push the
    # result into the ColumnDataSource, which redraws the scatter plot.
    active_mselect=multi_select.value ##Getting multi-select value
    selected_df=df[df['label'].isin(active_mselect)] #filter the dataframe with value in multi-select
    source.data=dict(x=selected_df.x, y=selected_df.y,label=selected_df.label)

multi_select.on_change('value',multiselect_click)

layout=row(multi_select, plot_figure)
curdoc().add_root(layout)
curdoc().title = "Multi-Select Bokeh Server"
|
[
"bryan@bokeh.org"
] |
bryan@bokeh.org
|
78a7139ed792f4ea8911bc6eaf9aa8468d422fd8
|
000a2564c839a25955faf9cec65c3290956b17b7
|
/pygeoapi/config.py
|
f9fe0d3efe67d372f8807f8d77e480a26a3fcbad
|
[
"MIT"
] |
permissive
|
emotional-cities/pygeoapi
|
6734064f891bb14343c004f04864e531fd0d4852
|
6701d7aaf750d3076907784ec26321b6cb55dc64
|
refs/heads/master
| 2023-04-16T02:34:37.229234
| 2022-03-04T11:45:56
| 2022-03-04T11:45:56
| 388,576,649
| 0
| 3
|
MIT
| 2022-03-31T15:12:40
| 2021-07-22T19:34:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,593
|
py
|
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2021 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import click
import json
from jsonschema import validate as jsonschema_validate
import logging
import os
from pygeoapi.util import to_json, yaml_load
LOGGER = logging.getLogger(__name__)
THISDIR = os.path.dirname(os.path.realpath(__file__))
def validate_config(instance_dict):
    """
    Validate pygeoapi configuration against pygeoapi schema

    :param instance_dict: dict of configuration

    :returns: `bool` of validation
    """

    schema_path = os.path.join(THISDIR, 'schemas', 'config',
                               'pygeoapi-config-0.x.yml')

    with open(schema_path) as fh2:
        schema_dict = yaml_load(fh2)
        # Round-trip through JSON so non-JSON-native values (dates etc.)
        # are normalized before schema validation; raises on failure.
        instance_json = json.loads(to_json(instance_dict))
        jsonschema_validate(instance_json, schema_dict)
        return True
@click.group()
def config():
    """Configuration management"""
    # Click command group; subcommands (e.g. 'validate') are attached below.
    pass
@click.command()
@click.pass_context
@click.option('--config', '-c', 'config_file', help='configuration file')
def validate(ctx, config_file):
    """Validate configuration"""
    # CLI wrapper around validate_config(); exits with a ClickException
    # when no configuration file was supplied.
    if config_file is None:
        raise click.ClickException('--config/-c required')

    with open(config_file) as ff:
        click.echo('Validating {}'.format(config_file))
        instance = yaml_load(ff)
        validate_config(instance)
        click.echo('Valid configuration')


# Register the subcommand on the 'config' group.
config.add_command(validate)
|
[
"noreply@github.com"
] |
noreply@github.com
|
33c67722dbb06879c8fb968c5865e09c1513e5c0
|
21fc3622bb7a3a89a8ed9dec932919936fb1ce36
|
/buildout-cache/eggs/plone.app.dexterity-2.1.20-py2.7.egg/plone/app/dexterity/browser/types.py
|
deb8a418a6eb1ee8ec737110deb355f2e9dceb54
|
[] |
no_license
|
erlantostes/plone
|
4bc1ccba9e0ab77ce5370489f6b47b806c889c29
|
3a5fb7574cee269a99b148eef695256805ce1a45
|
refs/heads/master
| 2020-04-01T18:04:32.927641
| 2018-10-17T11:22:59
| 2018-10-17T11:22:59
| 153,469,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,233
|
py
|
# -*- coding: utf-8 -*-
from OFS.SimpleItem import SimpleItem
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile \
import ViewPageTemplateFile as FiveViewPageTemplateFile
from ZPublisher.BaseRequest import DefaultPublishTraverse
from plone.app.dexterity import _
from plone.app.dexterity.browser.utils import UTF8Property
from plone.app.dexterity.interfaces import ITypeSchemaContext
from plone.app.dexterity.interfaces import ITypesContext
from plone.app.dexterity.interfaces import ITypeSettings
from plone.app.dexterity.interfaces import ITypeStats
from plone.dexterity.interfaces import IDexterityFTI
from plone.dexterity.utils import getAdditionalSchemata
from plone.schemaeditor.browser.schema.traversal import SchemaContext
from plone.z3cform import layout
from plone.z3cform.crud import crud
from plone.z3cform.layout import FormWrapper
from z3c.form import button
from z3c.form import field
from zope.browserpage.viewpagetemplatefile import ViewPageTemplateFile
from zope.cachedescriptors.property import Lazy as lazy_property
from zope.component import adapter
from zope.component import ComponentLookupError
from zope.component import getAllUtilitiesRegisteredFor
from zope.component import getUtility
from zope.interface import implementer
from zope.publisher.interfaces.browser import IBrowserPublisher
import urllib
HELP = """<p>Content types show up on Plone's 'Add Item' menu and allow
you to store custom data in your site.</p>
<p>Click the "Add Content Type" button to begin creating
a new content type with its own fields.</p>"""
ALLOWED_FIELDS = [
u'plone.app.textfield.RichText',
u'plone.namedfile.field.NamedBlobImage',
u'plone.namedfile.field.NamedBlobFile',
u'plone.schema.email.Email',
u'z3c.relationfield.schema.RelationChoice',
u'z3c.relationfield.schema.RelationList',
u'zope.schema._bootstrapfields.Bool',
u'zope.schema._bootstrapfields.Int',
u'zope.schema._bootstrapfields.Password',
u'zope.schema._bootstrapfields.Text',
u'zope.schema._bootstrapfields.TextLine',
u'zope.schema._field.Choice',
u'zope.schema._field.Date',
u'zope.schema._field.Datetime',
u'zope.schema._field.Float',
u'zope.schema._field.Set',
u'zope.schema._field.URI',
]
class TypeEditSubForm(crud.EditSubForm):
    """ Content type edit subform. Just here to use a custom template.
    """
    # Custom row template for each entry in the types listing table.
    template = ViewPageTemplateFile('types_listing_row.pt')
class TypeEditForm(crud.EditForm):
    """Content type edit form.

    Just a normal CRUD form without the form title or edit button.
    """
    label = None
    editsubform_factory = TypeEditSubForm
    # Drop the standard 'edit' (apply) button; keep the inherited handlers.
    buttons = crud.EditForm.buttons.copy().omit('edit')
    handlers = crud.EditForm.handlers.copy()

    @button.buttonAndHandler(_(u'Clone'))
    def handleClone(self, action):
        # Redirect to the @@clone view of the single selected type;
        # complain when zero or more than one type is checked.
        selected = self.selected_items()
        if len(selected) > 1:
            self.status = _(u'Please select a single type to clone.')
        elif len(selected) == 1:
            id = selected[0][0]
            url = '%s/%s/@@clone' % (self.context.context.absolute_url(), id)
            self.request.response.redirect(url)
        else:
            self.status = _(u'Please select a type to clone.')

    @button.buttonAndHandler(_(u'Export Type Profiles'))
    def handleExport(self, action):
        # Redirect to @@types-export with the checked type ids as a
        # comma-separated, URL-quoted query parameter.
        selected = ",".join([items[0] for items in self.selected_items()])
        if len(selected) == 0:
            self.status = _(u'Please select types to export.')
        elif len(selected) > 0:
            url = '%s/@@types-export?selected=%s' % \
                (self.context.context.absolute_url(),
                 urllib.quote(selected))
            self.request.response.redirect(url)

    @button.buttonAndHandler(_(u'Export Schema Models'))
    def handleExportModels(self, action):
        # Same as handleExport, but exports the XML schema models instead
        # of GenericSetup type profiles.
        selected = ",".join([items[0] for items in self.selected_items()])
        if len(selected) == 0:
            self.status = _(u'Please select types to export.')
        elif len(selected) > 0:
            url = '%s/@@models-export?selected=%s' % \
                (self.context.context.absolute_url(),
                 urllib.quote(selected))
            self.request.response.redirect(url)
class TypesEditFormWrapper(FormWrapper):
    """ Render Plone frame around our form with little modifications """
    form = TypeEditForm
    # Zope 2 ("Five") page template providing the customized chrome.
    index = FiveViewPageTemplateFile("typesformwrapper.pt")
@adapter(IDexterityFTI)
@implementer(ITypeSettings)
class TypeSettingsAdapter(object):
    """Adapts a Dexterity FTI to the ITypeSettings form schema."""

    def __init__(self, context):
        self.context = context

    @property
    def id(self):
        return self.context.getId()

    # title/description are stored UTF-8 encoded on the FTI.
    title = UTF8Property('title')
    description = UTF8Property('description')

    @property
    def container(self):
        return self.context.container

    def _get_allowed_content_types(self):
        return set(self.context.allowed_content_types)

    def _set_allowed_content_types(self, value):
        if not value:
            value = ()
        self.context.allowed_content_types = tuple(value)
        # Selecting specific types implies the FTI must filter.
        if value:
            self.context.filter_content_types = True

    allowed_content_types = property(
        _get_allowed_content_types, _set_allowed_content_types)

    def _get_filter_content_types(self):
        # Map the FTI's (filter flag, allowed list) pair onto the form's
        # tri-state: 'all' (no filtering), 'none' (filter, nothing allowed),
        # 'some' (filter with an explicit list).
        value = self.context.filter_content_types
        if not value:
            return 'all'
        elif value and not self.allowed_content_types:
            return 'none'
        else:
            return 'some'

    def _set_filter_content_types(self, value):
        # Inverse of _get_filter_content_types; for 'some' the allowed list
        # is written separately via allowed_content_types.
        if value == 'none':
            self.context.filter_content_types = True
            self.context.allowed_content_types = ()
        elif value == 'all':
            self.context.filter_content_types = False
        elif value == 'some':
            self.context.filter_content_types = True

    filter_content_types = property(
        _get_filter_content_types, _set_filter_content_types)
@adapter(IDexterityFTI)
@implementer(ITypeStats)
class TypeStatsAdapter(object):
    """Adapts a Dexterity FTI to ITypeStats (usage statistics)."""

    def __init__(self, context):
        self.context = context

    @property
    def item_count(self):
        # Number of catalogued objects of this portal_type, read from the
        # portal_type index's per-value lengths.
        catalog = getToolByName(self.context, 'portal_catalog')
        lengths = dict(
            catalog.Indexes['portal_type'].uniqueValues(withLengths=True))
        return lengths.get(self.context.getId(), 0)
class TypesListing(crud.CrudForm):
    """ The combined content type edit + add forms.
    """

    @lazy_property
    def description(self):
        # Show the HELP blurb only when no custom types exist yet.
        if self.get_items():
            return _(u'The following custom content types are available for '
                     u'your site.')
        else:
            return _('help_addcontenttype_button',
                     default=HELP)

    template = ViewPageTemplateFile('types_listing.pt')
    # Columns in the listing: title, description, plus usage stats.
    view_schema = field.Fields(ITypeSettings).select('title', 'description')
    view_schema += field.Fields(ITypeStats)
    # Adding is handled by a separate modal, so the CRUD add form is disabled.
    addform_factory = crud.NullForm
    editform_factory = TypeEditForm

    def get_items(self):
        """Look up all Dexterity FTIs via the component registry.

        (These utilities are created via an IObjectCreated handler for the
        DexterityFTI class, configured in plone.dexterity.)
        """
        ftis = getAllUtilitiesRegisteredFor(IDexterityFTI)
        return [(fti.__name__, fti) for fti in ftis]

    # NOTE(review): tuple parameter unpacking in the signature is
    # Python 2-only syntax; this module cannot run on Python 3 as-is.
    def remove(self, (id, item)):
        """ Remove a content type.
        """
        ttool = getToolByName(self.context, 'portal_types')
        ttool.manage_delObjects([id])

    def link(self, item, field):
        """Generate links to the edit page for each type.

        (But only for types with schemata that can be edited through the web.)
        """
        # Only the title column is linked; other columns return None.
        if field == 'title':
            return '{0}/{1}'.format(
                self.context.absolute_url(),
                urllib.quote(item.__name__)
            )
# Create a form wrapper so the form gets layout; this wrapped view is the
# actual browser page registered for the types control panel.
TypesListingPage = layout.wrap_form(
    TypesListing, __wrapper_class=TypesEditFormWrapper,
    label=_(u'Dexterity Content Types'))
@implementer(ITypeSchemaContext)
class TypeSchemaContext(SchemaContext):
    """Schema-editor context wrapping one Dexterity type's schema."""
    # The FTI is attached after construction (see TypesContext.publishTraverse).
    fti = None
    schemaName = u''
    schemaEditorView = 'fields'
    # Restrict the schema editor to this whitelist of field types.
    allowedFields = ALLOWED_FIELDS

    def browserDefault(self, request):
        # Default view when traversal stops here.
        return self, ('@@overview',)

    @property
    def additionalSchemata(self):
        # Behaviors' schemata, shown alongside the type's main schema.
        return getAdditionalSchemata(portal_type=self.fti.getId())
# IBrowserPublisher tells the Zope 2 traverser to pay attention to the
# publishTraverse and browserDefault methods.
@implementer(ITypesContext, IBrowserPublisher)
class TypesContext(SimpleItem):
    """This class represents the types configlet.
    It allows us to traverse through it to (a wrapper of) the schema
    of a particular type.
    """
    def __init__(self, context, request):
        super(TypesContext, self).__init__(context, request)
        # make sure that breadcrumbs will be correct
        self.id = None
        self.Title = lambda: _(u'Dexterity Content Types')
        # turn off green edit border for anything in the type control panel
        request.set('disable_border', 1)
    def publishTraverse(self, request, name):
        """Traverse to a schema context.
        1. Try to find a content type whose name matches the next URL path
        element.
        2. Look up its schema.
        3. Return a schema context (an acquisition-aware wrapper of the
        schema).
        """
        try:
            fti = getUtility(IDexterityFTI, name=name)
        except ComponentLookupError:
            # Not a type name: fall back to normal view/attribute traversal.
            return DefaultPublishTraverse(self, request).publishTraverse(
                request,
                name
            )
        schema = fti.lookupSchema()
        # __of__ keeps the schema context inside the acquisition chain.
        schema_context = TypeSchemaContext(
            schema, request, name=name, title=fti.title).__of__(self)
        schema_context.fti = fti
        schema_context.schemaName = u''
        return schema_context
    def browserDefault(self, request):
        """Show the 'edit' view by default.
        If we aren't traversing to a schema beneath the types configlet,
        we actually want to see the TypesListingPage.
        """
        return self, ('@@edit',)
|
[
"root@REIT016437.ifb.local"
] |
root@REIT016437.ifb.local
|
5343d009bb6e6979cd372eac83f506835b25c72f
|
e1ef77ee96f043c2f1a3c97f6b6b3364de00f2e3
|
/simvars.py
|
13da92dc9a92974e0449390d526b61c783cac8d6
|
[] |
no_license
|
awelch17/gsw-sim
|
defaaaf31f332b65cc3f374f482eb7ff82a2987a
|
d85210e6ad6651ba878fbe954873b73361564ac5
|
refs/heads/master
| 2022-04-07T21:20:30.731991
| 2020-02-24T22:47:06
| 2020-02-24T22:47:06
| 242,860,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35
|
py
|
# Simulation canvas dimensions — presumably pixels; confirm against the
# rendering code that consumes these.
WIDTH = 300
HEIGHT = 300
# NOTE(review): looks like a zoom/cell-size factor — verify against caller.
SCALE = 5
|
[
"awelch17@geffenacademy.ucla.edu"
] |
awelch17@geffenacademy.ucla.edu
|
83246efd64e69461197ca4f0db477e3fc3ab28b5
|
823891eb0faab76c4276c0301c8e2271a94cdedc
|
/blog/migrations/0001_initial.py
|
78aa5eefc032c5fb135df89b91369985ba1d4de7
|
[] |
no_license
|
jrcl/my-first-blog
|
49846c319cf93aa5a9e51c56b649a14cb420c5d4
|
5049e3e2202fb5ced3971e38e1c0140ea206620a
|
refs/heads/master
| 2021-01-19T05:00:53.955329
| 2016-07-30T17:07:00
| 2016-07-30T17:07:00
| 64,555,708
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-30 15:57
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the blog ``Post`` model.
    initial = True
    dependencies = [
        # Depends on whichever user model the project has configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                # Deleting the author cascades to their posts.
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"jrcl_@live.com"
] |
jrcl_@live.com
|
5314fbf518948b0492c4a997c02f31f89b594a84
|
010a2317be0f4c278c7d9efe3b2881135ceacc6b
|
/DurgaSir/td_module_Math2.py
|
7a2015d2479b198bb5b227f40de3716347f9e86e
|
[] |
no_license
|
rkandekar/python
|
34a774343236a5771557216223831cc7470161b7
|
ed459a89611f752c158cfbe7f59408ec533b2533
|
refs/heads/master
| 2020-03-23T02:56:13.413127
| 2018-07-15T06:06:20
| 2018-07-15T06:06:20
| 141,000,497
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 53
|
py
|
# Import only the names actually used; ``from math import *`` pollutes the
# module namespace and can silently shadow other names.
from math import ceil, sqrt

print(sqrt(4))     # 2.0
print(ceil(10.2))  # 11
|
[
"noreply@github.com"
] |
noreply@github.com
|
eaf9c474202dbe579718661a3a2f69fec186fff9
|
4e362dc4df6287682a464e8e37f4f87a59136e4c
|
/web/controllers/home.py
|
dcc6a061c82831138d4cf9291a75944b079274a2
|
[] |
no_license
|
StrokaLab/JAnaP
|
5279f4a25e889890a1aea544d7c8c390cdbcacda
|
7624b4931fc1e545560e95d18b15017c244dab92
|
refs/heads/master
| 2021-08-24T13:54:18.164971
| 2021-06-24T18:43:30
| 2021-06-24T18:43:30
| 172,580,016
| 0
| 2
| null | 2021-06-24T18:43:31
| 2019-02-25T20:31:55
|
Python
|
UTF-8
|
Python
| false
| false
| 194
|
py
|
from controllers import app
from controllers import configuration
from flask import render_template
from flask import redirect
@app.route("/")
def getHome():
    """Root URL: redirect the visitor to the projects listing."""
    return redirect("/projects")
|
[
"kelsey.gray0@gmail.com"
] |
kelsey.gray0@gmail.com
|
205439b9d4515ba300cfde06099f3161d70b93d5
|
6595a2006c84aa78eea5582eae766190429d303f
|
/feed_bot/main.py
|
7bad07edfb5e18dde8abbe0f40a9b989725774ff
|
[
"MIT"
] |
permissive
|
delimbetov/telegram_filtered_feed
|
40815053520d61b61bbbcd73498c12bd3773985c
|
9e1fa9376d08a8dc32b15f437f289d1967b5b305
|
refs/heads/main
| 2023-04-09T02:59:07.825779
| 2021-03-29T22:17:38
| 2021-03-29T22:17:38
| 352,798,910
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,346
|
py
|
from common.logging import configure_logging
from common.resources.localization import load_localizations
from bot import Bot, BotConfig, PersistenceConfig
from common.persistent_storage.factory import PostgresConfig, PersistentStorageType
import config
import sys
def main():
    """Entry point: parse CLI arguments, build configs and run the feed bot.

    Positional arguments (after the program name): api id, api hash,
    bot token, dev key, resolver username, then one or more forwarder
    user ids.  Raises RuntimeError when fewer than six are supplied.
    """
    # Configure logging
    configure_logging(name="feed_bot")
    # Parse command line args
    # 0 - prog name
    # 1 - api id
    # 2 - api hash
    # 3 - bot token
    # 4 - dev key
    # 5 - resolver username
    # >=6 - forwarders user ids
    if len(sys.argv) < 7:
        raise RuntimeError("App id, app hash, token, dev key, resolver username and forwarders user ids "
                           "are required to be passed as command line argument")
    api_id = int(sys.argv[1])
    api_hash = sys.argv[2]
    token = sys.argv[3]
    dev_key = sys.argv[4]
    # accepting single resolver arg for now, but it should be easy to scale in the future because everything else
    # is prepared for multi resolvers
    resolver_usernames = [sys.argv[5]]
    # Set comprehension also deduplicates repeated forwarder ids.
    forwarders_user_ids = {int(arg) for arg in sys.argv[6:]}
    # Load localizations
    load_localizations()
    # Load configs
    postgres_config = PostgresConfig(
        database=config.db_name,
        user=config.db_user,
        password=config.db_password,
        host=config.db_host,
        port=config.db_port)
    # Choose Postgres- or pickle-backed persistence based on config.
    persistence_type = \
        PersistentStorageType.Postgres if config.persistence_use_postgres else PersistentStorageType.Pickle
    persistence_config = PersistenceConfig(
        persistence_type=persistence_type,
        postgres_config=postgres_config)
    bot_config = BotConfig(
        api_id=api_id,
        api_hash=api_hash,
        token=token,
        dev_key=dev_key,
        resolver_usernames=resolver_usernames,
        forwarders_user_ids=forwarders_user_ids,
        resolve_max_wait_count=config.resolve_max_wait_count,
        resolve_timeout_seconds=config.resolve_timeout_seconds,
        resolve_warning_wait_number=config.resolve_warning_wait_number,
        forward_max_wait_count=config.forward_max_wait_count,
        forward_timeout_seconds=config.forward_timeout_seconds,
        persistence_config=persistence_config)
    # Create bot obj
    bot = Bot(config=bot_config)
    # Run the bot (blocks until shutdown)
    bot.run()
# Script entry point.
if __name__ == '__main__':
    main()
|
[
"1starfall1@gmail.com"
] |
1starfall1@gmail.com
|
eadf8693de509f3c4e64a1d6cf870b69b27696a3
|
2aa32777e9991afc371fcee1883d1d4411fe2db6
|
/binance-tutorials-master/coinview/backtest.py
|
34a7e7d54fca1fef7c03b57d90f1271aa6ba153a
|
[] |
no_license
|
webclinic017/megalodong
|
88b2714549a427087c375d807dc98e5506b0e162
|
c9b8c0b52ca0e3c94818aaf3ab117c9abcc8d03b
|
refs/heads/main
| 2023-03-31T23:17:16.171794
| 2021-04-06T15:38:55
| 2021-04-06T15:38:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,673
|
py
|
import backtrader as bt
import btalib
from talib import abstract
import pandas as pd
# import datetime
class RSIStrategy(bt.Strategy):
    """Simple RSI mean-reversion strategy: buy oversold, close overbought."""
    def __init__(self):
        # self.sma = btalib.sma(data.Close)
        # NOTE(review): references the module-level ``data`` feed rather
        # than ``self.data``; works only because ``data`` is a global in
        # this script — confirm this is intentional.
        self.rsi = bt.talib.RSI(data)
    def next(self):
        # Enter a small long when RSI drops below 30 and no position is open.
        if self.rsi < 30 and not self.position:
            self.buy(size=0.01)
        # Exit when RSI rises above 70 while holding a position.
        if self.rsi > 70 and self.position:
            self.close()
cerebro = bt.Cerebro()
# fromdate = datetime.datetime.strptime('2020-07-01', '%Y-%m-%d')
# todate = datetime.datetime.strptime('2020-07-12', '%Y-%m-%d')
# Load 5-minute candles from CSV.  dtformat=2 presumably means epoch
# timestamps — confirm against the backtrader GenericCSVData docs.
data = bt.feeds.GenericCSVData(dataname='2021_Jan_5m.csv', dtformat=2, compression=5, timeframe=bt.TimeFrame.Minutes)
# data = pd.read_csv("2021_Jan_5m.csv",
#                    parse_dates=True,
#                    sep=",",
#                    names=[
#                        "Opentime",
#                        "Open",
#                        "High",
#                        "Low",
#                        "Close",
#                        'Volume',
#                        "Closetime",
#                        "Assetvol",
#                        "Numberoftrades",
#                        "Takerbuybase",
#                        "Takerbuyquote",
#                        "Ignore",
#                    ])
# data.set_index('Opentime', inplace=True)
# data.index = pd.to_datetime(data.index, unit='ms')
# btalib.config.set_return_dataframe()  # force return of a DataFrame
# my_sma = btalib.sma(data)
# print(data)
# print(data.Close)
# print(btalib.rsi(data.Close))
# Wire up the feed and strategy, run the backtest and plot the result.
cerebro.adddata(data)
cerebro.addstrategy(RSIStrategy)
cerebro.run()
cerebro.plot()
|
[
"felix99885@web.de"
] |
felix99885@web.de
|
c7ddb6e456c167c0205664de7a610f2034772703
|
d33d722990fbbad37314f605aa2f559fe5112266
|
/app.py
|
4cdd3176daa3c4bf1bcc15be4a46ae3c4fda439e
|
[] |
no_license
|
Alicepinch/everything-vegan
|
c4e03befb3b099eced347b026b8f66c574ed937a
|
95d8dc2e06da4433d0a663fece65a50138e45bc7
|
refs/heads/master
| 2023-04-03T17:01:27.088114
| 2021-04-16T17:19:47
| 2021-04-16T17:19:47
| 339,351,194
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,626
|
py
|
import os
from flask import (
Flask, flash, render_template,
session, request, url_for, redirect)
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
from flask_sslify import SSLify
from datetime import date, timedelta
from werkzeug.security import generate_password_hash, check_password_hash
from validation import (
valid_registration, login_required, valid_recipe, valid_password_update)
# Local development: pull environment variables from env.py when present.
if os.path.exists("env.py"):
    import env
# Config #
app = Flask(__name__)
sslify = SSLify(app)  # redirect all plain-HTTP requests to HTTPS
app.config["MONGO_DBNAME"] = os.environ.get("MONGO_DBNAME")
app.config["MONGO_URI"] = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")
# Sessions expire after two hours.
app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(minutes=120)
mongo = PyMongo(app)
# Global variables used throughout functions #
default_pic = ("/static/images/default-profile-picture.jpg")
# NOTE(review): rebinds the imported ``date`` class to today's date, and is
# captured once at process start — long-running servers will keep stamping
# the launch date on new records.
date = date.today()
recipes_data = mongo.db.recipes
users_data = mongo.db.users
subscribers_data = mongo.db.subscribers
# Homepage #
@app.route('/')
def index():
    """Render the site homepage."""
    return render_template("index.html")
# Recipe functions #
@app.route('/recipes')
def recipes():
    """
    Lists all recipes stored in MongoDB.
    """
    # Materialise the cursor so the template can iterate it freely.
    recipes = list(recipes_data.find())
    return render_template('recipes.html', recipes=recipes)
@app.route('/recipes/<meal>')
def meals(meal):
    """Display recipes filtered to one meal category.

    ``meal`` is the lowercase URL segment (breakfast/lunch/dinner/desserts);
    stored meal names are the capitalised forms, so look up by
    ``meal.capitalize()``.  An unknown segment now yields an empty result
    set instead of the UnboundLocalError the old if/elif chain raised.
    """
    recipes = recipes_data.find({"meal_name": meal.capitalize()})
    return render_template(
        'recipes.html', meal=meal, recipes=recipes)
@app.route('/recipes', methods=["POST"])
def search():
    """
    Searches the recipe text index.  Will return results for
    recipe name, description and ingredients.
    """
    # Fetches user's search input
    query = request.form.get("search-query")
    text_filter = {"$text": {"$search": query}}
    # Search results
    recipes = recipes_data.find(text_filter)
    # Cursor.count() was deprecated in PyMongo 3.7 and removed in 4.0;
    # count on the collection with the same filter instead.
    searched_recipes = recipes_data.count_documents(text_filter)
    return render_template("recipes.html", query=query,
                           recipes=recipes, searched_recipes=searched_recipes)
@app.route('/recipe/<recipe_id>')
def recipe_page(recipe_id):
    """
    Renders the detail page for one recipe, looked up by its ObjectId.
    """
    recipe = recipes_data.find_one({"_id": ObjectId(recipe_id)})
    return render_template('recipe.html', recipe=recipe)
# Login / register function #
@app.route('/login', methods=["GET", "POST"])
def login():
    """
    Logs user in if username exists in database and password is correct.
    """
    # Returns login template
    if request.method == "GET":
        return render_template("login.html")
    # Opt into the two-hour PERMANENT_SESSION_LIFETIME configured above.
    session.permanent = True
    password = request.form.get("password")
    existing_user = users_data.find_one(
        {"username": request.form.get("username").lower()})
    # Checks if username exists and password matches database
    if existing_user and check_password_hash(
            existing_user["password"], password):
        # Adds user to session
        session["user"] = request.form.get("username").lower()
        return redirect(url_for(
            "profile", username=session["user"]))
    # Deliberately vague message: don't reveal which of the two was wrong.
    flash("Incorrect Username and/or Password")
    return redirect(url_for("login"))
@app.route('/register', methods=["GET", "POST"])
def register():
    """
    Registers user and adds to database if the username and email address
    are both new and valid.
    """
    # Returns the register template
    if request.method == "GET":
        return render_template("register.html")
    existing_username = users_data.find_one(
        {"username": request.form.get("username").lower()})
    existing_email = users_data.find_one(
        {"email": request.form.get("email").lower()})
    # Checks username or email isn't already in use
    if existing_username or existing_email:
        if existing_username:
            flash("Sorry, this username is already in use. Please try another")
        else:
            flash("Sorry, this email is already in use. Please try another")
        return redirect(url_for("register"))
    # Checks password and username are correct patterns from validate.py
    if valid_registration():
        register = {
            "username": request.form.get("username").lower(),
            "email": request.form.get("email").lower(),
            # Only a salted hash of the password is stored.
            "password": generate_password_hash(request.form.get("password")),
            "date_joined": date.strftime("%d/%m/%Y"),
            "profile_image": request.form.get(
                "profile_img") or default_pic,
            "saved_recipes": []
        }
        # Adds user to users database
        users_data.insert_one(register)
        # NOTE(review): flash text misspells "signing" — user-visible typo.
        session["user"] = request.form.get("username").lower()
        flash("Welcome! Thank you for sigining up!😊")
        return redirect(url_for(
            "profile", username=session["user"]))
    return redirect(url_for("register"))
@app.route('/logout')
@login_required
def logout():
    """
    Logs the user out by removing them from the session.
    """
    flash("Goodbye! You have been logged out")
    session.pop("user")
    return redirect(url_for("login"))
# User logged in functions #
@app.route('/profile/<username>')
@login_required
def profile(username):
    """
    Displays all recipes created by user.
    If admin is logged in all recipes will show.
    NOTE(review): the ``username`` URL segment is ignored — the page always
    shows the session user's profile.
    """
    # Fetches all user's information from database
    user = users_data.find_one({"username": session['user']})
    # Checks if user is admin and returns all recipes
    if session['user'] == "admin":
        recipes = list(recipes_data.find())
    # If user is not admin, users recipes will show
    else:
        recipes = list(recipes_data.find(
            {"created_by": session['user']}))
    return render_template(
        "profile.html", user=user, recipes=recipes, username=session['user'])
@app.route('/add-recipe', methods=["GET", "POST"])
@login_required
def add_recipe():
    """
    Adds new recipe to database if recipe fields are all valid.
    """
    # Returns new recipe template
    if request.method == "GET":
        return render_template('add-recipe.html')
    # Checks all form inputs are correct lengths from validate.py
    if valid_recipe():
        recipe = {
            "meal_name": request.form.get("meal_name"),
            "recipe_name": request.form.get("recipe_name"),
            "ingredients": request.form.get("ingredients"),
            "description": request.form.get("description").capitalize(),
            "recommendation": request.form.get("recos").capitalize(),
            "yield": request.form.get("yield"),
            # Normalise "mins" to "minutes" and title-case the durations.
            "active_time": request.form.get(
                "active_time").replace('mins', 'minutes').title(),
            "total_time": request.form.get(
                "total_time").replace('mins', 'minutes').title(),
            "img_url": request.form.get("img_url"),
            "method": request.form.get("method"),
            "created_by": session["user"],
            # NOTE(review): ``date`` is captured once at process start, so a
            # long-running server stamps its launch date here — confirm.
            "date_created": date.strftime("%d/%m/%Y"),
        }
        # Inserts new recipe to recipes database
        recipes_data.insert_one(recipe)
        flash("Recipe Successfully Added 🍽")
        return redirect(url_for("recipes"))
    # Redirects back to form if invalid recipe
    return redirect(request.referrer)
# Saved Recipe functions #
@app.route('/saved-recipes')
@login_required
def saved_recipes():
    """
    Displays the logged-in user's saved recipes, in the order saved.
    """
    # Fetches user's data and their saved recipe ids
    user = users_data.find_one({"username": session["user"]})
    saved = user["saved_recipes"]
    # Resolve each saved id to its recipe document.  A comprehension
    # replaces the old append loop, which also shadowed its loop variable
    # (``recipe_id`` was reused as both the id and the fetched document).
    saved_rec = [
        recipes_data.find_one({'_id': ObjectId(recipe_id)})
        for recipe_id in saved
    ]
    return render_template(
        'saved-recipes.html', saved=saved, saved_rec=saved_rec)
@app.route('/save/<recipe_id>', methods=["POST"])
@login_required
def save_recipe(recipe_id):
    """
    Saves recipe to users saved array if recipe not already saved.
    """
    # Fetches users data and their saved recipes
    user = users_data.find_one({"username": session["user"]})
    saved = user["saved_recipes"]
    # Checks if the recipe is already in users saved array
    if ObjectId(recipe_id) in saved:
        flash("Recipe already saved!😊")
        return redirect(request.referrer)
    # If not saved add recipe id to users saved recipe array
    users_data.update_one(
        user, {"$push": {
            "saved_recipes": ObjectId(recipe_id)}})
    flash("Recipe Saved to profile!💚")
    return redirect(request.referrer)
@app.route('/saved-recipes/remove/<recipe_id>', methods=["POST"])
@login_required
def remove_saved_recipe(recipe_id):
    """
    Removes recipe ID from the users "saved_recipes" array.
    """
    # Fetches users data
    user = users_data.find_one({"username": session["user"]})
    # $pull deletes the id from the array; a no-op when it is absent.
    users_data.update_one(
        user, {"$pull": {"saved_recipes": ObjectId(recipe_id)}})
    flash("Recipe removed from saved")
    return redirect(request.referrer)
# Edit and delete recipes #
@app.route('/recipe/edit-recipe/<recipe_id>', methods=["GET", "POST"])
@login_required
def edit_recipe(recipe_id):
    """
    Allows users to edit a recipe if the user is admin or has created
    the recipe and the edits are all valid.
    Returns 404 if user didn't create recipe or user is not admin to
    avoid other users knowing the URL is correct.
    """
    recipe = recipes_data.find_one({"_id": ObjectId(recipe_id)})
    created_by = recipe["created_by"]
    # Checks user logged in is user who created recipe or admin
    if created_by == session['user'] or session['user'] == "admin":
        # Returns edit recipe template
        if request.method == "GET":
            return render_template('edit-recipe.html', recipe=recipe)
        # Checks all form inputs are correct lengths from validate.py
        if valid_recipe():
            recipes_data.update_one(
                {"_id": ObjectId(recipe_id)},
                {'$set': {
                    "meal_name": request.form.get("meal_name"),
                    "recipe_name": request.form.get("recipe_name"),
                    "ingredients": request.form.get("ingredients"),
                    "description": request.form.get(
                        "description").capitalize(),
                    "recommendation": request.form.get("recos").capitalize(),
                    "yield": request.form.get("yield"),
                    # Normalise "mins" to "minutes", matching add_recipe.
                    "active_time": request.form.get(
                        "active_time").replace('mins', 'minutes').title(),
                    "total_time": request.form.get(
                        "total_time").replace('mins', 'minutes').title(),
                    "img_url": request.form.get("img_url"),
                    "method": request.form.get("method"),
                    "last_edited_by": session['user']
                }})
            flash("Recipe Updated 😊")
            return redirect(url_for("recipe_page", recipe_id=recipe_id))
        else:
            # Invalid form input: return to the edit form.
            return redirect(request.referrer)
    # If user didn't create recipe or is not admin, 404 error returns
    else:
        return render_template('/errors/404.html'), 404
@app.route('/recipe/delete-recipe/<recipe_id>')
@login_required
def delete_recipe(recipe_id):
    """
    Deletes a recipe (creator or admin only) and pulls its id out of every
    user's "saved_recipes" array so no profile points at a deleted document.
    Returns 404 if user didn't create recipe or user is not admin to
    avoid other users knowing the URL is correct.
    """
    recipe = recipes_data.find_one({"_id": ObjectId(recipe_id)})
    created_by = recipe["created_by"]
    # Checks user logged in is user who created recipe or admin
    if created_by == session['user'] or session['user'] == "admin":
        # One bulk update removes the id from every matching user at once
        # (the old code looped over users, issuing one update each).
        users_data.update_many(
            {"saved_recipes": ObjectId(recipe_id)},
            {"$pull": {"saved_recipes": ObjectId(recipe_id)}})
        recipes_data.delete_one(recipe)
        # Spelling fixed in the user-facing message ("Succesfully").
        flash("Recipe Successfully Removed!")
    # If user didn't create recipe or is not admin, 404 error returns
    else:
        return render_template('/errors/404.html'), 404
    return redirect(url_for("recipes"))
# Update / delete user #
@app.route('/profile/delete-account/<username>')
@login_required
def delete_user(username):
    """
    Deletes the logged-in user's account; recipes created by the user are
    reassigned to admin rather than deleted.
    Returns 404 if the session user is not the username passed in the URL,
    to avoid other users knowing the URL is correct.
    """
    # If session user matches username in URL
    if session['user'] == username:
        # Reassign all of the user's recipes to admin in one bulk update.
        # (Collection.update / Collection.remove were deprecated in
        # PyMongo 3 and removed in PyMongo 4.)
        recipes_data.update_many(
            {"created_by": session["user"]},
            {'$set': {"created_by": "admin"}})
        # Removes user from database
        users_data.delete_one({"username": session['user']})
        session.pop("user")
        flash("Sorry to see you go! Your user has been deleted.")
    # If session user does not match username, 404 error returns
    else:
        return render_template('/errors/404.html'), 404
    return redirect(url_for("login"))
@app.route('/profile/update-password/<username>', methods=["GET", "POST"])
@login_required
def update_password(username):
    """
    User can update their current password If the current password is correct,
    and that the new passwords match the correct format and match before
    updating password in database.
    """
    # NOTE(review): these form reads also run on GET (all None then) but the
    # values are only used on POST, so this is harmless.
    current_password = request.form.get("password")
    user = users_data.find_one({'username': session['user']})
    new_password = request.form.get('new-password')
    confirm_password = request.form.get("confirm-password")
    # Returns update password template
    if request.method == "GET":
        return render_template(
            'update-password.html', username=session['user'])
    # Checks current password matches password in database
    if check_password_hash(user["password"], current_password):
        # Checks the new passwords match the password format from validate.py
        if valid_password_update():
            # Checks both new passwords match
            if new_password == confirm_password:
                # Updates the password and redirects to profile page
                users_data.update_one(
                    user,
                    {'$set': {
                        'password': generate_password_hash
                        (new_password)
                    }})
                flash("Password updated! 😊")
                return redirect(url_for('profile', username=session['user']))
            else:
                flash("Passwords do not match! Please try again😔")
                return redirect(url_for("update_password",
                                        username=session['user']))
        # Reached only when valid_password_update() is falsy — presumably it
        # flashes its own message; confirm in validation.py.
        return redirect(url_for('update_password', username=session['user']))
    else:
        flash('Incorrect password. Please try again😔')
        return redirect(url_for('update_password', username=session['user']))
@app.route('/profile/update-profile-pic', methods=["POST"])
@login_required
def update_profile_pic():
    """
    Updates the logged-in user's profile photo URL from the posted form.
    """
    users_data.update_one(
        {"username": session['user']},
        {'$set': {
            "profile_image": request.form.get(
                "profile_img")
        }})
    return redirect(request.referrer)
# Newsletter Subscribe #
@app.route('/subscribe', methods=["POST"])
def subscribe_user():
    """
    Adds an email address to the newsletter list, skipping duplicates.
    """
    address = request.form.get("sub_email")
    already_subscribed = subscribers_data.find_one(
        {"subscriber_email": address})
    if not already_subscribed:
        subscribers_data.insert_one({"subscriber_email": address})
    return redirect(request.referrer)
# Error Pages #
@app.errorhandler(404)
def page_not_found(error):
    '''
    Handles 404 errors (page not found) with the custom template.
    '''
    return render_template('/errors/404.html'), 404
@app.errorhandler(500)
def internal_server_error(error):
    '''
    Handles 500 errors (internal server error) with the custom template.
    '''
    return render_template('/errors/500.html'), 500
@app.errorhandler(405)
def method_not_allowed(error):
    '''
    Handles 405 errors (method not allowed) with the custom template.
    '''
    return render_template('/errors/405.html'), 405
# Run the development server; host and port come from the environment.
# Debug is off so tracebacks are never exposed to visitors.
if __name__ == "__main__":
    app.run(host=os.environ.get("IP"),
            port=int(os.environ.get("PORT")),
            debug=False)
|
[
"alicepinch@hotmail.co.uk"
] |
alicepinch@hotmail.co.uk
|
9bc9d10b3ff2fc226bfa41037a50c9604a3db5ba
|
46aa1b419f9e47b4bb53068474e4ea47ca0c3834
|
/displaytools3.py
|
737a148590ccb3cb5e111f3c964c3717edca03c3
|
[
"MIT"
] |
permissive
|
cknoll/displaytools
|
4850e60e7dcb465fde4e81d61f50ca0697d809e2
|
3739b7ace5ff18edca18a113de05caefe88919ce
|
refs/heads/master
| 2021-06-02T00:34:37.803620
| 2017-11-11T17:07:11
| 2017-11-11T17:07:11
| 42,675,014
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,051
|
py
|
# -*- coding: utf-8 -*-
"""
This module was written by Carsten Knoll, see
https://github.com/cknoll/displaytools
This code is licensed under GPLv3
https://www.gnu.org/licenses/gpl-3.0.en.html
------------------------------------------------------------------------------
This module is an experimental ipython extension.
Background: insert some logic to display the 'result' of an assignment
# load it with %reload_ext displaytools
usage:
`my_random_variable = np.random.rand() ##`
inserts the source line `display(my_random_variable)` to the source code,
that is actually executed.
That way, the notebook is more comprehensible beacause the reader knows
the content of `my_random_variable`. It saves the typing effort and the code
duplication of manually adding `display(my_random_variable)`.
"""
# Issues: SyntaxError points to the wrong line (due to display insertion)
# Note: this extension does not work properly with keywordargs: x = func(a, b=2)
# todo maybe use sp.Eq(sp.Symbol('Z1'), theta, evaluate=False) to get better formatting
#import new
import types
import IPython
from IPython.display import display
class Container(object):
    """Plain attribute bag used to pass line-classification flags around."""
    pass


# special comments
sc = '##'
sc_lhs = '##:'
sc_transpose = '##T'
sc_lhs_transpose = '##:T'
sc_list = [sc, sc_lhs, sc_transpose, sc_lhs_transpose]

# ensure that all special comments have the same start string
for elt in sc_list:
    assert elt.startswith(sc)


def eval_line_end(line):
    """Classify the special comment (if any) that *line* ends with.

    Returns a Container with boolean flags: ``sc`` (a special comment is
    present), ``lhs`` (also print the left-hand-side name), ``transpose``
    (display the transposed value) and ``assignment`` (always False here;
    filled in later by the caller).
    """
    flags = Container()
    flags.sc = False
    flags.lhs = False
    flags.transpose = False
    flags.assignment = False
    # The four suffixes are mutually exclusive (none is a suffix of
    # another), so the first match is the only match.
    suffix_meanings = (
        (sc_lhs_transpose, True, True),
        (sc_transpose, False, True),
        (sc_lhs, True, False),
        (sc, False, False),
    )
    for suffix, wants_lhs, wants_transpose in suffix_meanings:
        if line.endswith(suffix):
            flags.sc = True
            flags.lhs = wants_lhs
            # Transposition assumes that sympy or numpy is imported
            flags.transpose = wants_transpose
            break
    return flags
def process_line(line, line_flags, disp_str):
    """Build the display statement to inject for *line*.

    ``disp_str`` is the expression (or variable name) to show;
    ``line_flags`` selects the delimiter printed afterwards (assignment
    vs. bare expression), optional transposition and whether the
    left-hand-side name is shown via ``custom_display``.
    """
    if line_flags.assignment:
        delim, brace_str = "---", "%s"
    else:
        delim, brace_str = "___", "(%s)"
    #!! try ... eval(...) except SyntaxError ?
    if line_flags.transpose:
        # Parenthesise non-assignments so .T binds to the whole expression.
        disp_str = (brace_str % disp_str) + '.T'
    if line_flags.lhs:
        return 'custom_display("%s", %s); print("%s")' % (
            disp_str, disp_str, delim)
    return 'display(%s); print("%s")' % (disp_str, delim)
def insert_disp_lines(raw_cell):
    """Rewrite an IPython cell, injecting display statements.

    For each top-level line ending in a special comment: assignments get a
    ``display(...)`` line inserted after them; bare expressions are
    replaced by their ``display(...)`` form.  Returns the rewritten cell
    source as a single string.
    """
    lines = raw_cell.split('\n')
    N = len(lines)
    # iterate from behind -> insert does not change the lower indices
    for i in range(N-1, -1, -1):
        line = lines[i]
        line_flags = eval_line_end(line)
        if line_flags.sc:
            if line[0] in [' ', '#']:
                # this line is part of a comment or indented block
                # -> ignore
                continue
            if not line.index('#') == line.index(sc):
                # the special comment might not be the first comment
                # -> ignore this line?
                # continue
                # new option: not an important special case
                pass
            if ' = ' in line:
                # Assignment: display the assigned name on the next line.
                idx = line.index(' = ')
                var_str = line[:idx].strip()
                line_flags.assignment = True
                new_line = process_line(line, line_flags, var_str)
                lines.insert(i+1, new_line)
            else:
                # this line is not an assignment
                # -> it is replaced by `display(line)`
                idx = line.index(sc)
                disp_str = line[:idx]
                line_flags.assignment = False
                new_line = process_line(line, line_flags, disp_str)
                lines[i] = new_line
    new_raw_cell = "\n".join(lines)
    return new_raw_cell
def custom_display(lhs, rhs):
    """Display *rhs* like IPython's ``display``, prefixed with ``lhs :=``.

    lhs: left hand side (a string — the variable name to show)
    rhs: right hand side (the value to render)
    This function serves to inject the string for the left hand side
    of an assignment.  Raises TypeError when *lhs* is not a string.
    """
    # This code is mainly copied from IPython/display.py
    # (IPython version 2.3.0)
    # NOTE(review): kwargs is always empty here, so raw/metadata are unused
    # leftovers from the copied code.
    kwargs = {}
    raw = kwargs.get('raw', False)
    include = kwargs.get('include')
    exclude = kwargs.get('exclude')
    metadata = kwargs.get('metadata')
    from IPython.core.interactiveshell import InteractiveShell
    from IPython.core.displaypub import publish_display_data
    format = InteractiveShell.instance().display_formatter.format
    format_dict, md_dict = format(rhs, include=include, exclude=exclude)
    # example format_dict (for a sympy expression):
    # {u'image/png': '\x89PNG\r\n\x1a\n\x00 ...\x00\x00IEND\xaeB`\x82',
    # u'text/latex': '$$- 2 \\pi \\sin{\\left (2 \\pi t \\right )}$$',
    # u'text/plain': u'-2\u22c5\u03c0\u22c5sin(2\u22c5\u03c0\u22c5t)'}
    # it is up to IPython which item value is finally used
    # now merge the lhs into the dict:
    if not isinstance(lhs, str):
        raise TypeError('unexpexted Type for lhs object: %s' %type(lhs))
    new_format_dict = {}
    for key, value in list(format_dict.items()):
        if 'text/plain' in key:
            new_value = lhs+' := '+value
            new_format_dict[key] = new_value
        elif 'text/latex' in key:
            if value.startswith("$$"):
                # this is the expected case
                new_value = r"$$\texttt{%s} := %s" % (lhs, value[2:])
                new_format_dict[key] = new_value
            else:
                # this is unexpected but raising an exceptions seems
                # not necessary; hanle like plain text (see above)
                new_value = lhs+' := '+value
                new_format_dict[key] = new_value
        else:
            # this happens e.g. for mime-type (i.e. key) 'image/png'
            new_format_dict[key] = value
    # legacy IPython 2.x support
    if IPython.__version__.startswith('2.'):
        publish_display_data('display', new_format_dict, md_dict)
    else:
        # indeed, I dont know with which version the api changed
        # but it does not really matter (for me)
        publish_display_data(data=new_format_dict, metadata=md_dict)
def load_ipython_extension(ip):
    """IPython extension entry point.

    Monkey-patches ``ip.run_cell`` so every cell is rewritten by
    ``insert_disp_lines`` before execution, and exposes ``display`` /
    ``custom_display`` in the user namespace.
    """
    def new_run_cell(self, raw_cell, *args, **kwargs):
        # Rewrite the cell source, then delegate to the original run_cell.
        new_raw_cell = insert_disp_lines(raw_cell)
        if 0:
            #debug
            print("cell:")
            print(raw_cell)
            print("new_cell:")
            print(new_raw_cell)
            print('-'*5)
        #print("args", args)
        #print("kwargs", kwargs)
        return ip.old_run_cell(new_raw_cell, *args, **kwargs)
    # prevent unwanted overwriting when the extension is reloaded
    if not 'new_run_cell' in str(ip.run_cell):
        ip.old_run_cell = ip.run_cell
        ip.run_cell = types.MethodType(new_run_cell, ip)
    ip.user_ns['display'] = display
    ip.user_ns['custom_display'] = custom_display
|
[
"CarstenKnoll@gmx.de"
] |
CarstenKnoll@gmx.de
|
9622490f20cf321f83d5fabf121619a564816f02
|
37f488aa42e043a07ce4a706319844d932f1cd22
|
/eval/test_.py
|
6bac5d2fb1345d9ed11ab3380665469f1e717561
|
[
"Apache-2.0"
] |
permissive
|
almoslmi/e2e_EL_multilingual_experiments
|
b149f8a4769a127d0e257f7889929eb3e922080f
|
a52745435f897d8ed5847d80e8bbce4175fb348f
|
refs/heads/master
| 2020-06-17T22:39:03.406241
| 2019-06-20T04:50:17
| 2019-06-20T04:50:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
import os
import sys
import json
from tqdm import tqdm
from keras.models import load_model
from sklearn.metrics import classification_report
# Force CPU-only execution by hiding all CUDA devices from the framework.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# Make the project's helper modules importable.
sys.path.append("./modules")
import candidate_generator as cg
import entvec_encoder as eenc
import laserencoder as lenc
import predictor as pr
def run():
    """Run entity-linking prediction over a slice of data.json and dump
    per-sentence {"true": gold entities, "pred": predictions} pairs to
    result.json.
    """
    out = []
    with open("./data.json") as f:
        # NOTE(review): the slice [946:1163] hard-codes one evaluation
        # partition of the corpus -- confirm against the experiment setup.
        data = json.load(f)[946:1163]
    # Flatten the per-document sentence lists into one flat list.
    sents = []
    for doc in data:
        sents += doc
    # Load the trained model plus candidate-trie / embedding / encoder resources.
    model = load_model("./models/model_best.h5")
    trie = cg.load("./data/mention_stat.marisa")
    kv = eenc.load("./entity_vector/enwiki_20180420_100d.bin")
    enc = lenc.Encoder()
    for d in tqdm(sents):
        X_list, cand_list, gram_list = pr.feature(d["sentence"], trie, kv, enc)
        result = pr.predict(model, X_list, cand_list, gram_list)
        out.append({"true": d["entities"], "pred": result})
    with open("result.json", "w") as f:
        json.dump(out, f, indent=4)
if __name__ == "__main__":
    run()
|
[
"shun.sugiyama@jp21.com"
] |
shun.sugiyama@jp21.com
|
3700d5b41ee354b7571d296caae7e7d00f26b52c
|
49961fcc0e797b0e876298bd85a2302a252b9368
|
/PY/qrcode.py
|
bfd90cead610c7c2971db361f7c7a94dbd9c7a24
|
[
"MIT"
] |
permissive
|
digitalrobertlima/BrCrypto
|
a6780c74a3543ac47c8de23cb135045d9cf4a24d
|
a49a0020266bf89e76db7b1f57792727eab3470f
|
refs/heads/master
| 2023-04-20T23:12:01.976224
| 2021-05-08T17:44:48
| 2021-05-08T17:44:48
| 284,328,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
import pyqrcode as pqr
import png  # noqa: F401 -- pypng backend required for pyqrcode's .png() output
import io

# Build the QR code once. pyqrcode opens/closes the output path itself, so
# the original `with open('code.png', 'w')` block both leaked an unused
# handle and needlessly truncated the file; the PNG was also written twice.
url = pqr.create('Hello World')

# Write the PNG to disk a single time.
url.png('code.png', scale=5)

# Render the same PNG into memory and dump the raw bytes for inspection.
buffer = io.BytesIO()
url.png(buffer)
print(list(buffer.getvalue()))
|
[
"ge.crio123@gmail.com"
] |
ge.crio123@gmail.com
|
3ed358220dd479af523de7acafc28a6d237fcb72
|
c5f50b946ca8ff2d85204c8bf3feefd1e3491e26
|
/code/PI/pub.py
|
815dd4e02dd3e6285c6d776554e1c1ac74a24124
|
[] |
no_license
|
jackwaines/iot
|
891ecbf3bc9cc856f92a13dbbc6250d4d14dd980
|
e1e0ae5ef326a065d1610bdb07853d0f738cf52c
|
refs/heads/main
| 2023-04-12T21:32:30.258395
| 2021-05-13T09:10:07
| 2021-05-13T09:10:07
| 366,326,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,517
|
py
|
# import libs
import paho.mqtt.client as mqtt
import os
import time
import random
from time import strftime
from datetime import datetime
import requests
import json
import schedule
import numpy as np
import tensorflow as tf
# load the AI model
model2 = tf.keras.models.load_model('./my_model')
# MQTT Functions
def on_message(client, obj, msg):
    """MQTT callback: log topic, QoS and raw payload of an incoming message."""
    fields = [msg.topic, str(msg.qos), str(msg.payload)]
    print(" ".join(fields))
def on_publish(client, obj, mid):
    """MQTT callback: log the message id of a completed publish."""
    print(f"mid: {mid}")
# getting dict with temperature, date and icon for the daytime forecast
def day_forecast():
    """Extract the midday (12:00) entries from the module-level
    ``forecast_response`` (OpenWeatherMap 5-day/3-hour forecast JSON).

    Returns [entries, weekday_names] where each entry holds the epoch
    date, temperature, icon code and the textual timestamp.
    """
    temp_day = []
    for i in forecast_response['list']:
        # Keep only the 12:00:00 slot of each forecast day.
        foo = '12:00:00'
        if foo in i['dt_txt']:
            dictor = {
                'date': i['dt'],
                'temp': i['main']['temp'],
                'icon': i['weather'][0]['icon'],
                'date_txt': i['dt_txt']
            }
            temp_day.append(dictor)
    # Collect the epoch timestamps of the selected entries.
    temport = []
    for d in temp_day:
        temport.append(d['date'])
    # Convert each epoch timestamp to its weekday name (e.g. 'Monday').
    dates_formated = []
    for value in temport:
        dates_formated.append(
            datetime.utcfromtimestamp(value).strftime('%A'))
    return [temp_day, dates_formated]
def night_forecast():
    """Extract the 03:00 (night) entries from the module-level
    ``forecast_response``; returns a list of
    {'date': textual timestamp, 'temp': temperature in Celsius}.
    """
    temp_night = []
    for i in forecast_response['list']:
        foo = '03:00:00'
        if foo in i['dt_txt']:
            dictor = {
                'date': i['dt_txt'],
                'temp': i['main']['temp'],
            }
            temp_night.append(dictor)
    return temp_night
# send email function
def send_mail(city, temperature, humidity, pressure, wind, description):
    """Send a plain-text weather summary via Gmail SMTP.

    Credentials and addresses below are placeholders that must be
    replaced before use. Blocks on network I/O; smtplib exceptions
    propagate to the caller.
    """
    import smtplib
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    mail= MIMEMultipart()
    sender_email = "sender@gmail.com" # replace with sender mail
    rec_email = "reciver@gmail.com" # replace with reciver mail
    password = "Passwd" # replace with sender mail password
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.starttls()
    server.login(sender_email, password)
    mail['From']='Weather Notification System'
    mail['To'] = rec_email
    mail['Subject']='Weather App – Alert'
    # Normalise the readings into display strings with units.
    city = city
    temperature = str(temperature)+ " C"
    humidity = str(humidity) + " %"
    pressure = str(pressure) + " hPa"
    wind = str(wind) + " m/s"
    description = description
    body=" City: "+str(city)+"\n Temperature: "+str(temperature)+"\n Humidity: "+str(humidity)+"\n Pressure: "+str(pressure)+"\n Wind: "+str(wind)+"\n Description: "+ str(description)
    mail.attach(MIMEText(body,'plain'))
    msg=mail.as_string()
    server.sendmail(sender_email, rec_email, msg)
    print('Mail Sent')
# Status line published in the "api" MQTT payload; rewritten by the
# scheduled jobs below after a mail has been sent.
email = "Email Will Send Your Mail."
def email12():
    """Update the status line for the job scheduled at 00:00
    (NOTE(review): the message says '12PM' -- confirm intended time)."""
    global email
    email = "Email Send At 12PM. Please Check Your Mail."
def email06():
    """Update the status line for the job scheduled at 18:00."""
    global email
    email = "Email Send At 06PM. Please Check Your Mail."
# schedule mail send time
# NOTE(review): jobs fire at 00:00 and 18:00, but email12's message says
# "12PM" -- confirm whether midnight or noon is intended.
# The lambdas read module globals that are only assigned inside the main
# loop, so a job firing before the first iteration would raise NameError.
schedule.every().day.at("00:00").do(lambda: send_mail(city_float, temp_float, hum_float, pre_float, wind_float, des_float))
schedule.every().day.at("18:00").do(lambda: send_mail(city_float, temp_float, hum_float, pre_float, wind_float, des_float))
schedule.every().day.at("00:00").do(email12)
schedule.every().day.at("18:00").do(email06)
# generate random sensor values
def generate_sensor_data():
    """Populate the module globals temp/hum/pre with simulated readings:
    temperature 20-30, humidity 60-90, pressure 1000-1120.
    """
    global temp, hum, pre
    temp = random.randint(20, 30)
    hum = random.randint(60, 90)
    pre = random.randint(1000, 1120)
# AI prediction
def predict(temp_float, hum_float, pre_float):
    """Classify current weather readings with the pre-trained Keras model
    and return a human-readable suggestion string.

    NOTE(review): class ids 1 == rain and 2 == snow are assumed from the
    original mapping -- confirm against the model's training labels.
    """
    # Renamed from `input` to avoid shadowing the builtin.
    features = np.array([[temp_float, hum_float, pre_float]])
    pred = model2.predict_classes(features)
    # The original if/if/else chain let the trailing `else` overwrite the
    # rain suggestion whenever class 1 was predicted; use an exclusive
    # chain so each class maps to exactly one message.
    if pred == [1]:
        return "Most Probably Today Will Rain. So, Don't Miss Your Jacket."
    if pred == [2]:
        return "Most Probably Today Will Snow."
    return "I Cannot Predict Whether Rain or Snow."
# check out and in temp
def check_temp(temp_float, temp):
    """Compare the outdoor reading (temp_float) against the indoor one
    (temp) and return a one-line summary; ties count as 'inside higher'.
    """
    if temp_float > temp:
        return "Outside Temperature Higher Than Inside."
    return "Inside Temperature Higher Than Outside."
# Main loop: connect to the MQTT broker, then every second publish the
# simulated sensor readings, the current-weather summary and the 5-day
# forecast, and run any due scheduled mail jobs.
try:
    mqttc = mqtt.Client()
    mqttc.on_message = on_message
    mqttc.on_publish = on_publish
    # Connect
    mqttc.username_pw_set("user_name", "passwd") # Replace with mqtt username and passwd
    mqttc.connect('IP_Adress', 1883, 60) # Replace your AWS E2C IP_address
    # Continue the network loop, exit when an error occurs
    while True :
        # NOTE(review): `global` at module level is a no-op; these names
        # are module globals regardless.
        global temp_float, hum_float, pre_float, wind_float, city_float, des_float
        generate_sensor_data()
        API_KEY = '30ad27b312182fa9f7569003a337536b'
        # Replace your city name
        city = 'Middlesbrough'
        # getting api
        url = f'http://api.openweathermap.org/data/2.5/weather?q={city}&units=metric&appid={API_KEY}'
        response = requests.get(url).json()
        # If name of city is wrong spell or unknown
        # NOTE(review): this branch only records the message and falls
        # through; building `weather` below then raises KeyError on an
        # error payload, which the bare except at the bottom swallows.
        if response.get('cod') != 200:
            message = response.get('message', '')
        weather = {
            'city': city,
            'temperature': response['main']['temp'],
            'humidity': response['main']['humidity'],
            'pressure': response['main']['pressure'],
            'wind': response['wind']['speed'],
            'description': response['weather'][0]['description'],
            'icon': response['weather'][0]['icon'],
        }
        temp_float = weather.get('temperature')
        hum_float = weather.get('humidity')
        pre_float = weather.get('pressure')
        wind_float = weather.get('wind')
        city_float = weather.get('city')
        des_float = weather.get('description')
        temp_int = round(temp_float)
        # This api is showing forecast for five days with days/nights
        url_forecast = f'http://api.openweathermap.org/data/2.5/forecast?q={city}&units=metric&appid={API_KEY}'
        # day_forecast()/night_forecast() read this module global.
        forecast_response = requests.get(url_forecast).json()
        day = day_forecast()
        night = night_forecast()
        prediction = predict(temp_float, hum_float, pre_float)
        instuction = check_temp(temp_float, temp)
        # print(prediction)
        sensor = {
            "temp": temp,
            "hum": hum,
            "pre": pre
        }
        api = {
            "temperature": temp_int,
            "humidity": weather.get('humidity'),
            "pressure": weather.get('pressure'),
            "wind": weather.get('wind'),
            "city" :weather.get('city'),
            "description": weather.get('description'),
            "icon": weather.get('icon'),
            "prediction": prediction,
            "instuction": instuction,
            "email": email
        }
        forecast = {
            "day": day,
            "night": night
        }
        # send MQTT data
        mqttc.publish("sensor", (json.dumps(sensor)))
        mqttc.publish("api", (json.dumps(api)))
        mqttc.publish("forecast", (json.dumps(forecast)))
        print('published')
        schedule.run_pending()
        time.sleep(1)
except:
    # NOTE(review): the bare except hides every error, and `exit` without
    # parentheses is never actually called -- the script ends silently.
    exit
|
[
"noreply@github.com"
] |
noreply@github.com
|
a37eb4e9b811f81b05aa31c0731b3975910c9fa1
|
3db9480f4d7ab4c177dca87d8bc9a9d3cdf7569b
|
/lib/sedna/algorithms/hard_example_mining/__init__.py
|
2f9b4c5c779e90962922e2f6c4d347a04257dcbf
|
[
"Apache-2.0"
] |
permissive
|
ZHANGJun0007/sedna
|
ffc258974e46a1d1754a6c127d8e6280d6fdc0dd
|
c3475e2917e375826bc884c21378bc66150af656
|
refs/heads/main
| 2023-07-25T03:53:32.916432
| 2021-08-23T06:39:12
| 2021-08-23T06:39:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 621
|
py
|
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .hard_example_mining import *
|
[
"joeyhwong@gknow.cn"
] |
joeyhwong@gknow.cn
|
8ff0cf9f0326d522054d28a689a09a1bba5d292a
|
1c8ea05ed65d76ab0e7bf8e642e0573e34d880ab
|
/BOJ/continue_number.py
|
fd2990487b7814cfe23effa2dc2d61bb8c9f9285
|
[] |
no_license
|
PARKJUHONG123/turbo-doodle
|
1d259c88544d5e52ed375f119792363f5c1b4377
|
6b073281236af50949042c1a6b269752037cb829
|
refs/heads/master
| 2023-01-14T13:41:58.299164
| 2020-11-23T12:12:30
| 2020-11-23T12:12:30
| 259,669,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
# 1~9 : 9
# 10~99 : 90
# 100~999 : 900
# 1000~9999 : 9000
import sys
def nine_num(size):
    """Return how many integers have exactly `size` decimal digits
    (9 * 10**(size-1), e.g. 9 one-digit, 90 two-digit numbers)."""
    return 9 * 10 ** (size - 1)
# Total digits written when listing 1..N: every complete digit-length
# band below len(N), plus the partial band that contains N itself.
N = sys.stdin.readline().split()[0]
length = len(N)
answer = sum(nine_num(i) * i for i in range(1, length))
band_start = pow(10, length - 1)
answer += (int(N) - band_start + 1) * length
print(answer)
|
[
"corallines@naver.com"
] |
corallines@naver.com
|
6551775da0fef49383c917f13bf8624a2a168ddc
|
936c91e915539171878b602d6fb186a2d095dc13
|
/flatten.py
|
214e05468e8fa7b79b7abea6476085e67bbaa5b4
|
[] |
no_license
|
sharababy/ga-p
|
e05c6f2c27ff7dedb238d1d2a822e002907709c3
|
a4630e09097d67cc85e1d24a16694d09b2a018b1
|
refs/heads/master
| 2020-03-29T21:15:04.175964
| 2018-09-27T09:43:17
| 2018-09-27T09:43:17
| 150,357,689
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,073
|
py
|
import csv
import numpy as np
import json
X = []
def flatten(csvf):
    """Flatten one raw GA-sessions CSV into plain rows appended to the
    module-level list ``X``.

    Columns 4 (geoNetwork), 7 (totals) and 8 (trafficSource) hold JSON
    blobs; a missing key falls back to "N/A" for categorical fields and
    "0" for numeric ones, exactly as the original per-key if/else chains
    did. Row layout: channel grouping, date, visitor id, 4 geo fields,
    5 totals fields, source, medium, visit number, transactionRevenue.
    """
    with open(csvf) as csvfile:
        readCSV = csv.reader(csvfile, delimiter=',')
        next(readCSV)  # Skip header line
        for row in readCSV:
            geo = json.loads(row[4])
            total = json.loads(row[7])
            ts = json.loads(row[8])
            k = [row[0], row[1], row[3]]  # channel grouping, date, visitor id
            # Geography fields (categorical -> "N/A" default).
            k += [geo.get(f, "N/A") for f in ("continent", "subContinent", "country", "city")]
            # Session totals (numeric -> "0" default).
            k += [total.get(f, "0") for f in ("visits", "hits", "pageviews", "bounces", "newVisits")]
            # Traffic source fields.
            k += [ts.get(f, "N/A") for f in ("source", "medium")]
            k.append(row[9])  # visit number
            k.append(total.get("transactionRevenue", "0"))
            X.append(k)
# Flatten both splits into the shared accumulator X.
flatten("train.csv")
flatten("test.csv")
# Exploratory leftovers kept for reference:
# X = np.asarray(X)
# channelGrouping = {
#     'Organic Search': 1,
#     'Referral': 2,
#     'Paid Search': 3,
#     'Affiliates': 4,
#     'Direct': 5,
#     'Display': 6,
#     'Social': 7,
#     '(Other)': 8}
# for x in X:
#     channelGrouping[x[0]] = 1
# print(channelGrouping)
# np.savetxt("processed-train.csv", X, delimiter=",")
# Write every flattened row (train + test combined) to a single CSV.
with open("alldata.csv", 'w') as f:
    csv.writer(f).writerows(X)
|
[
"bassi.vdt@gmail.com"
] |
bassi.vdt@gmail.com
|
f9603f7316c51c5a75eec543b9b2913331dc44e3
|
3ac1ca18816430f0728dae6ebb71a728999f289b
|
/setup.py
|
77d94746200501fb2b7a29488f822d35057ead3e
|
[] |
no_license
|
DSLituiev/pdscatter
|
777cf46ad8553d01a58d665b5d0e41ba1d23670d
|
a510607faa41ec2bc627a910a3b6a74246569f26
|
refs/heads/master
| 2020-06-05T08:39:58.691462
| 2015-11-26T00:30:35
| 2015-11-26T00:30:35
| 38,710,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,833
|
py
|
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Resolve paths relative to this setup.py so builds work from any cwd.
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='pdscatter',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='1.0.0',
    description='make scatter plots on pandas dataframe with colouring and interactivity',
    long_description=long_description,
    # The project's main homepage.
    # NOTE(review): this is still the PyPA sampleproject template URL --
    # point it at the real repository before publishing.
    url='https://github.com/pypa/sampleproject',
    # Author details
    author='Dmytro S Lituiev',
    author_email='d.lituiev@gmail.com',
    # Choose your license
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    # What does your project relate to?
    keywords='plotting',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    # List run-time dependencies here.  These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=['pandas', 'matplotlib', 'plotly']
    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    #extras_require={
    #    'dev': ['check-manifest'],
    #    'test': ['coverage'],
    #},
    # If there are data files included in your packages that need to be
    # installed, specify them here.  If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    #package_data={
    #    'sample': ['package_data.dat'],
    #},
    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    #data_files=[('my_data', ['data/data_file'])],
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    # entry_points={
    #     'console_scripts': [
    #         'sample=sample:main',
    #     ],
    # },
)
|
[
"d.lituiev@gmail.com"
] |
d.lituiev@gmail.com
|
1841cfbfa774d1663121686f516b410799d7ad1f
|
430af16a7e275a87931de78bc9125c969b214df4
|
/irepublica_wordlist_generator.py
|
156793bca8d6dba9566c98609abd346f54cf9756
|
[] |
no_license
|
irepublica/py
|
ddf41f4461aa891f8fa58ad7e73c4050d282d986
|
0dfb7bb562664bc1b549ba5ab23c73e8f68cbd51
|
refs/heads/main
| 2023-01-29T00:30:26.491119
| 2020-12-12T14:20:43
| 2020-12-12T14:20:43
| 312,934,552
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,720
|
py
|
###
# this code generate a combination of fix number of upper letter and/or lower case and/or number and/or special characters
# this code works as a small suppliment to crunch, where the rule of this program is not available in crunch
# the wordlist generated by this code is suitable to brute force router default password
# python3 irepublica_wordlist_generator.py --help for information
###
import argparse
import itertools
def get_arguments():
    """Parse CLI options and normalise the per-class settings.

    For each character class (upper/lower/number/special): if its count
    option (-nup/-nlow/-nnumber/-nspecial) is absent, the class is
    disabled (count 0, empty alphabet); if the count is given but the
    alphabet option is not, the full default alphabet is used; otherwise
    the supplied string is split into single characters.

    NOTE(review): counts stay strings here and are int()-converted at
    the call site.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-nup", "--number-upper", dest="n_upper", help="number of upper letters in the string")
    parser.add_argument("-nlow", "--number-lower", dest="n_lower", help="number of lower letters in the string")
    parser.add_argument("-nnumber", "--number-number", dest="n_number", help="number of numbers in the string")
    parser.add_argument("-nspecial", "--number-special", dest="n_special", help="number of special characters in the string")
    parser.add_argument("-up", "--upper-letter", dest="list_upper", help="upper letters included, no argument means include all upper letters, eg -up ABD")
    parser.add_argument("-low", "--lower-letter", dest="list_lower", help="lower letters included, no argument means include all lower letters, eg -low abcdef")
    parser.add_argument("-number", "--number", dest="list_number", help="number included, no argument means include all numbers, eg -number 1234")
    parser.add_argument("-special", "--special-character", dest="list_special", help="special characters included, no argument means include all special characters")
    parser.add_argument("-o", "--output", dest="output", help="output file to write, eg -o test.txt")
    options = parser.parse_args()
    # Upper-case letters.
    if not options.n_upper:
        options.n_upper = 0
        options.list_upper = []
    elif not options.list_upper:
        options.list_upper = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
    else:
        options.list_upper = [char for char in str(options.list_upper)]
    # Lower-case letters.
    if not options.n_lower:
        options.n_lower = 0
        options.list_lower = []
    elif not options.list_lower:
        options.list_lower = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
    else:
        options.list_lower = [char for char in str(options.list_lower)]
    # Digits.
    if not options.n_number:
        options.n_number = 0
        options.list_number = []
    elif not options.list_number:
        options.list_number = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    else:
        options.list_number = [char for char in str(options.list_number)]
    # Special characters.
    if not options.n_special:
        options.n_special = 0
        options.list_special = []
    elif not options.list_special:
        options.list_special = ['!', '@', '#', '$', '%', '^', '&', '*']
    else:
        options.list_special = [char for char in str(options.list_special)]
    return options
def distribute_position(n_upper=0, n_lower=0, n_number=0, n_special=0):
    """Return every distinct arrangement of the character-class pattern,
    as strings over 'u'/'l'/'n'/'s' (e.g. n_upper=1, n_lower=1 ->
    ['ul', 'lu'] in arbitrary order).

    Deduplicates at the tuple stage so each distinct arrangement is
    joined into a string only once (the original joined every duplicate
    permutation first, then deduplicated). Still enumerates all len!
    permutations internally, which is fine for the short patterns this
    tool targets.
    """
    position_string = "u" * n_upper + "l" * n_lower + "n" * n_number + "s" * n_special
    return [''.join(p) for p in set(itertools.permutations(position_string))]
def generate_wordlist(list_position, list_upper, list_lower, list_number, list_special):
    """Expand each position pattern into all concrete words.

    :param list_position: patterns over 'u'/'l'/'n'/'s' (one char per slot)
    :param list_upper/list_lower/list_number/list_special: candidate
        characters for each class
    :return: all words, in the same order the original nested loops
        produced them (per pattern, cross product in slot order)

    Replaces the four copy-pasted per-class branches with a dispatch
    table plus itertools.product, and drops the unused `n_total` local.
    """
    # Map each pattern character to its candidate alphabet.
    alphabets = {
        "u": list_upper,
        "l": list_lower,
        "n": list_number,
        "s": list_special,
    }
    list_word = []
    for position_string in list_position:
        pools = [alphabets[position] for position in position_string]
        list_word.extend(''.join(chars) for chars in itertools.product(*pools))
    return list_word
def write_file(list_word, output):
    """Report the word count, then write one word per line to `output`."""
    print(f"number of words is {len(list_word)}")
    with open(output, 'w') as file_output:
        file_output.writelines(f"{word}\n" for word in list_word)
# main
# NOTE(review): runs at import time; consider guarding with
# `if __name__ == "__main__":`.
options = get_arguments()
list_position = distribute_position(int(options.n_upper), int(options.n_lower), int(options.n_number), int(options.n_special))
list_word = generate_wordlist(list_position, options.list_upper, options.list_lower, options.list_number, options.list_special)
write_file(list_word, str(options.output))
|
[
"noreply@github.com"
] |
noreply@github.com
|
95a1ebef8e072e4f52379db3b973967ae27f05b7
|
486cf1ffd184a184d51605e4016bf9825c0a7aae
|
/packages/openshift/status.py
|
de68f37e15f267d0635f67b64ec81a30c83eafa5
|
[] |
no_license
|
jupierce/openshift-client-python
|
3a148e9117008bac8882283ba580aca74ed743d1
|
05aaa8d9290d42c03650bbc900421231410f85dc
|
refs/heads/master
| 2021-06-24T09:15:55.711068
| 2019-07-18T16:18:16
| 2019-07-18T16:18:16
| 179,127,261
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,117
|
py
|
#!/usr/bin/python
from openshift import Missing
def is_route_admitted(apiobj):
    """Return True when the route's status carries an ingress entry whose
    conditions include Admitted=True (structural match via the project's
    Model.can_match).
    """
    return apiobj.model.status.can_match({
        'ingress': [
            {
                'conditions': [
                    {
                        'type': 'Admitted',
                        'status': 'True',
                    }
                ]
            }
        ]
    })
def is_pod_running(apiobj):
    """Return True when the pod reports status phase 'Running'."""
    phase = apiobj.model.status.phase
    return phase == 'Running'
def is_pod_succeeded(apiobj):
    """Return True when the pod reports status phase 'Succeeded'."""
    phase = apiobj.model.status.phase
    return phase == 'Succeeded'
def is_node_ready(apiobj):
    """Return True when the node's status conditions include a
    Ready=True entry (structural match via can_match).
    """
    return apiobj.model.status.conditions.can_match({
        'type': 'Ready',
        'status': 'True',
    })
def is_operator_ready(operator_apiobj):
    """Return True when the operator reports at least one condition and
    none of them signals trouble: Progressing/Failing/Degraded with
    status True, or Available with status False.
    ('Degraded' replaced 'Failing' in OpenShift 4.1.)
    """
    conditions = operator_apiobj.model.status.conditions
    # Operator not reporting conditions yet?
    if not conditions:
        return False
    unhealthy = (
        ("Progressing", "True"),
        ("Failing", "True"),
        ("Degraded", "True"),
        ("Available", "False"),
    )
    return not any((c.type, c.status) in unhealthy for c in conditions)
def is_credentialsrequest_provisioned(apiobj):
    """Return the status.provisioned boolean, or False when the field is
    absent (the openshift Missing sentinel).
    """
    if apiobj.model.status.provisioned is not Missing:
        return apiobj.model.status.provisioned  # This is a boolean
    return False
def is_pvc_bound(apiobj):
    """Return True when the PersistentVolumeClaim reports phase 'Bound'."""
    phase = apiobj.model.status.phase
    return phase == 'Bound'
def is_imagestream_imported(apiobj):
    """
    Returns False if an imagestream reports an issue
    importing images (any tag with an ImportSuccess=False condition).
    Recommended that you run import-image --all
    against the imagestream.
    """
    return not apiobj.model.status.tags.can_match(
        {
            'conditions': [
                {
                    'type': 'ImportSuccess',
                    'status': 'False'
                }
            ]
        }
    )
|
[
"jupierce@redhat.com"
] |
jupierce@redhat.com
|
24439cacca27563dbc4956aa58d4bbc47e183e0b
|
4de627abbc287bfede3c4cf2deb6531788d58d7f
|
/devel/lib/python2.7/dist-packages/novatel_gps_msgs/msg/_Trackstat.py
|
ed9622f378f02a9c08b2e054d5611dec368ffca5
|
[] |
no_license
|
ghxguo/turtle_drive
|
8412ec3d3486fc8f8715dd09c151ea329c288751
|
3d71dcea46f06497c86d6986d3cb3bd10a53cbd8
|
refs/heads/master
| 2020-05-17T17:16:49.368592
| 2019-04-23T14:16:15
| 2019-04-23T14:16:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
/home/nishanth/TurtleStuff/turtle_drive/devel/.private/novatel_gps_msgs/lib/python2.7/dist-packages/novatel_gps_msgs/msg/_Trackstat.py
|
[
"nishmankame@vt.edu"
] |
nishmankame@vt.edu
|
f5282b92dfb6af26a78d6b27d683a125d46c98c4
|
5196d8aeeded2cb8f3414d14b9a0050f69cb809e
|
/tools/train_nsfw.py
|
99da01a0b02cff24860ef3151884daca10ce4c5d
|
[] |
no_license
|
SethWen/nsfw-classification-tensorflow
|
0a81ad422f762eedf188f4f8e7ec2161d6428276
|
6dfcb16fd655e66b9dd83237bbe89e84aa5322b9
|
refs/heads/master
| 2022-12-19T04:30:57.035744
| 2020-09-26T05:53:04
| 2020-09-26T05:53:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,848
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 19-2-15 下午8:53
# @Author : MaybeShewill-CV
# @Site : https://github.com/MaybeShewill-CV/CRNN_Tensorflow
# @File : train_nsfw.py
# @IDE: PyCharm
"""
Train nsfw model script
"""
import argparse
import os
import os.path as ops
import time
import math
import numpy as np
import tensorflow as tf
import glog as log
from config import global_config
from data_provider import nsfw_data_feed_pipline
from nsfw_model import nsfw_classification_net
CFG = global_config.cfg
def init_args():
    """Parse command-line arguments: dataset directory, optional
    multi-GPU flag and optional pretrained-weights checkpoint path.

    :return: argparse.Namespace
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_dir', type=str, help='The dataset_dir')
    parser.add_argument('--use_multi_gpu', type=bool, default=False, help='If use multiple gpu devices')
    parser.add_argument('--weights_path', type=str, default=None, help='The pretrained weights path')
    return parser.parse_args()
def calculate_top_k_error(predictions, labels, k=1):
    """
    Calculate the top-k error
    :param predictions: 2D tensor with shape [batch_size, num_labels]
    :param labels: 1D tensor with shape [batch_size, 1]
    :param k: int
    :return: tensor with shape [1]
    """
    # NOTE(review): batch size is taken from the training config rather
    # than from the tensors, so the result is skewed for validation
    # batches whenever CFG.TRAIN.VAL_BATCH_SIZE != CFG.TRAIN.BATCH_SIZE.
    batch_size = CFG.TRAIN.BATCH_SIZE
    in_top1 = tf.to_float(tf.nn.in_top_k(predictions, labels, k=k))
    num_correct = tf.reduce_sum(in_top1)
    return (batch_size - num_correct) / float(batch_size)
def average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.
    Note that this function provides a synchronization point across all towers.
    Args:
      tower_grads: List of lists of (gradient, variable) tuples. The outer list
        is over individual gradients. The inner list is over the gradient
        calculation for each tower.
    Returns:
       List of pairs of (gradient, variable) where the gradient has been averaged
       across all towers.
    """
    average_grads = []
    # zip(*tower_grads) groups the i-th (grad, var) pair of every tower.
    for grad_and_vars in zip(*tower_grads):
        # Note that each grad_and_vars looks like the following:
        #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
        grads = []
        for g, _ in grad_and_vars:
            # Add 0 dimension to the gradients to represent the tower.
            expanded_g = tf.expand_dims(g, 0)
            # Append on a 'tower' dimension which we will average over below.
            grads.append(expanded_g)
        # Average over the 'tower' dimension.
        grad = tf.concat(grads, 0)
        grad = tf.reduce_mean(grad, 0)
        # Keep in mind that the Variables are redundant because they are shared
        # across towers. So .. we will just return the first tower's pointer to
        # the Variable.
        v = grad_and_vars[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads
def compute_net_gradients(images, labels, net, optimizer=None, is_net_first_initialized=False):
    """
    Calculate gradients for single GPU
    :param images: images for training
    :param labels: labels corresponding to images
    :param net: classification model
    :param optimizer: network optimizer
    :param is_net_first_initialized: if the network is initialized
    :return: (loss, top1_error, grads); grads is None when no optimizer
        is supplied (e.g. for a validation tower)
    """
    net_loss = net.compute_loss(input_tensor=images,
                                labels=labels,
                                name='nsfw_cls_model',
                                reuse=is_net_first_initialized)
    # Re-run inference with reuse=True to obtain logits from the shared
    # variables created (or reused) by compute_loss above.
    net_logits = net.inference(input_tensor=images,
                               name='nsfw_cls_model',
                               reuse=True)
    net_predictions = tf.nn.softmax(net_logits)
    net_top1_error = calculate_top_k_error(net_predictions, labels, 1)
    if optimizer is not None:
        grads = optimizer.compute_gradients(net_loss)
    else:
        grads = None
    return net_loss, net_top1_error, grads
def train_net(dataset_dir, weights_path=None):
    """Single-GPU training loop for the NSFW classifier.

    :param dataset_dir: directory holding the train/val data splits
    :param weights_path: optional checkpoint to restore before training
    :return: None

    NOTE(review): the device string '/gpu:1' is hard-coded below, and
    each "epoch" of the loop actually runs a single batch step (the feed
    pipelines are created with num_epochs=1).
    """
    # set nsfw data feed pipline
    train_dataset = nsfw_data_feed_pipline.NsfwDataFeeder(dataset_dir=dataset_dir,
                                                          flags='train')
    val_dataset = nsfw_data_feed_pipline.NsfwDataFeeder(dataset_dir=dataset_dir,
                                                        flags='val')
    with tf.device('/gpu:1'):
        # set nsfw net
        nsfw_net = nsfw_classification_net.NSFWNet(phase=tf.constant('train', dtype=tf.string),
                                                   resnet_size=CFG.NET.RESNET_SIZE)
        nsfw_net_val = nsfw_classification_net.NSFWNet(phase=tf.constant('test', dtype=tf.string),
                                                       resnet_size=CFG.NET.RESNET_SIZE)
        # compute train loss
        train_images, train_labels = train_dataset.inputs(batch_size=CFG.TRAIN.BATCH_SIZE,
                                                          num_epochs=1)
        train_loss = nsfw_net.compute_loss(input_tensor=train_images,
                                           labels=train_labels,
                                           name='nsfw_cls_model',
                                           reuse=False)
        train_logits = nsfw_net.inference(input_tensor=train_images,
                                          name='nsfw_cls_model',
                                          reuse=True)
        train_predictions = tf.nn.softmax(train_logits)
        train_top1_error = calculate_top_k_error(train_predictions, train_labels, 1)
        # compute val loss
        val_images, val_labels = val_dataset.inputs(batch_size=CFG.TRAIN.VAL_BATCH_SIZE,
                                                    num_epochs=1)
        # val_images = tf.reshape(val_images, example_tensor_shape)
        val_loss = nsfw_net_val.compute_loss(input_tensor=val_images,
                                             labels=val_labels,
                                             name='nsfw_cls_model',
                                             reuse=True)
        val_logits = nsfw_net_val.inference(input_tensor=val_images,
                                            name='nsfw_cls_model',
                                            reuse=True)
        val_predictions = tf.nn.softmax(val_logits)
        val_top1_error = calculate_top_k_error(val_predictions, val_labels, 1)
    # set tensorflow summary
    tboard_save_path = 'tboard/nsfw_cls'
    os.makedirs(tboard_save_path, exist_ok=True)
    summary_writer = tf.summary.FileWriter(tboard_save_path)
    train_loss_scalar = tf.summary.scalar(name='train_loss',
                                          tensor=train_loss)
    train_top1_err_scalar = tf.summary.scalar(name='train_top1_error',
                                              tensor=train_top1_error)
    val_loss_scalar = tf.summary.scalar(name='val_loss',
                                        tensor=val_loss)
    val_top1_err_scalar = tf.summary.scalar(name='val_top1_error',
                                            tensor=val_top1_error)
    train_merge_summary_op = tf.summary.merge([train_loss_scalar, train_top1_err_scalar])
    val_merge_summary_op = tf.summary.merge([val_loss_scalar, val_top1_err_scalar])
    # Set tf saver
    # NOTE(review): the Saver is constructed before the optimizer and
    # global_step variables exist, so those are not covered by the
    # checkpoints -- confirm this is intended.
    saver = tf.train.Saver()
    model_save_dir = 'model/nsfw_cls'
    os.makedirs(model_save_dir, exist_ok=True)
    train_start_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    model_name = 'nsfw_cls_{:s}.ckpt'.format(str(train_start_time))
    model_save_path = ops.join(model_save_dir, model_name)
    # set optimizer
    with tf.device('/gpu:1'):
        # set learning rate: piecewise-constant decay over two boundaries.
        global_step = tf.Variable(0, trainable=False)
        decay_steps = [CFG.TRAIN.LR_DECAY_STEPS_1, CFG.TRAIN.LR_DECAY_STEPS_2]
        decay_values = []
        init_lr = CFG.TRAIN.LEARNING_RATE
        for step in range(len(decay_steps) + 1):
            decay_values.append(init_lr)
            init_lr = init_lr * CFG.TRAIN.LR_DECAY_RATE
        learning_rate = tf.train.piecewise_constant(
            x=global_step,
            boundaries=decay_steps,
            values=decay_values,
            name='learning_rate'
        )
        # Run batch-norm moving-average updates alongside each train step.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            optimizer = tf.train.MomentumOptimizer(
                learning_rate=learning_rate, momentum=0.9).minimize(
                loss=train_loss,
                var_list=tf.trainable_variables(),
                global_step=global_step)
    # Set sess configuration
    sess_config = tf.ConfigProto(allow_soft_placement=True)
    sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TRAIN.GPU_MEMORY_FRACTION
    sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
    sess_config.gpu_options.allocator_type = 'BFC'
    sess = tf.Session(config=sess_config)
    summary_writer.add_graph(sess.graph)
    # Set the training parameters
    train_epochs = CFG.TRAIN.EPOCHS
    log.info('Global configuration is as follows:')
    log.info(CFG)
    with sess.as_default():
        tf.train.write_graph(graph_or_graph_def=sess.graph, logdir='',
                             name='{:s}/nsfw_cls_model.pb'.format(model_save_dir))
        if weights_path is None:
            log.info('Training from scratch')
            init = tf.global_variables_initializer()
            sess.run(init)
        else:
            log.info('Restore model from last model checkpoint {:s}'.format(weights_path))
            saver.restore(sess=sess, save_path=weights_path)
        train_cost_time_mean = []
        val_cost_time_mean = []
        for epoch in range(train_epochs):
            # training part
            t_start = time.time()
            _, train_loss_value, train_top1_err_value, train_summary, lr = \
                sess.run(fetches=[optimizer,
                                  train_loss,
                                  train_top1_error,
                                  train_merge_summary_op,
                                  learning_rate])
            if math.isnan(train_loss_value):
                log.error('Train loss is nan')
                return
            cost_time = time.time() - t_start
            train_cost_time_mean.append(cost_time)
            summary_writer.add_summary(summary=train_summary,
                                       global_step=epoch)
            # validation part
            t_start_val = time.time()
            val_loss_value, val_top1_err_value, val_summary = \
                sess.run(fetches=[val_loss,
                                  val_top1_error,
                                  val_merge_summary_op])
            summary_writer.add_summary(val_summary, global_step=epoch)
            cost_time_val = time.time() - t_start_val
            val_cost_time_mean.append(cost_time_val)
            if epoch % CFG.TRAIN.DISPLAY_STEP == 0:
                log.info('Epoch_Train: {:d} total_loss= {:6f} top1_error= {:6f} '
                         'lr= {:6f} mean_cost_time= {:5f}s '.
                         format(epoch + 1,
                                train_loss_value,
                                train_top1_err_value,
                                lr,
                                np.mean(train_cost_time_mean)))
                train_cost_time_mean.clear()
            if epoch % CFG.TRAIN.VAL_DISPLAY_STEP == 0:
                log.info('Epoch_Val: {:d} total_loss= {:6f} top1_error= {:6f}'
                         ' mean_cost_time= {:5f}s '.
                         format(epoch + 1,
                                val_loss_value,
                                val_top1_err_value,
                                np.mean(val_cost_time_mean)))
                val_cost_time_mean.clear()
            if epoch % 2000 == 0:
                saver.save(sess=sess, save_path=model_save_path, global_step=epoch)
    sess.close()
    return
def train_net_multi_gpu(dataset_dir, weights_path=None):
    """
    Train the NSFW classifier across CFG.TRAIN.GPU_NUM GPUs using the
    tower pattern: each GPU computes its own loss/gradients and the
    averaged gradients are applied once per step.

    :param dataset_dir: root directory of the NSFW tfrecords dataset
    :param weights_path: optional checkpoint to restore before training;
                         None trains from scratch
    :return: None
    """
    # set nsfw data feed pipline
    train_dataset = nsfw_data_feed_pipline.NsfwDataFeeder(dataset_dir=dataset_dir,
                                                          flags='train')
    val_dataset = nsfw_data_feed_pipline.NsfwDataFeeder(dataset_dir=dataset_dir,
                                                        flags='val')
    # set nsfw net: separate train/test phase graphs share variables below
    nsfw_net = nsfw_classification_net.NSFWNet(phase=tf.constant('train', dtype=tf.string),
                                               resnet_size=CFG.NET.RESNET_SIZE)
    nsfw_net_val = nsfw_classification_net.NSFWNet(phase=tf.constant('test', dtype=tf.string),
                                                   resnet_size=CFG.NET.RESNET_SIZE)
    # fetch train and validation data
    # NOTE(review): num_epochs=1 while the loop below runs CFG.TRAIN.EPOCHS
    # steps — presumably the feeder repeats internally; confirm.
    train_images, train_labels = train_dataset.inputs(
        batch_size=CFG.TRAIN.BATCH_SIZE, num_epochs=1)
    val_images, val_labels = val_dataset.inputs(
        batch_size=CFG.TRAIN.BATCH_SIZE, num_epochs=1)
    # set average container: per-tower results collected for averaging
    tower_grads = []
    train_tower_loss = []
    train_tower_top1_error = []
    val_tower_loss = []
    val_tower_top1_error = []
    batchnorm_updates = None
    train_summary_op_updates = None
    # set learning rate: piecewise-constant decay at the two configured steps
    global_step = tf.Variable(0, trainable=False)
    decay_steps = [CFG.TRAIN.LR_DECAY_STEPS_1, CFG.TRAIN.LR_DECAY_STEPS_2]
    decay_values = []
    init_lr = CFG.TRAIN.LEARNING_RATE
    for step in range(len(decay_steps) + 1):
        decay_values.append(init_lr)
        init_lr = init_lr * CFG.TRAIN.LR_DECAY_RATE
    learning_rate = tf.train.piecewise_constant(
        x=global_step,
        boundaries=decay_steps,
        values=decay_values,
        name='learning_rate'
    )
    # set optimizer
    optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
    # set distributed train op: one loss/grads tower per GPU, variables reused
    with tf.variable_scope(tf.get_variable_scope()):
        is_network_initialized = False
        for i in range(CFG.TRAIN.GPU_NUM):
            with tf.device('/gpu:{:d}'.format(i)):
                with tf.name_scope('tower_{:d}'.format(i)) as scope:
                    train_loss, train_top1_error, grads = compute_net_gradients(
                        train_images, train_labels, nsfw_net, optimizer,
                        is_net_first_initialized=is_network_initialized)
                    is_network_initialized = True
                    # Only use the mean and var in the first gpu tower to update the parameter
                    # TODO implement batch normalization for distributed device (luoyao@baidu.com)
                    if i == 0:
                        batchnorm_updates = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
                        train_summary_op_updates = tf.get_collection(tf.GraphKeys.SUMMARIES)
                    tower_grads.append(grads)
                    train_tower_loss.append(train_loss)
                    train_tower_top1_error.append(train_top1_error)
                with tf.name_scope('validation_{:d}'.format(i)) as scope:
                    val_loss, val_top1_error, _ = compute_net_gradients(
                        val_images, val_labels, nsfw_net_val, optimizer,
                        is_net_first_initialized=is_network_initialized)
                    val_tower_loss.append(val_loss)
                    val_tower_top1_error.append(val_top1_error)
    # Average gradients and metrics over all towers
    grads = average_gradients(tower_grads)
    avg_train_loss = tf.reduce_mean(train_tower_loss)
    avg_train_top1_error = tf.reduce_mean(train_tower_top1_error)
    avg_val_loss = tf.reduce_mean(val_tower_loss)
    avg_val_top1_error = tf.reduce_mean(val_tower_top1_error)
    # Track the moving averages of all trainable variables
    variable_averages = tf.train.ExponentialMovingAverage(
        CFG.TRAIN.MOVING_AVERAGE_DECAY, num_updates=global_step)
    variables_to_average = tf.trainable_variables() + tf.moving_average_variables()
    variables_averages_op = variable_averages.apply(variables_to_average)
    # Group all the op needed for training: grads + EMA + batchnorm stats
    batchnorm_updates_op = tf.group(*batchnorm_updates)
    apply_gradient_op = optimizer.apply_gradients(grads, global_step=global_step)
    train_op = tf.group(apply_gradient_op, variables_averages_op,
                        batchnorm_updates_op)
    # set tensorflow summary
    tboard_save_path = 'tboard/nsfw_cls'
    os.makedirs(tboard_save_path, exist_ok=True)
    summary_writer = tf.summary.FileWriter(tboard_save_path)
    avg_train_loss_scalar = tf.summary.scalar(name='average_train_loss',
                                              tensor=avg_train_loss)
    avg_train_top1_err_scalar = tf.summary.scalar(name='average_train_top1_error',
                                                  tensor=avg_train_top1_error)
    avg_val_loss_scalar = tf.summary.scalar(name='average_val_loss',
                                            tensor=avg_val_loss)
    avg_val_top1_err_scalar = tf.summary.scalar(name='average_val_top1_error',
                                                tensor=avg_val_top1_error)
    learning_rate_scalar = tf.summary.scalar(name='learning_rate_scalar',
                                             tensor=learning_rate)
    train_merge_summary_op = tf.summary.merge([avg_train_loss_scalar,
                                               avg_train_top1_err_scalar,
                                               learning_rate_scalar] + train_summary_op_updates)
    val_merge_summary_op = tf.summary.merge([avg_val_loss_scalar, avg_val_top1_err_scalar])
    # set tensorflow saver
    saver = tf.train.Saver()
    model_save_dir = 'model/nsfw_cls'
    os.makedirs(model_save_dir, exist_ok=True)
    train_start_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    model_name = 'nsfw_cls_{:s}.ckpt'.format(str(train_start_time))
    model_save_path = ops.join(model_save_dir, model_name)
    # set sess config
    sess_config = tf.ConfigProto(device_count={'GPU': CFG.TRAIN.GPU_NUM}, allow_soft_placement=True)
    sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TRAIN.GPU_MEMORY_FRACTION
    sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
    sess_config.gpu_options.allocator_type = 'BFC'
    # Set the training parameters
    train_epochs = CFG.TRAIN.EPOCHS
    log.info('Global configuration is as follows:')
    log.info(CFG)
    sess = tf.Session(config=sess_config)
    summary_writer.add_graph(sess.graph)
    with sess.as_default():
        # Export the (untrained) graph definition alongside the checkpoints
        tf.train.write_graph(graph_or_graph_def=sess.graph, logdir='',
                             name='{:s}/nsfw_cls_model.pb'.format(model_save_dir))
        if weights_path is None:
            log.info('Training from scratch')
            init = tf.global_variables_initializer()
            sess.run(init)
        else:
            log.info('Restore model from last model checkpoint {:s}'.format(weights_path))
            saver.restore(sess=sess, save_path=weights_path)
        train_cost_time_mean = []
        val_cost_time_mean = []
        for epoch in range(train_epochs):
            # training part
            t_start = time.time()
            _, train_loss_value, train_top1_err_value, train_summary, lr = \
                sess.run(fetches=[train_op,
                                  avg_train_loss,
                                  avg_train_top1_error,
                                  train_merge_summary_op,
                                  learning_rate])
            if math.isnan(train_loss_value):
                # Abort early on divergence rather than wasting the run
                log.error('Train loss is nan')
                return
            cost_time = time.time() - t_start
            train_cost_time_mean.append(cost_time)
            summary_writer.add_summary(summary=train_summary,
                                       global_step=epoch)
            # validation part
            t_start_val = time.time()
            val_loss_value, val_top1_err_value, val_summary = \
                sess.run(fetches=[avg_val_loss,
                                  avg_val_top1_error,
                                  val_merge_summary_op])
            summary_writer.add_summary(val_summary, global_step=epoch)
            cost_time_val = time.time() - t_start_val
            val_cost_time_mean.append(cost_time_val)
            if epoch % CFG.TRAIN.DISPLAY_STEP == 0:
                log.info('Epoch_Train: {:d} total_loss= {:6f} top1_error= {:6f} '
                         'lr= {:6f} mean_cost_time= {:5f}s '.
                         format(epoch + 1,
                                train_loss_value,
                                train_top1_err_value,
                                lr,
                                np.mean(train_cost_time_mean)))
                train_cost_time_mean.clear()
            if epoch % CFG.TRAIN.VAL_DISPLAY_STEP == 0:
                log.info('Epoch_Val: {:d} total_loss= {:6f} top1_error= {:6f}'
                         ' mean_cost_time= {:5f}s '.
                         format(epoch + 1,
                                val_loss_value,
                                val_top1_err_value,
                                np.mean(val_cost_time_mean)))
                val_cost_time_mean.clear()
            if epoch % 2000 == 0:
                # Periodic checkpoint (also saves at epoch 0)
                saver.save(sess=sess, save_path=model_save_path, global_step=epoch)
    sess.close()
    return
if __name__ == '__main__':
    # Parse command line arguments.
    args = init_args()
    # Multi-GPU training only makes sense with at least two configured GPUs.
    use_multi_gpu = args.use_multi_gpu and CFG.TRAIN.GPU_NUM >= 2
    # train lanenet
    if use_multi_gpu:
        train_net_multi_gpu(args.dataset_dir, args.weights_path)
    else:
        train_net(args.dataset_dir, args.weights_path)
|
[
"luoyao@baidu.com"
] |
luoyao@baidu.com
|
e3b4474b1fdee304f0d66c2a205acce345b33525
|
cc5b858569edf6d819491940769eeef99ae78c18
|
/app.py
|
1e444cd1ff921d36afb514f44af5f445cf749d72
|
[] |
no_license
|
abhishek064/web-server-using-flask
|
0f54f34c75e8e61b623b5e26443d827a73c3d81a
|
3cf44bec3a1a80a054e8293c6678def0041f787b
|
refs/heads/master
| 2021-01-04T18:48:48.168521
| 2020-02-15T13:16:29
| 2020-02-15T13:16:29
| 240,715,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,723
|
py
|
from flask import Flask, render_template, url_for, request, redirect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
app=Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
db = SQLAlchemy(app)
class Todo(db.Model):
    """A single to-do task row."""
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Task text; required, max 200 characters.
    content = db.Column(db.String(200), nullable=False)
    # Creation timestamp, defaults to current UTC time at insert.
    date_created = db.Column(db.DateTime, default=datetime.utcnow)
    def __repr__(self):
        return '<Task %r>' % self.id
@app.route('/', methods=['POST', 'GET'])
def index():
    """List all tasks; on POST, create a new task from the submitted form."""
    if request.method == 'POST':
        task_content = request.form['content']
        new_task = Todo(content=task_content)
        try:
            db.session.add(new_task)
            db.session.commit()
            return redirect('/')
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; DB errors still return the message.
            return 'There was an issue adding your task'
    else:
        # GET: render tasks oldest-first.
        tasks = Todo.query.order_by(Todo.date_created).all()
        return render_template('index.html', tasks=tasks)
@app.route('/delete/<int:id>')
def delete(id):
    """Delete the task with primary key *id*, then return to the list."""
    task_to_delete = Todo.query.get_or_404(id)
    try:
        db.session.delete(task_to_delete)
        db.session.commit()
        return redirect('/')
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        return 'There was a problem deleting that task'
@app.route('/update/<int:id>', methods=['GET', 'POST'])
def update(id):
    """Edit task *id*; GET shows the form, POST saves the new content."""
    task = Todo.query.get_or_404(id)
    if request.method == 'POST':
        task.content = request.form['content']
        try:
            db.session.commit()
            return redirect('/')
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            return 'There was an issue updating your task'
    else:
        return render_template('update.html', task=task)
if __name__ == "__main__":
    # Development server only; do not use debug=True in production.
    app.run(debug=True)
|
[
"abhisheksutariya64@gmail.com"
] |
abhisheksutariya64@gmail.com
|
dff782445f083c8852c95a14f37f05b290a8043b
|
ba6921a268198bc0af433622c021533905f5d462
|
/scripts/in_container/run_migration_reference.py
|
43692b2c458d2e0a50e097a818144f39bdf31553
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
potiuk/airflow
|
b6447765b1a7b586a3d6c8d7ba9262f6bf68fbfd
|
ca2f3013bcb123c4b3973a5b85de77094bf2c459
|
refs/heads/main
| 2023-08-30T13:05:50.698888
| 2023-05-21T21:08:14
| 2023-05-21T21:26:14
| 173,467,275
| 8
| 7
|
Apache-2.0
| 2023-05-21T21:58:40
| 2019-03-02T15:50:53
|
Python
|
UTF-8
|
Python
| false
| false
| 6,272
|
py
|
#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Module to update db migration information in Airflow
"""
from __future__ import annotations
import os
import re
from pathlib import Path
from textwrap import wrap
from typing import TYPE_CHECKING, Iterable
from alembic.script import ScriptDirectory
from tabulate import tabulate
from airflow import __version__ as airflow_version
from airflow.utils.db import _get_alembic_config
if TYPE_CHECKING:
from alembic.script import Script
airflow_version = re.match(r"(\d+\.\d+\.\d+).*", airflow_version).group(1) # type: ignore
project_root = Path(__file__).parents[2].resolve()
def replace_text_between(file: Path, start: str, end: str, replacement_text: str):
    """Rewrite *file* so the text between the *start* and *end* markers
    becomes *replacement_text*; the markers themselves are kept."""
    content = file.read_text()
    # Everything before the first occurrence of the start marker.
    prefix = content.split(start)[0]
    # Everything after the first occurrence of the end marker.
    suffix = content.split(end)[1]
    file.write_text(f"{prefix}{start}{replacement_text}{end}{suffix}")
def wrap_backticks(val):
    """Wrap *val* in RST double backticks.

    Tuples/lists are wrapped element-wise and joined with ',\\n'.
    """
    if isinstance(val, (tuple, list)):
        return ",\n".join(f"``{item}``" for item in val)
    return f"``{val}``"
def update_doc(file, data):
    """Regenerate the auto-generated migrations table inside *file*.

    *data* is a list of row dicts (revision/down_revision/version/description);
    only the text between the two marker comments is replaced.
    """
    replace_text_between(
        file=file,
        start=" .. Beginning of auto-generated table\n",
        end=" .. End of auto-generated table\n",
        replacement_text="\n"
        + tabulate(
            headers={
                "revision": "Revision ID",
                "down_revision": "Revises ID",
                "version": "Airflow Version",
                "description": "Description",
            },
            tabular_data=data,
            tablefmt="grid",
            stralign="left",
            # Revision ids look numeric; keep them as strings.
            disable_numparse=True,
        )
        + "\n\n",
    )
def has_version(content):
    """Return True if *content* has a line assigning ``airflow_version``."""
    version_line = re.compile(r"^airflow_version\s*=.*", flags=re.MULTILINE)
    return version_line.search(content) is not None
def insert_version(old_content, file):
    """Write *file* back with an ``airflow_version`` assignment inserted
    directly after the ``depends_on`` line of *old_content*."""
    version_line = f"airflow_version = '{airflow_version}'"
    depends_on = re.compile(r"^depends_on.*", flags=re.MULTILINE)
    new_content = depends_on.sub(lambda m: m.group(0) + "\n" + version_line, old_content)
    file.write_text(new_content)
def revision_suffix(rev: Script):
    """Return a short annotation describing *rev*'s position in the graph."""
    # First matching flag wins, in the same precedence order as before.
    for flag, suffix in (
        ("is_head", " (head)"),
        ("is_base", " (base)"),
        ("is_merge_point", " (merge_point)"),
        ("is_branch_point", " (branch_point)"),
    ):
        if getattr(rev, flag):
            return suffix
    return ""
def ensure_airflow_version(revisions: Iterable[Script]):
    """Add an ``airflow_version`` line to every revision module missing one."""
    for revision in revisions:
        assert revision.module.__file__ is not None  # For Mypy.
        module_file = Path(revision.module.__file__)
        text = module_file.read_text()
        if has_version(text):
            continue
        insert_version(text, module_file)
def get_revisions() -> Iterable[Script]:
    """Yield every alembic revision in the script directory's walk order."""
    config = _get_alembic_config()
    script = ScriptDirectory.from_config(config)
    yield from script.walk_revisions()
def update_docs(revisions: Iterable[Script]):
    """Rebuild the migrations-ref.rst table from *revisions*."""
    doc_data = []
    for rev in revisions:
        doc_data.append(
            dict(
                revision=wrap_backticks(rev.revision) + revision_suffix(rev),
                down_revision=wrap_backticks(rev.down_revision),
                version=wrap_backticks(rev.module.airflow_version),  # type: ignore
                # Wrap long revision descriptions so the grid table stays narrow.
                description="\n".join(wrap(rev.doc, width=60)),
            )
        )
    update_doc(
        file=project_root / "docs" / "apache-airflow" / "migrations-ref.rst",
        data=doc_data,
    )
def num_to_prefix(idx: int) -> str:
    """Return the 1-based, zero-padded ordinal prefix for migration *idx*.

    E.g. ``num_to_prefix(0) == "0001_"``.  Uses real zero padding instead of
    the previous ``"000..."[-4:]`` slicing, which silently truncated the
    leading digit once the ordinal exceeded 9999.
    """
    return f"{idx + 1:04d}_"
def ensure_mod_prefix(mod_name, idx, version):
    """Return *mod_name* carrying the ``NNNN_major_minor_patch_`` prefix."""
    prefix = num_to_prefix(idx) + "_".join(version) + "_"
    standardized = re.match(r"([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_(.+)", mod_name)
    if standardized:
        # Previously standardized file: strip the old prefix, keep the tail.
        base_name = standardized.group(5)
    else:
        # New migration file in alembic's default `<hash>_<slug>` format.
        fresh = re.match(r"([a-z0-9]+)_(.+)", mod_name)
        base_name = fresh.group(2) if fresh else mod_name
    return prefix + base_name
def ensure_filenames_are_sorted(revisions):
    """
    Rename migration files so their on-disk prefix matches topological order.

    Raises SystemExit with the exact ``alembic merge`` hint if multiple
    unmerged heads exist (renaming would be ambiguous in that case).
    """
    renames = []
    is_branched = False
    unmerged_heads = []
    for idx, rev in enumerate(revisions):
        mod_path = Path(rev.module.__file__)
        version = rev.module.airflow_version.split(".")[0:3]  # only first 3 tokens
        correct_mod_basename = ensure_mod_prefix(mod_path.name, idx, version)
        if mod_path.name != correct_mod_basename:
            renames.append((mod_path, Path(mod_path.parent, correct_mod_basename)))
        # Track whether we are between a branch point and its merge point.
        if is_branched and rev.is_merge_point:
            is_branched = False
        if rev.is_branch_point:
            is_branched = True
        elif rev.is_head:
            unmerged_heads.append(rev.revision)
    if is_branched:
        # Unmerged branch: give the developer the exact merge command to run.
        head_prefixes = [x[0:4] for x in unmerged_heads]
        alembic_command = (
            "alembic merge -m 'merge heads " + ", ".join(head_prefixes) + "' " + " ".join(unmerged_heads)
        )
        raise SystemExit(
            "You have multiple alembic heads; please merge them with the `alembic merge` command "
            f"and re-run pre-commit. It should fail once more before succeeding. "
            f"\nhint: `{alembic_command}`"
        )
    # Apply renames only after the full pass succeeded.
    for old, new in renames:
        os.rename(old, new)
if __name__ == "__main__":
    # Pass 1 (oldest-first): make sure every migration declares airflow_version.
    revisions = list(reversed(list(get_revisions())))
    ensure_airflow_version(revisions=revisions)
    # Pass 2: re-read (files may have been modified) and normalize filenames.
    revisions = list(reversed(list(get_revisions())))
    ensure_filenames_are_sorted(revisions)
    # Pass 3: regenerate the documentation table in alembic's walk order.
    revisions = list(get_revisions())
    update_docs(revisions)
|
[
"noreply@github.com"
] |
noreply@github.com
|
3d6ae1524644fc9c4474380cbf883f97a21eee67
|
2e414394e8a0a85dd7244fcd24d8b9910ce2dbea
|
/arcpy_pyqt5.py
|
f39d6db2ddd4995d176225d71761ac4abc42c2dc
|
[] |
no_license
|
GitHubQXJ/Pyqt
|
f8370b7a006cb0a3e9b0180da3266f1cc76aadbe
|
5a7bf2a5182d7b10726d9163beca1721ab4a7e5a
|
refs/heads/master
| 2020-03-29T02:08:37.446753
| 2018-09-19T12:14:33
| 2018-09-19T12:14:33
| 149,422,299
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,968
|
py
|
# coding=utf-8
import arcpy_crete_xml
import sys
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
class picture(QWidget):
    """Small tool window: pick an image, choose an output path, then
    generate an annotation xml for it via arcpy_crete_xml.Createxml."""
    def __init__(self):
        super(picture, self).__init__()
        self.resize(600, 400)
        self.setWindowTitle("label显示图片")
        # Label that echoes the chosen image path.
        self.label = QLabel(self)
        self.label.setStyleSheet("QLabel{background:white;}"
                                 "QLabel{color:rgb(300,300,300,120);font-size:10px;font-weight:bold;font-family:宋体;}"
                                 )
        # Button: select the source image
        select_img = QPushButton(self)
        select_img.setText("选择图像")
        select_img.move(10, 30)
        select_img.clicked.connect(self.SelectImage)
        # Button: choose the path/name of the generated xml
        open_xml = QPushButton(self)
        open_xml.setText("输入生成xml的路径与名字")
        open_xml.move(10, 90)
        open_xml.clicked.connect(self.OpenXml)
        # Button: run the xml generation
        create_xml = QPushButton(self)
        create_xml.setText("点击生成xml")
        create_xml.move(10, 150)
        create_xml.resize(80,40)
        create_xml.clicked.connect(self. ImplementXml)
    def SelectImage(self):
        # Stores the chosen image path in the module-level global `imgName`.
        global imgName
        imgName, imgType = QFileDialog.getOpenFileName(self, "打开图像", "", "All Files(*)")
        self.label.setText(imgName)
        self.label.move(120,40)
        self.label.adjustSize()
    def OpenXml(self):
        # Stores the (path, selected filter) tuple from the save dialog
        # in the module-level global `fileName2`.
        global fileName2
        fileName2 = QFileDialog.getSaveFileName(self,
                                                "文件保存",
                                                "C:/",
                                                " *.xml")
    def ImplementXml(self):
        # NOTE(review): assumes SelectImage and OpenXml were used first;
        # otherwise `imgName`/`fileName2` are undefined and this raises
        # NameError.
        arcpy_crete_xml.Createxml(imgName,fileName2[0])
if __name__ == "__main__":
    # Standard Qt bootstrap: one QApplication, show the window, run the loop.
    app = QtWidgets.QApplication(sys.argv)
    my = picture()
    my.show()
    sys.exit(app.exec_())
|
[
"284451800a@gmail.com"
] |
284451800a@gmail.com
|
7b8246fb813397ebd4875bdf28fb67ca041509c4
|
5c79958d97d03452adb4ef076c0a315883dc8bcd
|
/python/oneflow/compatible/single_client/test/ops/test_l1loss.py
|
a487e4cfd29a1b8930e5db611c9d6693737bdc77
|
[
"Apache-2.0"
] |
permissive
|
CHzhangi/oneflow
|
a9fcf0eeeb6a6873ae7234ad811c9dfb9f8ffec6
|
4ea3935458cc83dcea0abd88dd613f09c57dc01a
|
refs/heads/master
| 2023-07-09T21:59:07.442976
| 2021-07-27T09:27:19
| 2021-07-27T09:27:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,735
|
py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
from collections import OrderedDict
from typing import Dict
import numpy as np
from test_util import GenArgList
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as tp
def _compare_l1loss_with_np(
    input_shape, target_shape, device_type, machine_ids, device_counts
):
    """
    Check oneflow's L1Loss (none/mean/sum reductions) and the gradient of
    the mean-reduced loss against a NumPy reference on random inputs.
    """
    input = np.random.random(size=input_shape).astype(np.float32)
    target = np.random.random(size=target_shape).astype(np.float32)
    assert device_type in ["cpu", "gpu"]
    func_config = flow.FunctionConfig()
    flow.clear_default_session()
    if device_type == "cpu":
        flow.config.cpu_device_num(device_counts)
    else:
        flow.config.gpu_device_num(device_counts)
    func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids))
    func_config.default_logical_view(flow.scope.consistent_view())
    def np_l1loss(np_input, np_target):
        # Reference forward pass: elementwise |target - input| and its
        # mean/sum reductions.
        np_l1 = np.abs(np_target - np_input)
        np_l1_mean = np.mean(np_l1)
        np_l1_sum = np.sum(np_l1)
        np_l1_dict = {
            "np_l1_loss": np_l1,
            "np_l1_loss_mean": np_l1_mean,
            "np_l1_loss_sum": np_l1_sum,
        }
        return np_l1_dict
    def np_l1_loss_diff(np_input, np_target):
        # Reference gradient of the mean-reduced loss:
        # sign(prediction - label) / element_count, elementwise.
        original_shape = np_target.shape
        elemcnt = np_target.size
        prediction = np_input.reshape(-1)
        label = np_target.reshape(-1)
        prediction_grad = np.zeros(elemcnt).astype(prediction.dtype)
        for i in np.arange(elemcnt):
            diff = prediction[i] - label[i]
            prediction_grad[i] = np.sign(diff)
        grad_mean = prediction_grad.reshape(original_shape) / elemcnt
        grad_dict = {"np_grad_mean": grad_mean}
        return grad_dict
    np_out_l1loss_dict = np_l1loss(input, target)
    np_grad_dict = np_l1_loss_diff(input, target)
    def assert_prediction_grad(blob: tp.Numpy):
        # Hook invoked with the gradient flowing into x_var; must match numpy.
        assert np.allclose(blob, np_grad_dict["np_grad_mean"])
    @flow.global_function(type="train", function_config=func_config)
    def oneflow_l1loss(
        of_input: tp.Numpy.Placeholder(shape=input.shape),
        of_target: tp.Numpy.Placeholder(shape=target.shape),
    ) -> Dict[str, tp.Numpy]:
        with flow.scope.placement(device_type, "0:0"):
            # Trainable zero variable so autograd produces a gradient
            # w.r.t. x_var (placeholders alone have no gradient).
            v = flow.get_variable(
                shape=target.shape,
                dtype=flow.float32,
                initializer=flow.constant_initializer(0),
                name="v",
            )
            x_var = of_input + v
            flow.watch_diff(x_var, assert_prediction_grad)
        l1loss = flow.nn.L1Loss(x_var, of_target, reduction="none", name="of_l1loss")
        l1loss_mean = flow.nn.L1Loss(
            x_var, of_target, reduction="mean", name="of_l1loss_mean"
        )
        l1loss_sum = flow.nn.L1Loss(
            x_var, of_target, reduction="sum", name="of_l1loss_sum"
        )
        with flow.scope.placement(device_type, "0:0"):
            # Only the mean-reduced loss is minimized; that is the loss whose
            # gradient the watch_diff hook checks.
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
            ).minimize(l1loss_mean)
        return {
            "of_l1_loss": l1loss,
            "of_l1_loss_mean": l1loss_mean,
            "of_l1_loss_sum": l1loss_sum,
        }
    of_out_l1loss_dict = oneflow_l1loss(input, target)
    assert np.allclose(
        of_out_l1loss_dict["of_l1_loss"], np_out_l1loss_dict["np_l1_loss"]
    )
    assert np.allclose(
        of_out_l1loss_dict["of_l1_loss_mean"][0], np_out_l1loss_dict["np_l1_loss_mean"]
    )
    assert np.allclose(
        of_out_l1loss_dict["of_l1_loss_sum"][0], np_out_l1loss_dict["np_l1_loss_sum"]
    )
def _gen_arg_dict(shape, device_type, machine_ids, device_counts):
arg_dict = OrderedDict()
arg_dict["input_shape"] = [shape]
arg_dict["target_shape"] = [shape]
arg_dict["device_type"] = [device_type]
arg_dict["machine_ids"] = [machine_ids]
arg_dict["device_counts"] = [device_counts]
return arg_dict
@flow.unittest.skip_unless_1n1d()
class Testl1loss1n1d(flow.unittest.TestCase):
    """Single-node, single-device L1Loss tests."""
    def test_l1loss_cpu(test_case):
        arg_dict = _gen_arg_dict(
            shape=(16, 3), device_type="cpu", machine_ids="0:0", device_counts=1
        )
        for arg in GenArgList(arg_dict):
            _compare_l1loss_with_np(*arg)
    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_l1loss_gpu(test_case):
        arg_dict = _gen_arg_dict(
            shape=(3, 16, 32), device_type="gpu", machine_ids="0:0", device_counts=1
        )
        for arg in GenArgList(arg_dict):
            _compare_l1loss_with_np(*arg)
@flow.unittest.skip_unless_1n2d()
class Testl1loss1n2d(flow.unittest.TestCase):
    """Single-node, two-device (GPU only) L1Loss tests."""
    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_l1loss_gpu_1n2d(test_case):
        arg_dict = _gen_arg_dict(
            shape=(3, 32, 16), device_type="gpu", machine_ids="0:0-1", device_counts=2
        )
        for arg in GenArgList(arg_dict):
            _compare_l1loss_with_np(*arg)
if __name__ == "__main__":
    # Run the oneflow test cases through the standard unittest runner.
    unittest.main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
32b12704ed0452a472f4a93b47cf393fa27d480c
|
899bac17acf97252a33d91af076ff1f16b975210
|
/eduiddashboard/forms.py
|
b5bafba7d3c82f84075476f0b8cf2fa105708771
|
[] |
no_license
|
isabella232/eduid-dashboard
|
91a209f7833f26a7949cecec60df2e501a82f840
|
99cffaa90f41b13ec34f9d057f19630c644df6ee
|
refs/heads/master
| 2023-03-16T05:19:36.184687
| 2018-07-27T12:27:53
| 2018-07-27T12:27:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 114
|
py
|
from deform.form import Form
class BaseForm(Form):
    """Base form class to override if needed.

    Thin subclass of deform's ``Form`` so project-wide form behaviour can
    later be added in a single place.
    """
    pass
|
[
"msaelices@yaco.es"
] |
msaelices@yaco.es
|
2a42f51f32d4980a8248c2497f3243c3cba3bd4c
|
29286f5f9629bc44fd56d169e39b691536387208
|
/lab5/my_application/server.py
|
68e32e80b0d7da0b6dc42b540de30460d3b0ed71
|
[
"MIT"
] |
permissive
|
craigbrennan24/Lab4-Docker
|
a71a302d34511f63f396ab3359133b893c1a0e60
|
bccd374636af5f69e32c05907690c36ecaf08cae
|
refs/heads/master
| 2021-01-10T13:17:39.842731
| 2015-10-21T14:59:41
| 2015-10-21T14:59:41
| 44,254,914
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
from flask import Flask
app = Flask(__name__)
@app.route("/")
def index():
return "Index page\n"
@app.route('/hello')
def hello():
    """Plain-text hello-world route."""
    return "Hello world!\n"
if __name__ == "__main__":
    # Listen on all interfaces (container-friendly); debug server only.
    app.run(host="0.0.0.0",port=8080,debug=True)
|
[
"craig_brennan24@hotmail.com"
] |
craig_brennan24@hotmail.com
|
1188ff03230eeceac03d987c78afe09f2a08b6a2
|
aaf043152f1ed3e59c85ccceee25ea9122e0edaa
|
/pyvoltha_min/adapters/extensions/events/device_events/onu/onu_lopc_miss_event.py
|
b0d10ca325e12c42aa5bf627882760a7c8198953
|
[
"Apache-2.0"
] |
permissive
|
hanyingzhong/pyvoltha-min
|
15fce1b98ecfed16a1e7df593068875568a0c039
|
1e7c313b7be612ca17a42a098dd83743a996c38c
|
refs/heads/master
| 2023-03-25T21:37:42.819826
| 2021-03-24T19:47:01
| 2021-03-24T19:47:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,469
|
py
|
# Copyright 2017-present Adtran, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from voltha_protos.events_pb2 import EventCategory, EventSubCategory
from pyvoltha_min.adapters.extensions.events.adapter_events import DeviceEventBase
class OnuLopcMissEvent(DeviceEventBase):
    """ONU LOPC_MISS equipment event raised against a specific ONU."""
    def __init__(self, event_mgr, onu_id, intf_id, serial_number, raised_ts):
        # Classified as an equipment event in the ONU sub-category.
        super(OnuLopcMissEvent, self).__init__(event_mgr, raised_ts, object_type='onu LOPC_MISS',
                                               event='ONU_LOPC_MISS',
                                               category=EventCategory.EQUIPMENT,
                                               sub_category=EventSubCategory.ONU)
        # Identity of the affected ONU, included in the event context.
        self._onu_id = onu_id
        self._intf_id = intf_id
        self._serial_number = serial_number
    def get_context_data(self):
        """Return the event's context payload identifying the ONU."""
        return {'onu-id': self._onu_id,
                'onu-intf-id': self._intf_id,
                'onu-serial-number': self._serial_number}
|
[
"chip.boling@tibitcom.com"
] |
chip.boling@tibitcom.com
|
a3dcd4456879cc220aa0640c372c170a6f3e4263
|
75cc297e0ed208ed48dc04b4d86400bb0a5e5c50
|
/examples/timeline/model_solution/timeline/app/models.py
|
4d91e10097bb0fc3c385dfe8fb08c9184fd19d46
|
[] |
no_license
|
SimonCockx/entity
|
c0c2b798b18b389431b223061c6c025158afde85
|
d888771f73f6db0300afbf8ec3fcc1fa6f9cf106
|
refs/heads/main
| 2023-04-08T09:13:58.300487
| 2021-04-19T00:01:34
| 2021-04-19T00:01:34
| 359,275,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,212
|
py
|
from collections import Sequence
from typing import Any
from django.db import models
from app.entities import EventRepo, Event, EntityDoesNotExist
MAX_NAME_LENGTH = 100
class EventORM(models.Model):
    """Django ORM mapping for the Event domain entity."""
    name = models.CharField(max_length=MAX_NAME_LENGTH)
    year = models.IntegerField()
    description = models.TextField()
    @classmethod
    def from_entity(cls, event: Event) -> 'EventORM':
        """Build an (unsaved) ORM row from a domain Event."""
        # Positional args map to (id, name, year, description).
        return EventORM(event.id, event.name, event.year, event.description)
    def to_entity(self) -> Event:
        """Convert this ORM row into a domain Event."""
        return Event(self.id, self.name, self.year, self.description)
class DjangoEventRepo(EventRepo):
    """EventRepo implementation backed by the Django ORM (EventORM)."""
    def list(self) -> list[Event]:
        """Return all events as domain entities."""
        return [q.to_entity() for q in EventORM.objects.all()]
    def get(self, id: int) -> Event:
        """Return the event with primary key *id*.

        Raises EntityDoesNotExist so callers never see the ORM exception.
        """
        try:
            return EventORM.objects.get(id=id).to_entity()
        except EventORM.DoesNotExist:
            raise EntityDoesNotExist()
    def save(self, event: Event) -> None:
        """Persist (insert or update) *event*."""
        EventORM.from_entity(event).save()
    def create(self, ev_dict: dict[str, Any]) -> Event:
        """Create a new event from field values and return it as an entity."""
        q = EventORM.objects.create(**ev_dict)
        return q.to_entity()
    def delete(self, id: int) -> None:
        """Delete the event with primary key *id* (no-op if absent)."""
        EventORM.objects.filter(id=id).delete()
|
[
"simon@sikanda.be"
] |
simon@sikanda.be
|
e2ec00935752105a1560d7cc0670dfea639a3dc8
|
78a733324876a9369e3f7f2ad1dc235a1118f0ae
|
/src/Location/Location.py
|
62c6477b258c8cb96fba284c746fb5a94675c81d
|
[] |
no_license
|
razibchamp/drs
|
fe1a00b323661981b118abce1ed7a70ad0566b23
|
dcb8188c19d373e4eecc230803495c09cb621667
|
refs/heads/master
| 2020-03-25T02:04:49.763993
| 2018-08-19T09:22:27
| 2018-08-19T09:22:27
| 143,272,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
class Location:
    """A geographic coordinate: a latitude/longitude pair."""

    def __init__(self, latitude, longitude):
        self.latitude = latitude
        self.longitude = longitude

    def __repr__(self):
        # Debug-friendly representation; does not affect existing callers.
        return f"{type(self).__name__}(latitude={self.latitude!r}, longitude={self.longitude!r})"
|
[
"nightshuvo@gmail.com"
] |
nightshuvo@gmail.com
|
181a9e06bb2b8f84cd124f76613902dc3253f2d4
|
f8b15bde2e75b24bc157e216d4242c085c2d5f3a
|
/src/tools/operator_wrapper/alert_operator.py
|
f8dae50ecdcaa1754343cae2be4342753f3a589c
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
AldonahZero/pai
|
181704d0ec169db3d2e49c5953e332900f5b24d9
|
e6667b9a1c89b8aba26e4b818f7338a083c3d359
|
refs/heads/master
| 2020-09-09T10:56:59.108163
| 2019-11-13T08:25:55
| 2019-11-13T08:25:55
| 221,428,287
| 2
| 0
|
MIT
| 2019-11-13T10:02:34
| 2019-11-13T10:02:33
| null |
UTF-8
|
Python
| false
| false
| 2,308
|
py
|
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import logging
import sys
from .base_operator import BaseOperator
logger = logging.getLogger(__name__)
class AlertOperator(BaseOperator):
    """Query Prometheus for firing GPU-related alerts on cluster nodes."""
    # Alert names considered GPU-related when deciding node health.
    ALERT_TYPE = {
        "gpu_related": {"NvidiaSmiLatencyTooLarge", "NvidiaSmiEccError", "NvidiaMemoryLeak", "NvidiaZombieProcess", "GpuUsedByExternalProcess", "GpuUsedByZombieContainer"},
    }
    def __init__(self, prometheus_ip, prometheus_port=9091):
        super(AlertOperator, self).__init__(prometheus_ip, prometheus_port)
    def get_gpu_alert_nodes(self):
        """Return {node_ip: alert_name} for nodes with a firing GPU alert.

        NOTE: exits the whole process (sys.exit(1)) if the Prometheus
        query does not report success.
        """
        api_path = "/prometheus/api/v1/query?query=ALERTS"
        alerts_info = self.request(api_path)
        if alerts_info["status"] != "success":
            logger.error("Alert response error: {}".format(alerts_info["data"]))
            sys.exit(1)
        alerts_info = alerts_info["data"]["result"]
        gpu_alert_nodes = {}
        for alert in alerts_info:
            metric = alert["metric"]
            # Keep only currently-firing alerts from the GPU-related set.
            if metric["alertname"] in self.ALERT_TYPE["gpu_related"] and metric["alertstate"] == "firing":
                # instance is "ip:port"; key the result by bare node ip.
                node_ip = metric["instance"].split(':')[0]
                gpu_alert_nodes[node_ip] = metric["alertname"]
        return gpu_alert_nodes
|
[
"noreply@github.com"
] |
noreply@github.com
|
dcc93bea34e67aac667f305955c10c2cc9644471
|
9801d1f7eb7a1e4daeece8de4673f943bd40841c
|
/Bookmark/Bookmark/urls.py
|
a2b4d99973f7f31064076883b685958196ab77a2
|
[] |
no_license
|
Yash-Sapre/Saveit
|
b80d80943737317db70b4e219c73185bd16c8535
|
6dbc8be4ab9ef526e91916bbb218a873f1976e8b
|
refs/heads/main
| 2023-07-22T14:28:55.153289
| 2021-09-10T10:43:12
| 2021-09-10T10:43:12
| 405,006,082
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 863
|
py
|
"""Bookmark URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.urls.conf import include
urlpatterns = [
    path('admin/', admin.site.urls),
    # Delegate everything under /saveit/ to the saveit app's URLconf.
    path('saveit/',include('saveit.urls',namespace='saveit')),
]
|
[
"yss052001@gmail.com"
] |
yss052001@gmail.com
|
b982d5403b5334baa6e3ec0af46dee88ac765ead
|
86e492636b8459f32493e73536cb4a970c9d2c52
|
/for-3.py
|
1aa9d15aa252ae61b6557c15a72ea65daae00169
|
[] |
no_license
|
royanusree17/royanusree
|
c29d1e8e31179367b837f1412b3bb80409dbde35
|
ab78a843d70df716951a82ece4789fa2709b73f9
|
refs/heads/master
| 2020-04-16T03:13:33.561549
| 2019-01-11T11:04:54
| 2019-01-11T11:04:54
| 165,224,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# Tally odd vs. even entries in one pass; evens are simply the remainder.
odd_count = sum(1 for value in numbers if value % 2)
even_count = len(numbers) - odd_count
print("total even numbers is:", even_count)
print("total odd numbers is:", odd_count)
|
[
"royanusree17@gmail.com"
] |
royanusree17@gmail.com
|
ed4e792ba70c4002bfe1ec382590ba8af555085c
|
ebd5c9da14013b996f42062a1b4c703124aa7bfb
|
/manage.py
|
ac47b282914fc285307777870b1642bfa6f6fe4b
|
[] |
no_license
|
nargok/graphql_hackernews_clone
|
fcba15b8b0d8f442260dc755d704c766ff876d6a
|
cd25c2bd71316831b072e12252263c6ad5c9febf
|
refs/heads/master
| 2020-04-11T18:14:35.945830
| 2018-12-22T02:36:02
| 2018-12-22T02:36:02
| 161,991,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 548
|
py
|
#!/usr/bin/env python
"""Django's command-line entry point (runserver, migrate, test, ...)."""
import os
import sys

if __name__ == '__main__':
    # Point Django at this project's settings module before anything loads.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'graphql_tutorial.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint about the most common causes (missing install
        # or inactive virtualenv) while chaining the original error.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
|
[
"nargok@gmail.com"
] |
nargok@gmail.com
|
d9cc4f54dc821de9d57341568500d5e8c9b79f44
|
c8e18c574497d8778e298a5342c5c52ccdcabe6d
|
/rango/views.py
|
f0e6dffbbe64e0584ece31ac3c6f7d509e71d6cd
|
[] |
no_license
|
cinvincible/itech
|
52e2a3ad9f9523b8ebc90f6465aa98bb2b961d98
|
244bf1f86bfb6110b0127f0e48b6c33144c5a476
|
refs/heads/main
| 2023-07-09T02:38:17.437052
| 2021-07-30T11:12:05
| 2021-07-30T11:12:05
| 390,513,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,933
|
py
|
from datetime import datetime
from django.shortcuts import render
from django.http import HttpResponse
from django.shortcuts import redirect
from django.urls import reverse
from rango.models import Category, Page
from rango.forms import CategoryForm, PageForm
from rango.forms import UserForm, UserProfileForm
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout
def index(request):
    """Render the homepage with the five most-liked categories and five
    most-viewed pages, recording the visit via the session handler."""
    request.session.set_test_cookie()
    visitor_cookie_handler(request)
    top_categories = Category.objects.order_by('-likes')[:5]
    top_pages = Page.objects.order_by('-views')[:5]
    context = {
        'boldmessage': 'Crunchy, creamy, cookie, candy, cupcake!',
        'categories': top_categories,
        'pages': top_pages,
    }
    return render(request, 'rango/index.html', context=context)
def about(request):
    """Render the about page, reporting the session's visit count."""
    # Confirm the test cookie set on the index page, then clean it up.
    if request.session.test_cookie_worked():
        print("TEST COOKIE WORKED!")
        request.session.delete_test_cookie()
    context_dict = {}
    # Refresh the counter before reading it out of the session.
    visitor_cookie_handler(request)
    context_dict['visits'] = request.session['visits']
    return render(request, 'rango/about.html', context=context_dict)
def show_category(request, category_name_slug):
    """Render one category's detail page with its pages.

    When the slug matches no category, both context entries are None and
    the template is expected to show a "not found" message.
    """
    context = {'category': None, 'pages': None}
    try:
        matched = Category.objects.get(slug=category_name_slug)
        context['category'] = matched
        context['pages'] = Page.objects.filter(category=matched)
    except Category.DoesNotExist:
        pass
    return render(request, 'rango/category.html', context=context)
@login_required
def add_Category(request):
    """Show the new-category form; on a valid POST save it and go home."""
    if request.method == 'POST':
        form = CategoryForm(request.POST)
        if form.is_valid():
            form.save(commit=True)
            return redirect('/rango/')
        # Invalid submission: surface the errors and re-render the form.
        print(form.errors)
    else:
        form = CategoryForm()
    return render(request, 'rango/add_category.html', {'form': form})
@login_required
def add_Page(request, category_name_slug):
    """Show/handle the form adding a Page to the category named by
    *category_name_slug*; unknown slugs bounce back to the homepage."""
    try:
        category = Category.objects.get(slug=category_name_slug)
    except Category.DoesNotExist:
        category = None
    if category is None:
        return redirect('/rango/')
    form = PageForm()
    if request.method == 'POST':
        form = PageForm(request.POST)
        if form.is_valid():
            if category:
                # Defer the save so category and views can be filled in.
                page = form.save(commit=False)
                page.category = category
                page.views = 0
                page.save()
                return redirect(reverse('rango:show_category',
                                        kwargs={'category_name_slug':
                                                category_name_slug}))
        else:
            print(form.errors)
    context_dict = {'form': form, 'category': category}
    return render(request, 'rango/add_page.html', context=context_dict)
def register(request):
    """Handle sign-up: on POST validate both forms, create the User (with a
    hashed password) and its linked UserProfile; on GET show blank forms."""
    registered = False  # flips to True once sign-up succeeds
    if request.method == 'POST':
        user_form = UserForm(request.POST)
        profile_form = UserProfileForm(request.POST)
        if user_form.is_valid() and profile_form.is_valid():
            user = user_form.save()
            # Replace the raw form password with its hash, then re-save.
            user.set_password(user.password)
            user.save()
            # Defer saving the profile until its user FK is attached.
            profile = profile_form.save(commit=False)
            profile.user = user
            if 'picture' in request.FILES:
                profile.picture = request.FILES['picture']
            profile.save()
            registered = True
        else:
            print(user_form.errors, profile_form.errors)
    else:
        user_form = UserForm()
        profile_form = UserProfileForm()
    return render(request, 'rango/register.html', context={'user_form': user_form,
                                                           'profile_form': profile_form,
                                                           'registered': registered})
def user_login(request):
    """Authenticate a user from POSTed credentials; GET shows the form.

    Successful logins redirect to the index; inactive accounts and bad
    credentials get plain-text error responses.
    """
    if request.method == 'POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(username=username, password=password)
        if user:
            if user.is_active:
                login(request, user)
                return redirect(reverse('rango:index'))
            else:
                return HttpResponse("Your Rango account is disabled.")
        else:
            # NOTE(review): this prints the raw password to stdout — a
            # security risk; remove or redact before production use.
            print(f"Invalid login details: {username}, {password}")
            return HttpResponse("Invalid login details supplied.")
    else:
        return render(request, 'rango/login.html')
@login_required
def user_logout(request):
    """End the authenticated session and send the user back home."""
    logout(request)
    return redirect(reverse('rango:index'))
@login_required
def restricted(request):
    # Page only reachable by authenticated users; login_required redirects
    # anonymous visitors to the configured login URL.
    return render(request, 'rango/restricted.html')
def get_server_side_cookie(request, cookie, default_val=None):
    """Return the session value stored under *cookie*.

    Falls back to *default_val* when the key is absent or holds a falsy
    value (None, empty string, 0) — note: falsy, not just missing.
    """
    # Removed a stray debug print(val) left over from development; it wrote
    # every cookie read to stdout without changing the return value.
    val = request.session.get(cookie)
    if not val:
        val = default_val
    return val
def visitor_cookie_handler(request):
    """Maintain per-session visit tracking.

    Bumps the session's 'visits' counter at most once per day and keeps
    'last_visit' holding the timestamp of the counted visit.
    """
    visits = int(get_server_side_cookie(request, 'visits', '1'))
    last_visit_cookie = get_server_side_cookie(request,
                                               'last_visit',
                                               str(datetime.now()))
    # [:-7] strips the ".%f" microseconds suffix before parsing.
    last_visit_time = datetime.strptime(last_visit_cookie[:-7],
                                        '%Y-%m-%d %H:%M:%S')
    if (datetime.now() - last_visit_time).days > 0:
        # More than a day since the last counted visit: count this one.
        visits = visits + 1
        request.session['last_visit'] = str(datetime.now())
    else:
        request.session['last_visit'] = last_visit_cookie
    request.session['visits'] = visits
|
[
"noreply@github.com"
] |
noreply@github.com
|
110cfc179264d2d70fbca93edc3f46956b3bc8f2
|
e7a45a5ef1cbbac2e6f9865f46fd5350cd1358da
|
/download_wallpapers_automatically.py
|
0b026b7498f2ce85b5c2ef08594c8331c7e97db4
|
[] |
no_license
|
shanto12/Wallpaper_Downloader
|
3b49349f7bcef221e1d24e6198c4c319f339c7c9
|
e103e6dd98193c9cf0e71864f91f3d24d8334f7c
|
refs/heads/master
| 2023-05-30T17:41:16.550122
| 2020-06-24T06:29:40
| 2020-06-24T06:29:40
| 274,592,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,505
|
py
|
## Importing Necessary Modules
import requests # to get image from the web
import shutil # to save it locally
import re
from string import Template
IM_FOLDER = "C:/Users/smatthew/Downloads/wallpapers"
HOME_URL = "http://wallpaperswide.com/3440x1440-wallpapers-r/page/"
LIMIT = None
DONT_DOWNLOAD_SET = {'latest_wallpapers', 'aero-desktop-wallpapers', 'animals-desktop-wallpapers',
'architecture-desktop-wallpapers', 'army-desktop-wallpapers', 'artistic-desktop-wallpapers',
'awareness-desktop-wallpapers', 'black_and_white-desktop-wallpapers',
'cartoons-desktop-wallpapers', 'celebrities-desktop-wallpapers', 'city-desktop-wallpapers',
'computers-desktop-wallpapers', 'cute-desktop-wallpapers', 'elements-desktop-wallpapers',
'food_and_drink-desktop-wallpapers', 'funny-desktop-wallpapers', 'games-desktop-wallpapers',
'girls-desktop-wallpapers', 'holidays-desktop-wallpapers', 'love-desktop-wallpapers',
'motors-desktop-wallpapers', 'movies-desktop-wallpapers', 'music-desktop-wallpapers',
'nature-desktop-wallpapers', 'seasons-desktop-wallpapers', 'space-desktop-wallpapers',
'sports-desktop-wallpapers', 'travel-desktop-wallpapers', 'vintage-desktop-wallpapers'}
pattern = '<a href="/((?!\d*x\d*).{15,90}).html" title='
pattern = re.compile(pattern)
t = Template('http://wallpaperswide.com/download/$wall_name-$resolution.jpg')
def download_image(image_url):
    """Fetch *image_url* into IM_FOLDER; return True on success.

    A response with redirect history is treated as a miss — presumably the
    site redirects for unavailable resolutions instead of returning 404;
    confirm against the site behaviour.
    """
    filename = image_url.split("/")[-1]
    destination = IM_FOLDER + "/" + filename
    response = requests.get(image_url, stream=True)
    if response.status_code != 200 or response.history:
        return False
    # Without this flag the streamed body would arrive empty/compressed.
    response.raw.decode_content = True
    with open(destination, 'wb') as out_file:
        shutil.copyfileobj(response.raw, out_file)
    print('Image sucessfully Downloaded: ', image_url)
    return True
def process_page(page_url):
    """Scrape one listing page and return the set of wallpaper names on it,
    excluding the category links in DONT_DOWNLOAD_SET."""
    page_text = requests.get(page_url).text
    matches = pattern.findall(page_text)
    # [:-1] trims the capture's final character — presumably an artefact of
    # the regex capture; verify against the site markup before changing.
    names = {match[:-1] for match in matches if match not in DONT_DOWNLOAD_SET}
    print(f"Page Processed: {page_url}")
    return names
def process_image_downloads(image_name_set):
    """Try each wallpaper at descending resolutions, keeping the first hit."""
    for image_name in image_name_set:
        for resolution in ["5120x2160","3840x2160", "3840x1600"]:
            image_url = t.substitute(wall_name=image_name, resolution=resolution)
            if download_image(image_url):
                break
        else:
            # for/else: runs only when no resolution downloaded successfully.
            print("WARNING:No url found for image_name: {}".format(image_name))
# def test():
# pass
# text = """
# <a href="/chamarel_waterfalls_mauritius-wallpapers.html" title="View Chamarel Waterfalls, Mauritius Ultra HD Wallpaper for 4K UHD Widescreen desktop, tablet & smartphone" itemprop="significantLinks">
# <a href="/chamarel_waterfalls_mauritius-wallpapers.html" title="Chamarel Waterfalls, Mauritius Ultra HD Wallpaper for 4K UHD Widescreen desktop, tablet & smartphone">
# <a href="/shanto.html" title="Chamarel Waterfalls, Mauritius Ultra HD Wallpaper for 4K UHD Widescreen desktop, tablet & smartphone">
# <a href="/3840x1600-wallpapers-r.html" title="Chamarel Waterfalls, Mauritius Ultra HD Wallpaper for 4K UHD Widescreen desktop, tablet & smartphone">
# """
# pattern = '<a href="/(.*).html" title='
# pattern = '<a href="/((?!\d*x\d*).{15,}).html" title='
#
# pattern = re.compile(pattern)
# result = pattern.findall(text)
# print("done")
def main():
    """Walk the paginated listing, collect wallpaper names, then download."""
    page_num = 1
    page_url = HOME_URL + str(page_num)
    image_name_set = set()
    # Walrus assignment: fetch the page, stop once it yields no names or
    # the optional page LIMIT is exceeded.
    while (page_image_name_set:=process_page(page_url)) and (LIMIT is None or page_num<=LIMIT):
        print(f"Page number processing : {page_num}")
        # Each listing page is expected to contain exactly 18 wallpapers.
        if len(page_image_name_set)!=18:
            print("WARNING: Found length not equal to 18: Length: {}".format(len(page_image_name_set)))
        image_name_set.update(page_image_name_set)
        page_num +=1
        page_url = HOME_URL + str(page_num)
    process_image_downloads(image_name_set)
# test()
# NOTE(review): runs at import time; consider guarding with
# `if __name__ == "__main__":`.
main()
print("DONE!!!")
|
[
"shanto12@gmail.com"
] |
shanto12@gmail.com
|
51148ac47e877227c9dce5f337b517b769a256b5
|
fba1d31a224e521f0c7a7041c24c86475455e8f7
|
/backend/ediaristas_workshop/web/urls.py
|
2bf95b241e54aa5e6da11240040a9c8cc10de9ce
|
[
"MIT"
] |
permissive
|
shioheii/ediaristas
|
c402911256c02655333a485d7d3ac96e5e67721b
|
fd07472f532683c13511b12ffdc37a5816b729bf
|
refs/heads/main
| 2023-08-18T21:47:32.860984
| 2021-10-20T17:09:56
| 2021-10-20T17:09:56
| 419,107,067
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
from django.urls import path
from .views import cadastrar_diarista, listar_diaristas, editar_diarista, remover_diarista
urlpatterns = [
    # CRUD routes for diaristas; edit/remove take the record's integer id.
    path('cadastrar_diarista', cadastrar_diarista, name='cadastrar_diarista'),
    path('listar_diaristas', listar_diaristas, name='listar_diaristas'),
    path('editar_diarista/<int:diarista_id>', editar_diarista, name='editar_diarista'),
    path('remover_diarista/<int:diarista_id>', remover_diarista, name='remover_diarista')
]
|
[
"89222572+shioheii@users.noreply.github.com"
] |
89222572+shioheii@users.noreply.github.com
|
367fc94a0fd126565afc94f6fd44e62f3b1f9d8b
|
65829d312009ee4c686aad419a800678c6954755
|
/samples/basic/util_properties.py
|
cbc89f62bdb8ba01d82175d336ee8aeea3f73820
|
[] |
no_license
|
hercwey/PythonBestStudySamples
|
3d8fd667045ba648037a03c6867d9ec7faa2b22d
|
63696bbac1250ab60bfb6b9846bb4f884b7ffe6c
|
refs/heads/master
| 2020-04-17T21:35:33.541165
| 2017-02-07T01:51:27
| 2017-02-07T01:51:27
| 66,250,378
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,155
|
py
|
# -*- coding: utf-8 -*-
"""
python 操作properties文件
"""
class Properties(object):
    """Parse a Java-style .properties file into a (possibly nested) dict.

    Dotted keys (e.g. ``a.b.c``) are expanded into nested dictionaries:
    ``{'a': {'b': {'c': value}}}``.  Keys starting with a dot are stored
    literally (the expansion only triggers when '.' appears past index 0).
    """

    def __init__(self, fileName):
        self.fileName = fileName      # path of the .properties file
        self.properties = {}          # parsed result, filled by getProperties()

    def __getDict(self, strName, dictName, value):
        """Recursively expand a dotted key into nested dicts, storing value."""
        if (strName.find('.') > 0):
            k = strName.split('.')[0]
            dictName.setdefault(k, {})
            return self.__getDict(strName[len(k) + 1:], dictName[k], value)
        else:
            dictName[strName] = value
            return

    def getProperties(self):
        """Read the file and return the parsed properties dict.

        Everything after a ``#`` is a comment; only lines containing ``=``
        with a non-empty key are stored, and values keep any ``=`` past the
        first one.

        Fixes over the original: Python-3 ``except``/``with`` syntax, the
        deprecated ``'Ur'`` open mode dropped (universal newlines are the
        py3 default), and the file is now closed even when parsing raises.
        """
        with open(self.fileName, 'r') as pro_file:
            for line in pro_file:
                line = line.strip().replace('\n', '')
                if line.find("#") != -1:
                    line = line[0:line.find('#')]
                if line.find('=') > 0:
                    strs = line.split('=')
                    # Rebuild the value so embedded '=' chars are preserved.
                    strs[1] = line[len(strs[0]) + 1:]
                    self.__getDict(strs[0].strip(), self.properties, strs[1].strip())
        return self.properties
|
[
"bernwey@gmail.com"
] |
bernwey@gmail.com
|
639f782d0b5634e6547e1ac507f86cdcb51bf2ac
|
26551769200eafa5bdd72ea5a51f87e61dbd8d6d
|
/hackerrank/august_infinitum_2014/testit.py
|
ea69cdf1c777c240061cb87045566b78a4323b3d
|
[] |
no_license
|
kalachand/codes
|
f894945a2cdc4c7868fd1f24c3b7727f32cf5ba1
|
ed45d7ffe380e4e5d52f95e9542a108e4ceeceb7
|
refs/heads/master
| 2021-01-15T12:44:29.552598
| 2015-11-03T21:02:36
| 2015-11-03T21:02:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 109
|
py
|
import random

# Emit a random test-case file for a judge: the first line is the number of
# cases, then one "n m" pair per line.
# Fix: converted Python-2 print statements to Python-3 print() calls; the
# output format ("n m" separated by a space) is unchanged.
t = 1000
print(t)
for i in range(0, t):
    n = random.randint(1, 20)
    m = random.randint(1, 50)
    print(n, m)
|
[
"akash.wanted@gmail.com"
] |
akash.wanted@gmail.com
|
2efdbea82d4abfb77c931c704001160943c5b23f
|
f2f8a8b75c65d10e1a0f8cb89d27e558107e34b7
|
/greenBottles.py
|
e153e3a95d6514ef50058f1d414cc0882984bbe8
|
[] |
no_license
|
jrawson7093/code-change-test
|
c23dd9cac585c7e6f9a510efa8053de3ed3d4891
|
5a33ebd6e16c09ef9603adca2519a971e368aff2
|
refs/heads/master
| 2021-01-17T21:14:21.361669
| 2014-05-16T13:23:22
| 2014-05-16T13:23:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
import time
# Sing "Ten Green Bottles": count down from 10, repeating each verse's
# opening line twice, pausing one second between verses.
for bottles in range(10, 0, -1):
    for _ in range(2):
        print("{0} green bottle(s), hanging on the wall".format(bottles))
    print("And if 1 green bottle should accidently fall,")
    print("They'll be {0} green bottle(s) hanging on the wall.\n".format(bottles - 1))
    time.sleep(1)
|
[
"jrawson7093@soham-college.org.uk"
] |
jrawson7093@soham-college.org.uk
|
b006def7b59d29f5eaeeb1c93b391c39ce36af97
|
aac97d130077481103bcb4b8a10d2f3ecafe8dd2
|
/test.py
|
819f4d9e8ffed64796c9ef2095f5d8d7cc203194
|
[
"Zlib"
] |
permissive
|
orlp/pyflat
|
58963f8681693f736872e5eacec1ae73f436ffcc
|
06f3afa1e905aded583910197448c28894e7a1f2
|
refs/heads/master
| 2016-09-10T03:17:20.893770
| 2015-03-13T10:57:19
| 2015-03-13T10:57:19
| 5,957,596
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
from __future__ import print_function
import flat
# Open an 800x600 window titled "hi".
window = flat.Window(800, 600, "hi")

@window.event("on_draw")
def on_draw(window):
    # Clear the frame to mid-grey on every draw event.
    flat.gl.glClearColor(0.5, 0.5, 0.5, 1.0)
    flat.gl.glClear(flat.gl.GL_COLOR_BUFFER_BIT)

# Enter the event loop (blocks until the window closes).
flat.run()
|
[
"orsonpeters@gmail.com"
] |
orsonpeters@gmail.com
|
51dfe4429a048801b0b68c3d5c4c08950d083957
|
a49cb927114854f115c7b500297509aaa06fc553
|
/02 高级处理/02-蓝图/home/views.py
|
fdf63186389a4f21f55b500acf4d0bf5b68559e7
|
[] |
no_license
|
XT-0220/flask_pratice
|
4bb9e9a991085ebce8b2e79c5e65a47850ca3b19
|
cce7e9ab7996eff57906524836f22e9be879eab3
|
refs/heads/master
| 2022-12-25T22:24:55.231274
| 2020-10-13T07:22:16
| 2020-10-13T07:22:16
| 302,593,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
from home import home_blue
# Routes are registered on the blueprint object rather than the app.
@home_blue.route('/')
def index():
    # Placeholder home view.
    return 'index'
|
[
"1606778306@qq.com"
] |
1606778306@qq.com
|
c016beb5d996c1ca1390e35753f3e429fdebd5a6
|
4ec6ed4ebcb9346042669e6aa03be0e502ed48b3
|
/leetcode/convert-sorted-array-to-binary-search-tree.py
|
84e69232dd85203daae4a1d75c1f376e113add3f
|
[] |
no_license
|
shonihei/road-to-mastery
|
79ed41cb1ad0dc2d0b454db2ccc7dd9567b03801
|
312bdf5101c3c1fc9a4d0b6762b5749ca57efe08
|
refs/heads/master
| 2021-01-22T19:59:17.038641
| 2017-11-16T15:21:55
| 2017-11-16T15:21:55
| 85,266,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
"""
Given an array where elements are sorted in ascending order, convert it to a height balanced BST.
"""
class TreeNode(object):
    """Binary-tree node holding a value and two child links."""

    def __init__(self, x):
        self.val = x        # payload value
        self.left = None    # left child (TreeNode or None)
        self.right = None   # right child (TreeNode or None)
def sortedArrayToBST(nums):
    """Build a height-balanced BST from ascending *nums*; return its root."""
    if not nums:
        return None
    # Choosing the middle element as the root keeps both subtrees within
    # one node of each other, guaranteeing height balance.
    middle = len(nums) // 2
    node = TreeNode(nums[middle])
    node.left = sortedArrayToBST(nums[:middle])
    node.right = sortedArrayToBST(nums[middle + 1:])
    return node
|
[
"shonihei@gmail.com"
] |
shonihei@gmail.com
|
a4d11746359dce92018f4d136895e0404bff8188
|
53d455b09622cc7d85c6227cfc88e87a269f8e94
|
/mysite/settings.py
|
3ec8adcf457031f8156c804505a9ca36454fe287
|
[] |
no_license
|
type89/new_djangogirl
|
1e44949c4ddbff4a7c3c4ef5b340134b5bd25416
|
e7c66d1f36707f3ce53fbc9b4e3e2bae2c66af23
|
refs/heads/master
| 2020-05-18T17:35:40.290035
| 2019-06-29T06:51:22
| 2019-06-29T06:51:22
| 184,560,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,264
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@z(_0a2h7=yy6dp2zb8opr)lt(g0enuzqr$x17@#o6zh$#os-_'
# NOTE(review): the secret key above is committed to source control — rotate
# it and load it from an environment variable before any real deployment.
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
#ALLOWED_HOSTS = []
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'taggit',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
#LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'ja'
#TIME_ZONE = 'UTC'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"zong1yuan2@gmail.com"
] |
zong1yuan2@gmail.com
|
1bc121225564cba3c38fedf0ca37e1a4a62a6b65
|
decd47b6a8f1a4f7eb41972bbb8f7e704a0b707e
|
/Anirudh Kala/booth-OSI.py
|
1e2c7740a1f28333e85e9d5d9c10dd72e8228725
|
[] |
no_license
|
bonomali/Data_Science
|
d4e017e3f201ccaeb392ec980bb0acee1dfd3131
|
c8dffb46356fdeed7c6f35cfa28e11acd6efc048
|
refs/heads/master
| 2021-05-30T10:16:47.903876
| 2015-10-04T23:54:29
| 2015-10-04T23:54:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,411
|
py
|
import scipy as sp
import csv
import pandas as pd
'''
path = {}
path[1] = "AF (OSIsoft)"
path[2] = "Analytics (OSIsoft)"
path[3] = "BI (OSIsoft)"
path[4] = "Cloud Connect (OSIsoft)"
path[5] = "Data Archive (OSIsoft)"
path[6] = "ESRI (OSIsoft)"
path[7] = "Interfaces (OSIsoft)"
path[8] = "Managed PI"
path[9] = "Odata & SQL (OSIsoft)"
path[10] = "Online Services (OSIsoft)"
path[11] = "OSI soft Event Frames"
path[12] = "OSIsoft PI Coresight"
path[13] = "OSIsoft PI DataLink"
path[14] = "OSIsoft PI Manual Logger"
path[15] = "OSIsoft PI Notifications"
path[16] = "OSIsoft PI ProcessBook"
path[17] = "OSIsoft Product Expo (OSIsoft)"
path[18] = "Search (OSIsoft)"
path[19] = "WebAPI (OSIsoft)"
path[20] = "WebParts (OSIsoft)"
# data = sp.genfromtxt("./Data/Registration-Data/UC2014AllFields.csv",delimiter = "\t")
#data = csv.reader("./Data/Booth-Scan-Data/ExpoScansOSI/AF (OSIsoft).csv.clean.csv", delimiter= " ")
data = {}
data1 = {}
for i in range(0,20) :
data[i] = pd.read_csv("./Data/Booth-Scan-Data/ExpoScansOSI/" + path[i+1] + ".csv.clean.csv" , sep = ',' )
data[i]['Event'] = path[i+1]
for i in range(0,20) :
data[i].to_csv("./Data/Booth-Scan-Data/ExpoScansOSI/process/" + path[i+1] + ".csv" , sep = ',' )
## to contanicate
for i in range(0,20) :
data1[i] = pd.read_csv("./Data/Booth-Scan-Data/ExpoScansOSI/process/" + path[i+1] + ".csv" , sep = ',' )
#result = data1[0].append(data1[1] )
result = data1[0]
for i in range(1,20) :
result = result.append(data1[i] , ignore_index = True)
result.to_csv("./Data/Booth-Scan-Data/ExpoScansOSI/process/FINAL.csv" , sep = ',' )
final = pd.read_csv("./Data/Booth-Scan-Data/ExpoScansOSI/process/FINAL.csv" , sep = ',' )
final["Address Line"] = final["Address Line 1"].map(str) + final["Address Line 2"].map(str)
final["Name"] = final["Name Badge First Name"].map(str) + " " + final["Name Badge Last Name"].map(str)
final.pop('Address Line 2')
final.pop('Address Line 1')
final.to_csv("./Data/Booth-Scan-Data/ExpoScansOSI/process/FINAL2.csv" , sep = ',' )
'''
## ***************************** Till here I got the FINAL2.csv file ***************************************************
# Build a unique attendee identifier and write it back to the same file.
dataset = pd.read_csv("./Data/Booth-Scan-Data/OSI.csv")
dataset['UID'] = dataset['Name'].map(str) + " " + dataset['Email Address'].map(str)
# index=False: without it each re-run of this script prepends another
# unnamed index column to the CSV it reads back in.
dataset.to_csv("./Data/Booth-Scan-Data/OSI.csv", index=False)
'''colHH = data[0]['Event']
print(colHH)
'''
|
[
"abhinav@abhinav-Inspiron-5537.(none)"
] |
abhinav@abhinav-Inspiron-5537.(none)
|
146ac52c155f6a21ab8f406bde451d1ce53f6925
|
0d464df42f5cc3c9a3b992ae9ff6160e5da5701d
|
/CHAPTER 12 (sorting and selection)/decorated_merge_sort.py
|
1ac0479db866b540143a43557da8834d51e73996
|
[
"MIT"
] |
permissive
|
ahammadshawki8/DSA-Implementations-in-Python
|
6b61d44e638bfb7f6cf3a8b1fc57d15777313420
|
fc18b54128cd5bc7639a14999d8f990190b524eb
|
refs/heads/master
| 2022-12-26T03:54:16.229935
| 2020-10-07T05:17:55
| 2020-10-07T05:17:55
| 267,899,551
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
from linked_queue_class import *
from merge_sort_linked import *
def decorated_merge_sort(data,key=None):
    """Demonstration of the decorate-sort-undecorate pattern.

    When *key* is given, each element is wrapped in an _Item keyed by
    key(element), sorted in place with merge_sort, then unwrapped.
    """
    if key is not None:
        for j in range(len(data)):
            data[j] = _Item(key(data[j]), data[j]) # decorate each element
    merge_sort(data) # sort with existing algorithm
    if key is not None:
        for j in range(len(data)):
            data[j] = data[j]._value # undecorate each element
|
[
"ahammadshawki8@gmail.com"
] |
ahammadshawki8@gmail.com
|
54e634a91b6785558588740c3c225ac1aa4c252c
|
bdf3c98e33c6bc9c8e0332043cc5a3a3f5e8ea42
|
/src/mantis_model/catkin_generated/pkg.develspace.context.pc.py
|
c4db2075151b9a2c10d3a4400eb7142cca173946
|
[] |
no_license
|
jstestsuite/ros-test
|
c33e244794287e2665cace847a9faf1b596ddf92
|
b0bb5b06c1c18b804c0f0a4e2deb4d88be894d1f
|
refs/heads/master
| 2020-04-11T16:54:41.401198
| 2019-02-13T00:44:14
| 2019-02-13T00:44:14
| 161,938,209
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "mantis_model"
PROJECT_SPACE_DIR = "/home/jman/ros/src/ugv_course/mantis_model/devel"
PROJECT_VERSION = "0.0.0"
|
[
"jsteele171@gmail.com"
] |
jsteele171@gmail.com
|
37123c73964b688ed5ae817b4fd7992a14458970
|
267970b4cdb9aae02e66ae2c31f36f2b2bade05f
|
/elevation_api.py
|
4bfba5b80e95fa9d985a77d56d4dbd9efe55ebe6
|
[] |
no_license
|
Tound/FlightPlannerScripts
|
169839cd6207709c74cb0e323e52e06cec763a5d
|
795822cd12d5f056778e82a9843c44a732e7eb08
|
refs/heads/main
| 2023-04-25T21:07:00.689092
| 2021-05-28T14:12:24
| 2021-05-28T14:12:24
| 335,413,457
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
import requests
import json
URL = "https://maps.googleapis.com/maps/api/elevation/json?path="
# NOTE(review): this API key is committed to source control — revoke it and
# load it from an environment variable instead.
API_KEY="AIzaSyDs4rYa3WPPu30NV0K9ysOtxkgWXhb7jF8"
#URL = https://maps.googleapis.com/maps/api/elevation/json?path=36.578581,-118.291994|36.23998,-116.83171&samples=10&key=AIzaSyDs4rYa3WPPu30NV0K9ysOtxkgWXhb7jF8
# The Elevation API interpolates `samples` points along the path between
# the two lat,lng endpoints below.
samples = 10
loc_string = "36.578581,-118.291994|36.23998,-116.83171"
#loc_string = "54.484898756295365,-0.6140756607055664|54.485297638747866,-0.6133460998535156"
data = requests.get(URL + loc_string + "&samples=" + f"{samples}" + "&key=" + API_KEY)#,params="elevation"
data = data.json()['results']
# Print the elevation of each sampled point along the path.
for result in data:
    elevation = result['elevation']
    print(elevation)
|
[
"tompound@ymail.com"
] |
tompound@ymail.com
|
128cf41497f94f6804cfba19418c237eafbd269e
|
234dbe7c61df183f11169c933b082eb940b56c17
|
/predictpipeline.py
|
3af2a77707bf929ee8d57eef77e45f4bc6b4f6c3
|
[] |
no_license
|
Akshat243/EGEN_project
|
39b57d7826cbd9b30f8e8f0592a1bdfdca8df05f
|
6e5158f4b71f7a562e169506aef355ebef82a7d3
|
refs/heads/master
| 2022-12-15T15:10:46.759557
| 2020-09-09T06:36:19
| 2020-09-09T06:36:19
| 294,018,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,546
|
py
|
import apache_beam as beam
import logging
import joblib
import numpy as np
import pandas as pd
import argparse
import re
from google.cloud import storage
from apache_beam.options.pipeline_options import StandardOptions, GoogleCloudOptions, SetupOptions, PipelineOptions
from past.builtins import unicode
# Build and run the pipeline
def run(argv=None):
    """Build and run the streaming PubSub -> predict -> PubSub pipeline."""
    # NOTE(review): this first PipelineOptions is overwritten after argument
    # parsing below; it appears to be dead code.
    pipeline_options = PipelineOptions(flags=argv)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--output_topic',
        required=True,
        help=(
            'Output PubSub topic of the form '
            '"projects/<PROJECT>/topics/<TOPIC>".'))
    # Exactly one input source must be given: a topic or a subscription.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument(
        '--input_topic',
        help=(
            'Input PubSub topic of the form '
            '"projects/<PROJECT>/topics/<TOPIC>".'))
    group.add_argument(
        '--input_subscription',
        help=(
            'Input PubSub subscription of the form '
            '"projects/<PROJECT>/subscriptions/<SUBSCRIPTION>."'))
    known_args, pipeline_args = parser.parse_known_args(argv)
    pipeline_options = PipelineOptions(pipeline_args)
    # Streaming mode: the pipeline runs until cancelled.
    pipeline_options.view_as(StandardOptions).streaming = True
    google_cloud_options = pipeline_options.view_as(GoogleCloudOptions)
    google_cloud_options.project = 'quickstart-1569373998669'
    google_cloud_options.job_name = 'alexa-prediction'
    google_cloud_options.staging_location = 'gs://p2_bucket/staging/'
    google_cloud_options.temp_location = 'gs://p2_bucket/temp/'
    pipeline_options.view_as(StandardOptions).runner = 'DataflowRunner'
    # Ship this module's globals and setup.py dependencies to the workers.
    pipeline_options.view_as(SetupOptions).save_main_session = True
    pipeline_options.view_as(SetupOptions).setup_file = "./setup.py"
    logging.info("Pipeline arguments: {}".format(pipeline_options))
    p = beam.Pipeline(options=pipeline_options)
    (p
     | "Read data from PubSub" >> beam.io.ReadFromPubSub(subscription=known_args.input_subscription).with_output_types(bytes)
     | 'decode' >> beam.Map(lambda x: x.decode('utf-8','ignore'))
     | "predicting" >> beam.ParDo(PredictSklearn(project='egenproject', bucket_name='p2_bucket', model_path='amazon_alexa.csv', destination_name='amazon_alexa.csv')).with_output_types(str)
     #| 'encode' >> beam.Map(lambda x: x.encode('utf-8','ignore')).with_output_types(bytes)
     | "Write to PubSub" >> beam.io.WriteStringsToPubSub(known_args.output_topic))
    result = p.run()
    # Block until the (streaming) pipeline terminates.
    result.wait_until_finish()
# Function to download model from bucket
def download_blob(bucket_name=None, source_blob_name=None, project=None, destination_file_name=None):
    """Copy one object out of a GCS bucket into a local file."""
    client = storage.Client(project)
    source_bucket = client.get_bucket(bucket_name)
    source_bucket.blob(source_blob_name).download_to_filename(destination_file_name)
class PredictSklearn(beam.DoFn):
    """Beam DoFn that scores Alexa review text for sentiment (feedback).

    NOTE(review): process() re-reads the CSV and re-trains a RandomForest
    for every incoming element — the model should be trained once (e.g. in
    setup()) or loaded pre-trained; confirm before relying on throughput.
    """
    def __init__(self, prometheus_ip=None, bucket_name=None, model_path=None, destination_name=None):
        # GCS coordinates of the training data and the local filename it is
        # downloaded to in setup().
        self._model = None
        self._project = project
        self._bucket_name = bucket_name
        self._model_path = model_path
        self._destination_name = destination_name
    def download_blob(self, bucket_name=None, source_blob_name=None, project=None, destination_file_name=None):
        """Download one GCS object to a local file.

        The import lives inside the method so it resolves on Dataflow
        workers without relying on the pickled module globals.
        """
        from google.cloud import storage
        storage_client = storage.Client(project)
        bucket = storage_client.get_bucket(bucket_name)
        blob = bucket.blob(source_blob_name)
        blob.download_to_filename(destination_file_name)
    def setup(self):
        """Download sklearn model from GCS"""
        logging.info(
            "Sklearn model initialisation {}".format(self._model_path))
        self.download_blob(bucket_name=self._bucket_name, source_blob_name=self._model_path,
                           project=self._project, destination_file_name=self._destination_name)
    def process(self, element):
        """Train on amazon_alexa.csv, then predict feedback for *element*."""
        import re
        import nltk
        nltk.download('stopwords')
        from nltk.corpus import stopwords
        data = pd.read_csv(r"amazon_alexa.csv", encoding = "ISO-8859-1")
        reviews = data["verified_reviews"].tolist()
        labels = data["feedback"].values
        processed_reviews = []
        def preprocess(text):
            # Strip non-word chars and lone letters, collapse whitespace,
            # and drop a leading "b " left by bytes reprs.
            text = re.sub(r'\W', ' ', str(text))
            text = re.sub(r'\s+[a-zA-Z]\s+', ' ', text)
            text = re.sub(r'\^[a-zA-Z]\s+', ' ', text)
            text = re.sub(r'\s+', ' ', text, flags=re.I)
            text = re.sub(r'^b\s+', '', text)
            return text
        for text in reviews:
            processed_reviews.append(preprocess(text))
        from sklearn.model_selection import train_test_split
        X_train, X_test, y_train, y_test = train_test_split(processed_reviews, labels, test_size=0.2, random_state=0)
        from sklearn.feature_extraction.text import TfidfVectorizer
        vectorizer = TfidfVectorizer(max_features=2000, min_df=5, max_df=0.75, stop_words=stopwords.words('english'))
        X_train1 = vectorizer.fit_transform(X_train).toarray()
        X_test1 = vectorizer.transform(X_test).toarray()
        from sklearn.ensemble import RandomForestClassifier
        rfc = RandomForestClassifier(n_estimators=200, random_state=42)
        rfc.fit(X_train1, y_train)
        # list() of preprocess(bytes) — presumably meant to wrap the single
        # element for vectorize/predict; verify the intended input shape.
        processedelement=list(preprocess(element.encode('ISO-8859-1','ignore')))
        processedelement=vectorizer.transform(processedelement)
        y_pred = rfc.predict(processedelement)
        return str(y_pred)
# Entry point: raise logging verbosity, then launch the streaming pipeline.
if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    run()
|
[
"noreply@github.com"
] |
noreply@github.com
|
27bcb2ddf2ee9ab7fbfbefe716406c1ac2b1218b
|
321db74b4b2dfa164a590b2988ceeb6e49642fe6
|
/app.py
|
942726b3ab0fa89f68878bda730a697bffa199c3
|
[] |
no_license
|
dawong8/carry-me-backend
|
c9341be112d26fd0cb7780a5fccd5e0d8d22feae
|
eec54234195273a4da21e71b3a7baf4f539c1fb9
|
refs/heads/master
| 2022-12-09T00:08:04.684547
| 2019-03-25T01:20:29
| 2019-03-25T01:20:29
| 174,024,572
| 0
| 0
| null | 2022-12-08T01:41:41
| 2019-03-05T21:38:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,081
|
py
|
from flask import Flask
from resources.users import users_api
from resources.game_data import game_api
# from resources.reviews import reviews_api
from flask_cors import CORS
from flask_login import LoginManager, login_required, logout_user
# Session management; bound to the Flask app below once it exists.
login_manager = LoginManager()
import models
import config
# Single module-level Flask application (no app factory).
app = Flask(__name__)
app.secret_key = config.SECRET_KEY  # used to sign the session cookie
login_manager.init_app(app)
@login_manager.user_loader
def load_user(userid):
    """Flask-Login callback: resolve a session user id to a User row."""
    try:
        user = models.User.get(models.User.id == userid)
    except models.DoesNotExist:
        # Unknown or stale id -> Flask-Login treats None as "not logged in".
        return None
    return user
# Allow the local React dev server to call the API with cookies attached.
CORS(users_api, origins=["http://localhost:3000"], supports_credentials=True)
# CORS(reviews_api, origins=["https://localhost:3000"], supports_credentials=True)
CORS(game_api, origins=["http://localhost:3000"], supports_credentials=True)
# All resources are versioned under /api/v1.
app.register_blueprint(users_api, url_prefix='/api/v1')
# app.register_blueprint(reviews_api, url_prefix='/api/v1')
app.register_blueprint(game_api, url_prefix='/api/v1')
if __name__ == '__main__':
    # Ensure database tables exist before serving requests.
    models.initialize()
    app.run(debug=config.DEBUG, port=config.PORT)
|
[
"wong.danny29@gmail.com"
] |
wong.danny29@gmail.com
|
a34cfcdef57043cc7f41ac54d209257b20f9284b
|
fff74e87ac64ca852fd4896e0ffa82db7bdd5246
|
/src/secml/ml/classifiers/loss/c_loss_logistic.py
|
95e161ced9df8bc72b7a29936896888bd576561b
|
[
"Apache-2.0"
] |
permissive
|
Cinofix/secml
|
04c69ef214df5a324561e7da299379969737d7ed
|
431373e65d8cfe2cb7cf042ce1a6c9519ea5a14a
|
refs/heads/master
| 2022-12-21T06:43:13.038269
| 2020-09-15T13:56:01
| 2020-09-15T13:56:01
| 298,513,252
| 0
| 0
|
Apache-2.0
| 2020-09-25T08:25:31
| 2020-09-25T08:25:30
| null |
UTF-8
|
Python
| false
| false
| 4,170
|
py
|
"""
.. module:: CLossLogistic
:synopsis: Logistic loss function
.. moduleauthor:: Battista Biggio <battista.biggio@unica.it>
.. moduleauthor:: Marco Melis <marco.melis@unica.it>
"""
from secml.ml.classifiers.loss import CLossClassification
from secml.ml.classifiers.loss.c_loss import _check_binary_score
from secml.ml.classifiers.clf_utils import convert_binary_labels
from secml.array import CArray
class CLossLogistic(CLossClassification):
    """Logistic loss function.
    Computes log(1 + exp(-y*s)) / log(2) and its derivative, with an
    optional linear approximation to avoid overflow for large -y*s.
    Attributes
    ----------
    class_type : 'log'
    suitable_for : 'classification'
    """
    __class_type = 'log'
    def loss(self, y_true, score, pos_label=1, bound=10):
        """Computes the value of the logistic loss function.
        Parameters
        ----------
        y_true : CArray
            Ground truth (correct), targets. Vector-like array.
        score : CArray
            Outputs (predicted), targets.
            2-D array of shape (n_samples, n_classes) or 1-D flat array
            of shape (n_samples,). If 1-D array, the probabilities
            provided are assumed to be that of the positive class.
        pos_label : {0, 1}, optional
            The class wrt compute the loss function. Default 1.
            If `score` is a 1-D flat array, this parameter is ignored.
        bound : scalar or None, optional
            Set an upper bound for a linear approximation when -y*s is large
            to avoid numerical overflows.
            10 is a generally acceptable -> log(1+exp(10)) = 10.000045
        Returns
        -------
        CArray
            Loss function. Vector-like array.
        """
        if pos_label not in (0, 1):
            raise ValueError("only {0, 1} are accepted for `pos_label`")
        y_true = convert_binary_labels(y_true).ravel()  # Convert to {-1, 1}
        score = _check_binary_score(score, pos_label)
        # log(1 + exp(-y*s)) / log(2)
        v = CArray(- y_true * score).astype(float)
        if bound is None:
            v = (1.0 + v.exp()).log()
        else:
            # linear approximation avoids numerical overflows
            # when -yf >> 1 : log ( 1+ exp(-yf)) ~= -yf
            # (entries with v >= bound are left as v itself)
            v[v < bound] = (1.0 + v[v < bound].exp()).log()
        return v / CArray([2]).log()
    def dloss(self, y_true, score, pos_label=1, bound=10):
        """Computes the derivative of the logistic loss function with respect to `score`.
        Parameters
        ----------
        y_true : CArray
            Ground truth (correct), targets. Vector-like array.
        score : CArray
            Outputs (predicted), targets.
            2-D array of shape (n_samples, n_classes) or 1-D flat array
            of shape (n_samples,). If 1-D array, the probabilities
            provided are assumed to be that of the positive class.
        pos_label : {0, 1}, optional
            The class wrt compute the loss function derivative. Default 1.
            If `score` is a 1-D flat array, this parameter is ignored.
        bound : scalar or None, optional
            Set an upper bound for a linear approximation when -y*s is large
            to avoid numerical overflows.
            10 is a generally acceptable -> log(1+exp(10)) = 10.000045
        Returns
        -------
        CArray
            Derivative of the loss function. Vector-like array.
        """
        if pos_label not in (0, 1):
            raise ValueError("only {0, 1} are accepted for `pos_label`")
        y_true = convert_binary_labels(y_true).ravel()  # Convert to {-1, 1}
        score = _check_binary_score(score, pos_label)
        # d/df log ( 1+ exp(-yf)) / log(2) =
        # 1/ log(2) * ( 1+ exp(-yf)) exp(-yf) -y
        v = CArray(- y_true * score).astype(float)
        if bound is None:
            h = -y_true * v.exp() / (1.0 + v.exp())
        else:
            # linear approximation avoids numerical overflows
            # when -yf >> 1 : loss ~= -yf, and grad = -y
            h = -y_true.astype(float)
            h[v < bound] = h[v < bound] * v[v < bound].exp() / \
                (1.0 + v[v < bound].exp())
        return h / CArray([2]).log()
|
[
"marco.melis@diee.unica.it"
] |
marco.melis@diee.unica.it
|
a6278547013cee57c3cc6702108a2284c2d04ee0
|
6a45a5dd00dd01edfcd3d4145501048ff2d987ba
|
/src/data_preprocess.py
|
9cdf7be3e9d81e358b656db4ef49ab9feff3f081
|
[] |
no_license
|
luzengxiang/CISC879_Project
|
18d46b960934ce6a38ccc176c04618cf7ee75a30
|
181a833025cecfaba600308f0b9ee448e7468bb5
|
refs/heads/master
| 2021-08-23T01:39:34.804696
| 2017-12-02T06:50:56
| 2017-12-02T06:50:56
| 108,591,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
from lib.crop_image import import_eps
import argparse
import numpy as np
import os
import sys
if __name__ == '__main__':
    # CLI: preprocess scanner data for one body zone and cache it as .npy.
    parser = argparse.ArgumentParser(description = "data_process based on Zone")
    parser.add_argument('Zone', type=int,help='Positional zone number:int')
    args = parser.parse_args()
    out_dir = "../data/processed/Zone_%d" % args.Zone
    # exist_ok avoids the race between an exists() check and mkdir(), and
    # also creates any missing parent directories (the original
    # `if os.path.exists(...) == False: os.mkdir(...)` failed on both counts).
    os.makedirs(out_dir, exist_ok=True)
    X, Y = import_eps("../data", '../data/stage1_labels.csv', args.Zone)
    sys.stderr.write("Saving Data:" + '\n')
    np.save(os.path.join(out_dir, "X"), X)
    np.save(os.path.join(out_dir, "Y"), Y)
|
[
"zxlu@udel.edu"
] |
zxlu@udel.edu
|
b86e21edd60be743de7e055ffd942d0674c17b3d
|
f82757475ea13965581c2147ff57123b361c5d62
|
/gi-stubs/repository/GstGL/GLRenderbufferAllocationParams.py
|
23d2ebdb5969b2516f3587d02945af8e1fa95f4d
|
[] |
no_license
|
ttys3/pygobject-stubs
|
9b15d1b473db06f47e5ffba5ad0a31d6d1becb57
|
d0e6e93399212aada4386d2ce80344eb9a31db48
|
refs/heads/master
| 2022-09-23T12:58:44.526554
| 2020-06-06T04:15:00
| 2020-06-06T04:15:00
| 269,693,287
| 8
| 2
| null | 2020-06-05T15:57:54
| 2020-06-05T15:57:54
| null |
UTF-8
|
Python
| false
| false
| 6,772
|
py
|
# encoding: utf-8
# module gi.repository.GstGL
# from /usr/lib64/girepository-1.0/GstGL-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.repository.Gst as __gi_repository_Gst
import gi.repository.GstBase as __gi_repository_GstBase
import gobject as __gobject
class GLRenderbufferAllocationParams(__gi.Boxed):
    """
    Introspection stub for GstGL.GLRenderbufferAllocationParams.
    :Constructors:
    ::
        GLRenderbufferAllocationParams()
        new(context:GstGL.GLContext, alloc_params:Gst.AllocationParams=None, renderbuffer_format:GstGL.GLFormat, width:int, height:int) -> GstGL.GLRenderbufferAllocationParams
        new_wrapped(context:GstGL.GLContext, alloc_params:Gst.AllocationParams=None, renderbuffer_format:GstGL.GLFormat, width:int, height:int, gl_handle=None, user_data=None, notify:GLib.DestroyNotify=None) -> GstGL.GLRenderbufferAllocationParams
    """
    def copy(self, *args, **kwargs): # real signature unknown
        pass
    # Fix: the generator emitted required parameters *after* a defaulted one
    # (`alloc_params=None, renderbuffer_format, ...`), which is a SyntaxError
    # and made this stub module unimportable. The trailing parameters are
    # given placeholder defaults to keep the documented call order while
    # producing valid Python.
    def new(self, context, alloc_params=None, renderbuffer_format=None, width=0, height=0): # real signature unknown; restored from __doc__
        """ new(context:GstGL.GLContext, alloc_params:Gst.AllocationParams=None, renderbuffer_format:GstGL.GLFormat, width:int, height:int) -> GstGL.GLRenderbufferAllocationParams """
        pass
    def new_wrapped(self, context, alloc_params=None, renderbuffer_format=None, width=0, height=0, gl_handle=None, user_data=None, notify=None): # real signature unknown; restored from __doc__
        """ new_wrapped(context:GstGL.GLContext, alloc_params:Gst.AllocationParams=None, renderbuffer_format:GstGL.GLFormat, width:int, height:int, gl_handle=None, user_data=None, notify:GLib.DestroyNotify=None) -> GstGL.GLRenderbufferAllocationParams """
        pass
    def _clear_boxed(self, *args, **kwargs): # real signature unknown
        pass
    def __delattr__(self, *args, **kwargs): # real signature unknown
        """ Implement delattr(self, name). """
        pass
    def __dir__(self, *args, **kwargs): # real signature unknown
        """ Default dir() implementation. """
        pass
    def __eq__(self, *args, **kwargs): # real signature unknown
        """ Return self==value. """
        pass
    def __format__(self, *args, **kwargs): # real signature unknown
        """ Default object formatter. """
        pass
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __ge__(self, *args, **kwargs): # real signature unknown
        """ Return self>=value. """
        pass
    def __gt__(self, *args, **kwargs): # real signature unknown
        """ Return self>value. """
        pass
    def __hash__(self, *args, **kwargs): # real signature unknown
        """ Return hash(self). """
        pass
    def __init_subclass__(self, *args, **kwargs): # real signature unknown
        """
        This method is called when a class is subclassed.
        The default implementation does nothing. It may be
        overridden to extend subclasses.
        """
        pass
    def __init__(self): # real signature unknown; restored from __doc__
        pass
    def __le__(self, *args, **kwargs): # real signature unknown
        """ Return self<=value. """
        pass
    def __lt__(self, *args, **kwargs): # real signature unknown
        """ Return self<value. """
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    def __ne__(self, *args, **kwargs): # real signature unknown
        """ Return self!=value. """
        pass
    def __reduce_ex__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass
    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass
    def __setattr__(self, *args, **kwargs): # real signature unknown
        """ Implement setattr(self, name, value). """
        pass
    def __sizeof__(self, *args, **kwargs): # real signature unknown
        """ Size of object in memory, in bytes. """
        pass
    def __str__(self, *args, **kwargs): # real signature unknown
        """ Return str(self). """
        pass
    def __subclasshook__(self, *args, **kwargs): # real signature unknown
        """
        Abstract classes can override this to customize issubclass().
        This is invoked early on by abc.ABCMeta.__subclasscheck__().
        It should return True, False or NotImplemented. If it returns
        NotImplemented, the normal algorithm is used. Otherwise, it
        overrides the normal algorithm (and the outcome is cached).
        """
        pass
    def __weakref__(self, *args, **kwargs): # real signature unknown
        pass
    height = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    parent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    renderbuffer_format = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    width = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    _padding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    __class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
    __dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(GLRenderbufferAllocationParams), '__module__': 'gi.repository.GstGL', '__gtype__': <GType GstGLRenderbufferAllocationParams (93979012457472)>, '__dict__': <attribute '__dict__' of 'GLRenderbufferAllocationParams' objects>, '__weakref__': <attribute '__weakref__' of 'GLRenderbufferAllocationParams' objects>, '__doc__': None, 'parent': <property object at 0x7f56a3a28310>, 'renderbuffer_format': <property object at 0x7f56a3a28450>, 'width': <property object at 0x7f56a3a284f0>, 'height': <property object at 0x7f56a3a285e0>, '_padding': <property object at 0x7f56a3a286d0>, 'new': gi.FunctionInfo(new), 'new_wrapped': gi.FunctionInfo(new_wrapped)})"
    __gtype__ = None # (!) real value is '<GType GstGLRenderbufferAllocationParams (93979012457472)>'
    # Fix: the bare `StructInfo(...)` expression was a NameError at class
    # creation; record the real value in a comment like the entries above.
    __info__ = None # (!) real value is 'StructInfo(GLRenderbufferAllocationParams)'
|
[
"ttys3@outlook.com"
] |
ttys3@outlook.com
|
316f82be60d77481665da8b54b8a48decca9f131
|
8a07250aaa7aaa0756e1abc36579f6dcc8f2db1d
|
/solutions/72. Edit Distance.py
|
a2b8246ce394c3924d056cdeb752d1b8cdf741b9
|
[] |
no_license
|
atriekak/LeetCode
|
f92db0f5f2efb3ba97393b496374f7adc8b92545
|
8438f8a53fbc44d9d10a1e0b96f7ba78dc38eb88
|
refs/heads/main
| 2023-07-17T08:29:45.625297
| 2021-07-04T04:22:43
| 2021-07-04T04:22:43
| 324,005,296
| 0
| 0
| null | 2021-09-02T21:23:06
| 2020-12-23T21:29:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,012
|
py
|
class Solution:
    def minDistance(self, word1: str, word2: str) -> int:
        """Return the Levenshtein edit distance between word1 and word2.

        Approach: dynamic programming over two rolling rows.
        Time Complexity: O(m * n)
        Space Complexity: O(n) -- improved from the original O(m * n) table,
        where m and n are the lengths of word2 and word1, respectively.
        """
        m = len(word2)
        n = len(word1)
        # prev[j] = distance between word2[:i-1] and word1[:j]
        prev = list(range(n + 1))  # first row: deleting j chars of word1
        for i in range(1, m + 1):
            # First column: building word2[:i] from an empty string costs i.
            curr = [i] + [0] * n
            for j in range(1, n + 1):
                if word2[i - 1] == word1[j - 1]:
                    # Characters match: no extra edit needed.
                    curr[j] = prev[j - 1]
                else:
                    # Min over replace, delete and insert.
                    curr[j] = 1 + min(prev[j - 1], prev[j], curr[j - 1])
            prev = curr
        return prev[n]
|
[
"atriekak@gmail.com"
] |
atriekak@gmail.com
|
b5f82902fa6d5c8bb6c50793c090fc352b764b65
|
bafbfd5a5c5600bc3ef1a7bc13a5e8efe96699fe
|
/myproject/myproject/settings.py
|
3c112f5e68e2f6134289480fa59a9d5837dd01d5
|
[] |
no_license
|
dmpinform/homework_github_action
|
4e99436991442bcafa3aa61e63c7609b64dfc587
|
2a880f3a61ed07d89f415ff6ca784310de1cfded
|
refs/heads/main
| 2023-07-08T02:49:37.442842
| 2021-08-21T18:38:53
| 2021-08-21T18:38:53
| 395,999,060
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,799
|
py
|
"""
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-g$4i^f1==!x4hg9xs4#ae66+xxh%gj+764xusas@^56bz2^*s2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'picpart'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR), 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
# MEDIA
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# celery
CELERY_BROKER_URL = 'amqp://guest:guest@localhost:5672'
# CELERY_RESULT_BACKEND = 'redis://localhost:6379'
# CELERY_ACCEPT_CONTENT = ['application/json']
# CELERY_RESULT_SERIALIZER = 'json'
# CELERY_TASK_SERIALIZER = 'json'
# testing email to file
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = 'tmp/app-messages'
|
[
"dmpinform@gmail.com"
] |
dmpinform@gmail.com
|
846832ff991ad47d5a5fa26386e041136814197f
|
ed983a4dfe11b18c19e42cc39c1804c154e3e608
|
/unit_test/test/test_unet.py
|
3d0a7caf9fc916533a5d8cf33b8a046476bbef41
|
[] |
no_license
|
riven314/Keras-UNet-Foreground-Extraction
|
90d32475e9025af154bd642f123b2ea153ebed9b
|
f71535cb25cb798904b424f4a468a13087d22006
|
refs/heads/master
| 2020-09-06T11:52:58.043107
| 2019-12-10T10:00:24
| 2019-12-10T10:00:24
| 220,416,411
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,448
|
py
|
"""
checking schedule
1. model output shape
2. sample check number of channels
3. check dropout rate (12, 16)
REFERENCE:
1. converting tensor to numpy: https://stackoverflow.com/questions/34097281/how-can-i-convert-a-tensor-into-a-numpy-array-in-tensorflow
"""
import os
import sys
import itertools
# Make the parent directory importable so the local `unet` module resolves.
MODULE_PATH = os.path.join(os.getcwd(), '..')
sys.path.append(MODULE_PATH)
import tensorflow as tf
from keras.layers import Input
from unet import unet
# Configurations swept by the test driver below.
# (height, width, channel #)
INPUT_SIZES = [(256, 256, 1), (480, 640, 1)]
# (scale, corresponding # channels)
SCALES_N_CHANNELS = [(2, 128), (1, 64), (0.5, 32)]
# dropout rates applied at layers 12 and 16
DROPOUTS = [0.3, 0.5]
def check_dropout(model, dropout_rate):
    """
    Verify that layers 12 and 16 of the model use the expected dropout rate.
    input:
        model -- keras model object
        dropout_rate -- float, ground truth dropout rate
    """
    # Reading a layer's `rate` attribute is plain Python; the original
    # opened a tf.Session (and never closed it) for no reason -- removed.
    rate1 = model.layers[12].rate
    rate2 = model.layers[16].rate
    assert dropout_rate == rate1 == rate2, 'WRONG DROPOUT RATE'
    print('dropout test pass!')
def check_channel(model, channel_n):
    """
    Verify the channel count at the model's second and third-from-last layers.
    input:
        channel_n: ground truth channel number
    """
    first_channels = model.layers[1].output.get_shape().as_list()[-1]
    last_channels = model.layers[-3].output.get_shape().as_list()[-1]
    assert channel_n == first_channels == last_channels, 'WRONG CHANNEL NUMBER'
    print('number of channel pass!')
def check_output_shape(model, output_shape):
    """
    Verify the spatial (height, width) of the model's final layer output.
    input:
        output_shape -- tuple, (height, width)
    """
    dims = model.layers[-1].output.get_shape().as_list()
    assert output_shape[0] == dims[1], 'WRONG HEIGHT FOR MODEL OUTPUT'
    assert output_shape[1] == dims[2], 'WRONG WIDTH FOR MODEL OUTPUT'
    print('model output shape pass!')
if __name__ == '__main__':
    # Sweep every (input size, scale, dropout) combination and sanity-check
    # the constructed U-Net against the expected architecture.
    config_gen = itertools.product(INPUT_SIZES, SCALES_N_CHANNELS, DROPOUTS)
    for i in config_gen:
        input_size, scale_n_channel, dropout = i
        scale, channel_n = scale_n_channel
        print('\n### input size = {}, scale = {}, expected channel #: {}, dropout = {}'\
            .format(input_size, scale, channel_n, dropout))
        model = unet(input_shape = input_size, scale = scale, dropout = dropout)
        check_channel(model, channel_n)
        check_dropout(model, dropout)
        check_output_shape(model, input_size[:2])
    print('all test cases pass!')
|
[
"alexlauwh@gmail.com"
] |
alexlauwh@gmail.com
|
c2fb7f0c7c179ee7608e4eb324287e1dba938c09
|
a41d43b788458e54d90a547b2fe71cc40b68e42b
|
/manage.py
|
8095a0f88bd37314871b8c3d47e8e42a0c55ed44
|
[] |
no_license
|
Bariss77/weight_02
|
9b74077e42e9f0ff03f0f29b0f5228f01f9cd5a6
|
915a3670d2ce4a422ac03cfa55098ee7c7e6bffd
|
refs/heads/master
| 2020-04-15T02:14:13.675448
| 2019-01-06T13:01:44
| 2019-01-06T13:01:44
| 164,307,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Point Django at this project's settings unless already configured.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'weight_02.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint: Django missing or virtualenv not activated.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
|
[
"bariss@bk.ru"
] |
bariss@bk.ru
|
40a42af680a63e3a17bb18fe661dc09bb9d98b56
|
6ef8abce322da7a6acf8b940801d7c2286b55f42
|
/Programmers/compressString.py
|
fccb357227a2eb76df7a508063b6c1f0361d23a2
|
[] |
no_license
|
702criticcal/1Day1Commit
|
747a61308e2fae87bad6369cd0bc481bdc89b29a
|
aec375b8b41de1ed5366c714cc6a204905fb2763
|
refs/heads/master
| 2023-01-31T16:47:24.457584
| 2020-12-18T03:42:28
| 2020-12-18T03:42:28
| 287,663,502
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
def solution(s):
    """Return the length of the shortest run-length compression of s.

    Every chunk width from 1 to len(s)//2 is tried; runs of identical
    consecutive chunks compress to "<count><chunk>" (count omitted when 1).
    """
    n = len(s)
    if n == 1:
        return 1
    best = n
    for width in range(1, n // 2 + 1):
        compressed_len = 0
        prev_chunk = s[:width]
        run = 1
        # Step one chunk past the end so the final run is flushed too.
        for start in range(width, n + width, width):
            chunk = s[start:start + width]
            if chunk == prev_chunk:
                run += 1
            else:
                # Flush the finished run: digits only when repeated.
                digits = len(str(run)) if run > 1 else 0
                compressed_len += digits + len(prev_chunk)
                prev_chunk = chunk
                run = 1
        best = min(best, compressed_len)
    return best
|
[
"wnstn1549@gmail.com"
] |
wnstn1549@gmail.com
|
b6dbc83d15fc726846cdce079b7bf3f329227348
|
e574bafd1b3652d68b30c65695856d5e25fa7544
|
/pycmbs/tests/test_report.py
|
b09208fda8438969487a84b332dad974d8591149
|
[
"MIT"
] |
permissive
|
gitter-badger/pycmbs
|
797169f6229828511d7cf81bb52ce73cb753fc6f
|
c802cf1fc8737c98e83c0e942451720fce03b860
|
refs/heads/master
| 2020-12-30T19:36:58.750147
| 2015-02-06T08:29:56
| 2015-02-06T08:29:56
| 30,469,653
| 0
| 0
| null | 2015-02-07T20:46:42
| 2015-02-07T20:46:41
| null |
UTF-8
|
Python
| false
| false
| 3,419
|
py
|
# -*- coding: utf-8 -*-
"""
This file is part of pyCMBS. (c) 2012-2014
For COPYING and LICENSE details, please refer to the file
COPYRIGHT.md
"""
import unittest
from nose.tools import assert_raises
from pycmbs.benchmarking.report import Report
import os
import numpy as np
import matplotlib.pyplot as plt
import tempfile
class TestData(unittest.TestCase):
    """Smoke tests for pycmbs.benchmarking.report.Report."""
    def setUp(self):
        # mkdtemp() already creates the directory, so the original
        # exists()/makedirs() check was redundant.
        self._tmpdir = tempfile.mkdtemp()
        self.R = Report('testfile', 'myreport', 'Alex Loew', outdir=self._tmpdir + os.sep)
    def tearDown(self):
        # Fix: the original tearDown was a no-op, leaking one mkdtemp()
        # directory (plus generated report files) per test.
        import shutil
        shutil.rmtree(self._tmpdir, ignore_errors=True)
    def test_ReportInit(self):
        """Constructor should record filename, default format and author."""
        self.assertEqual(self.R.filename, self._tmpdir + os.sep + 'testfile.tex')
        self.assertEqual(self.R.format, 'png')
        self.assertEqual(self.R.author, 'Alex Loew')
    def test_open_report(self):
        self.R.open()
        self.assertTrue(os.path.exists(self.R.filename))
    def test_open_report_MissingDirectory(self):
        # open() should create a missing output directory on demand.
        self.R = Report('testfile', 'myreport', 'Alex Loew', outdir=self._tmpdir + os.sep + 'nixdir')
        self.R.open()
        self.assertTrue(os.path.exists(self.R.filename))
        self.assertTrue(os.path.exists(self._tmpdir + os.sep + 'nixdir'))
    def test_report_features(self):
        """Exercise the full report-building API end to end."""
        if os.path.exists(self.R.filename):
            os.remove(self.R.filename)
        f = plt.figure()
        ax = f.add_subplot(111)
        ax.plot(np.random.random(200))
        self.R.open()
        self.R.section('This is a section')
        self.R.subsection('This is a subsection')
        self.R.subsubsection('This is a subsubsection')
        self.R.newpage()
        self.R.clearpage()
        self.R.barrier()
        self.R.open_table()
        self.R.close_table(caption='This is my caption')
        self.R.newpage()
        self.R.figure(f, caption='My figure caption')
        self.R.close()
        self.R.compile()
        if os.path.exists(self.R.filename):
            os.remove(self.R.filename)
        # Fix: `filename[:-3] + '.pdf'` produced 'testfile..pdf' (the '.'
        # of '.tex' remained), so the compiled PDF was never cleaned up.
        pdf = os.path.splitext(self.R.filename)[0] + '.pdf'
        if os.path.exists(pdf):
            os.remove(pdf)
    def test_report_InvalidFigure(self):
        # figure(None) must be a silent no-op returning None, not raise.
        f = None
        r = self.R.figure(f, caption='My figure caption')
        self.assertEqual(r, None)
    def test_input(self):
        self.R.input('testname')
|
[
"a.loew@gmx.net"
] |
a.loew@gmx.net
|
11673e6ceffc1b11d4fa20923c84c7444b026e7c
|
f559b4d607cfdd3f192daed155ed8b0d263c71b2
|
/env/bin/easy_install
|
8340023df64d2030d9cc8e363b11ec8e070ff138
|
[] |
no_license
|
chris-baby/WbOnline
|
6270015e9a7897b413a3fe97e2aca8a33f744995
|
91425f677d2e7c2a0ac9aeb8c1ee47d75f9b9321
|
refs/heads/master
| 2022-07-14T23:57:56.162760
| 2020-05-11T13:56:22
| 2020-05-11T13:56:22
| 263,056,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
#!/Users/tongtong/Desktop/root/bwOnline/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip setuptools' script suffix (-script.py / -script.pyw / .exe)
    # from argv[0] so easy_install sees a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"918128078@qq.com"
] |
918128078@qq.com
|
|
ba2edcab7e8d97ac362909c1db7622d50e309aac
|
68b99720b6d3fb483c2c612beffdc23d36085bfb
|
/mlcube/mlcube/tests/test_mlcommons_mlcube_cli.py
|
2e895c880c365dc50faa6430910eccf8293fd289
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
johnugeorge/mlcube
|
50ef63264282df4c9bd9bf82afc471f7a8db6ca2
|
10bdfe859805aa8c868c5a4745259037e123e757
|
refs/heads/master
| 2023-08-22T22:31:53.892870
| 2021-09-24T19:48:07
| 2021-09-29T17:22:56
| 402,979,659
| 0
| 0
|
NOASSERTION
| 2021-09-29T17:16:39
| 2021-09-04T06:12:13
|
Python
|
UTF-8
|
Python
| false
| false
| 624
|
py
|
from click.testing import CliRunner
from mlcube.__main__ import cli
runner = CliRunner()
def test_mlcube():
    """Smoke-test the bare `mlcube` CLI: clean exit and expected help text."""
    result = runner.invoke(cli)
    assert result.exit_code == 0
    expected_fragments = (
        'Usage: mlcube [OPTIONS] COMMAND [ARGS]...',
        '--log-level, --log_level TEXT  Log level to set, default is to do nothing.',
        '--help                         Show this message and exit.',
        'run          Run MLCube ML task.',
        'show_config  Show MLCube configuration.',
    )
    for fragment in expected_fragments:
        assert fragment in result.output
    print("All assertions passed")
|
[
"noreply@github.com"
] |
noreply@github.com
|
90e145421e0bf3f625473a6875ab80784750697a
|
b4707b1d1d5e1bdd10123c6af6a13c86c1bc9d76
|
/taxifares/taxifare/trainer/model.py
|
4affc1d6ec5c68c557167fcb11a66dc04034f542
|
[] |
no_license
|
borzoj/ml-experiments
|
d43416b1635b698856e0a8c474a8b2cc3c7686ed
|
1a1d2381366b8f4fcd7a3b91bcd0ad5992347da6
|
refs/heads/master
| 2020-04-22T22:04:42.121599
| 2019-04-03T18:15:39
| 2019-04-03T18:15:39
| 170,695,486
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,685
|
py
|
#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import shutil
tf.logging.set_verbosity(tf.logging.INFO)
# List the CSV columns (order must match the training CSV files).
CSV_COLUMNS = ['fare_amount', 'dayofweek', 'hourofday', 'pickuplon', 'pickuplat', 'dropofflon', 'dropofflat', 'passengers', 'key']
#Choose which column is your label
LABEL_COLUMN = 'fare_amount'
# Set the default values for each CSV column in case there is a missing value
DEFAULTS = [[0.0], ['Mon'], [0], [-74.0], [40.0], [-74.0], [40.7], [1.0], ['nokey']]
# Create an input function that stores your data into a dataset
def read_dataset(filename, mode, batch_size = 512):
    """Return an input_fn that streams (features, label) batches from CSVs.

    filename may be a glob pattern. In TRAIN mode the data is shuffled and
    repeated indefinitely; otherwise a single pass is made.
    """
    def _input_fn():
        def decode_csv(value_column):
            # Parse one CSV line into named tensors and split off the label.
            columns = tf.decode_csv(value_column, record_defaults = DEFAULTS)
            features = dict(zip(CSV_COLUMNS, columns))
            label = features.pop(LABEL_COLUMN)
            # add_engineered adds the latdiff/londiff/euclidean features
            # expected by INPUT_COLUMNS (defined elsewhere in this file).
            return add_engineered(features), label
        # Create list of files that match pattern
        file_list = tf.gfile.Glob(filename)
        # Create dataset from file list
        dataset = tf.data.TextLineDataset(file_list).map(decode_csv)
        if mode == tf.estimator.ModeKeys.TRAIN:
            num_epochs = None # indefinitely
            dataset = dataset.shuffle(buffer_size = 10 * batch_size)
        else:
            num_epochs = 1 # end-of-input after this
        dataset = dataset.repeat(num_epochs).batch(batch_size)
        return dataset.make_one_shot_iterator().get_next()
    return _input_fn
# Define your feature columns; order matters -- build_estimator unpacks
# this list positionally.
INPUT_COLUMNS = [
    # Define features
    tf.feature_column.categorical_column_with_vocabulary_list('dayofweek', vocabulary_list = ['Sun', 'Mon', 'Tues', 'Wed', 'Thu', 'Fri', 'Sat']),
    tf.feature_column.categorical_column_with_identity('hourofday', num_buckets = 24),
    # Numeric columns
    tf.feature_column.numeric_column('pickuplat'),
    tf.feature_column.numeric_column('pickuplon'),
    tf.feature_column.numeric_column('dropofflat'),
    tf.feature_column.numeric_column('dropofflon'),
    tf.feature_column.numeric_column('passengers'),
    # Engineered features that are created in the input_fn
    tf.feature_column.numeric_column('latdiff'),
    tf.feature_column.numeric_column('londiff'),
    tf.feature_column.numeric_column('euclidean')
]
# Build the estimator
def build_estimator(model_dir, nbuckets, hidden_units):
    """
    Build a wide-and-deep regressor starting from INPUT_COLUMNS.

    Lat/lon columns are bucketized and feature-crossed into pickup/dropoff
    location cells; crosses feed the linear ("wide") side directly and the
    DNN ("deep") side through embeddings.

    Args:
        model_dir: directory for checkpoints and exports.
        nbuckets: number of buckets per lat/lon axis (controls cross size).
        hidden_units: DNN hidden-layer sizes.

    Returns:
        A DNNLinearCombinedRegressor wrapped with an extra RMSE eval metric.
    """
    # Input columns (unpacked positionally from INPUT_COLUMNS)
    (dayofweek, hourofday, plat, plon, dlat, dlon, pcount, latdiff, londiff, euclidean) = INPUT_COLUMNS
    # Bucketize the lats & lons; the 38..42 / -76..-72 bounds appear chosen
    # to bracket the data's pickup/dropoff coordinates (~40, -74).
    latbuckets = np.linspace(38.0, 42.0, nbuckets).tolist()
    lonbuckets = np.linspace(-76.0, -72.0, nbuckets).tolist()
    b_plat = tf.feature_column.bucketized_column(plat, latbuckets)
    b_dlat = tf.feature_column.bucketized_column(dlat, latbuckets)
    b_plon = tf.feature_column.bucketized_column(plon, lonbuckets)
    b_dlon = tf.feature_column.bucketized_column(dlon, lonbuckets)
    # Feature crosses: grid cell for pickup, for dropoff, the pair of cells,
    # and day-of-week x hour-of-day.
    ploc = tf.feature_column.crossed_column([b_plat, b_plon], nbuckets * nbuckets)
    dloc = tf.feature_column.crossed_column([b_dlat, b_dlon], nbuckets * nbuckets)
    pd_pair = tf.feature_column.crossed_column([ploc, dloc], nbuckets ** 4 )
    day_hr = tf.feature_column.crossed_column([dayofweek, hourofday], 24 * 7)
    # Wide columns and deep columns.
    wide_columns = [
        # Feature crosses
        dloc, ploc, pd_pair,
        day_hr,
        # Sparse columns
        dayofweek, hourofday,
        # Anything with a linear relationship
        pcount
    ]
    deep_columns = [
        # Embedding_column to "group" together sparse crosses for the DNN
        tf.feature_column.embedding_column(pd_pair, 10),
        tf.feature_column.embedding_column(day_hr, 10),
        # Numeric columns
        plat, plon, dlat, dlon,
        latdiff, londiff, euclidean
    ]
    ## setting the checkpoint interval to be much lower for this task
    run_config = tf.estimator.RunConfig(save_checkpoints_secs = 30,
                                        keep_checkpoint_max = 3)
    estimator = tf.estimator.DNNLinearCombinedRegressor(
        model_dir = model_dir,
        linear_feature_columns = wide_columns,
        dnn_feature_columns = deep_columns,
        dnn_hidden_units = hidden_units,
        config = run_config)
    # add extra evaluation metric for hyperparameter tuning
    estimator = tf.contrib.estimator.add_metrics(estimator, add_eval_metrics)
    return estimator
# Feature engineering shared by the training input_fn and the serving input_fn.
def add_engineered(features):
    """Add signed lat/lon deltas and their Euclidean distance to `features`.

    Implemented with TensorFlow ops so the identical transformation runs
    in-graph at both training and serving time. Mutates and returns the
    given feature dict.
    """
    # Signed differences preserve the direction of travel.
    features['latdiff'] = features['pickuplat'] - features['dropofflat']
    features['londiff'] = features['pickuplon'] - features['dropofflon']
    # Straight-line distance between pickup and dropoff points.
    features['euclidean'] = tf.sqrt(
        features['latdiff'] * features['latdiff']
        + features['londiff'] * features['londiff'])
    return features
# Create your serving input function so that your trained model will be able to serve predictions
def serving_input_fn():
    """Serving input_fn: raw feature placeholders plus engineered features.

    Placeholders mirror the raw CSV features; add_engineered is applied on a
    shallow copy so the receiver keeps the untouched raw placeholders while
    the model graph gets the augmented feature dict.
    """
    feature_placeholders = {
        # All the real-valued columns (pickuplat..passengers in INPUT_COLUMNS)
        column.name: tf.placeholder(tf.float32, [None]) for column in INPUT_COLUMNS[2:7]
    }
    feature_placeholders['dayofweek'] = tf.placeholder(tf.string, [None])
    feature_placeholders['hourofday'] = tf.placeholder(tf.int32, [None])
    features = add_engineered(feature_placeholders.copy())
    return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# Create an estimator that we are going to train and evaluate
def train_and_evaluate(args):
    """Run tf.estimator.train_and_evaluate driven by an `args` dict.

    Expected keys: output_dir, nbuckets, hidden_units, train_data_paths,
    train_batch_size, train_steps, eval_data_paths, eval_delay_secs,
    throttle_secs.
    """
    estimator = build_estimator(args['output_dir'], args['nbuckets'], args['hidden_units'])
    train_spec = tf.estimator.TrainSpec(
        input_fn = read_dataset(args['train_data_paths'],
                                batch_size = args['train_batch_size'],
                                mode = tf.estimator.ModeKeys.TRAIN),
        max_steps = args['train_steps'])
    # Export a SavedModel for the latest checkpoint, using the serving schema.
    exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
    eval_spec = tf.estimator.EvalSpec(
        input_fn = read_dataset(args['eval_data_paths'],
                                batch_size = 10000,
                                mode = tf.estimator.ModeKeys.EVAL),
        steps = None,  # evaluate on the full eval set
        start_delay_secs = args['eval_delay_secs'],
        throttle_secs = args['throttle_secs'],
        exporters = exporter)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
def add_eval_metrics(labels, predictions):
    """Extra eval metric (RMSE), attached via tf.contrib.estimator.add_metrics.

    Used as the objective for hyperparameter tuning (see build_estimator).
    """
    pred_values = predictions['predictions']
    return {
        'rmse': tf.metrics.root_mean_squared_error(labels, pred_values)
    }
|
[
"michal@mercari.com"
] |
michal@mercari.com
|
ad1b85e24bddffba1588102c18d19e9a7f5c4a35
|
ca75f7099b93d8083d5b2e9c6db2e8821e63f83b
|
/z2/part2/batch/jm/parser_errors_2/925229167.py
|
e0ca9ce450c7ef5983d536f9dd4a53f2584448db
|
[
"MIT"
] |
permissive
|
kozakusek/ipp-2020-testy
|
210ed201eaea3c86933266bd57ee284c9fbc1b96
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
refs/heads/master
| 2022-10-04T18:55:37.875713
| 2020-06-09T21:15:37
| 2020-06-09T21:15:37
| 262,290,632
| 0
| 0
|
MIT
| 2020-06-09T21:15:38
| 2020-05-08T10:10:47
|
C
|
UTF-8
|
Python
| false
| false
| 1,170
|
py
|
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 925229167
"""
"""
random actions, total chaos
"""
board = gamma_new(2, 3, 3, 3)
assert board is not None
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 2, 0, 1) == 1
assert gamma_move(board, 3, 0, 0) == 1
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_move(board, 2, 1, 1) == 0
assert gamma_free_fields(board, 2) == 3
assert gamma_move(board, 3, 1, 1) == 0
assert gamma_free_fields(board, 3) == 3
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_move(board, 3, 0, 1) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_golden_move(board, 2, 2, 0) == 0
assert gamma_move(board, 3, 0, 1) == 0
assert gamma_move(board, 3, 1, 2) == 1
assert gamma_golden_move(board, 3, 1, 1) == 1
gamma_delete(board)
|
[
"jakub@molinski.dev"
] |
jakub@molinski.dev
|
41aa210c76cd9e275d36d7bcd423bf5e38ea262f
|
c368073d2f20e7c1b42913e48f517aced5798925
|
/Python-usp/Semana 4/exer2.py
|
818c7da63fb227725c56412f01047b20b625fc8f
|
[] |
no_license
|
rodrigodasilv/demandas-py
|
b7f3f1329d154edd35ee4ed4f937b58270888854
|
2c8bd3279dd82200eeb8af387a924923d09ca055
|
refs/heads/main
| 2023-03-05T00:19:41.567881
| 2021-02-16T17:13:02
| 2021-02-16T17:13:02
| 337,445,608
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
# Exercise 2
# Read a positive integer n from input and print the first n odd natural
# numbers, one per line (1, 3, 5, ...).
# Grade: 10/10
n = int(input('Digite o valor de n: '))
for i in range(n):
    # The i-th odd natural number is 2*i + 1.
    print(2 * i + 1)
|
[
"noreply@github.com"
] |
noreply@github.com
|
68f0a4d3a7f0b9d6cd8bf4bf9813983513210122
|
fb62825c84c0f4c0f8b4724c62fc89eebafee787
|
/Videoapp/migrations/0011_merge_20210222_1736.py
|
676d2aff7f1e680f2f0462e707d004b350ef22cf
|
[] |
no_license
|
toushi100/VODS
|
8a43c8625357a1ff9840dfcb8879a856ee5d7df0
|
053f5562b4d50286767f9f9be18dc9c2a34bb481
|
refs/heads/main
| 2023-06-14T03:57:27.764301
| 2021-07-07T22:52:45
| 2021-07-07T22:52:45
| 313,333,797
| 0
| 2
| null | 2021-03-16T05:17:31
| 2020-11-16T14:48:44
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 275
|
py
|
# Generated by Django 3.1.2 on 2021-02-22 17:36
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated merge migration.

    Reconciles two divergent 0010 migration branches of the Videoapp app;
    it deliberately performs no schema operations of its own.
    """

    dependencies = [
        ('Videoapp', '0010_auto_20210221_2021'),
        ('Videoapp', '0010_auto_20210217_1648'),
    ]

    operations = [
    ]
|
[
"toushi2009@gmial.com"
] |
toushi2009@gmial.com
|
39d57df74e1e8a8b0dd0cef6f723ebb14bb7e914
|
8d7d847f109183365022112a6b76cb20fa158255
|
/NewsPage/views.py
|
9a7ae5d9d34449289c2f5e638fb85cb92dc2f9fd
|
[] |
no_license
|
Xarliend/DataViewSite
|
42e59fed9483ff2f877a40b4f31a1b82c5435ecd
|
59921492547d68908e6e8f54a062baeb05b19580
|
refs/heads/master
| 2022-11-24T15:12:01.842042
| 2020-01-04T22:21:17
| 2020-01-04T22:21:17
| 227,699,982
| 0
| 0
| null | 2022-11-04T19:31:04
| 2019-12-12T21:29:47
|
Python
|
UTF-8
|
Python
| false
| false
| 469
|
py
|
from django.shortcuts import render
from api.models import BcNews
from django.http import JsonResponse
def index(request):
    """Render the news index template with every stored BcNews entry."""
    all_news = BcNews.objects.all()
    context = {'news_list': all_news}
    return render(request, 'news_index.html', context)
def news_detail(request, path):
    """Return a single news item, looked up by exact title, as JSON.

    NOTE(review): BcNews.objects.get raises if the title is missing or
    duplicated — presumably handled upstream; confirm.
    """
    item = BcNews.objects.get(title=path)
    payload = {
        'title': item.title,
        'author': item.author,
        'date': item.date,
        'content': item.content,
    }
    return JsonResponse(payload)
|
[
"43079993+Xarliend@users.noreply.github.com"
] |
43079993+Xarliend@users.noreply.github.com
|
523370407de74d208a58350773a65f4cdeb3db70
|
1b9bd441c500e79042c48570035071dc20bfaf44
|
/sources/Content_Quality/maaseh_rav.py
|
dc357b5ead6ad4f0a88e432e20e9a2c905ee8b98
|
[] |
no_license
|
Sefaria/Sefaria-Data
|
ad2d1d38442fd68943535ebf79e2603be1d15b2b
|
25bf5a05bf52a344aae18075fba7d1d50eb0713a
|
refs/heads/master
| 2023-09-05T00:08:17.502329
| 2023-08-29T08:53:40
| 2023-08-29T08:53:40
| 5,502,765
| 51
| 52
| null | 2023-08-29T11:42:31
| 2012-08-22T00:18:38
| null |
UTF-8
|
Python
| false
| false
| 712
|
py
|
import django
django.setup()
from sefaria.model import *
import csv
from sources.functions import *
# Build one ArrayMapNode per data row of the guide CSV; these become the
# "Subject" alternate structure posted onto the Maaseh Rav index.
nodes = []
with open("Maaseh_Rav_Alt_Structure_Guide.csv") as f:
    for n, row in enumerate(csv.reader(f)):
        if n == 0:
            continue  # skip the header row
        # Columns: Hebrew title, English title, simanim range (e.g. "5-7").
        he, en, simanim = row[0:3]
        node = ArrayMapNode()
        node.add_primary_titles(en, he)
        node.depth = 0
        # wholeRef points at only the first siman of a possibly hyphenated range.
        node.wholeRef = "Maaseh Rav {}".format(simanim.split("-")[0])
        node.refs = []
        nodes.append(node.serialize())
# Fetch the existing index from the sandbox server, attach the alt structure,
# and post it back.
index = get_index_api("Maaseh Rav", server="http://ste.sandbox.sefaria.org")
index['alt_structs'] = {"Subject": {"nodes": nodes}}
post_index(index, server="http://ste.sandbox.sefaria.org")
|
[
"stevenkaplan@stevens-MacBook-Pro.local"
] |
stevenkaplan@stevens-MacBook-Pro.local
|
d6e08e3dc028e3cb4f87c36d69bdb3c0a4b371a7
|
282e756222c6db88589ae7bc9fb6c7bb010d7516
|
/src/project2/lab2.py
|
4b6eb36d660bf37f8d02aba4c0020a4edd78f1cc
|
[] |
no_license
|
luyixiao95/CS5001
|
0497a4904afb7e24900216d3846f145e3167421a
|
f7f50dda5b3f217a2c68eb463befc44da85dd222
|
refs/heads/master
| 2023-06-25T20:47:27.826215
| 2021-07-20T03:22:19
| 2021-07-20T03:22:19
| 387,658,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 913
|
py
|
#luyi xiao
#CS 5001 2021 Spring
#Lab 2 code file
import turtle as t, random
def goto(x, y):
    """Move the turtle to (x, y) without drawing a line."""
    t.penup()
    t.goto(x, y)
    t.pendown()
def rectangle(x, y, scale):
    """Draw a rectangle with sides 50*scale and 100*scale, starting at (x, y)."""
    goto(x, y)
    # Two pairs of sides: a short (50*scale) edge then a long (100*scale)
    # edge, turning right 90 degrees after each.
    for _ in range(2):
        t.forward(50 * scale)
        t.right(90)
        t.forward(100 * scale)
        t.right(90)
def house(x, y, scale):
    """Draw a 'house' as three stacked rectangles, each half the size below it."""
    base = 50 * scale
    # Bottom block at full scale.
    rectangle(x, y, scale)
    # Middle block: half scale, shifted right and raised onto the bottom block.
    rectangle(x + base / 4, y + 100 * scale / 2, scale / 2)
    # Top block: quarter scale, stacked on the middle block.
    rectangle(x + base / 4 + base / 8, y + 100 * scale / 2 + 100 * scale / 4, scale / 4)
t.tracer(False)  # disable animation so the scene renders instantly
house(-100,-100,1.5)
def random_house(n):
    """Draw n houses at random positions and random scales.

    NOTE(review): defined but never called in this script.
    """
    for i in range(n):
        house(random.randint(-50,50), random.randint(-50,50), random.random())
t.mainloop()  # keep the window open until the user closes it
|
[
"xiaoluyi95@gmail.com"
] |
xiaoluyi95@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.