Dataset schema (one row per source file; column, type, and the viewer's observed range or class count):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 2 to 616)
- content_id: string (length 40)
- detected_licenses: list (length 0 to 69)
- license_type: string (2 classes)
- repo_name: string (length 5 to 118)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (length 4 to 63)
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64 (2.91k to 686M, nullable)
- star_events_count: int64 (0 to 209k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string (213 classes)
- src_encoding: string (30 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (2 to 10.3M)
- extension: string (246 classes)
- content: string (length 2 to 10.3M)
- authors: list (length 1)
- author_id: string (length 0 to 212)

Rows follow, one record per file (field: value, pipe-separated), with the file content inline after each record.
blob_id: 8762d9569f6a4062b10098bbe7048b61fbeb966f | directory_id: 7ac75b06cae17762da5ac1e0d1434ee6f8e9f992 | path: /py4e/chapter02/ex02_05.py | content_id: cf7d0ae0c19f54510b6419ce87afebe5f1e07b60 | detected_licenses: [] | license_type: no_license | repo_name: reeeborn/py4e | snapshot_id: 6f7ab25afa015680e7e87cf3a6da7d3ea0fefac2 | revision_id: 6df41c64024491072aa3d62c67192645d5210b20 | branch_name: refs/heads/master | visit_date: 2020-11-27T04:38:38.332462 | revision_date: 2018-09-02T00:07:25 | committer_date: 2018-09-02T00:07:25 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 229 | extension: py
content:
# Python for Everyone
# Chapter 2 Exercise 5
# Convert Celsius temp to Fahrenheit temp
tempC = input('Enter Temperature(Degrees Celsius) ')
tempF = (int(tempC) * 9 / 5) + 32
print("The temperature is", tempF, "degrees Fahrenheit")
authors: ["djmartz77@gmail.com"] | author_id: djmartz77@gmail.com
blob_id: 4ece02798c873f833c7e4af2748359a5b373b3a5 | directory_id: 975effcca2be35e52b4beab408340a9e7a43c62b | path: /food/firebase.py | content_id: 6fd6486b5f022bcfe17e5a5bbe8be640a20fe701 | detected_licenses: [] | license_type: no_license | repo_name: hello1988/food | snapshot_id: 5f95e8b3d64d2a3e214fe3b173dd17deb7f7d537 | revision_id: 3e593bb0ac8603420fc9da7e4f544c3a59969b2d | branch_name: refs/heads/master | visit_date: 2022-12-11T01:27:14.586275 | revision_date: 2018-06-27T03:34:03 | committer_date: 2018-06-27T03:34:03 | github_id: 122,143,797 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2022-09-23T21:39:19 | gha_created_at: 2018-02-20T01:45:04 | gha_language: JavaScript | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,366 | extension: py
content:
# encoding=utf8
import pyrebase
import datetime
import time

from django.conf import settings


class Firebase():
    def datetime_timestamp(self):
        dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        s = time.mktime(time.strptime(dt, '%Y-%m-%d %H:%M:%S'))
        return int(s)

    firebase = pyrebase.initialize_app(settings.FIREBASE_CONFIG)
    storage = firebase.storage()
    db = firebase.database()

    def uploadImg(self, event, imagePath, imgName):
        line_id = event.source.sender_id
        firebase_folder = 'food'
        return self.uploadImgWithToken(firebase_folder, line_id, imagePath, imgName)

    def uploadImgWithToken(self, firebase_folder, line_id, imagePath, imgName):
        timestamp = self.datetime_timestamp()
        # imagePathForFirebase = firebase_folder + userToken + str(timestamp) + imgName
        imagePathForFirebase = '{}/{}/{}'.format(firebase_folder, line_id, imgName)
        saveImgStatusJson = self.storage.child(imagePathForFirebase).put(imagePath)
        urlToken = saveImgStatusJson['downloadTokens']
        # url = self.storage.child("userImages/" + imgName).get_url()
        imageUrl = self.storage.child(imagePathForFirebase).get_url(urlToken)
        # https://firebasestorage.googleapis.com/v0/b/storage-url.appspot.com/o/images%2Fexample.jpg?alt=media
        return imageUrl
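For reference, a hypothetical call site for the class above (the values are invented; it assumes Django settings provide a valid FIREBASE_CONFIG and that pyrebase is installed):

# Hypothetical usage sketch; 'U0123456789abcdef' stands in for a LINE sender id.
fb = Firebase()
image_url = fb.uploadImgWithToken(
    firebase_folder='food',
    line_id='U0123456789abcdef',
    imagePath='/tmp/photo.jpg',
    imgName='photo.jpg',
)
print(image_url)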
authors: ["hello19881990@gmail.com"] | author_id: hello19881990@gmail.com
blob_id: c53dee9264542f7b3e2324679aef5f054ab766b0 | directory_id: 1ad8ed97b5d1886be33fd0d11319f2ef90d52e97 | path: /opencv/face_randmark.py | content_id: d2b0e3a146fa0fb28ee0022308a92a656ca60aa9 | detected_licenses: [] | license_type: no_license | repo_name: hmoni2/python | snapshot_id: f8001cf62c3e23ade848b51f47d68feac1586015 | revision_id: fff57494e6109ed04e7b2fd0f2ca6727b9ad8c66 | branch_name: refs/heads/master | visit_date: 2023-04-22T19:58:31.862121 | revision_date: 2021-04-27T00:43:45 | committer_date: 2021-04-27T00:43:45 | github_id: 359,983,202 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,412 | extension: py
content:
import cv2
import time

# Initial setup
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
cap.set(3, 480)
cap.set(4, 320)
cascPath = "haarcascade_frontalface_default.xml"  # must be in the python folder
faceCascade = cv2.CascadeClassifier(cascPath)

# Read an image
# image = cv2.imread(imagepath)
start_time = time.time()
count = 0  # take pictures from the webcam and save the images

while True:
    ret, frame = cap.read()
    if not ret:
        print("Video read error")
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(  # detect objects of different sizes
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(10, 10),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    # Draw a rectangle on the frame from (x, y) to (x + w, y + h) (color 0 255 0, thickness 2)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)
    cv2.imshow("video show", frame)
    key = cv2.waitKey(1)
    if key == 27:
        break
    # Take a picture every second
    if time.time() - start_time >= 1:
        img_name = "opencv_frame_{}.png".format(count)
        cv2.imwrite(img_name, frame, params=[cv2.IMWRITE_PNG_COMPRESSION, 0])
        print("{0} written".format(count))
        start_time = time.time()
        count += 1

cap.release()
cv2.destroyAllWindows()
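The cascade XML above must sit next to the script. A more portable load (assuming the opencv-python wheel, which exposes the bundled cascades via cv2.data.haarcascades):

import cv2

# Resolve the cascade from the package data instead of a local copy.
casc_path = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
face_cascade = cv2.CascadeClassifier(casc_path)
print(face_cascade.empty())  # False means the cascade loaded correctly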
authors: ["hmmmoni2@gmail.com"] | author_id: hmmmoni2@gmail.com
blob_id: 0a7ad3d21b62c28ce3cf592d4bc5bfe95cfbece9 | directory_id: 1ed98f507301298216c95f55696909b8d61256b4 | path: /django_project/urls.py | content_id: 0f43009e3c3de892042246824f6c47c8772398ef | detected_licenses: [] | license_type: no_license | repo_name: KevinLi3/Blog-web-app | snapshot_id: 1b030ed6d4ee5229597d74b19256327efc6eccce | revision_id: 358039a85db709bee8a207dd0d5a128efbad6bbb | branch_name: refs/heads/master | visit_date: 2022-12-09T10:23:33.116779 | revision_date: 2020-09-19T03:09:45 | committer_date: 2020-09-19T03:09:45 | github_id: 294,915,177 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,389 | extension: py
content:
"""django_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from users import views as user_views
urlpatterns = [
path('admin/', admin.site.urls),
path('register/', user_views.register, name='register'),
path('profile/', user_views.profile, name='profile'),
path('login/', auth_views.LoginView.as_view(template_name='users/login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='users/logout.html'), name='logout'),
path('', include('blog.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
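This URLconf includes 'blog.urls', which is not part of this row. A hypothetical minimal blog/urls.py in the spirit of the docstring's "Including another URLconf" example (views.home is assumed, not confirmed by the repo):

# Hypothetical blog/urls.py, for illustration only.
from django.urls import path
from . import views

urlpatterns = [
    path('', views.home, name='blog-home'),
]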
authors: ["li.kevin5599@gmail.com"] | author_id: li.kevin5599@gmail.com
blob_id: b9600c0958de6b5ab678b485bd0979bbea995dbc | directory_id: caea972fef4196f0e76e935a930986ce3538441c | path: /main.py | content_id: 45c37ccf4e2debb505503bec91c707c8c39c9694 | detected_licenses: [] | license_type: no_license | repo_name: CassieBoyd/student-exercises | snapshot_id: ad9bf10b5e8616df0663ce2b0da5a3b0b3d3529e | revision_id: 2e031c2453b401fa9c61589924af8984676758af | branch_name: refs/heads/master | visit_date: 2020-12-18T14:46:07.856561 | revision_date: 2020-02-04T21:12:06 | committer_date: 2020-02-04T21:12:06 | github_id: 235,424,987 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2020-02-04T21:12:07 | gha_created_at: 2020-01-21T19:31:37 | gha_language: Python | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,911 | extension: py
content:
from exercise import Exercise
from cohort import Cohort
from student import Student
from instructor import Instructor
"""
Once you have defined all of your custom types, go to main.py, import the classes you need, and implement the following logic.
Create 4, or more, exercises.
Create 3, or more, cohorts.
Create 4, or more, students and assign them to one of the cohorts.
Create 3, or more, instructors and assign them to one of the cohorts.
Have each instructor assign 2 exercises to each of the students.
"""
# Creating exercises
paint_the_fence = Exercise("Paint The Fence","JavaScript")
sand_the_floor = Exercise("Sand The Floor", "JavaScript")
paint_the_house = Exercise("Paint The House", "JavaScript")
wax_on = Exercise("Wax On", "Python")
wax_off = Exercise("Wax Off", "Python")
# Creating cohorts
day_cohort_98 = Cohort("Day Cohort 98")
cobra_kai = Cohort("Night Cohort Cobra Kai")
miyagi_dojo = Cohort("Day Cohort Miyagi Dojo")
# Creating students and assigning them to cohorts with .append()
daniel_larusso = Student("Daniel", "LaRusso", "Daniel-san", "Day Cohort Miyagi Dojo")
miyagi_dojo.students.append(daniel_larusso)
johhny_lawrence = Student("Johnny", "Lawrence", "L3g5w33p3r", "Night Cohort Cobra Kai")
cobra_kai.students.append(johhny_lawrence)
john_doe = Student("John", "Doe", "DoeBoi", "Day Cohort 98")
day_cohort_98.students.append(john_doe)
janet_doe = Student("Janet", "Doe", "DammitJanet", "Day Cohort 98")
day_cohort_98.students.append(janet_doe)
# Checking that students are in their cohorts
# print(cobra_kai.students[0].first_name)
# print(miyagi_dojo.students[0].first_name)
# print(day_cohort_98.students[0].first_name)
# print(day_cohort_98.students[1].first_name)
# Creating instructors
nariyoshi_miyagi = Instructor("Nariyoshi", "Miyagi", "Mr.Miyagi", "Day Cohort Miyagi Dojo", "Crane Kick")
john_kreese = Instructor("John", "Kreese", "NoMercy", "Night Cohort Cobra Kai", "Sweep The Leg")
frank_n_furter = Instructor("Frank", "Furter","Sw337_7ransv3s7173", "Day Cohort 98", "The Time Warp")
# Instructors assigning exercises
nariyoshi_miyagi.assign_exercise(daniel_larusso, paint_the_fence)
nariyoshi_miyagi.assign_exercise(daniel_larusso, paint_the_house)
print("Daniel,", daniel_larusso.exercises[0].name)
print("Daniel,", daniel_larusso.exercises[1].name)
john_kreese.assign_exercise(johhny_lawrence, wax_on)
john_kreese.assign_exercise(johhny_lawrence, wax_off)
print("Johnny,", johhny_lawrence.exercises[0].name)
print("Johnny,", johhny_lawrence.exercises[1].name)
for student in day_cohort_98.students:
    frank_n_furter.assign_exercise(student, wax_on)
    frank_n_furter.assign_exercise(student, sand_the_floor)

for exercise in janet_doe.exercises:
    print("Janet,", exercise.name)

for exercise in john_doe.exercises:
    print("John,", exercise.name)
print(frank_n_furter.first_name)
print(frank_n_furter.specialty)
print(janet_doe.first_name)
authors: ["cassandra.bacon@gmail.com"] | author_id: cassandra.bacon@gmail.com
blob_id: 374ad93c2c3d59b6be6b5e6933cd527e0fb38404 | directory_id: 4541aa803edcc669d9c30da84713dab0372147b7 | path: /dj-wineshop/cart/migrations/0001_initial.py | content_id: c3fec171495942796e4a0348594e1046531e2738 | detected_licenses: [] | license_type: no_license | repo_name: gflexx/wineshop | snapshot_id: c99c08d07324b51f80d9dc3cbe2de345d899ce0e | revision_id: 79d9c3024b61a7b839b5caef5482a33ae31cea04 | branch_name: refs/heads/master | visit_date: 2023-05-02T01:51:58.961504 | revision_date: 2021-05-15T10:12:46 | committer_date: 2021-05-15T10:12:46 | github_id: 367,459,429 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,404 | extension: py
content:
# Generated by Django 3.2 on 2021-05-14 18:54

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('wine', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('checked_out', models.BooleanField(default=False)),
                ('total', models.DecimalField(decimal_places=1, default=0.0, max_digits=12)),
                ('wines', models.ManyToManyField(blank=True, to='wine.Wine')),
            ],
        ),
        migrations.CreateModel(
            name='CartItem',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.PositiveIntegerField(default=1)),
                ('total', models.DecimalField(decimal_places=1, default=0.0, max_digits=12)),
                ('added', models.DateTimeField(auto_now_add=True)),
                ('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cart.cart')),
                ('wine', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wine.wine')),
            ],
        ),
    ]
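For orientation, a model sketch that would generate a migration of this shape, reconstructed from the operations above (the repo's actual models.py is not shown, and the Wine import path is assumed):

# Reconstructed sketch of cart/models.py; field options mirror the migration.
from django.db import models
from wine.models import Wine  # assumed location of the Wine model


class Cart(models.Model):
    checked_out = models.BooleanField(default=False)
    total = models.DecimalField(max_digits=12, decimal_places=1, default=0.0)
    wines = models.ManyToManyField(Wine, blank=True)


class CartItem(models.Model):
    quantity = models.PositiveIntegerField(default=1)
    total = models.DecimalField(max_digits=12, decimal_places=1, default=0.0)
    added = models.DateTimeField(auto_now_add=True)
    cart = models.ForeignKey(Cart, on_delete=models.CASCADE)
    wine = models.ForeignKey(Wine, on_delete=models.CASCADE)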
authors: ["glennenri24@gmail.com"] | author_id: glennenri24@gmail.com
blob_id: 5af737dfb28d40b677608a602d3dc01dd931f3e4 | directory_id: bf9ba25347fac407af678dda907adce43d8d1abd | path: /lesson2.py | content_id: 515002972d20f35efc23fda8358d9957fb06ac87 | detected_licenses: [] | license_type: no_license | repo_name: Safintim/fp | snapshot_id: 7026626bbd1c257ade2534dbfc5447ea0497afb8 | revision_id: 966508246af95f2dafd4d77d5c3026b883a5f318 | branch_name: refs/heads/master | visit_date: 2020-03-31T07:41:11.626933 | revision_date: 2018-10-09T09:56:22 | committer_date: 2018-10-09T09:56:22 | github_id: 152,030,932 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 333 | extension: py
content:
from pymonad import curry


# 2.3.1
@curry
def concatenation(x, y):
    return x + y


hello_arg = concatenation('Hello, ')
print(hello_arg('Sergey'))


# 2.3.1
@curry
def greeting_func(greeting, mark1, mark2, name):
    return greeting + mark1 + name + mark2


greeting = greeting_func('Hello', ', ', '!')
print(greeting('Petya'))
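The same partial application is available in the standard library; here is a functools.partial rendering of the first example (an editor sketch with no pymonad dependency):

from functools import partial


def concatenation(x, y):
    return x + y


hello_arg = partial(concatenation, 'Hello, ')
print(hello_arg('Sergey'))  # -> Hello, Sergey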
authors: ["Sarami96"] | author_id: Sarami96
blob_id: 62cfd539b1bebbe78a6871db701185082f9c9562 | directory_id: 248f2b43ad73ebe3b6bb494ecc04d358ac5bb650 | path: /extension.py | content_id: d7b886dd5569c96038f305cbcd7ce2ebd784ef07 | detected_licenses: [] | license_type: no_license | repo_name: Wingzxy/Cloud-Computing | snapshot_id: 36bfd288d748d845b6ce900e5b3cc3e7e7b0c0b8 | revision_id: 9612edf2b84b02776b4a48e02230ffd0d5f0b0de | branch_name: refs/heads/master | visit_date: 2020-09-26T11:06:16.205563 | revision_date: 2019-12-06T11:49:21 | committer_date: 2019-12-06T11:49:21 | github_id: 226,242,083 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 283 | extension: py
content:
from init import multi_upload
import sys


def gpfs(time, confidence):
    if time < 60:
        print("Can't find a golden nonce")
    else:
        multi_upload(8, 8)


if __name__ == '__main__':
    t = sys.argv[1]
    T = int(t)
    l = sys.argv[2]
    L = int(l)
    gpfs(T, L)
authors: ["noreply@github.com"] | author_id: Wingzxy.noreply@github.com
blob_id: c70eed05a92b16acabfa6d414235d7b4202bfb57 | directory_id: be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | path: /Gauss_v45r10p1/Gen/DecFiles/options/12145101.py | content_id: 384c0e17cafb0bcd9dad51a29f6d1105b5c8d67b | detected_licenses: [] | license_type: no_license | repo_name: Sally27/backup_cmtuser_full | snapshot_id: 34782102ed23c6335c48650a6eaa901137355d00 | revision_id: 8924bebb935b96d438ce85b384cfc132d9af90f6 | branch_name: refs/heads/master | visit_date: 2020-05-21T09:27:04.370765 | revision_date: 2018-12-12T14:41:07 | committer_date: 2018-12-12T14:41:07 | github_id: 185,989,173 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,783 | extension: py
content:
# file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/12145101.py generated: Wed, 25 Jan 2017 15:25:20
#
# Event Type: 12145101
#
# ASCII decay Descriptor: [B+ -> (J/psi(1S) -> mu+ mu-) (K*+ -> K0S pi+)]cc
#
from Configurables import Generation
Generation().EventType = 12145101
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bu_JpsiKst,mm=DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 521,-521 ]
# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 521
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 521,-521 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_521.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 12145101
authors: ["slavomirastefkova@b2pcx39016.desy.de"] | author_id: slavomirastefkova@b2pcx39016.desy.de
blob_id: f2cc87fdd70ce779e399f29725f12173499bfb4e | directory_id: a9e173b920222f8a5fa78af805cb0afaf0f1f26d | path: /accounts/views.py | content_id: 4925289103105fce4cd2029b217a014f16b1b6c7 | detected_licenses: [] | license_type: no_license | repo_name: mmomiof/Social | snapshot_id: be34bfddc9502812800de5bb9480620abaff3b9f | revision_id: 7838a78461e5cc7ce0d3418ba92da0a81c1bf377 | branch_name: refs/heads/master | visit_date: 2022-12-11T09:52:37.885545 | revision_date: 2020-09-17T14:51:46 | committer_date: 2020-09-17T14:51:46 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,164 | extension: py
content:
from django.shortcuts import render, redirect
from django.contrib import messages, auth
from django.contrib.auth.models import User


def register(request):
    if request.method == 'POST':
        # Get form values
        first_name = request.POST['first_name']
        last_name = request.POST['last_name']
        username = request.POST['username']
        email = request.POST['email']
        password = request.POST['password']
        password2 = request.POST['password2']
        # Check if passwords match
        if password == password2:
            # Check for new username
            if User.objects.filter(username=username).exists():
                messages.error(request, 'That username is taken')
                return redirect('register')
            else:
                # Check for new email
                if User.objects.filter(email=email).exists():
                    messages.error(request, 'That email is being used')
                    return redirect('register')
                else:
                    # Looks good
                    user = User.objects.create_user(
                        username=username,
                        password=password,
                        email=email,
                        first_name=first_name,
                        last_name=last_name
                    )
                    user.save()
                    messages.success(request, 'You are now registered and can log in')
                    return redirect('login')
        else:
            messages.error(request, 'Passwords do not match')
            return redirect('register')
    else:
        return render(request, 'register.html')


def login(request):
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        user = auth.authenticate(username=username, password=password)
        if user is not None:
            auth.login(request, user)
            messages.success(request, 'You are now logged in')
            return redirect('dashboard')
        else:
            messages.error(request, 'Invalid credentials')
            return redirect('login')
    else:
        return render(request, 'login.html')


def logout(request):
    if request.method == 'POST':
        auth.logout(request)
        messages.success(request, 'You are now logged out')
        return redirect('index')


def dashboard(request):
    return render(request, 'dashboard.html')
authors: ["65576248+nayazer@users.noreply.github.com"] | author_id: 65576248+nayazer@users.noreply.github.com
blob_id: 7b62197a3a0395b563b62d68bc34066d8fa2643f | directory_id: b972ff5e0d3160130c5fb626f8480e601a6e02d6 | path: /myDB.db.py | content_id: bc78ac542e0fb5add9980d5895121f81b756324b | detected_licenses: [] | license_type: no_license | repo_name: Libbybacon/Python_Projects | snapshot_id: 5f67cece102b00aedbf6f2fe680ecfb5da67c2ce | revision_id: b48993c22f4c93d23bf9269448bd9f2b4630d4cd | branch_name: refs/heads/main | visit_date: 2023-03-06T12:03:59.564868 | revision_date: 2021-02-18T03:59:30 | committer_date: 2021-02-18T03:59:30 | github_id: 335,131,688 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,068 | extension: py
content:
# This script creates a new database and adds certain files from a given list into it.
import sqlite3

conn = sqlite3.connect('filesDB.db')

fileList = ('information.docx', 'Hello.txt', 'myImage.png',
            'myMovie.mpg', 'World.txt', 'data.pdf', 'myPhoto.jpg')

# Create a new table with two fields in the filesDB database
with conn:
    cur = conn.cursor()
    cur.execute("CREATE TABLE IF NOT EXISTS fileNames( \
                ID INTEGER PRIMARY KEY AUTOINCREMENT, \
                fileName STRING \
                )")
    conn.commit()
conn.close()

# Iterates through fileList to find files that end in '.txt',
# then adds those files to the fileNames table in the DB
# and prints the names of the .txt files to the console.
conn = sqlite3.connect('filesDB.db')
for file in fileList:
    if file.endswith('.txt'):
        with conn:
            cur = conn.cursor()
            cur.execute("INSERT INTO fileNames(fileName) VALUES (?)", (file,))
            conn.commit()
        print("The following file has the '.txt' extension: {}".format(file))
conn.close()
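A small verification pass (an editor sketch, not part of the original script) that re-opens the database and prints the rows inserted above:

import sqlite3

conn = sqlite3.connect('filesDB.db')
with conn:
    cur = conn.cursor()
    for row_id, file_name in cur.execute("SELECT ID, fileName FROM fileNames"):
        print(row_id, file_name)
conn.close()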
authors: ["libbybacon@github.com"] | author_id: libbybacon@github.com
blob_id: 772a6ad3a6397d97884d96b0c2eff4b9458abb49 | directory_id: 5c442b0d98de3227d00ca665636b95482b983d00 | path: /QBN_train.py | content_id: 302ee99dcf000c06693ffa2bdaae09010c62f9ab | detected_licenses: [] | license_type: no_license | repo_name: modanesh/visTorch | snapshot_id: 01752160e66d7b6d70034b8a6f52fbcff01b2456 | revision_id: 589e0d51346f3489ea85bbe3fc9225e4e2ead6d2 | branch_name: refs/heads/master | visit_date: 2020-05-17T04:59:54.970409 | revision_date: 2019-11-14T01:22:25 | committer_date: 2019-11-14T01:22:25 | github_id: 183,521,373 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2019-04-25T23:02:37 | gha_created_at: 2019-04-25T23:02:37 | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 23,925 | extension: py
content:
import math
import os
import pickle
import random
import torch
import torch.nn as nn
from torch import optim
import numpy as np
from env_wrapper import atari_wrapper
from additional_functions import TernaryTanh
from torch.autograd import Variable
import tools as tl
import torch.nn.functional as F
from tools import plot_data
import logging
class GRUNet(nn.Module):
    """
    Gated Recurrent Unit Network(GRUNet) definition.
    """

    def __init__(self, input_size, gru_cells, total_actions):
        super(GRUNet, self).__init__()
        self.gru_units = gru_cells
        self.noise = False
        self.conv1 = nn.Conv2d(input_size, 32, 3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(32, 16, 3, stride=2, padding=1)
        self.conv4 = nn.Conv2d(16, 8, 3, stride=2, padding=1)
        self.input_ff = nn.Sequential(self.conv1, nn.ReLU(),
                                      self.conv2, nn.ReLU(),
                                      self.conv3, nn.ReLU(),
                                      self.conv4, nn.ReLU6())
        self.input_c_features = 8 * 5 * 5
        self.input_c_shape = (8, 5, 5)
        self.gru = nn.GRUCell(self.input_c_features, gru_cells)
        self.critic_linear = nn.Linear(gru_cells, 1)
        self.actor_linear = nn.Linear(gru_cells, total_actions)
        self.apply(tl.weights_init)
        self.actor_linear.weight.data = tl.normalized_columns_initializer(self.actor_linear.weight.data, 0.01)
        self.actor_linear.bias.data.fill_(0)
        self.critic_linear.weight.data = tl.normalized_columns_initializer(self.critic_linear.weight.data, 1.0)
        self.critic_linear.bias.data.fill_(0)
        self.gru.bias_ih.data.fill_(0)
        self.gru.bias_hh.data.fill_(0)

    def forward(self, input, input_fn=None, hx_fn=None, inspect=False):
        input, hx = input
        c_input = self.input_ff(input)
        c_input = c_input.view(-1, self.input_c_features)
        input, input_x = input_fn(c_input) if input_fn is not None else (c_input, c_input)
        ghx = self.gru(input, hx)
        # Keep the noise during both training as well as evaluation
        # c_input = gaussian(c_input, self.training, mean=0, std=0.05, one_sided=True)
        # c_input = tl.uniform(c_input, self.noise, low=-0.01, high=0.01, enforce_pos=True)
        # ghx = tl.uniform(ghx, self.noise, low=-0.01, high=0.01)
        hx, bhx = hx_fn(ghx) if hx_fn is not None else (ghx, ghx)
        if inspect:
            return self.critic_linear(hx), self.actor_linear(hx), hx, (ghx, bhx, c_input, input_x)
        else:
            return self.critic_linear(hx), self.actor_linear(hx), hx

    def init_hidden(self, batch_size=1):
        return torch.zeros(batch_size, self.gru_units)

    def get_action_linear(self, state):
        return self.actor_linear(state)

    def transact(self, o_x, hx):
        hx = self.gru(o_x, hx)
        return hx
class ConvObsQBNet(nn.Module):
    def __init__(self, channels, x_features):
        super(ConvObsQBNet, self).__init__()
        self.noise = False
        self.qbn_input_size = 8 * 5 * 5
        self.latent_size = x_features
        f1 = int(8 * x_features)
        self.conv_encoder = nn.Sequential(nn.Conv2d(channels, 32, 3, stride=2, padding=1),
                                          nn.ReLU(),
                                          nn.Conv2d(32, 32, 3, stride=2, padding=1),
                                          nn.ReLU(),
                                          nn.Conv2d(32, 16, 3, stride=2, padding=1),
                                          nn.ReLU(),
                                          nn.Conv2d(16, 8, 3, stride=2, padding=1),
                                          nn.ReLU6())
        self.linear_encoder = nn.Sequential(nn.Linear(self.qbn_input_size, f1),
                                            nn.Tanh(),
                                            nn.Linear(f1, x_features),
                                            TernaryTanh())
        self.linear_decoder = nn.Sequential(nn.Linear(x_features, f1),
                                            nn.Tanh(),
                                            nn.Linear(f1, self.qbn_input_size),
                                            nn.ReLU6())
        self.conv_decoder = nn.Sequential(nn.ConvTranspose2d(8, 16, 3, stride=2, padding=1, output_padding=1),
                                          nn.ReLU(),
                                          nn.ConvTranspose2d(16, 32, 3, stride=2, padding=1, output_padding=1),
                                          nn.ReLU(),
                                          nn.ConvTranspose2d(32, 32, 3, stride=2, padding=1, output_padding=1),
                                          nn.ReLU(),
                                          nn.ConvTranspose2d(32, channels, 3, stride=2, padding=1, output_padding=1),
                                          nn.Sigmoid()
                                          )

    def forward(self, x):
        conv_encoded = self.conv_encoder(x)
        linear_encoder_input = torch.reshape(conv_encoded, (x.shape[0], self.qbn_input_size))
        linear_encoded = self.linear_encoder(linear_encoder_input)
        linear_decoded = self.linear_decoder(linear_encoded)
        conv_decoder_input = torch.reshape(linear_decoded, (x.shape[0], 8, 5, 5))
        conv_decoded = self.conv_decoder(conv_decoder_input.detach())
        return conv_decoded, linear_encoded

    def encode(self, x):
        conv_encoded = self.conv_encoder(x)
        linear_encoder_input = conv_encoded.view(-1, self.qbn_input_size)
        linear_encoded = self.linear_encoder(linear_encoder_input)
        return linear_encoded

    def decode(self, x):
        linear_decoded = self.linear_decoder(x)
        conv_decoder_input = torch.reshape(linear_decoded, (1, 8, 5, 5))
        conv_decoded = self.conv_decoder(conv_decoder_input)
        return conv_decoded

    def init_hidden(self, batch_size=1):
        return torch.zeros(batch_size, self.gru_units)
class ObsQBNet(nn.Module):
    """
    Quantized Bottleneck Network(QBN) for observation features.
    """

    def __init__(self, input_size, x_features):
        super(ObsQBNet, self).__init__()
        self.bhx_size = x_features
        f1 = int(8 * x_features)
        self.encoder = nn.Sequential(nn.Linear(input_size, f1),
                                     nn.Tanh(),
                                     nn.Linear(f1, x_features),
                                     TernaryTanh())
        self.decoder = nn.Sequential(nn.Linear(x_features, f1),
                                     nn.Tanh(),
                                     nn.Linear(f1, input_size),
                                     nn.ReLU6())

    def forward(self, x):
        encoded = self.encode(x)
        decoded = self.decode(encoded)
        return decoded, encoded

    def encode(self, x):
        return self.encoder(x)

    def decode(self, x):
        return self.decoder(x)
def generate_bottleneck_data(net, env, episodes, save_path, cuda=False, eps=(0, 0), max_steps=None):
    """
    Generating bottleneck data for the given network.

    :param net: given network
    :param env: given environment
    :param episodes: number of episodes
    :param save_path: path to save data in
    :param cuda: check if cuda is available
    :param max_steps: maximum number of steps to take. used for exploration.
    :return: observation and hidden state bottleneck data
    """
    if os.path.exists(save_path):
        # unpickling after reading the file is efficient
        hx_train_data, hx_test_data, obs_train_data, obs_test_data = pickle.loads(open(save_path, "rb").read())
    else:
        logging.info('No Data Found @ path : {}'.format(save_path))
        logging.info('Generating BottleNeck Data..')
        bottleneck_data = {}
        hx_data, obs_data, action_data = [], [], []
        all_ep_rewards = []
        with torch.no_grad():
            for ep in range(episodes):
                done = False
                obs = env.reset()
                hx = Variable(net.init_hidden())
                ep_reward = 0
                act_count = 0
                exploration_start_step = random.choice(range(0, max_steps, int(0.02 * max_steps)))
                while not done:
                    # env.render()
                    obs = Variable(torch.Tensor(obs)).unsqueeze(0)
                    if cuda:
                        hx = hx.cuda()
                        obs = obs.cuda()
                    critic, logit, hx, (_, _, obs_c, _) = net((obs, hx), inspect=True)
                    if exploration_start_step >= act_count and random.random() < eps[ep % len(eps)]:
                        action = env.action_space.sample()
                    else:
                        prob = F.softmax(logit, dim=1)
                        action = int(prob.max(1)[1].data.cpu().numpy())
                    if action not in bottleneck_data:
                        bottleneck_data[action] = {'hx_data': [], 'obs_data': []}
                    bottleneck_data[action]['obs_data'].append(obs.data.cpu().numpy()[0].tolist())
                    bottleneck_data[action]['hx_data'].append(hx.data.cpu().numpy()[0].tolist())
                    obs, reward, done, info = env.step(action)
                    action_data.append(action)
                    act_count += 1
                    done = done if act_count <= max_steps else True
                    ep_reward += reward
                logging.info('episode:{} reward:{}'.format(ep, ep_reward))
                all_ep_rewards.append(ep_reward)
        logging.info('Average Performance:{}'.format(sum(all_ep_rewards) / len(all_ep_rewards)))
        hx_train_data, hx_test_data, obs_train_data, obs_test_data = [], [], [], []
        for action in bottleneck_data.keys():
            hx_train_data += bottleneck_data[action]['hx_data']
            hx_test_data += bottleneck_data[action]['hx_data']
            obs_train_data += bottleneck_data[action]['obs_data']
            obs_test_data += bottleneck_data[action]['obs_data']
            # logging.info('Action: {} Hx Data: {} Obs Data: {}'.format(action, len(np.unique(bottleneck_data[action]['hx_data'], axis=0).tolist()), len(np.unique(bottleneck_data[action]['obs_data'], axis=0).tolist())))
        obs_test_data = np.unique(obs_test_data, axis=0).tolist()
        hx_test_data = np.unique(hx_test_data, axis=0).tolist()
        random.shuffle(hx_train_data)
        random.shuffle(obs_train_data)
        random.shuffle(hx_test_data)
        random.shuffle(obs_test_data)
        pickle.dump((hx_train_data, hx_test_data, obs_train_data, obs_test_data), open(save_path, "wb"))
        logging.info('Data Sizes:')
        logging.info('Hx Train:{} Hx Test:{} Obs Train:{} Obs Test:{}'.format(len(hx_train_data), len(hx_test_data), len(obs_train_data), len(obs_test_data)))
    return hx_train_data, hx_test_data, obs_train_data, obs_test_data
def train(net, obs_train_data, obs_test_data, optimizer, model_path, plot_dir, batch_size, epochs, target_net, cuda=False, grad_clip=None, env=None, low=0, high=0.05, target_test_episodes=1, base_background=None):
    """
    Train the QBN.

    :param net: given network
    :param obs_train_data: observation data to train the network on
    :param obs_test_data: observation data to evaluate the network on
    :param optimizer: optimizer method (Adam is preferred)
    :param model_path: path to where save the model
    :param plot_dir: path to where save the plots
    :param batch_size: batch size
    :param epochs: number of training epochs
    :param cuda: check if cuda is available
    :param grad_clip: max norm of the gradients
    :param env: environment
    :param low: lower bound of noise data
    :param high: upper bound of noise data
    :param target_test_episodes: number of episodes to test on
    :return: returns the trained model
    """
    mse_loss = nn.MSELoss().cuda() if cuda else nn.MSELoss()
    train_data, test_data = obs_train_data, obs_test_data
    min_loss_i, best_perf_i = None, None
    batch_loss_data, epoch_losses, test_losses, test_perf_data = [], [], [], []
    total_batches = math.ceil(len(train_data) / batch_size)
    for epoch in range(epochs):
        net.train()
        batch_losses = []
        random.shuffle(train_data)
        # for t_i in range(len(train_data)):
        for b_i in range(total_batches):
            batch_input = train_data[b_i:b_i + batch_size]
            # input = train_data[t_i]
            target = Variable(torch.FloatTensor(batch_input))
            # batch_input = torch.FloatTensor(batch_input)
            batch_input = Variable(torch.FloatTensor(batch_input), requires_grad=True)
            if cuda:
                batch_input, target = batch_input.cuda(), target.cuda()
            if base_background is None:
                batch_output, _ = net(batch_input)
            else:
                batch_delta_output, _ = net(batch_input)
                batch_output = Variable(torch.FloatTensor(base_background)) + batch_delta_output
            optimizer.zero_grad()
            loss = mse_loss(batch_output, target)
            loss.backward()
            batch_losses.append(loss.item())
            if grad_clip is not None:
                torch.nn.utils.clip_grad_norm_(net.parameters(), grad_clip)
            optimizer.step()
            # print('epoch: %d step: %d loss: %f' % (epoch, t_i, loss.item()))
            print('epoch: %d batch: %d loss: %f' % (epoch, b_i, loss.item()))
        batch_loss_data += batch_losses
        epoch_losses.append(round(np.average(batch_losses), 5))
        test_losses.append(round(test(net, test_data, len(test_data), cuda=cuda), 5))
        # test_perf = test_with_env(target_net, env, target_test_episodes, cuda=cuda)
        # test_perf_data.append(test_perf)
        # if (best_perf_i is None) or (test_perf_data[best_perf_i] <= test_perf_data[-1]) or test_perf_data[
        #         -1] == env.spec.reward_threshold:
        #     torch.save(net.state_dict(), model_path)
        #     print('Bottle Net Model Saved!')
        # if (best_perf_i is None) or (test_perf_data[best_perf_i] < test_perf_data[-1]):
        #     best_perf_i = len(test_perf_data) - 1
        #     print('Best Perf i updated')
        if (min_loss_i is None) or (test_losses[min_loss_i] > test_losses[-1]):
            min_loss_i = len(test_losses) - 1
            torch.save(net.state_dict(), model_path)
            print('min_loss_i updated')
            print('Bottle Net Model Saved!')
        # plot_data(verbose_data_dict(test_losses, epoch_losses, batch_loss_data, test_perf_data), plot_dir)
        plot_data(verbose_data_dict(test_losses, epoch_losses, batch_loss_data), plot_dir, env_name)
        # print('epoch: %d test loss: %f best perf i: %d min loss i: %d' % (epoch, test_losses[-1], best_perf_i,
        print('epoch: %d test loss: %f min loss i: %d' % (epoch, test_losses[-1], min_loss_i))
        if np.isnan(batch_losses[-1]):
            print('Batch Loss: Nan')
            break
        if ((len(test_losses) - 1 - min_loss_i) > 50) or (test_losses[-1] == 0):
            print('Test Loss hasn\'t improved in last 50 epochs' if test_losses[-1] != 0 else 'Zero Test Loss!!')
            print('Stopping!')
            break
    torch.save(net.state_dict(), model_path.replace("pongD_gru_model.p", "last_model.p"))
    net.load_state_dict(torch.load(model_path))
    return net
def test(net, data, batch_size, cuda=False):
    """
    Test the trained network.

    :param net: given network
    :param data: given data to test the network on
    :param batch_size: batch size
    :param cuda: check if cuda is available
    :return: test performance
    """
    mse_loss = nn.MSELoss().cuda() if cuda else nn.MSELoss()
    net.eval()
    batch_losses = []
    # total_batches = int(len(data) / batch_size)
    # if len(data) % batch_size != 0:
    #     total_batches += 1
    with torch.no_grad():
        for t_i in range(len(data)):
            # input = data[b_i:b_i + batch_size]
            input = data[t_i]
            # input = Variable(torch.FloatTensor(input))
            input = Variable(torch.FloatTensor(input).unsqueeze(0), requires_grad=True)
            target = Variable(torch.FloatTensor(input))
            if cuda:
                target, input = target.cuda(), input.cuda()
            batch_output, _ = net(input)
            loss = mse_loss(batch_output, target)
            batch_losses.append(float(loss.item()))
    return sum(batch_losses) / len(batch_losses)
def gru_nn_test(net, env, total_episodes, test_seeds=None, cuda=False, log=False, render=False, max_actions=5000):
    """
    Test the performance of the given network.

    :param net: trained Bottleneck GRU network
    :param env: environment
    :param total_episodes: number of episodes of testing
    :param test_seeds: test seeds
    :param cuda: check if cuda is available
    :param log: check to print out test log
    :param render: check to render environment
    :param max_actions: max number of actions
    :return: test performance on trained model
    """
    net.eval()
    total_reward = 0
    with torch.no_grad():
        for ep in range(total_episodes):
            obs = env.reset()
            done = False
            ep_reward = 0
            ep_actions = []
            hx = Variable(net.init_hidden())
            all_observations = [obs]
            action_count = 0
            while not done:
                # if render:
                #     env.render()
                obs = Variable(torch.Tensor(obs)).unsqueeze(0)
                if cuda:
                    obs, hx = obs.cuda(), hx.cuda()
                critic, logit, hx = net((obs, hx))
                prob = F.softmax(logit, dim=1)
                action = int(prob.max(1)[1].data.cpu().numpy())
                obs, reward, done, _ = env.step(action)
                action_count += 1
                done = done if action_count <= max_actions else True
                ep_actions.append(action)
                # A quick hack to prevent the agent from getting stuck
                max_same_action = 5000
                if action_count > max_same_action:
                    actions_to_consider = ep_actions[-max_same_action:]
                    if actions_to_consider.count(actions_to_consider[0]) == max_same_action:
                        done = True
                ep_reward += reward
                if not done:
                    all_observations.append(obs)
            total_reward += ep_reward
            if log:
                print('Episode =>{} Score=> {} Actions=> {} ActionCount=> {}'.format(ep, ep_reward, ep_actions, action_count))
    return total_reward / total_episodes
def verbose_data_dict(test_loss, reconstruction_epoch_losses, reconstruction_batch_losses, regularized_epoch_losses=None, regularized_batch_losses=None):
    """
    Makes data (losses and performance) into a dictionary for the sake of data plotting.

    :param test_loss: test performance
    :param reconstruction_epoch_losses: MSE and CE epoch loss
    :param reconstruction_batch_losses: MSE and CE batch loss
    :return: data info dictionary
    """
    if regularized_epoch_losses is not None and regularized_batch_losses is not None:
        data_dict = [
            {'title': "Test_Loss_vs_Epoch", 'data': test_loss, 'y_label': 'Loss(' + str(min(test_loss)) + ')',
             'x_label': 'Epoch'},
            {'title': "Reconstruction Loss_vs_Epoch", 'data': reconstruction_epoch_losses,
             'y_label': 'Loss(' + str(min(reconstruction_epoch_losses)) + ')',
             'x_label': 'Epoch'},
            {'title': "Regularized Loss_vs_Epoch", 'data': regularized_epoch_losses,
             'y_label': 'Loss(' + str(min(regularized_epoch_losses)) + ')',
             'x_label': 'Epoch'},
            {'title': "Reconstruction Loss_vs_Batches", 'data': reconstruction_batch_losses,
             'y_label': 'Loss(' + str(min(reconstruction_batch_losses)) + ')',
             'x_label': 'Batch'},
            {'title': "Regularized Loss_vs_Batches", 'data': regularized_batch_losses,
             'y_label': 'Loss(' + str(min(regularized_batch_losses)) + ')',
             'x_label': 'Batch'}
        ]
    else:
        data_dict = [
            {'title': "Test_Loss_vs_Epoch", 'data': test_loss, 'y_label': 'Loss(' + str(min(test_loss)) + ')',
             'x_label': 'Epoch'},
            {'title': "Construction Loss_vs_Epoch", 'data': reconstruction_epoch_losses,
             'y_label': 'Loss(' + str(min(reconstruction_epoch_losses)) + ')',
             'x_label': 'Epoch'},
            {'title': "Loss_vs_Batches", 'data': reconstruction_batch_losses,
             'y_label': 'Loss(' + str(min(reconstruction_batch_losses)) + ')',
             'x_label': 'Batch'}
        ]
    return data_dict
def gather_base_image(bottleneck_data_path):
    hx_train_data, hx_test_data, obs_train_data, obs_test_data = pickle.loads(open(bottleneck_data_path, "rb").read())
    numpied_obs = np.array(obs_train_data)
    avg_base = np.mean(numpied_obs, axis=0)
    return avg_base.tolist()
if __name__ == '__main__':
    episodes = 2
    gru_size = 32
    bhx_size = 64
    ox_size = 100
    input_c_features = 8 * 5 * 5
    bn_episodes = 10
    # bn_episodes = 1
    num_epoch = 400
    # num_epoch = 20
    bottleneck_data_path = "./resources/pongD_bottleneck_data.p"
    generate_max_steps = 10000

    env_name = "PongDeterministic-v4"
    env = atari_wrapper(env_name)
    obs = env.reset()

    gru_net_path = "./resources/pongD_gru_model.p"
    gru_net = GRUNet(len(obs), gru_size, int(env.action_space.n))
    gru_net.load_state_dict(torch.load(gru_net_path))
    gru_net.noise = False
    gru_net.eval()

    ox_net_path = "./resources/pongD_obs_model.p"
    ox_net = ObsQBNet(gru_net.input_c_features, ox_size)
    ox_net.load_state_dict(torch.load(ox_net_path))
    ox_net.eval()

    conv_ox_net = ConvObsQBNet(len(obs), ox_size)

    hx_train_data, hx_test_data, obs_train_data, obs_test_data = generate_bottleneck_data(gru_net, env, bn_episodes, bottleneck_data_path, cuda=False, eps=(0, 0.3), max_steps=generate_max_steps)

    for name, param in gru_net.state_dict().items():
        if name == "conv1.weight":
            conv_ox_net.conv_encoder[0].load_state_dict(gru_net.conv1.state_dict())
        elif name == "conv2.weight":
            conv_ox_net.conv_encoder[2].load_state_dict(gru_net.conv2.state_dict())
        elif name == "conv3.weight":
            conv_ox_net.conv_encoder[4].load_state_dict(gru_net.conv3.state_dict())
        elif name == "conv4.weight":
            conv_ox_net.conv_encoder[6].load_state_dict(gru_net.conv4.state_dict())

    for name, param in ox_net.state_dict().items():
        if name == "encoder.0.weight":
            conv_ox_net.linear_encoder[0].load_state_dict(ox_net.encoder[0].state_dict())
        elif name == "encoder.2.weight":
            conv_ox_net.linear_encoder[2].load_state_dict(ox_net.encoder[2].state_dict())
        elif name == "decoder.0.weight":
            conv_ox_net.linear_decoder[0].load_state_dict(ox_net.decoder[0].state_dict())
        elif name == "decoder.2.weight":
            conv_ox_net.linear_decoder[2].load_state_dict(ox_net.decoder[2].state_dict())

    optimizer = optim.Adam(conv_ox_net.parameters(), lr=1e-4, weight_decay=0)
    target_conv_ox_net = conv_ox_net
    base_image = gather_base_image(bottleneck_data_path)
    train(conv_ox_net, obs_train_data, obs_test_data, optimizer, "./resources/pongD_deconv_obs_model_v1.p", "./data", 32, num_epoch, target_conv_ox_net, cuda=False, grad_clip=None, env=env, low=0, high=0.05, target_test_episodes=1, base_background=None)
    # train(conv_ox_net, obs_train_data, obs_test_data, optimizer, "./resources/pongD_deconv_obs_model_v2.p", "./data", 32, num_epoch, target_conv_ox_net, cuda=False, grad_clip=10, env=env, low=0, high=0.05, target_test_episodes=1, base_background=base_image)
authors: ["mohamad4danesh@gmail.com"] | author_id: mohamad4danesh@gmail.com
blob_id: 3669c5113cce4c275dc8a863ede897e74d63e585 | directory_id: ef4aff3a1544e7f0ae615312cc8233f3682c03ed | path: /tensorflow/contrib/optimizer_v2/checkpointable_utils_test.py | content_id: 54bc23cdefab58bd84c378a2cf99327c48f0a3f1 | detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: linusmartensson/tensorflow | snapshot_id: ecbf85a9fefb6f0e8e4ed96b13a0498ca1aa33ee | revision_id: eb5db1ae67c5654aa5c91567fdee22182452972a | branch_name: refs/heads/master | visit_date: 2020-03-10T14:38:59.897325 | revision_date: 2018-04-13T17:07:38 | committer_date: 2018-04-13T17:07:38 | github_id: 129,431,840 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: Apache-2.0 | gha_event_created_at: 2018-04-13T17:08:37 | gha_created_at: 2018-04-13T17:08:36 | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 32,824 | extension: py
content:
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(josh11b): Forked from contrib/eager/python to test OptimizerV2 the same way
# OptimizerV1 is tested. This file should be removed once the fork is resolved.
import functools
import os
import six
from tensorflow.contrib.eager.python import checkpointable_utils
from tensorflow.contrib.optimizer_v2 import adam
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras._impl.keras.engine import training
from tensorflow.python.keras._impl.keras.layers import core
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import checkpointable
from tensorflow.python.training import saver as core_saver
from tensorflow.python.training import training_util
class NonLayerCheckpointable(checkpointable.Checkpointable):

  def __init__(self):
    super(NonLayerCheckpointable, self).__init__()
    self.a_variable = checkpointable_utils.add_variable(
        self, name="a_variable", shape=[])
# pylint: disable=not-callable
class MyModel(training.Model):
  """A concrete Model for testing."""

  def __init__(self):
    super(MyModel, self).__init__()
    self._named_dense = core.Dense(1, use_bias=True)
    self._second = core.Dense(1, use_bias=False)
    # We can still track Checkpointables which aren't Layers.
    self._non_layer = NonLayerCheckpointable()

  def call(self, values):
    ret = self._second(self._named_dense(values))
    return ret
class _MirroringSaveable(
    core_saver.BaseSaverBuilder.ResourceVariableSaveable):

  def __init__(self, primary_variable, mirrored_variable, name):
    self._primary_variable = primary_variable
    self._mirrored_variable = mirrored_variable
    super(_MirroringSaveable, self).__init__(
        self._primary_variable, "", name)

  def restore(self, restored_tensors, restored_shapes):
    """Restore the same value into both variables."""
    tensor, = restored_tensors
    return control_flow_ops.group(
        self._primary_variable.assign(tensor),
        self._mirrored_variable.assign(tensor))
class _OwnsMirroredVariables(checkpointable.CheckpointableBase):
  """A Checkpointable object which returns a more complex SaveableObject."""

  def __init__(self):
    self.non_dep_variable = variable_scope.get_variable(
        name="non_dep_variable", initializer=6., use_resource=True)
    self.mirrored = variable_scope.get_variable(
        name="mirrored", initializer=15., use_resource=True)

  def _gather_saveables_for_checkpoint(self):
    def _saveable_factory(name=self.non_dep_variable.name):
      return _MirroringSaveable(
          primary_variable=self.non_dep_variable,
          mirrored_variable=self.mirrored,
          name=name)
    return {checkpointable.VARIABLE_VALUE_KEY: _saveable_factory}

  # The Saver sorts by name before parsing, so we need a name property.
  @property
  def name(self):
    return self.non_dep_variable.name
class CheckpointingTests(test.TestCase):

  @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
  def testNamingWithOptimizer(self):
    input_value = constant_op.constant([[3.]])
    model = MyModel()
    # A nuisance Model using the same optimizer. Its slot variables should not
    # go in the checkpoint, since it is never depended on.
    other_model = MyModel()
    optimizer = adam.AdamOptimizer(0.001)
    optimizer_step = training_util.get_or_create_global_step()
    root_checkpointable = checkpointable_utils.Checkpoint(
        optimizer=optimizer, model=model, optimizer_step=optimizer_step)
    if context.executing_eagerly():
      optimizer.minimize(
          lambda: model(input_value),
          global_step=optimizer_step)
      optimizer.minimize(
          lambda: other_model(input_value),
          global_step=optimizer_step)
    else:
      train_op = optimizer.minimize(
          model(input_value), global_step=optimizer_step)
      optimizer.minimize(
          other_model(input_value),
          global_step=optimizer_step)
      self.evaluate(checkpointable_utils.gather_initializers(
          root_checkpointable))
      self.evaluate(train_op)
    named_variables, serialized_graph = (
        checkpointable_utils._serialize_object_graph(root_checkpointable))
    expected_checkpoint_names = (
        # Created in the root node, so no prefix.
        "optimizer_step",
        "model/_second/kernel",
        "model/_named_dense/kernel",
        "model/_named_dense/bias",
        # non-Layer dependency of the model
        "model/_non_layer/a_variable",
        # The optimizer creates two non-slot variables
        "optimizer/beta1_power",
        "optimizer/beta2_power",
        # Slot variables
        "model/_second/kernel/.OPTIMIZER_SLOT/optimizer/m",
        "model/_second/kernel/.OPTIMIZER_SLOT/optimizer/v",
        "model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m",
        "model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/v",
        "model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/m",
        "model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/v",
    )
    suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
    expected_checkpoint_names = [
        name + suffix for name in expected_checkpoint_names]
    six.assertCountEqual(self, expected_checkpoint_names,
                         named_variables.keys())
    # Check that we've mapped to the right variable objects (not exhaustive)
    self.assertEqual(
        "global_step:0",
        named_variables["optimizer_step" + suffix].name)
    self.assertEqual(
        "my_model/dense_1/kernel:0",
        named_variables["model/_second/kernel" + suffix].name)
    self.assertEqual(
        "my_model/dense/kernel:0",
        named_variables["model/_named_dense/kernel" + suffix].name)
    self.assertEqual(
        "beta1_power:0",
        named_variables["optimizer/beta1_power" + suffix].name)
    self.assertEqual(
        "beta2_power:0",
        named_variables["optimizer/beta2_power" + suffix].name)
    # Spot check the generated protocol buffers.
    self.assertEqual("optimizer",
                     serialized_graph.nodes[0].children[1].local_name)
    optimizer_node = serialized_graph.nodes[serialized_graph.nodes[0].children[
        1].node_id]
    self.assertEqual("beta1_power",
                     optimizer_node.children[0].local_name)
    self.assertEqual("beta1_power",
                     serialized_graph.nodes[optimizer_node.children[0].node_id]
                     .attributes[0].full_name)
    self.assertEqual(
        "my_model/dense/kernel",
        serialized_graph.nodes[optimizer_node.slot_variables[0]
                               .original_variable_node_id]
        .attributes[0].full_name)
    # We strip off the :0 suffix, as variable.name-based saving does.
    self.assertEqual(
        "my_model/dense/kernel/Adam",
        serialized_graph.nodes[optimizer_node.slot_variables[0]
                               .slot_variable_node_id]
        .attributes[0].full_name)
    self.assertEqual(
        "my_model/dense/kernel/Adam:0",
        optimizer.get_slot(
            var=named_variables["model/_named_dense/kernel" + suffix],
            name="m").name)
    self.assertEqual(
        "model/_named_dense/kernel" + suffix,
        serialized_graph.nodes[
            optimizer_node.slot_variables[0]
            .original_variable_node_id].attributes[0].checkpoint_key)
    self.assertEqual("m", optimizer_node.slot_variables[0].slot_name)
    self.assertEqual(
        "model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m" + suffix,
        serialized_graph.nodes[
            optimizer_node.slot_variables[0]
            .slot_variable_node_id].attributes[0].checkpoint_key)
  @test_util.run_in_graph_and_eager_modes()
  def testSaveRestore(self):
    model = MyModel()
    optimizer = adam.AdamOptimizer(0.001)
    root_checkpointable = checkpointable_utils.Checkpoint(
        optimizer=optimizer, model=model)
    input_value = constant_op.constant([[3.]])
    if context.executing_eagerly():
      optimizer.minimize(
          lambda: model(input_value))
    else:
      train_op = optimizer.minimize(model(input_value))
      # TODO(allenl): Make initialization more pleasant when graph building.
      root_checkpointable.save_counter  # pylint: disable=pointless-statement
      self.evaluate(checkpointable_utils.gather_initializers(
          root_checkpointable))
      self.evaluate(train_op)
    prefix = os.path.join(self.get_temp_dir(), "ckpt")
    self.evaluate(state_ops.assign(model._named_dense.variables[1], [42.]))
    m_bias_slot = optimizer.get_slot(model._named_dense.variables[1], "m")
    self.evaluate(state_ops.assign(m_bias_slot, [1.5]))
    save_path = root_checkpointable.save(file_prefix=prefix)
    self.evaluate(state_ops.assign(model._named_dense.variables[1], [43.]))
    self.evaluate(state_ops.assign(root_checkpointable.save_counter, 3))
    optimizer_variables = self.evaluate(optimizer.variables())
    self.evaluate(state_ops.assign(m_bias_slot, [-2.]))
    # Immediate restoration
    status = root_checkpointable.restore(save_path=save_path).assert_consumed()
    status.run_restore_ops()
    self.assertAllEqual([42.], self.evaluate(model._named_dense.variables[1]))
    self.assertAllEqual(1, self.evaluate(root_checkpointable.save_counter))
    self.assertAllEqual([1.5], self.evaluate(m_bias_slot))
    if not context.executing_eagerly():
      return  # Restore-on-create is only supported when executing eagerly
    on_create_model = MyModel()
    on_create_optimizer = adam.AdamOptimizer(
        0.001,
        # Preserve beta1_power and beta2_power when applying gradients so we
        # can test that they've been restored correctly.
        beta1=1.0, beta2=1.0)
    on_create_root = checkpointable_utils.Checkpoint(
        optimizer=on_create_optimizer, model=on_create_model)
    # Deferred restoration
    status = on_create_root.restore(save_path=save_path)
    on_create_model(constant_op.constant([[3.]]))  # create variables
    self.assertAllEqual(1, self.evaluate(on_create_root.save_counter))
    self.assertAllEqual([42.],
                        self.evaluate(
                            on_create_model._named_dense.variables[1]))
    on_create_m_bias_slot = on_create_optimizer.get_slot(
        on_create_model._named_dense.variables[1], "m")
    # Optimizer slot variables are created when the original variable is
    # restored.
    self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot))
    self.assertAllEqual(optimizer_variables[2:],
                        self.evaluate(on_create_optimizer.variables()))
    dummy_var = resource_variable_ops.ResourceVariable([1.])
    on_create_optimizer.minimize(loss=dummy_var.read_value)
    status.assert_consumed()
    beta1_power, beta2_power = on_create_optimizer._get_beta_accumulators()
    self.assertAllEqual(optimizer_variables[0], self.evaluate(beta1_power))
    self.assertAllEqual(optimizer_variables[1], self.evaluate(beta2_power))
  # TODO(allenl): Debug garbage created by this test in python3.
  def testDeferredRestorationUsageEager(self):
    """An idiomatic eager execution example."""
    num_training_steps = 10
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    for training_continuation in range(3):
      model = MyModel()
      optimizer = adam.AdamOptimizer(0.001)
      root = checkpointable_utils.Checkpoint(
          optimizer=optimizer, model=model,
          optimizer_step=training_util.get_or_create_global_step())
      root.restore(core_saver.latest_checkpoint(checkpoint_directory))
      for _ in range(num_training_steps):
        # TODO(allenl): Use a Dataset and serialize/checkpoint it.
        input_value = constant_op.constant([[3.]])
        optimizer.minimize(
            lambda: model(input_value),  # pylint: disable=cell-var-from-loop
            global_step=root.optimizer_step)
      root.save(file_prefix=checkpoint_prefix)
      self.assertEqual((training_continuation + 1) * num_training_steps,
                       root.optimizer_step.numpy())
  def testUsageGraph(self):
    """Expected usage when graph building."""
    with context.graph_mode():
      num_training_steps = 10
      checkpoint_directory = self.get_temp_dir()
      checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
      for training_continuation in range(3):
        with ops.Graph().as_default():
          model = MyModel()
          optimizer = adam.AdamOptimizer(0.001)
          root = checkpointable_utils.Checkpoint(
              optimizer=optimizer, model=model,
              global_step=training_util.get_or_create_global_step())
          input_value = constant_op.constant([[3.]])
          train_op = optimizer.minimize(
              model(input_value),
              global_step=root.global_step)
          checkpoint_path = core_saver.latest_checkpoint(checkpoint_directory)
          with self.test_session(graph=ops.get_default_graph()) as session:
            status = root.restore(save_path=checkpoint_path)
            status.initialize_or_restore(session=session)
            if checkpoint_path is None:
              self.assertEqual(0, training_continuation)
              with self.assertRaises(AssertionError):
                status.assert_consumed()
            else:
              status.assert_consumed()
            for _ in range(num_training_steps):
              session.run(train_op)
            root.save(file_prefix=checkpoint_prefix, session=session)
            self.assertEqual((training_continuation + 1) * num_training_steps,
                             session.run(root.global_step))
            self.assertEqual(training_continuation + 1,
                             session.run(root.save_counter))
  @test_util.run_in_graph_and_eager_modes()
  def testAgnosticUsage(self):
    """Graph/eager agnostic usage."""
    # Does create garbage when executing eagerly due to ops.Graph() creation.
    num_training_steps = 10
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    for training_continuation in range(3):
      with ops.Graph().as_default(), self.test_session(
          graph=ops.get_default_graph()), test_util.device(use_gpu=True):
        model = MyModel()
        optimizer = adam.AdamOptimizer(0.001)
        root = checkpointable_utils.Checkpoint(
            optimizer=optimizer, model=model,
            global_step=training_util.get_or_create_global_step())
        checkpoint_path = core_saver.latest_checkpoint(checkpoint_directory)
        status = root.restore(save_path=checkpoint_path)
        input_value = constant_op.constant([[3.]])
        train_fn = functools.partial(
            optimizer.minimize,
            functools.partial(model, input_value),
            global_step=root.global_step)
        if not context.executing_eagerly():
          train_fn = functools.partial(self.evaluate, train_fn())
        status.initialize_or_restore()
        for _ in range(num_training_steps):
          train_fn()
        root.save(file_prefix=checkpoint_prefix)
        self.assertEqual((training_continuation + 1) * num_training_steps,
                         self.evaluate(root.global_step))
        self.assertEqual(training_continuation + 1,
                         self.evaluate(root.save_counter))
  # pylint: disable=cell-var-from-loop
  @test_util.run_in_graph_and_eager_modes()
  def testWithDefun(self):
    num_training_steps = 2
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    for training_continuation in range(3):
      with ops.Graph().as_default(), self.test_session(
          graph=ops.get_default_graph()), test_util.device(use_gpu=True):
        model = MyModel()
        # Don't actually train so we can test variable values
        optimizer = adam.AdamOptimizer(0.)
        root = checkpointable_utils.Checkpoint(
            optimizer=optimizer, model=model,
            global_step=training_util.get_or_create_global_step())
        checkpoint_path = core_saver.latest_checkpoint(checkpoint_directory)
        status = root.restore(save_path=checkpoint_path)

        def train_fn():
          @function.defun
          def _call_model(x):
            return model(x)
          with backprop.GradientTape() as tape:
            loss = _call_model(constant_op.constant([[3.]]))
          gradients = tape.gradient(loss, model.variables)
          return optimizer.apply_gradients(zip(gradients, model.variables),
                                           global_step=root.global_step)
        if not context.executing_eagerly():
          train_fn = functools.partial(
              self.evaluate, train_fn())
        status.initialize_or_restore()
        for _ in range(num_training_steps):
          train_fn()
        if training_continuation > 0:
          status.assert_consumed()
          self.assertAllClose([[42.]], self.evaluate(model.variables[0]))
        else:
          self.evaluate(model.variables[0].assign([[42.]]))
        root.save(file_prefix=checkpoint_prefix)
        self.assertEqual((training_continuation + 1) * num_training_steps,
                         self.evaluate(root.global_step))
        self.assertEqual(training_continuation + 1,
                         self.evaluate(root.save_counter))
  # pylint: enable=cell-var-from-loop
  def _get_checkpoint_name(self, name):
    root = checkpointable.Checkpointable()
    checkpointable_utils.add_variable(
        root, name=name, shape=[1, 2], dtype=dtypes.float64)
    named_variables, _ = checkpointable_utils._serialize_object_graph(root)
    checkpoint_name, = named_variables.keys()
    with ops.name_scope("root/" + checkpoint_name):
      pass  # Make sure we can use this as an op name if we prefix it.
    return checkpoint_name
  def testAnonymousVarsInInit(self):

    class Model(training.Model):

      def __init__(self):
        super(Model, self).__init__()
        self.w = resource_variable_ops.ResourceVariable(0.0)
        self.b = resource_variable_ops.ResourceVariable(0.0)
        self.vars = [self.w, self.b]

      def call(self, x):
        return x * self.w + self.b

    with context.eager_mode():
      model = Model()
      optimizer = adam.AdamOptimizer(learning_rate=0.05)
      checkpoint_directory = self.get_temp_dir()
      checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
      checkpoint = checkpointable_utils.Checkpoint(
          model=model, optimizer=optimizer)
      for _ in range(2):
        checkpoint.save(checkpoint_prefix)
        with backprop.GradientTape() as tape:
          loss = (constant_op.constant(1.)
                  - model(constant_op.constant(1.))) ** 2
        grad = tape.gradient(loss, model.vars)
        optimizer.apply_gradients(
            [(g, v) for g, v in zip(grad, model.vars)])
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testDeferredSlotRestoration(self):
checkpoint_directory = self.get_temp_dir()
root = checkpointable.Checkpointable()
root.var = checkpointable_utils.add_variable(
root, name="var", initializer=0.)
optimizer = adam.AdamOptimizer(0.1)
if context.executing_eagerly():
optimizer.minimize(root.var.read_value)
else:
train_op = optimizer.minimize(root.var)
# Note that `optimizer` has not been added as a dependency of
# `root`. Create a one-off grouping so that slot variables for `root.var`
# get initialized too.
self.evaluate(checkpointable_utils.gather_initializers(
checkpointable_utils.Checkpoint(root=root, optimizer=optimizer)))
self.evaluate(train_op)
self.evaluate(state_ops.assign(root.var, 12.))
no_slots_path = checkpointable_utils.CheckpointableSaver(root).save(
os.path.join(checkpoint_directory, "no_slots"))
root.optimizer = optimizer
self.evaluate(state_ops.assign(root.var, 13.))
self.evaluate(state_ops.assign(optimizer.get_slot(name="m", var=root.var),
14.))
slots_path = checkpointable_utils.CheckpointableSaver(root).save(
os.path.join(checkpoint_directory, "with_slots"))
new_root = checkpointable.Checkpointable()
# Load the slot-containing checkpoint (deferred), then immediately overwrite
# the non-slot variable (also deferred).
slot_status = checkpointable_utils.CheckpointableSaver(
new_root).restore(slots_path)
no_slot_status = checkpointable_utils.CheckpointableSaver(
new_root).restore(no_slots_path)
with self.assertRaises(AssertionError):
no_slot_status.assert_consumed()
new_root.var = checkpointable_utils.add_variable(
new_root, name="var", shape=[])
no_slot_status.assert_consumed()
no_slot_status.run_restore_ops()
self.assertEqual(12., self.evaluate(new_root.var))
new_root.optimizer = adam.AdamOptimizer(0.1)
with self.assertRaisesRegexp(AssertionError, "beta1_power"):
slot_status.assert_consumed()
self.assertEqual(12., self.evaluate(new_root.var))
if context.executing_eagerly():
# Slot variables are only created with restoring initializers when
# executing eagerly.
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(name="m", var=new_root.var)))
else:
self.assertIs(new_root.optimizer.get_slot(name="m", var=new_root.var),
None)
if context.executing_eagerly():
new_root.optimizer.minimize(new_root.var.read_value)
else:
train_op = new_root.optimizer.minimize(new_root.var)
# The slot variable now exists; restore() didn't create it, but we should
# now have a restore op for it.
slot_status.run_restore_ops()
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(name="m", var=new_root.var)))
self.evaluate(train_op)
slot_status.assert_consumed()
def testManySavesGraph(self):
"""Saves after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.test_session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = checkpointable.Checkpointable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
obj.opt = adam.AdamOptimizer(0.1)
obj.opt.minimize(obj.var.read_value())
self.evaluate(checkpointable_utils.gather_initializers(obj))
saver = checkpointable_utils.CheckpointableSaver(obj)
saver.save(checkpoint_prefix)
before_ops = graph.get_operations()
saver.save(checkpoint_prefix)
self.assertEqual(before_ops, graph.get_operations())
def testManyRestoresGraph(self):
"""Restores after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.test_session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = checkpointable.Checkpointable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
obj.opt = adam.AdamOptimizer(0.1)
obj.opt.minimize(obj.var.read_value())
self.evaluate(checkpointable_utils.gather_initializers(obj))
saver = checkpointable_utils.CheckpointableSaver(obj)
save_path = saver.save(checkpoint_prefix)
saver.restore(save_path)
before_ops = graph.get_operations()
saver.restore(save_path)
self.assertEqual(before_ops, graph.get_operations())
def testMultipleGraphsNonSlotVariables(self):
with context.graph_mode():
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
optimizer = adam.AdamOptimizer(0.001)
# Construct a model in one graph
first_graph = ops.Graph()
first_session = session_lib.Session(graph=first_graph)
with first_graph.as_default(), first_session.as_default():
first_variable = resource_variable_ops.ResourceVariable([1.])
first_root_checkpointable = checkpointable_utils.Checkpoint(
optimizer=optimizer, variable=first_variable)
train_op = optimizer.minimize(first_variable.read_value)
self.evaluate(checkpointable_utils.gather_initializers(
first_root_checkpointable))
self.evaluate(train_op)
self.evaluate(first_variable.assign([1.]))
self.evaluate(optimizer.get_slot(
var=first_variable, name="m").assign([2.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(3.))
# Save and load in a second graph
second_graph = ops.Graph()
with second_graph.as_default(), session_lib.Session(graph=second_graph):
second_variable = resource_variable_ops.ResourceVariable([1.])
second_root_checkpointable = checkpointable_utils.Checkpoint(
optimizer=optimizer, variable=second_variable)
train_op = optimizer.minimize(second_variable.read_value)
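        # restore(None) returns a status object with no checkpoint to load,
        # so initialize_or_restore() below falls back to running initializers.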
second_root_checkpointable.restore(None).initialize_or_restore()
self.evaluate(train_op)
self.evaluate(second_variable.assign([4.]))
self.evaluate(optimizer.get_slot(
var=second_variable, name="m").assign([5.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(6.))
save_path = second_root_checkpointable.save(checkpoint_prefix)
self.evaluate(second_variable.assign([7.]))
self.evaluate(optimizer.get_slot(
var=second_variable, name="m").assign([8.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(6., self.evaluate(beta1_power))
status = second_root_checkpointable.restore(save_path)
status.assert_consumed().run_restore_ops()
self.assertAllEqual([4.], self.evaluate(second_variable))
self.assertAllEqual([5.], self.evaluate(optimizer.get_slot(
var=second_variable, name="m")))
beta1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(6., self.evaluate(beta1_power))
# Check that the first graph is unmolested
with first_graph.as_default(), first_session.as_default():
self.assertAllEqual([1.], self.evaluate(first_variable))
self.assertAllEqual([2.], self.evaluate(optimizer.get_slot(
var=first_variable, name="m")))
beta1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(3., self.evaluate(beta1_power))
class CheckpointCompatibilityTests(test.TestCase):
def _initialized_model(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
optimizer_step = training_util.get_or_create_global_step()
root_checkpointable = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step)
train_op = optimizer.minimize(
functools.partial(model, input_value),
global_step=optimizer_step)
self.evaluate(checkpointable_utils.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
# A regular variable, a slot variable, and a non-slot Optimizer variable
# with known values to check when loading.
self.evaluate(model._named_dense.bias.assign([1.]))
self.evaluate(optimizer.get_slot(
var=model._named_dense.bias, name="m").assign([2.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(3.))
return root_checkpointable
def _set_sentinels(self, root_checkpointable):
self.evaluate(root_checkpointable.model._named_dense.bias.assign([101.]))
self.evaluate(
root_checkpointable.optimizer.get_slot(
var=root_checkpointable.model._named_dense.bias, name="m")
.assign([102.]))
beta1_power, _ = root_checkpointable.optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(103.))
def _check_sentinels(self, root_checkpointable):
self.assertAllEqual(
[1.], self.evaluate(root_checkpointable.model._named_dense.bias))
self.assertAllEqual([2.], self.evaluate(
root_checkpointable.optimizer.get_slot(
var=root_checkpointable.model._named_dense.bias, name="m")))
beta1_power, _ = root_checkpointable.optimizer._get_beta_accumulators()
self.assertAllEqual(3., self.evaluate(beta1_power))
def _write_name_based_checkpoint(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(
graph=save_graph) as session:
root = self._initialized_model()
name_saver = core_saver.Saver()
return name_saver.save(
sess=session, save_path=checkpoint_prefix,
global_step=root.optimizer_step)
@test_util.run_in_graph_and_eager_modes()
def testLoadFromNameBasedSaver(self):
"""Save a name-based checkpoint, load it using the object-based API."""
with test_util.device(use_gpu=True):
save_path = self._write_name_based_checkpoint()
root = self._initialized_model()
self._set_sentinels(root)
with self.assertRaises(AssertionError):
self._check_sentinels(root)
object_saver = checkpointable_utils.CheckpointableSaver(root)
status = object_saver.restore(save_path)
with self.assertRaises(AssertionError):
status.assert_consumed()
status.run_restore_ops()
self._check_sentinels(root)
self._set_sentinels(root)
status.initialize_or_restore()
self._check_sentinels(root)
# TODO(allenl): Test for the core name-based saver loading object-based
# checkpoints once object-based checkpointing is in core.
def testSaveGraphLoadEager(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(
graph=save_graph) as session:
root = self._initialized_model()
object_saver = checkpointable_utils.CheckpointableSaver(root)
save_path = object_saver.save(
session=session, file_prefix=checkpoint_prefix)
with context.eager_mode():
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed()
self._check_sentinels(root)
def testSaveEagerLoadGraph(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.eager_mode():
root = self._initialized_model()
object_saver = checkpointable_utils.CheckpointableSaver(root)
save_path = object_saver.save(file_prefix=checkpoint_prefix)
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(
graph=save_graph):
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed().run_restore_ops()
self._check_sentinels(root)
if __name__ == "__main__":
test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
2bd43d18862d348ebdf02e18618f7228b7bb0b65
|
bcfc9977a83b61a8af4ff3ebda68e863e1ea4efa
|
/JDCode.py
|
68c2786ed42b47fdf0453b83bc7c1ab963ac4ade
|
[] |
no_license
|
dataman105/update_table_upload_download
|
0ddff67a18477f9c31ded0140c8152648fc40bfc
|
5098059b4b5a31c0a9330d6b1be0ed1e4e999c43
|
refs/heads/master
| 2020-04-18T05:24:55.000349
| 2019-01-25T09:02:29
| 2019-01-25T09:02:29
| 167,278,038
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,866
|
py
|
import requests
import re
import time
import random
import json
import pymysql
import urllib.request
### Full address data
ip='111.72.107.240:36410'
headers={
'user-agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
'accept-encoding':'gzip, deflate, br'
}
oriurl='https://search.jd.com/Search?keyword=%E8%9C%82%E8%9C%9C&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%E8%9C%82%E8%9C%9C&stock=1&page=3&s=60&click=0'
def GetComment(url,j,ip):
    print('--------- Collecting product comments')
url = url.replace("page=3", "page=" + str(j))
r = requests.get(url,proxies={'http':ip},headers=headers)
ProductCodes = re.findall('href="//item.jd.com/(.*?)\.html', r.text)
ProductCodes = list(set(ProductCodes))
with open('comment.txt', 'a', encoding='utf-8') as ff:
for ProductCode in ProductCodes:
try:
e = 'https://sclub.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98vv15375&productId='
f = '&score=0&sortType=5&page='
r2 = '&score=0&sortType=5&page=0&pageSize=10&isShadowSku=0&fold=1'
r3 = requests.get(e + str(ProductCode) + r2,proxies={'http':ip},headers=headers)
data2 = json.loads(r3.text[27:-2])
page = data2["maxPage"]
for k in range(0, page):
h = '&pageSize=10&isShadowSku=0&fold=1'
html = urllib.request.urlopen(e + str(ProductCode) + f + str(k) + h).read().decode('gbk')
data3 = json.loads(html[27:-2])
for i in data3['comments']:
ProductCode = str(ProductCode)
try:
productColor = i['productColor']
content = i['content']
referenceTime = i['referenceTime']
except:
productColor = '0'
content = '0'
referenceTime = '0'
ff.write(str(ProductCode)+' '+str(content)+' '+str(productColor)+' '+str(referenceTime)+'\n')
except:
continue
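# Note: the r3.text[27:-2] slices above strip the JSON-P callback wrapper of
# JD's comment API, e.g. (hypothetical): fetchJSON_comment98vv15375({...});
# A sketch of a less offset-dependent way to unwrap it, assuming the payload
# is a single JSON object inside the callback parentheses:
def strip_jsonp(text):
    start = text.index('(') + 1
    end = text.rindex(')')
    return json.loads(text[start:end])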
def GetContent(url,j,BigType,SmallType1,ip):
    print('--------- Collecting product information')
url = url.replace("page=3", "page=" + str(j))
r = requests.get(url,proxies={'http':ip},headers=headers)
ProductCodes = re.findall('href="//item.jd.com/(.*?)\.html', r.text)
ProductCodes = set(ProductCodes)
with open('content.txt', 'a', encoding='utf-8') as fff:
for ProductCode in ProductCodes:
href = 'https://item.jd.com/' + str(ProductCode) + '.html'
r = requests.get(href,proxies={'http':ip},headers=headers)
html = r.text
            # Product name
            ProductName = re.findall(r'<div class="sku-name".*?>(.*?)</', r.text, re.S)[0].replace(' ','').replace('\n','') if re.findall(r'<div class="sku-name".*?>(.*?)</', r.text, re.S) else ''
# print(ProductName)
            # Product introduction
try:
result1 = re.findall(r'<ul class="parameter2 p-parameter-list">(.*?)</ul>.*?<p class="more-par">', html, re.S)[
0]
result2 = re.findall('>(.*?)<', result1)
introduction = {}
for param in result2:
if ':' in param:
p1 = param.split(':')
introduction[p1[0]] = p1[1]
except:
introduction = {}
            # Specifications and packaging
try:
result3 = re.findall(r'<div class="Ptable-item">(.*?)<div class="package-list">', html, re.S)[0]
result4 = re.findall('<dt>(.*?)</dd>', result3)
Packaging = {}
for param in result4:
if '</dt><dd>' in param:
p1 = param.split('</dt><dd>')
Packaging[p1[0]] = p1[1]
except:
Packaging = {}
            # Review counts by category
urlLeft1 = 'https://club.jd.com/comment/productCommentSummaries.action?referenceIds='
urlRight1 = '&callback=jQuery600824&_=1534812709081'
url = urlLeft1 + ProductCode + urlRight1
html1 = requests.get(url,proxies={'http':ip},headers=headers)
data1 = json.loads(html1.text[31:-4])
evaluations = {}
evaluations["GoodRate"] = data1["GoodRate"]
evaluations["CommentCountStr"] = data1["CommentCountStr"]
evaluations["AfterCountStr"] = data1["AfterCountStr"]
evaluations["GoodCountStr"] = data1["GoodCountStr"]
evaluations["GeneralCountStr"] = data1["GeneralCountStr"]
evaluations["PoorCountStr"] = data1["PoorCountStr"]
            # Price
urlLeft = 'https://p.3.cn/prices/mgets?callback=jQuery8554643&type=1&area=24_2144_21037_21082&pdtk=&pduid=460357822&pdpin=&pin=null&pdbp=0&skuIds=J_'
urlRight = '&ext=11100000&source=item-pc'
url1 = urlLeft + ProductCode + urlRight
            for i in range(3):
                try:
                    html2 = requests.get(url1,proxies={'http':ip},headers=headers)
                    x = json.loads(html2.text[15:-4])
                    price = x["p"]
                    break  # stop retrying once the price was fetched; otherwise a later failure would overwrite it
                except:
                    price = '无'  # '无' means "none" / price unavailable
fff.write(str(ProductCode)+' '+str(ProductName)+' '+str(price)+' '+str(Packaging)+' '+str(introduction)+' '+str(evaluations)+' '+str(BigType)+' '+str(SmallType1)+'\n')
def GetJdDatacomment(ip):
url=oriurl
r = requests.get(url,proxies={'http':ip},headers=headers)
npage = re.findall('<b>.*?</b><em>/</em><i>(.*?)</i>', r.text, re.S)[0]
    ### Determine the paging style of the URL: '%' marks odd-numbered paging, otherwise pages are sequential
if '%' in url:
for j in range(int(npage)*2):
if j%2==1:
GetComment(url, j, ip)
else:
for j in range(int(npage)):
GetComment(url, j,ip)
def GetJdDatacontent(BigType, SmallType, ip):
url=oriurl
r = requests.get(url,proxies={'http':ip},headers=headers)
npage = re.findall('<b>.*?</b><em>/</em><i>(.*?)</i>', r.text, re.S)[0]
print(npage)
    ### Determine the paging style of the URL: '%' marks odd-numbered paging, otherwise pages are sequential
if '%' in url:
for j in range(int(npage)*2):
if j%2==1:
GetContent(url,j,BigType,SmallType,ip)
else:
for j in range(int(npage)):
GetContent(url,j,BigType,SmallType,ip)
while 1:
try:
        print('Crawling -- JD content')
GetJdDatacontent('蜂蜜', '蜂蜜', ip)
except:
        print('Crawling -- JD comments')
GetJdDatacomment(ip)
|
[
"398769187@qq.com"
] |
398769187@qq.com
|
2be7a8a9ee715680ffbf093d6346beccfab3be45
|
e83eb457ac7a9a3da8fac20dd0313e2794e798b0
|
/app/sudokugame.py
|
8ce179e7e54ff3001da5c302442192b9dc02bf2a
|
[] |
no_license
|
atfelix/sudoku
|
a2669286035d59f24b4520d9f39107f0973ed84a
|
c5c645b26704f187021852625504287f66800312
|
refs/heads/master
| 2021-01-18T13:12:16.193951
| 2017-02-24T20:32:36
| 2017-02-24T20:32:36
| 80,746,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,199
|
py
|
# file: gui/sudokugame.py
# author: Adam Felix
# help: new coder tutorials
from constants import *
class SudokuError(Exception):
"""
An application specific error.
"""
pass
class SudokuBoard(object):
"""
Sudoku Board representation
"""
def __init__(self, board_file):
self.board = self.__create_board(board_file)
def __create_board(self, board_file):
board = []
for line in board_file:
line = line.strip()
if len(line) != 9:
raise SudokuError(
'Each line in the sudoku puzzle must be 9 chars long.'
)
board.append([])
for ch in line:
if not ch.isdigit():
raise SudokuError(
'Valid characters for the puzzle must be 0-9.'
)
board[-1].append(int(ch))
if len(board) != 9:
raise SudokuError('Each puzzle must be 9 lines long.')
return board
class SudokuGame(object):
"""
A Sudoku game, in charge of storing the state of the board
and checking whether the puzzle is completed.
"""
def __init__(self, board_file):
self.board_file = board_file
self.start_puzzle = SudokuBoard(board_file).board
self.start()
self.entries = []
for i in range(9):
self.entries.append([])
for j in range(9):
self.entries[-1].append([-1] * 9)
self.__find_permissible_entries()
def start(self):
self.game_over = False
self.puzzle = []
self.__reset_entries()
def __bool__(self):
return self.game_over
def get_entry(self, row, column, number=-1):
if number == -1:
return self.entries[row][column]
else:
return self.entries[row][column][number]
def get_puzzle_entry(self, row, column):
return self.puzzle[row][column]
def get_start_puzzle_entry(self, row, column):
return self.start_puzzle[row][column]
def set_puzzle_entry(self, row, column, number):
self.puzzle[row][column] = number
def __reset_entries(self):
self.entries = []
for i in range(9):
self.puzzle.append([])
self.entries.append([])
for j in range(9):
self.puzzle[i].append(self.start_puzzle[i][j])
self.entries[-1].append([-1] * 9)
self.__find_permissible_entries()
def update_entries(self):
for i in range(9):
for j in range(9):
if self.start_puzzle[i][j] != 0:
continue
if self.puzzle[i][j] != 0:
for k in range(9):
if k + 1 != self.puzzle[i][j] and self.entries[i][j][k] != 0:
self.entries[i][j][k] = 2
self.entries[i][j][self.puzzle[i][j] - 1] = 1
for i in range(9):
for j in range(9):
if self.start_puzzle[i][j] != 0 or self.puzzle[i][j] != 0:
continue
for value in range(9):
if self.entries[i][j][value] in [0, 3]:
continue
if self.entries[i][j][value] == 1:
for row in range(9):
if row == i:
continue
if self.puzzle[row][j] == value + 1:
self.entries[i][j][value] = 2
for col in range(9):
if col == j:
continue
if self.puzzle[i][col] == value + 1:
self.entries[i][j][value] = 2
_row, _col = i // 3, j // 3
for ii in range(3):
new_row = 3 * _row + ii
for jj in range(3):
new_col = 3 * _col + jj
if new_row == i and new_col == j:
continue
if self.puzzle[new_row][new_col] == value + 1:
self.entries[i][j][value] = 2
if self.entries[i][j][value] == 2:
change = True
for row in range(9):
if row == i:
continue
if self.puzzle[row][j] == value + 1:
change &= False
for col in range(9):
if col == j:
continue
if self.puzzle[i][col] == value + 1:
change &= False
_row, _col = i // 3, j // 3
for ii in range(3):
new_row = 3 * _row + ii
for jj in range(3):
new_col = 3 * _col + jj
if new_row == i and new_col == j:
continue
if self.puzzle[new_row][new_col] == value + 1:
change &= False
if change:
self.entries[i][j][value] = 1
def check_win(self):
for row in range(9):
if not self.__check_row(row):
return False
for column in range(9):
if not self.__check_column(column):
return False
for row in range(3):
for column in range(3):
if not self.__check_box(row, column):
return False
self.game_over = True
return True
def __check_block(self, block):
return set(block) == set(range(1, 10))
def __check_row(self, row):
return self.__check_block(self.puzzle[row])
def __check_column(self, column):
return self.__check_block([self.puzzle[row][column] for row in range(9)])
def __check_box(self, row, column):
box = []
for r in range(row * 3, (row + 1) * 3):
for c in range(column * 3, (column + 1) * 3):
box.append(self.puzzle[r][c])
return self.__check_block(box)
def find_permissible_entries(self, start_puzzle=True):
self.__find_permissible_entries(start_puzzle=start_puzzle)
def __find_permissible_entries(self, start_puzzle=True):
grid = self.start_puzzle if start_puzzle else self.puzzle
for i in range(9):
for j in range(9):
value = grid[i][j]
if value != 0:
self.entries[i][j] = [0] * 9
self.entries[i][j][value - 1] = 1
self.__helper_find(i, j, value)
for i in range(9):
for j in range(9):
for k in range(9):
if self.entries[i][j][k] == -1:
self.entries[i][j][k] = 1
def __helper_find(self, row, col, value):
for x in range(9):
if x != col:
self.entries[row][x][value - 1] = 0
if x != row:
self.entries[x][col][value - 1] = 0
i, j = row // 3, col // 3
for ii in range(3):
for jj in range(3):
if 3 * i + ii != row or 3 * j + jj != col:
self.entries[3 * i + ii][3 * j + jj][value - 1] = 0
def find_offending_entries(self, offending_number, row, column):
"""
"""
return self.__find_offending_entries(offending_number, row, column)
def __find_offending_entries(self, offending_number, row, column):
"""
"""
contradictions = []
for i in range(SUDOKU_SIZE):
if i != row and self.puzzle[i][column] == offending_number:
contradictions.append((i, column))
for j in range(SUDOKU_SIZE):
if j != column and self.puzzle[row][j] == offending_number:
contradictions.append((row, j))
_row, _col = row // 3, column // 3
for ii in range(SUDOKU_BOX_SIZE):
for jj in range(SUDOKU_BOX_SIZE):
cell_row, cell_col = _row * 3 + ii, _col * 3 + jj
if cell_row == row and cell_col == column:
continue
if self.puzzle[cell_row][cell_col] == offending_number:
contradictions.append((cell_row, cell_col))
return contradictions
def toggle_subrow_and_subcol(self, row, col, number, value=-1):
"""
"""
if value != -1:
self.entries[row][col][number] = value
elif self.entries[row][col][number] == 1:
self.entries[row][col][number] = 2
elif self.entries[row][col][number] == 2:
self.entries[row][col][number] = 1
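# A minimal usage sketch (the puzzle digits below are illustrative):
# SudokuBoard accepts any iterable of 9-character digit strings, so an
# in-memory list works just like an open file handle.
if __name__ == '__main__':
    puzzle_lines = ['530070000', '600195000', '098000060',
                    '800060003', '400803001', '700020006',
                    '060000280', '000419005', '000080079']
    game = SudokuGame(puzzle_lines)
    print(game.get_puzzle_entry(0, 0))  # 5
    print(game.check_win())             # False until the puzzle is solved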
|
[
"adam.tyler.felix@gmail.com"
] |
adam.tyler.felix@gmail.com
|
fdf14e8fe1c0dc4e8126c7755b6c6ba328dcd0b3
|
d2035f8c6f464f8449a5c0a88971e8e9fef7dd14
|
/user_test.py
|
5fcdc742600e0d6e83b513a0b60945576f3a8bbf
|
[] |
no_license
|
Reilly-Oduory/password-locker
|
ff14b1f1ef4fd7344a5b5274f198aa333a8dc383
|
e0b6e75718832051bd8c5257543f39d93a1aabbf
|
refs/heads/master
| 2023-05-06T10:21:52.288057
| 2021-06-01T00:11:08
| 2021-06-01T00:11:08
| 371,707,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 635
|
py
|
import unittest
from user import User
class TestUser(unittest.TestCase):
def setUp(self):
self.new_user = User("reilly", "1234")
def tearDown(self):
User.user_list = []
def test_instance(self):
self.assertEqual(self.new_user.user, "reilly")
self.assertEqual(self.new_user.password, "1234")
def test_add_user(self):
self.new_user.add_user()
self.assertEqual(len(User.user_list), 1)
def test_user_login(self):
self.new_user.add_user()
self.assertEqual(User.user_login("reilly", "1234"), True)
# if __name__ == '__main__':
# unittest.main()
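# The commented-out runner above is optional; the suite can also be run from
# the command line with:
#   python -m unittest user_test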
|
[
"reilly.oduory@student.moringaschool.com"
] |
reilly.oduory@student.moringaschool.com
|
dc1671dccf55d68ba1cfd63132b1dd6164c5e2b6
|
d5689f76df60788196c6fbeffa04ec79c7b7a9e6
|
/main/admin.py
|
7571c7ee4fdf497c1d4e4795cb3b9273f87b9463
|
[] |
no_license
|
deepanshugarg2812/Pooling-app
|
b78d151cf009f357952b5b407b32993ac2277660
|
0b7c3c29ff24b1b922506b95adee1ffc5fe7d4b1
|
refs/heads/master
| 2021-01-06T09:19:45.377535
| 2020-02-18T05:09:55
| 2020-02-18T05:09:55
| 241,277,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
from django.contrib import admin
from main import models
admin.site.register([
models.Question,
models.Choice,
models.Answer
])
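# Note: admin.site.register accepts either a single model class or an
# iterable of model classes, so the list above is equivalent to three
# separate register() calls.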
|
[
"deepanshugarg2812@gmail.com"
] |
deepanshugarg2812@gmail.com
|
dafa64c24bb4f0e4560a9d47f56637395ffbd06c
|
1ad9426c5851eb4474296273cc72a3c9d8857aec
|
/chap5.py
|
c4081724aa833f075da2c29515bc970a3fb029a4
|
[] |
no_license
|
manellbh/nltk-cheatsheet
|
49c994472372d8d01f43c9bcad51d1bc28a078f5
|
7cb99273d83c211d846ed9e57bc0e07522ab5779
|
refs/heads/master
| 2022-01-05T22:54:20.114738
| 2013-09-16T01:18:29
| 2013-09-16T01:18:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,457
|
py
|
from __future__ import division
import nltk,re, pprint
from nltk.corpus import brown
import operator
""" P.14: Use sorted and set to get a sorted list of tags used in the Brown Corpus """
def sort_tags():
brown_news_tagged = brown.tagged_words(simplify_tags=True)
tags = []
for (word, tag) in brown_news_tagged:
if tag not in tags:
tags.append(tag)
return sorted(set(tags))
print sort_tags()
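# A compact alternative (a sketch with the same output as sort_tags above),
# using set() and sorted() directly as the exercise wording suggests:
def sort_tags_oneliner():
    return sorted(set(tag for (word, tag) in brown.tagged_words(simplify_tags=True)))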
""" P. 15: Write programs to process the Brown Corpus and find answers to the following 4 questions:
a. Which nouns are more common in their plural form
b. Which word has the greatest number of distinct tags?
c. List tags in decreasing order of frequency?
d. Which tags are nouns most commonly after?"""
def often_plural():
brown_news_tagged = brown.tagged_words()
sing = {}
plural = {}
normally_plural = []
for (word, tag) in brown_news_tagged:
if re.findall(r'^N', tag):
if re.findall(r'^N+.+S$', tag):
if word in plural:
plural[word] = plural[word] + 1
else:
plural[word] = 1
if word not in sing:
sing[word] = 0
else:
if word in sing:
sing[word] = sing[word] + 1
else:
sing[word] = 1
for word in plural:
if word in sing:
if plural[word] > sing[word]:
normally_plural.append(word)
return normally_plural
print often_plural()
def most_popular_tags():
brown_news_tagged = brown.tagged_words()
tag_fd = nltk.FreqDist(tag for (word, tag) in brown_news_tagged)
sorted_pos = sorted(tag_fd.iteritems(), key=operator.itemgetter(1), reverse=True)
return sorted_pos
print most_popular_tags()
def nouns_after():
brown_news_tagged = brown.tagged_words(simplify_tags=True)
previous = {}
ct = 0
for (word, tag) in brown_news_tagged:
if ct != 0:
if re.findall(r'^N', tag):
previous_pos = prev
if previous_pos not in previous:
previous[previous_pos] = 1
else:
previous[previous_pos] = previous[previous_pos] + 1
prev = tag
ct += 1
return sorted(previous.iteritems(), key=operator.itemgetter(1), reverse=True)
print nouns_after()
def most_tags():
brown_news_tagged = brown.tagged_words(simplify_tags=True)
distinct = {}
for (word, tag) in brown_news_tagged:
if word not in distinct:
distinct[word]=[tag]
else:
if tag not in distinct[word]:
distinct[word].append(tag)
for word in distinct:
distinct[word] = len(distinct[word])
return sorted(distinct.iteritems(), key=operator.itemgetter(1), reverse=True)[0]
print most_tags()
|
[
"khoatran@berkeley.edu"
] |
khoatran@berkeley.edu
|
b9fe656bbcb5367df4eccfdcab5d0f629d229d25
|
30b1a1d384a74c3283df34bf7c9de5f2f3173e5f
|
/budget/filters.py
|
ab57893e664dd6a0a7a2120b40674597254eb682
|
[
"MIT"
] |
permissive
|
adrianyim/Budget-Balance-Senior-Project
|
0eba4b03633a685b1823f80cf76fd8b2c1f3843a
|
75bd3e7399c96e424d2d00637364e83d3820b5ca
|
refs/heads/master
| 2022-12-31T11:36:26.588472
| 2020-10-23T22:10:02
| 2020-10-23T22:10:02
| 229,025,913
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,400
|
py
|
import django_filters
from .models import New_item
class ItemFilters(django_filters.FilterSet):
item_type_choices = [
('Housing', 'Housing'),
('Transportation', 'Transportation'),
('Utilities', 'Utilities'),
('Food', 'Food'),
('Entertainment', 'Entertainment'),
('Education', 'Education'),
('Insurance', 'Insurance'),
('Medical/Healthcare', 'Medical/Healthcare'),
('Donations', 'Donations'),
("Finance Costs", "Finance Costs"),
('Others', 'Others')
]
cost_type_choices = [
('Income', 'Income'),
('Expense', 'Expense')
]
date_choices = [
('today', 'Today'),
('month', 'Month'),
('year', 'Year')
]
item = django_filters.CharFilter(lookup_expr='icontains')
item_type = django_filters.MultipleChoiceFilter(label='Item Type', choices=item_type_choices)
cost = django_filters.RangeFilter(widget=django_filters.widgets.RangeWidget(attrs={'type': 'number', 'min': '0', 'step': '.01'}))
cost_type = django_filters.MultipleChoiceFilter(label='Cost Type', choices=cost_type_choices)
date = django_filters.DateFromToRangeFilter(widget=django_filters.widgets.RangeWidget(attrs={'class': 'datepicker', 'type': 'date'}))
class Meta:
model = New_item
fields = '__all__'
exclude = ['remark', 'username']
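# A minimal usage sketch; the view function and template name below are
# hypothetical, not part of this app. A django-filter FilterSet is bound to
# the GET parameters and a queryset, and exposes the filtered results on its
# .qs attribute.
from django.shortcuts import render

def item_list(request):
    f = ItemFilters(request.GET, queryset=New_item.objects.all())
    return render(request, 'item_list.html', {'filter': f})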
|
[
"adriany93@gmail.com"
] |
adriany93@gmail.com
|
47c6b68496885ab34704d2382192d5877d046df7
|
251f5c092d4b7760cec8c2b6324e5290b917721f
|
/locations/spiders/auchan.py
|
59d0482876134b5e1ddcdafcbabe9b24efad50b8
|
[
"MIT",
"CC0-1.0"
] |
permissive
|
thomas536/alltheplaces
|
663a2441054ba62df6d6e070c19b1ba91f2f4f1f
|
ac4d4783572d55c0799fe6aeb5f6c0e72fad55fb
|
refs/heads/master
| 2021-11-27T12:21:46.387422
| 2021-09-08T18:33:46
| 2021-09-08T18:33:46
| 242,420,362
| 0
| 0
|
NOASSERTION
| 2021-09-09T05:00:22
| 2020-02-22T22:25:57
|
Python
|
UTF-8
|
Python
| false
| false
| 2,507
|
py
|
# -*- coding: utf-8 -*-
import json
import re
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
DAYS = {
"1": "Mo",
"2": "Tu",
"3": "We",
"4": "Th",
"5": "Fr",
"6": "Sa",
"7": "Su"
}
class AuchanSpider(scrapy.Spider):
name = "auchan"
item_attributes = {'brand': 'Auchan', 'brand_wikidata': 'Q758603'}
allowed_domains = ['auchan.fr', "woosmap.com"]
start_urls = [
'https://www.auchan.fr/magasins/votremagasin?store_id=152',
]
def parse(self, response):
yield scrapy.Request('https://api.woosmap.com/stores/?key=auchan-woos&page=1', callback=self.parse_api)
def parse_hours(self, hours):
opening_hours = OpeningHours()
for day, times in hours.items():
if day == "default":
continue
opening_hours.add_range(day=DAYS[day],
open_time=times[0]["start"],
close_time=times[-1]["end"]
)
return opening_hours.as_opening_hours()
def parse_api(self, response):
data = json.loads(response.body_as_unicode())
stores = data['features']
for store in stores:
properties = {
'name': store["properties"]["name"],
'ref': store["properties"]["store_id"],
'addr_full': " ".join(store["properties"]["address"]["lines"]),
'city': store["properties"]["address"]["city"],
'postcode': store["properties"]["address"]["zipcode"],
'country': store["properties"]["address"]["country_code"],
'phone': store["properties"]["contact"]["phone"],
'website': store["properties"]["contact"]["website"],
'lat': float(store["geometry"]["coordinates"][1]),
'lon': float(store["geometry"]["coordinates"][0]),
}
hours = self.parse_hours(store["properties"]["opening_hours"]["usual"])
if hours:
properties["opening_hours"] = hours
yield GeojsonPointItem(**properties)
if data['pagination']['page'] < data['pagination']['pageCount']:
page = data['pagination']['page'] + 1
yield scrapy.Request(
url='https://api.woosmap.com/stores/?key=auchan-woos&page={page}'.format(page=page),
callback=self.parse_api
)
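# For reference, a hypothetical sample of the "usual" opening-hours payload
# that parse_hours above expects: ISO day numbers (as strings) mapping to a
# list of time ranges, plus a "default" key that the parser skips.
SAMPLE_USUAL_HOURS = {
    "default": [],
    "1": [{"start": "08:30", "end": "12:00"}, {"start": "14:00", "end": "20:00"}],
    "7": [{"start": "09:00", "end": "12:30"}],
}
# parse_hours keeps only the first range's start and the last range's end for
# each day, so Monday above is rendered as "Mo 08:30-20:00".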
|
[
"2924736+thismakessand@users.noreply.github.com"
] |
2924736+thismakessand@users.noreply.github.com
|
68de86a98286ce40597f77071926c91c19f1cfe7
|
8b632de61821174ff816dd458c6e3550b4449f13
|
/50.py
|
815049058042f4827f15919d8eabcb5d8b0146c2
|
[] |
no_license
|
nzhoucsu/leetcode
|
ed6e99c88c61628b98099c4f25b97db3cfe15287
|
3f16ebe9da5c68b5748e17c6eefb735dadae0f1f
|
refs/heads/master
| 2020-04-17T08:59:02.068772
| 2019-01-26T17:31:29
| 2019-01-26T17:31:29
| 166,437,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
# LeetCode 50, Pow(x, n): the original snippet was only a method body, so it
# is wrapped in the Solution class it belongs to in order to run as-is.
class Solution(object):
    def myPow(self, x, n):
        if x == 0:
            return 0.0
        if n == 0:
            return 1.0
        if n < 0:
            x = 1.0/x
            n = -n
        half = self.myPow(x, n//2)
        if n % 2:
            return half*half*x
        else:
            return half*half
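# A quick sanity check of the fast-exponentiation logic (the test values here
# are chosen purely for illustration):
if __name__ == '__main__':
    print(Solution().myPow(2.0, 10))   # 1024.0
    print(Solution().myPow(2.0, -2))   # 0.25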
|
[
"nzhoucsu@gmail.com"
] |
nzhoucsu@gmail.com
|
c08c441e6197b6499cf91b96336b88b38c3942e9
|
4cd6edef8553c6d49417b437e9ea984538125150
|
/SModelWrap.py
|
693bc4c6f12815a269ca985ae4ff1db5071bff7d
|
[
"WTFPL"
] |
permissive
|
SrzStephen/ModelWrap
|
280e56e770e52e5dc9106c54c912785191702def
|
b80bced09e5d2ae68193d46a84810cd46a907c79
|
refs/heads/main
| 2023-02-27T01:27:38.092074
| 2021-02-06T17:41:29
| 2021-02-06T17:41:29
| 336,590,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,724
|
py
|
from pandas import DataFrame
from numpy import ndarray
import pandas as pd
class ModelPerClass:
"""
    Wrapper around a generic model (scikit-learn or otherwise).
    The goal is for you to provide a dict of trained models and the relevant
    column key. Does not implement predict_proba, because the per-class
    prediction arrays would have different sizes, so it would not work with
    stacking.
"""
def __init__(self, model_dict: dict, column_key: str):
"""
        :param model_dict: mapping from column value to a trained model
        :type model_dict: dict; each value must expose a predict method
        :param column_key: name of the column used to route rows to models
:type column_key: str
"""
self.model_dict = model_dict
self.column_key = column_key
self._data = DataFrame
def predict(self, x_val: DataFrame) -> ndarray:
"""
:param x_val: X values to be predicted
:type x_val: Pandas DataFrame
:return: array of predictions
:rtype: ndarray
"""
return_df = None
if not isinstance(x_val, DataFrame):
raise ValueError(f"Provided values were of type {type(x_val)}, expected DataFrame")
x_val.reset_index(drop=True, inplace=True)
for key, model in self.model_dict.items():
data_to_use = x_val.loc[x_val[self.column_key] == key].drop(self.column_key, axis=1)
indexes = data_to_use.index.tolist()
predictions = DataFrame(index=indexes, data=model.predict(data_to_use))
if return_df is None:
return_df = predictions
else:
return_df = pd.concat([return_df, predictions])
return return_df.sort_index(ascending=False, inplace=False).to_numpy()
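# A minimal usage sketch; the model choice (scikit-learn LinearRegression)
# and the column names below are illustrative assumptions, not part of the
# module. One model is trained per category, then ModelPerClass routes each
# row of a mixed DataFrame to the model matching its 'group' value.
if __name__ == "__main__":
    from sklearn.linear_model import LinearRegression

    df = pd.DataFrame({"group": ["a", "a", "b", "b"],
                       "x": [1.0, 2.0, 3.0, 4.0],
                       "y": [2.0, 4.0, 9.0, 12.0]})
    models = {key: LinearRegression().fit(sub[["x"]], sub["y"])
              for key, sub in df.groupby("group")}
    wrapped = ModelPerClass(models, column_key="group")
    # predict() returns a numpy array ordered by descending row index.
    print(wrapped.predict(df[["group", "x"]]))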
|
[
"stephen@motts.id.au"
] |
stephen@motts.id.au
|
39fbc6be8eec773a0933d226632a5c61e7b7b62a
|
49f5a9c505eb850aca073eaaa485b763755c6349
|
/gegalkin.py
|
5280d1e5d738728f1468f5723bd86fce12bf4987
|
[] |
no_license
|
readiv/bonwin_proxmark
|
34efad9a2388aeaa12e5161f8419fab570335da1
|
945614e70dba207dfb34b49a6c275278a6ab6137
|
refs/heads/master
| 2021-01-08T10:39:19.834319
| 2020-03-04T23:04:07
| 2020-03-04T23:04:07
| 242,005,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,542
|
py
|
# -*- coding: utf-8 -*-
name_file = "uin_key_ff.txt"
# 0000003c e3fcf9dfe7f4
n_byte_uid = 0 # Starting byte offset in the hex string; valid offsets: 0, 2, 4, 6
n_byte_key = n_byte_uid # Starting byte offset, counted from 0
key_00000000 = "00 00 00 00\tdf fc f9 df df cc"
def get_bit(uid_key: str, i_mask_bit: int = 0 ): # Returns the uid byte and the selected key bit
""" На входе строка вида 00 00 00 00\tdf fc f9 df df cc
На выходе часть uid вырезаная в зависимости от n_byte_uid
и приведенная к int.
так же на выходе бит вырезаный из key в зависимости от
n_byte_key и i_mask_bit
"""
uid = uid_key.replace(' ', '').split("\t")
key = 0
if int(uid[1][n_byte_key: n_byte_key + 2], 16) & (1 << i_mask_bit):
key = 1
uid = int(uid[0][n_byte_uid: n_byte_uid + 2], 16)
return uid, key
def get_subscript_unicode(n):
codes = {
0 : u"\u2080",
1 : u"\u2081",
2 : u"\u2082",
3 : u"\u2083",
4 : u"\u2084",
5 : u"\u2085",
6 : u"\u2086",
7 : u"\u2087",
8 : u"\u2088",
9 : u"\u2089"
}
return codes[n]
def get_str_of_bit(x: int): # Returns the monomial string for a byte, i.e. the terms of the Zhegalkin expansion
    """ Input: byte x.
    Output: a monomial string such as x7x5x0 (with unicode subscript digits).
    """
strbit = ""
if x:
for i in range(8):
if x & (1 << i):
strbit = u"x" + get_subscript_unicode(i) + strbit
else:
strbit = "1"
# if strbit[0:3] == " * ":
# strbit = strbit[3:]
return strbit
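# Example: get_str_of_bit(0b10010001) returns the monomial over bits 7, 4
# and 0 ("x" with unicode subscripts 7, 4, 0), while get_str_of_bit(0)
# returns "1", the constant term of the expansion.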
def get_str_of_bit_2(x: int): # Returns the plain bit string for a byte
    """ Input: byte x.
    Output: a string like '01000110 - 70' (bits b7..b0 plus the decimal value).
    """
strbit = f" - {x}"
for i in range(8):
if x & (1 << i):
strbit = "1" + strbit
else:
strbit = "0" + strbit
return strbit
if __name__ == '__main__':
# print(get_str_of_bit(255))
for i_mask_bit in range(8):
pif = []
for i in range(256, 0, -1):
pif.append([0] * i)
# print(pif[0])
# print("\n")
with open(name_file, 'r') as f:
for line in f:
uid, key = get_bit(line, i_mask_bit)
if uid:
pif[0][uid] = key
uid, key = get_bit(key_00000000, i_mask_bit)
pif[0][uid] = key
        # Build the XOR difference triangle (Pascal-style) to obtain the Zhegalkin coefficients
for row in range(1, 256):
for i in range(256 - row):
if pif[row - 1][i] != pif[row - 1][i + 1]:
pif[row][i] = 1
sy = u"y" + get_subscript_unicode(i_mask_bit) + u" ="
for row in range(256):
if pif[row][0]:
if sy[-1] == "=":
sy += " " + get_str_of_bit(row)
else:
sy += u"\u2295" + get_str_of_bit(row)
print("==============================================================")
print(sy)
with open('byte02.txt', 'a', encoding='utf-16') as f:
f.write(sy + '\n\n')
|
[
"hd3200@gmail.com"
] |
hd3200@gmail.com
|
5832d395d65684014916c925fcfebfdd07368442
|
e34110b784f32e34c3fe155d0555fca1ed1f2ad7
|
/person.py
|
771b6f0a4058a147da15774db1023151941e8756
|
[] |
no_license
|
Bjornwolf/Coviz
|
8de221ddbfd23ae14ee6ca38769cb6af48a4af7c
|
130381c8e723ce0800aa4e1004124ddd0dd9b8d0
|
refs/heads/master
| 2021-03-15T10:00:01.209688
| 2020-03-12T15:17:47
| 2020-03-12T15:17:47
| 246,841,719
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
class Person(object):
def __init__(self, state, sociability, infection_time):
self.state = state
self.sociability = sociability
self.infection_time = infection_time
def infect(self):
if self.state == 'healthy':
self.state = 'infected'
def tick(self):
if self.state == 'infected':
self.infection_time -= 1
if self.infection_time == 0:
self.state = 'cured'
def get_state_letter(self):
return self.state[0]
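# A minimal simulation sketch (the parameter values are illustrative):
if __name__ == '__main__':
    p = Person('healthy', sociability=0.5, infection_time=3)
    p.infect()
    while p.get_state_letter() == 'i':
        p.tick()
    print(p.state)  # 'cured' after infection_time ticks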
|
[
"noreply@github.com"
] |
Bjornwolf.noreply@github.com
|
ac8e35ab63bda973ae6333380d19b45aa339273e
|
7e0d3d55abd268f86c3ab355e9e6808f2f845221
|
/TianJin_Spidernew.py
|
84bf89edb6843bdeeb88d1aedd9ff5ce48831388
|
[] |
no_license
|
BennyKuya/MySQLtest
|
e9d781928837b4426f4c2a1add6e061eb6a6ad52
|
3a00257b2aa10971271117d7b1ff31ce259c642e
|
refs/heads/master
| 2021-02-06T09:22:13.928099
| 2020-07-26T10:41:59
| 2020-07-26T10:41:59
| 243,901,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,804
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 29 05:32:06 2020
@author: Administrator
"""
import pymysql
import requests
import time
import re
import datetime
import random
def agent():
user_agent_list = [
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 6.1; 125LA; .NET CLR 2.0.50727; .NET CLR 3.0.04506.648; .NET CLR 3.5.21022)',
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 "
"(KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 "
"(KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 "
"(KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 "
"(KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 "
"(KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 "
"(KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 "
"(KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 "
"(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 "
"(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
return random.choice(user_agent_list)
def areaPanBie(titleastr):
qulist = ["和平区","河东区","河西区","南开区","河北区","红桥区","滨海新区","东丽区","西青区","津南区","北辰区","武清区","宝坻区","宁河区","静海区","蓟州区"]
area = '区级'
for qutoken in qulist:
if qutoken in titleastr:
area = qutoken
break
else:
continue
return area
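# Example: areaPanBie('和平区学校设备采购公告') returns '和平区', while a title
# containing none of the district names falls back to the default '区级'
# (district level).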
class Tianjin_caigouSpider(object):
def __init__(self):
self.connect = pymysql.connect(host='139.129.246.77',user='ceshi',password='ceshi@123',db='hegang2',charset='utf8')
self.cursor = self.connect.cursor()
self.tenderingtypelist = ['公开招标公告','竞争性磋商公告','竞争性谈判公告']
def date_time(self):
str_now = datetime.datetime.now().strftime('%b-%d-%Y %H:%M:%S')
now_time = datetime.datetime.strptime(str_now, '%b-%d-%Y %H:%M:%S')
return now_time
def GetHtml(self,url):
headers = {'User-Agent':agent()}
response = requests.get(url,headers=headers)
response.encoding = 'utf8'
return response.text
def ParseWeb(self,idcode,areais):
pageNo = 1
while True:
link = 'http://www.ccgp-tianjin.gov.cn/portal/topicView.do'
headers = {'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Connection': 'keep-alive',
'Content-Length': '69',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Cookie': 'JSESSIONID=hQSGXaHCGLtQ3akisHWk-YpBUjZdmjqNtwA9obnhgq4pbTVwPoeF!2132241801; insert_cookie=19021653',
'Host': 'www.ccgp-tianjin.gov.cn',
'Origin': 'http://www.ccgp-tianjin.gov.cn',
'Referer': 'http://www.ccgp-tianjin.gov.cn/portal/topicView.do?method=view&view=Infor&id=1665&ver=2&st=1&stmp=1595687695359',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest'}
datashi = {
'method': 'view',
'page': pageNo,
'id': idcode,
'step': 1,
'view': 'Infor',
'st': 1,
'ldateQGE': '',
'ldateQLE': ''}
dataqu = {'method': 'view',
'page': pageNo,
'id': idcode,
'step': 1,
'view': 'Infor',
'ldateQGE': '',
'ldateQLE': ''}
if areais == '市级':
data = datashi
else:
data = dataqu
data = requests.post(link,data,headers=headers)
data.encoding = 'utf8'
source = data.text
if 'title="' not in source:
                print('No page source')
break
linkTitleTimes = re.findall(re.compile('<li><b>.*?id=(\d+)&ver.*?title="(.*?)".*?class="time">(.*?)<',re.S),source)
exp_flag = False
for token in linkTitleTimes:
try:
url = 'http://www.ccgp-tianjin.gov.cn/portal/documentView.do?method=view&id={}&ver=2'.format(token[0])
title = token[1]
publicTime = datetime.datetime.strptime(token[2],'%Y-%m-%d')
if areais == '市级':
area = '天津市'
if areais == '区级':
area = areaPanBie(title)
webhtml = self.GetHtml(url)
content = webhtml.replace('\'','')
tenderingtype = '采购公告'
for tendertype in self.tenderingtypelist:
if tendertype in title:
tenderingtype = tendertype
break
else:
continue
sql = "insert into zhaobiao_tb(`tenderingtype`,`area`,`url`,`title`,`content`,`announcement_time`,`collect_time`,`collectid`,`collect_source`) values('%s','%s','%s','%s','%s','%s','%s','%s','%s')" %(tenderingtype,area,url,title,content,publicTime,self.date_time(),55,'天津政府采购网')
self.cursor.execute(sql)
self.connect.commit()
                    print('==== {} {} 《{}》 inserted into DB successfully ===='.format('采购公告',publicTime,title))
print('\n')
time.sleep(2)
except Exception as e:
if "Duplicate entry" in str(e):
                        print('============ Duplicate record in DB; terminating this pass early ============')
print('\n')
exp_flag = True
break
if exp_flag is True:
break
pageNo += 1
time.sleep(2)
def main(self):
for idcode,areais in zip(['1665','1664'],['市级','区级']):
self.ParseWeb(idcode,areais)
time.sleep(2)
class Tianjin_gengzhengSpider(object):
def __init__(self):
self.connect = pymysql.connect(host='139.129.246.77',user='ceshi',password='ceshi@123',db='hegang2',charset='utf8')
self.cursor = self.connect.cursor()
def date_time(self):
str_now = datetime.datetime.now().strftime('%b-%d-%Y %H:%M:%S')
now_time = datetime.datetime.strptime(str_now, '%b-%d-%Y %H:%M:%S')
return now_time
def GetHtml(self,url):
headers = {'User-Agent':agent()}
response = requests.get(url,headers=headers)
response.encoding = 'utf8'
return response.text
def ParseWeb(self,idcode,areais):
pageNo = 1
while True:
link = 'http://www.ccgp-tianjin.gov.cn/portal/topicView.do'
headers = {'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Connection': 'keep-alive',
'Content-Length': '69',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Cookie': 'JSESSIONID=hQSGXaHCGLtQ3akisHWk-YpBUjZdmjqNtwA9obnhgq4pbTVwPoeF!2132241801; insert_cookie=19021653',
'Host': 'www.ccgp-tianjin.gov.cn',
'Origin': 'http://www.ccgp-tianjin.gov.cn',
'Referer': 'http://www.ccgp-tianjin.gov.cn/portal/topicView.do?method=view&view=Infor&id=1665&ver=2&st=1&stmp=1595687695359',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest'}
datashi = {
'method': 'view',
'page': pageNo,
'id': idcode,
'step': 1,
'view': 'Infor',
'st': 1,
'ldateQGE': '',
'ldateQLE': '' }
dataqu = {'method': 'view',
'page': pageNo,
'id': idcode,
'step': 1,
'view': 'Infor',
'ldateQGE': '',
'ldateQLE': ''}
if areais == '市级':
data = datashi
else:
data = dataqu
data = requests.post(link,data,headers=headers)
data.encoding = 'utf8'
source = data.text
if 'title="' not in source:
                print('No page source')
break
            linkTitleTimes = re.findall(re.compile('<li><b>.*?id=(\d+)&ver.*?title="(.*?)".*?class="time">(.*?)<',re.S),source)
exp_flag = False
for token in linkTitleTimes:
try:
url = 'http://www.ccgp-tianjin.gov.cn/portal/documentView.do?method=view&id={}&ver=2'.format(token[0])
title = token[1]
publicTime = datetime.datetime.strptime(token[2],'%Y-%m-%d')
if areais == '市级':
area = '天津市'
if areais == '区级':
area = areaPanBie(title)
webhtml = self.GetHtml(url)
first_announcement_timestr = ''.join(re.findall(re.compile('>首次公告日期:(.*?)</div>',re.S),webhtml))
first_announcement_time = datetime.datetime.strptime(first_announcement_timestr,'%Y-%m-%d')
content = webhtml.replace('\'','')
tenderingtype = '更正公告'
sql = "insert into zhaobiao_tb(`tenderingtype`,`area`,`url`,`title`,`content`,`announcement_time`,`first_announcement_time`,`collect_time`,`collectid`,`collect_source`) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')" %(tenderingtype,area,url,title,content,publicTime,first_announcement_time,self.date_time(),55,'天津政府采购网')
self.cursor.execute(sql)
self.connect.commit()
                    print('==== {} {} 《{}》 inserted into DB successfully ===='.format('更正公告',publicTime,title))
print('\n')
time.sleep(2)
except Exception as e:
if "Duplicate entry" in str(e):
                        print('============ Duplicate record in DB; terminating this pass early ============')
print('\n')
exp_flag = True
break
if exp_flag is True:
break
pageNo += 1
time.sleep(2)
def main(self):
for idcode,areais in zip(['1663','1666'],['市级','区级']):
self.ParseWeb(idcode,areais)
time.sleep(2)
class Tianjin_jieguoSpider(object):
def __init__(self):
self.connect = pymysql.connect(host='139.129.246.77',user='ceshi',password='ceshi@123',db='hegang2',charset='utf8')
self.cursor = self.connect.cursor()
self.tenderingtypelist = ['成交公告','中标公告','终止公告']
def date_time(self):
str_now = datetime.datetime.now().strftime('%b-%d-%Y %H:%M:%S')
now_time = datetime.datetime.strptime(str_now, '%b-%d-%Y %H:%M:%S')
return now_time
def GetHtml(self,url):
headers = {'User-Agent':agent()}
response = requests.get(url,headers=headers)
response.encoding = 'utf8'
return response.text
def ParseWeb(self,idcode,areais):
pageNo = 1
while True:
link = 'http://www.ccgp-tianjin.gov.cn/portal/topicView.do'
headers = {'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Connection': 'keep-alive',
'Content-Length': '69',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Cookie': 'JSESSIONID=hQSGXaHCGLtQ3akisHWk-YpBUjZdmjqNtwA9obnhgq4pbTVwPoeF!2132241801; insert_cookie=19021653',
'Host': 'www.ccgp-tianjin.gov.cn',
'Origin': 'http://www.ccgp-tianjin.gov.cn',
'Referer': 'http://www.ccgp-tianjin.gov.cn/portal/topicView.do?method=view&view=Infor&id=1665&ver=2&st=1&stmp=1595687695359',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest'}
datashi = {
'method': 'view',
'page': pageNo,
'id': idcode,
'step': 1,
'view': 'Infor',
'st': 1,
'ldateQGE': '',
'ldateQLE': ''}
dataqu = {'method': 'view',
'page': pageNo,
'id': idcode,
'step': 1,
'view': 'Infor',
'ldateQGE': '',
'ldateQLE': ''}
if areais == '市级':
data = datashi
else:
data = dataqu
data = requests.post(link,data,headers=headers)
data.encoding = 'utf8'
source = data.text
if 'title="' not in source:
                print('No page source')
break
            linkTitleTimes = re.findall(re.compile('<li><b>.*?id=(\d+)&ver.*?title="(.*?)".*?class="time">(.*?)<',re.S),source)
exp_flag = False
for token in linkTitleTimes:
try:
url = 'http://www.ccgp-tianjin.gov.cn/portal/documentView.do?method=view&id={}&ver=2'.format(token[0])
title = token[1]
publicTime = datetime.datetime.strptime(token[2],'%Y-%m-%d')
tenderingtype = '采购结果公告'
for tendertype in self.tenderingtypelist:
if tendertype in title:
tenderingtype = tendertype
break
else:
continue
if areais == '市级':
area = '天津市'
if areais == '区级':
area = areaPanBie(title)
webhtml = self.GetHtml(url)
content = webhtml.replace('\'','')
sql = "insert into zhaobiao_tb(`tenderingtype`,`area`,`url`,`title`,`content`,`announcement_time`,`collect_time`,`collectid`,`collect_source`) values('%s','%s','%s','%s','%s','%s','%s','%s','%s')" %(tenderingtype,area,url,title,content,publicTime,self.date_time(),55,'天津政府采购网')
self.cursor.execute(sql)
self.connect.commit()
                    print('==== {} {} 《{}》 inserted into DB successfully ===='.format('采购结果公告',publicTime,title))
print('\n')
time.sleep(5)
except Exception as e:
if "Duplicate entry" in str(e):
                        print('============ Duplicate record in DB; terminating this pass early ============')
print('\n')
exp_flag = True
break
if exp_flag is True:
break
pageNo += 1
time.sleep(5)
def main(self):
for idcode,areais in zip(['2014','2013'],['市级','区级']):
self.ParseWeb(idcode,areais)
time.sleep(2)
if __name__ == '__main__':
while True:
caigou = Tianjin_caigouSpider()
caigou.main()
time.sleep(3)
gengzheng = Tianjin_gengzhengSpider()
gengzheng.main()
time.sleep(3)
jieguo = Tianjin_jieguoSpider()
jieguo.main()
        print('@@@@@@@@@@ Sleeping; next update scheduled for {} @@@@@@@@@@'.format((datetime.datetime.now()+datetime.timedelta(hours=24)).strftime("%Y-%m-%d %H:%M:%S")))
time.sleep(24*60*60)
|
[
"noreply@github.com"
] |
BennyKuya.noreply@github.com
|
e404d6b0a29db1667fdbbef3adcc9529b3219b24
|
da934e0010380fdc6894063540f61b0ebc2c9ded
|
/vendor/Twisted-10.0.0/doc/historic/2003/pycon/deferex/deferex-complex-raise.py
|
8005e45b917f772f7ca37f7ff1a08b382680adb5
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
bopopescu/cc-2
|
ed4f1dfe3c98f476ff619058d99855a16272d36b
|
37444fb16b36743c439b0d6c3cac2347e0cc0a94
|
refs/heads/master
| 2022-11-23T03:57:12.255817
| 2014-10-02T06:10:46
| 2014-10-02T06:10:46
| 282,512,589
| 0
| 0
|
Apache-2.0
| 2020-07-25T19:36:05
| 2020-07-25T19:36:05
| null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
class MyExc(Exception):
"A sample exception."
try:
x = 1 + 3
raise MyExc("I can't go on!")
x = x + 1
print x
except MyExc, me:
print 'error (',me,'). x was:', x
except:
print 'fatal error! abort!'
|
[
"anotherjesse@gmail.com"
] |
anotherjesse@gmail.com
|
64a9868fbebd5e6a96bc4c844558394735670e23
|
b8849355e1b4adabc6054477ec087745fd979c6d
|
/tcpclient.py
|
b8727944091aa79c05c8b6c583378f5fdb8d2673
|
[] |
no_license
|
LittleBuster/DokuMail
|
85a4a558f1a4b4fa2b6eedba48d6e466f709b8df
|
574d4ebf3b2ea77b9b874c640bf463ab20533974
|
refs/heads/master
| 2021-01-24T06:07:23.082725
| 2015-05-08T13:52:12
| 2015-05-08T13:52:12
| 21,331,417
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,427
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import shutil
import socket
import json
import hashlib
import platform
from crypt import *
from configs import Configs, PacketHeader
from compress import *
from keys import AppKeys
from logger import Log
from PyQt4 import QtCore
from paths import AppPath
class TcpClient(QtCore.QObject):
"""
Class for low-level communication with a socket
"""
downloadStart = QtCore.pyqtSignal(str)
decryptStart = QtCore.pyqtSignal()
decompressStart = QtCore.pyqtSignal()
downloadComplete = QtCore.pyqtSignal()
fileDownloaded = QtCore.pyqtSignal(str)
fileCount = QtCore.pyqtSignal(int)
def __init__(self):
super(TcpClient, self).__init__()
self.app_path = AppPath().main()
self.cfg = Configs()
self.a_key = AES256_cert_read("".join((self.app_path, "transf.crt")))
self.r_key = AES256_cert_read("".join((self.app_path, "retrieve.crt")))
self.header = PacketHeader().header()
def connect(self, ip, port, user, pwd):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.sock.connect((ip, int(port)))
except:
Log().local("TCP Client: error connection to server: " + ip)
return False
h = hashlib.sha512()
h.update(pwd.encode('utf-8'))
h_passwd = h.hexdigest().upper()
"""
        Send credentials (login and password hash) to the server
"""
cred = {"header": self.header, "type": "login", "user": user, "passwd": h_passwd}
self.sock.send(AES256_encode_msg(json.dumps(cred), self.r_key ))
answ = AES256_decode_msg(self.sock.recv(1024), self.r_key )
answ = json.loads(answ)
if not answ["header"] == self.header:
return
if answ["answ"] == 'login-ok':
print("Login ok")
return True
else:
print("login fail")
Log().local("TCP Server: Login fail")
return False
def check_status(self, ip, port):
"""
Check server status: offline/online
"""
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.sock.connect((ip, int(port)))
self.sock.close()
return True
except:
return False
def create_news(self, news_header):
data = {"header": self.header, "type": "news", "action": "create", "news-header": news_header}
data = AES256_encode_msg( json.dumps(data), self.r_key )
self.sock.send( data )
def delete_news(self, news_header):
data = {"header": self.header, "type": "news", "action": "delete", "news-header": news_header}
data = AES256_encode_msg( json.dumps(data), self.r_key )
self.sock.send( data )
def send_message(self, toUsers, message):
if len(message.encode("utf-8")) > 3050:
return "[FAIL-LEN]"
try:
data = {"header": self.header, "type": "send-message", "ToUsers": toUsers}
data = json.dumps(data)
self.sock.send( AES256_encode_msg(data, self.r_key) )
answ = AES256_decode_msg( self.sock.recv(1024), self.r_key )
answ = json.loads( answ )
if not answ["header"] == self.header:
return "[FAIL]"
if answ["answ"] == "fail-access":
return "[FAIL-ACCESS]"
data = {"header": self.header, "message": message}
self.sock.send(AES256_encode_msg(json.dumps(data), self.a_key))
answ = AES256_decode_msg(self.sock.recv(1024), self.r_key)
answ = json.loads(answ)
if answ["answ"] == "send-msg-ok":
return "[SEND-MSG-OK]"
else:
return "[FAIL]"
except:
Log().local("TCP Server: Fail sending message")
return "[FAIL]"
def get_messages(self):
"""
        Get the most recent message from the server
"""
msg = {}
data = {"header": self.header, "type": "get-message"}
data = json.dumps(data)
self.sock.send( AES256_encode_msg(data, self.r_key) )
answ = AES256_decode_msg(self.sock.recv(1024), self.r_key)
answ = json.loads(answ)
if not answ["header"] == self.header:
msg["Time"] = "-"
msg["FromUser"] = "Client"
msg["Data"] = "Сервер обмена не совместим с вашим протоколом."
Log().local("Error header")
return msg
if answ["answ"] == 'empty-msg':
self.sock.close()
print("No new messages")
return "[EMPTY-MSG]"
self.sock.send(b"ok")
data = self.sock.recv(4096)
msg["FromUser"] = answ["From"]
msg["Time"] = answ["Time"]
try:
data = AES256_decode_msg(data, self.a_key)
data = json.loads(data)
if not data["header"] == self.header:
msg["Time"] = "-"
msg["FromUser"] = "Client"
msg["Data"] = "Сервер обмена не совместим с вашим протоколом."
Log().local("Error header")
return msg
msg["Data"] = data["message"]
except:
Log().local("Error reading message file")
msg["Data"] = "Ошибка декодирования сообщения"
return msg
def begin_send_files(self, toUser):
"""
        Initialize the file sending process
"""
data = {"header": self.header, "type": "send-files", "ToUser": toUser}
self.sock.send(AES256_encode_msg(json.dumps(data), self.r_key))
print(self.sock.recv(1024))
def send_file(self, fname):
"""
Send single file to server
"""
lsf = fname.split("/")
l = len(lsf)
data = {"header": self.header, "type": "sf", "filename": lsf[l - 1]}
data = json.dumps(data)
self.sock.send( AES256_encode_msg(data, self.r_key ))
print(self.sock.recv(1024))
f = open(fname + ".bin", "rb")
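        # The framing below is inferred from the code itself: the encrypted
        # payload is streamed in 4 KB chunks, and the b"[end]" sentinel tells
        # the server that the transfer is complete.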
while True:
data = f.read(4096)
if len(data) != 0:
self.sock.send(data)
else:
break
f.close()
self.sock.send(b"[end]")
print(self.sock.recv(1024))
def end_send_files(self):
"""
        Stop the file sending process
"""
data = {"header": self.header, "type": "end-retrieve"}
self.sock.send(AES256_encode_msg(json.dumps(data), self.r_key))
def get_files(self, update, cur_path):
"""
        Get all files for the client from the remote TCP server
"""
exts = []
try:
exts = self.cfg.unzip_formats()
except:
Log().local("Error reading unzip formats")
c_exts = []
try:
c_exts = self.cfg.uncrypt_formats()
except:
Log().local("Error reading uncrypt formats file")
data = {}
if update:
data = {"header": self.header, "type": "get-files", "operation": "update"}
else:
data = {"header": self.header, "type": "get-files", "operation": "download"}
self.sock.send(AES256_encode_msg(json.dumps(data), self.r_key))
if not update:
path = self.cfg.downloads_path()
if not os.path.exists(path):
os.makedirs( path )
else:
if not os.path.exists("".join((cur_path, "update/data"))):
os.makedirs("".join((cur_path, "update/data")))
answ = AES256_decode_msg(self.sock.recv(1024), self.r_key)
answ = json.loads(answ)
cnt = answ["count"]
self.sock.send(b"ok")
self.fileCount.emit(cnt)
while True:
data = AES256_decode_msg(self.sock.recv(1024), self.r_key)
data = json.loads(data)
if data["type"] == "not-files":
break
if data["type"] == "end-retrieve":
self.downloadComplete.emit()
print("All files recieved")
break
print("Start downloading...")
self.sock.send(b'recieveing...')
try:
fname = data["filename"]
except:
return
print("New file: " + fname)
self.downloadStart.emit(fname)
if update:
dest = "".join((cur_path, "update/data/"))
destf = "".join((dest, fname))
else:
dest = self.cfg.downloads_path()
destf = "".join((dest, fname, ".bin"))
"""
            Check extension
"""
isDecompress = True
isCrypt = True
tmp_fname = fname.split(".")
ext = tmp_fname[len(tmp_fname) - 1].lower()
for ex in exts:
if ex == ext:
isDecompress = False
break
for ex in c_exts:
if ex == ext:
isCrypt = False
break
f = open(destf, "wb")
while True:
data = self.sock.recv(4096)
l = len(data) - 5
try:
if data[l:] == b'[end]':
print("Download complete")
f.write(data[:l])
self.sock.send("complete".encode('utf-8'))
f.close()
if not update:
self.decryptStart.emit()
if isCrypt:
print("Decrypt: " + fname)
if not AES256_decode_file( "".join((dest, fname, ".bin")), "".join((dest, fname, ".z")),
self.a_key):
Log().local("Error decrypting recieved file: " + fname)
print("error decrypting")
else:
shutil.copy2("".join((dest, fname, ".bin")), "".join((dest, fname, ".z")))
self.decompressStart.emit()
if isDecompress:
print("Decompress: " + fname)
if not zlib_decompress_file("".join((dest, fname, ".z")), "".join((dest, fname))):
Log().local("Error decompressing recieved file: " + fname)
print("error decompressing")
else:
print("".join((fname, " not compressed")))
shutil.copy2("".join((dest, fname, ".z")), "".join((dest, fname)))
self.fileDownloaded.emit(fname)
if not update:
os.remove("".join((dest, fname, ".bin")))
os.remove("".join((dest, fname, ".z")))
break
except:
Log().local("Fatal error when files recieved")
print('except')
f.write(data)
def close(self):
self.sock.close()
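# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the host, port and credentials are
# hypothetical, and a real application would keep the Qt event loop running so
# the pyqtSignals declared above can be delivered):
#
#   client = TcpClient()
#   if client.connect("192.168.0.10", 9000, "operator", "secret"):
#       msg = client.get_messages()
#       if msg != "[EMPTY-MSG]":
#           print(msg["FromUser"], msg["Time"], msg["Data"])
#       client.close()
# ---------------------------------------------------------------------------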
|
[
"Denisov172@gmail.com"
] |
Denisov172@gmail.com
|
228cab6a55c4fd93d9858d5d7d8f80e40b0862f4
|
d63cdbf6ac977348c94979530719e16601e1d8d5
|
/gorgeous/wsgi.py
|
f26b1e570e263b5f176a5f74b40bf77d39600ad5
|
[] |
no_license
|
web-developer77/Django-Clothy
|
22f27c79290c9c7fcc22ec6ad2c2def46bb28cb4
|
dcda1d0bfec628c9d8170f0583c95a9f654580ce
|
refs/heads/master
| 2023-06-10T05:11:57.146941
| 2021-07-08T11:22:00
| 2021-07-08T11:22:00
| 383,831,071
| 2
| 0
| null | 2021-07-08T11:22:01
| 2021-07-07T14:43:04
|
Python
|
UTF-8
|
Python
| false
| false
| 484
|
py
|
"""
WSGI config for gorgeous project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.staging")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
|
[
"koa06800@gmail.com"
] |
koa06800@gmail.com
|
94aef605f02c02e7b2e6aec09478cf550791d550
|
8320f4367fb5aed478bf00bb1bb393bd716f469f
|
/hmm/hmmlearn.py
|
e839670f16e8ede5915c28bcd57fc092d12c4788
|
[
"Apache-2.0"
] |
permissive
|
sjayakum/csci544
|
1ce9af68c4a0c8e857fadccb31e5eb56fbfe9cb8
|
c452ed518fda0909836107668428791be90b82b4
|
refs/heads/master
| 2021-06-16T22:03:58.824754
| 2017-03-18T23:33:50
| 2017-03-18T23:33:50
| 80,076,270
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,348
|
py
|
f = 0
vocab = []
data_set_tags = []
data_set_words_split = []
distinct_tags = []
def learn_huristic():
pass
def load_data(file_name):
global f, vocab,data_set_tags,data_set_words_split,distinct_tags
with open(file_name, 'r') as file_pointer:
f = file_pointer.read()
lines = f.split('\n')
lines.remove('')
list_of_all_tags = ['Q0']
for each_line in lines:
list_of_words = each_line.split(' ')
temp_list_tags = ['Q0']
temp_list_words = ['']
for each_word in list_of_words:
word, tag = each_word.rsplit('/', 1)
temp_list_tags.append(tag)
temp_list_words.append(str(word))
list_of_all_tags.append(tag)
vocab.append(word)
data_set_tags.append(temp_list_tags)
data_set_words_split.append(temp_list_words)
vocab = set(vocab)
distinct_tags = set(list_of_all_tags)
from collections import defaultdict
import copy
import math
transition_matrix = defaultdict(dict)
emission_matrix = defaultdict(dict)
def build_transition():
global transition_matrix
temp_dict = {}
for each_tag in distinct_tags:
temp_dict[each_tag] = 0
for each_tag in distinct_tags:
transition_matrix[each_tag] = copy.deepcopy(temp_dict)
for k in range(len(data_set_tags)):
for j in range(1,len(data_set_tags[k])):
transition_matrix[data_set_tags[k][j-1]][data_set_tags[k][j]] +=1
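    # Add-one (Laplace) smoothing: every transition cell is incremented so
    # unseen tag bigrams keep a small nonzero probability; the rows are then
    # normalized into proper conditional distributions P(tag_j | tag_i).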
for each_main_key in transition_matrix:
for each_sub_key in transition_matrix[each_main_key]:
transition_matrix[each_main_key][each_sub_key] +=1
for each_main_key in transition_matrix:
norm_value = 0
for each_sub_key in transition_matrix[each_main_key]:
norm_value += transition_matrix[each_main_key][each_sub_key]
for each_sub_key in transition_matrix[each_main_key]:
transition_matrix[each_main_key][each_sub_key] /= float(norm_value)
def build_emission():
global emission_matrix
for each_tag in distinct_tags:
for each_word in vocab:
emission_matrix[each_tag][each_word] = 0
for i in range(len(data_set_tags)):
set_tags = data_set_tags[i]
set_sentence_split = data_set_words_split[i]
for j in range(1, len(set_tags)):
current_word = set_sentence_split[j]
current_tag = set_tags[j]
emission_matrix[current_tag][current_word] += 1
    for each_main_key in list(emission_matrix.keys())[1:]:
norm_constant = 0
for each_sub_key in emission_matrix[each_main_key].keys():
norm_constant += emission_matrix[each_main_key][each_sub_key]
for each_sub_key in emission_matrix[each_main_key].keys():
emission_matrix[each_main_key][each_sub_key] /= float(norm_constant)
def write_model():
f_new = open('hmmmodel.txt','w')
hmmmodel = {}
hmmmodel['transition'] = transition_matrix
hmmmodel['emission'] = emission_matrix
import json
json_obj = json.dumps(hmmmodel)
f_new.write(json_obj)
f_new.close()
if __name__ == '__main__':
import sys
file_name = sys.argv[1]
load_data(file_name)
build_transition()
build_emission()
write_model()
|
[
"suraj.jayakumar@gmail.com"
] |
suraj.jayakumar@gmail.com
|
a7f6231ec810cb96698d9d0fcdff8cd39bbea50f
|
4038c0d075b203c353a2c68eccfbf5666d9cb635
|
/q-3-1.py
|
e261080d9fd0e80cd2fb8d1b0162a204fedefd3d
|
[] |
no_license
|
dhawal777/LogisticKmeansHirerchical
|
fd2c94246bf8edc5417a8b30b637343bb4af1d13
|
a562ef7494b0762e88b5c52effb5ddb96f79491e
|
refs/heads/master
| 2021-10-19T03:20:21.112146
| 2019-02-17T06:47:05
| 2019-02-17T06:47:05
| 171,090,749
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,890
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[14]:
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
import sklearn as sk
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import classification_report, confusion_matrix ,accuracy_score
import copy
# In[15]:
df=pd.read_csv("wine-quality/data.csv")
# In[16]:
X =df.drop(['quality'],axis=1)
y=df['quality']
X = (X - X.mean())/X.std()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
intrain=X_train
intest=X_test
# In[17]:
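# Despite its name, grad() is the logistic sigmoid 1 / (1 + e^(-z)); it is
# used both inside gradient descent and to score classes at prediction time.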
def grad(thetaT):
return 1/(1+np.exp(-thetaT))
# In[18]:
my_data=pd.concat([X_train,y_train],axis=1)
X=X_train
ones = np.ones([X.shape[0],1])
X = np.concatenate((ones,X),axis=1)
y=pd.DataFrame(y_train)
y=y.values
# In[19]:
def gradientDescent(X,y,theta,iters,alpha):
for i in range(iters):
theta = theta - (alpha/len(X)) * np.sum(X * (grad(X @ theta.T) - y), axis=0)
return theta
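# Each iteration above applies the vectorized batch update
# theta <- theta - (alpha / m) * sum_i x_i * (sigmoid(x_i . theta) - y_i),
# the standard logistic-regression gradient step written with numpy
# broadcasting instead of an explicit transpose.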
# *Considering one class at a time (treating all other labels as zero) and applying gradient descent to get a theta for it, i.e. a one-vs-rest scheme*
# In[20]:
betaList=[]
count=[]
for j in range(0,11):
y_temp=[]
X1=copy.deepcopy(X)
    r = 0
for i in range(len(y)):
if y[i]==j:
r=r+1
y_temp.append(1)
else:
y_temp.append(0)
y_temp1=pd.DataFrame(y_temp)
y_temp2=y_temp1.values
theta = np.zeros([1,12])
alpha = 0.01
iters = 1000
count.append(r)
g = gradientDescent(X1,y_temp2,theta,iters,alpha)
betaList.append(g[0])
# *Scoring each row against every class's theta; the class whose theta yields the maximum sigmoid value (interpreted as a probability, not the raw score) becomes the predicted class for that row*
# In[21]:
y_pred=[]
for index,row in X_test.iterrows():
max1=0
row=list(row)
class1=0
for i in range(0,11):
y1=0
for j in range(1,12):
y1=y1+betaList[i][j]*row[j-1]
y1=y1+betaList[i][0]
y1=grad(y1)
if(y1>=max1):
max1=y1
class1=i
y_pred.append(class1)
# In[22]:
print(confusion_matrix(y_test, y_pred))
print("Accuracy: ",accuracy_score(y_test,y_pred))
print((y_test==y_pred).mean())
# In[23]:
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
model = LogisticRegression(solver = 'lbfgs',multi_class='multinomial',max_iter=10000)
intrain1 = StandardScaler().fit_transform(intrain)
# In[24]:
model.fit(intrain1, y_train)
intest1=StandardScaler().fit_transform(intest)
y_pred = model.predict(intest1)
# In[25]:
count_misclassified = (y_test != y_pred).sum()
# print('Misclassified samples: {}'.format(count_misclassified))
accuracy = accuracy_score(y_test, y_pred)
print('System Accuracy : {:.2f}'.format(accuracy))
|
[
"dhawalvrr@gmail.com"
] |
dhawalvrr@gmail.com
|
32af7006ba6b6f9f0a98bb7d13fb355eadaff6ae
|
e493798de1448c155e0299c2395a4592a3140887
|
/devices/devcmd.py
|
03b46167f99345e2dc5ccb7348263e314e2aa18b
|
[] |
no_license
|
sanyaade-iot/core-zerynth-toolchain
|
c7823bb63a70aac03fcf9705ef340b102e513fd8
|
53a72334c1a2bdfc268f101401d1da6b4cb04ff1
|
refs/heads/master
| 2021-04-06T01:17:34.463758
| 2018-02-20T15:49:57
| 2018-02-20T15:49:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,519
|
py
|
"""
.. _ztc-cmd-device:
*******
Devices
*******
In the ZTC a device is a peripheral that can execute Zerynth bytecode. In order to do so a device must be prepared and customized with certain attributes.
The main attributes of a device are:
* :samp:`alias`, a unique name given by the user to the device in order to identify it in ZTC commands
* :samp:`uid`, a unique id provided by the operative system identifying the device at hardware level
* :samp:`target`, specifies what kind of virtual machine can be run by the device
* :samp:`name`, a human readable name describing the device. Automatically set by the ZTC
* :samp:`chipid`, the unique identifier of the microcontroller present on the device
* :samp:`remote_id`, the unique identifier of the device in the pool of user registered devices
* :samp:`classname`, a Python class name identifying the class containing commands to configure the device
When a new device is connected, some steps must be taken in order to make it able to run Zerynth code:
1. The device must be :ref:`discovered <ztc-cmd-device-discover>`, namely its hardware parameters must be collected (:samp:`uid`).
2. Once discovered, an :samp:`alias` must be :ref:`assigned <ztc-cmd-device-alias_put>`. Depending on the type of device, :samp:`target` and :samp:`classname` can be assigned in the same step.
3. The device must be :ref:`registered <ztc-cmd-device-register>` in order to create virtual machines for it (:samp:`chipid` and :samp:`remote_id` are obtained in this step)
4. The device must be :ref:`virtualized <ztc-cmd-device-virtualize>`, namely a suited virtual machine must be loaded on the device microcontroller
List of device commands:
* :ref:`discover <ztc-cmd-device-discover>`
* :ref:`alias put <ztc-cmd-device-alias_put>`
* :ref:`register <ztc-cmd-device-register>`
* :ref:`virtualize <ztc-cmd-device-virtualize>`
* :ref:`supported <ztc-cmd-device-supported>`
* :ref:`open <ztc-cmd-device-open>`
The list of supported devices is available :ref:`here <doc-supported-boards>`
"""
from base import *
from .discover import *
import click
import re
import base64
_dsc = None
@cli.group(help="Manage devices.")
def device():
global _dsc
_dsc = Discover()
##### DEVICE ALIAS [PUT|DEL]
@device.group(help="Manage device configuration.")
def alias():
pass
@device.command(help="Discover connected devices.")
@click.option("--loop","loop",flag_value=True, default=False,help="Set continuous discover mode.")
@click.option("--looptime",default=2000,help="Set polling delay for discover")
@click.option("--matchdb","matchdb",flag_value=True, default=False,help="Match raw device data with device db.")
def discover(loop,looptime,matchdb):
"""
.. _ztc-cmd-device-discover:
Discover
--------
    Device discovery is performed by interrogating the operating system database for USB connected peripherals. Each peripheral returned by the system has at least the following "raw" attributes:
* :samp:`vid`, the USB vendor id
* :samp:`pid`, the USB product id
    * :samp:`sid`, the unique identifier assigned by the operating system, used to discriminate between multiple connected devices with the same :samp:`vid:pid`
* :samp:`port`, the virtual serial port used to communicate with the device, if present
* :samp:`disk`, the mount point of the device, if present
* :samp:`uid`, a unique identifier assigned by the ZTC
    * :samp:`desc`, the device description provided by the operating system (can differ between different platforms)
Raw peripheral data can be obtained by running: ::
ztc device discover
    .. note:: On Linux, peripheral data is obtained by calling into libudev functions. On Windows, the WMI interface is used. On Mac, calls to ioreg are used.
    Raw peripheral data is not very useful apart from checking the effective presence of a device. To obtain more useful data the option :option:`--matchdb` must be provided. This option adds another step of device discovery on top of the raw peripheral data, which is matched against the list of supported devices and the list of already known devices.
A :option:`--matchdb` discovery returns a different set of more high level information:
* :samp:`name`, the name of the device taken from the ZTC supported device list
* :samp:`alias`, the device alias (if set)
* :samp:`target`, the device target, specifying what kind of microcontroller and pcb routing is to be expected on the device
* :samp:`uid`, the device uid, same as raw peripheral data
* :samp:`chipid`, the unique identifier of the device microcontrolloer (if known)
* :samp:`remote_id`, the unique identifier of the device in the Zerynth backend (if set)
* :samp:`classname`, the Python class in charge of managing the device
    All the above information is needed to make a device usable in the ZTC. The information provided helps in distinguishing different devices with different behaviours. A device without an :samp:`alias` is a device that is not yet usable, therefore an alias must be :ref:`set <ztc-cmd-device-alias_put>`. A device without :samp:`chipid` and :samp:`remote_id` is a device that has not been :ref:`registered <ztc-cmd-device-register>` yet and can not be virtualized yet.
To complicate the matter, there are additional cases that can be spotted during discovery:
    1. A physical device can match multiple entries in the ZTC supported device list. This happens because often many different devices are built with the same serial USB chip and therefore they all appear as the same hardware to the operating system. Such devices are called "ambiguous" because the ZTC can not discriminate their :samp:`target`. For example, both the Mikroelektronika Flip&Click development board and the Arduino Due share the same microcontroller and the same USB to serial converter and they both appear as a raw peripheral with the same :samp:`vid:pid`. The only way for the ZTC to differentiate between them is to ask the user to set the device :samp:`target`. For ambiguous devices the :samp:`target` can be set while setting the :samp:`alias`. Once the :samp:`target` is set, the device is disambiguated and subsequent discovery will return only one device with the right :samp:`target`.
    2. A physical device can appear in two or more different configurations depending on its status. For example, the Particle Photon board has two different modes: the DFU mode in which the device can be flashed (and therefore virtualized) and a "normal" mode in which the device executes the firmware (and hence the Zerynth bytecode). The device appears as different raw peripherals in the two modes with different :samp:`vid:pid`. In such cases the two different devices will have the same :samp:`target` and, once registered, the same :samp:`chipid` and :samp:`remote_id`. They will appear to the Zerynth backend as a single device (same :samp:`remote_id`), but the ZTC device list will have two different devices with different :samp:`alias` and different :samp:`classname`. The :samp:`classname` for such devices can be set while setting the alias. In the case of the Particle Photon, the :samp:`classname` will be "PhotonDFU" for DFU mode and "Photon" for normal mode. PhotonDFU is the :samp:`alter_ego` of Photon in ZTC terminology.
    3. Some development boards do not have USB circuitry and can be programmed only through a JTAG or an external usb-to-serial converter. Such devices can not be discovered. To use them, the programmer device (JTAG or usb-to-serial) must be configured by setting :samp:`alias` and :samp:`target` to the ones of the development device.
    Finally, the :command:`discover` command can be run in continuous mode by specifying the option :option:`--loop`. With :option:`--loop` the command keeps printing the set of discovered devices each time it changes (i.e. a new device is plugged in or a connected device is unplugged). In some operating systems the continuous discovery is implemented by polling the operating system device database for changes. The polling time can be set with option :option:`--looptime milliseconds`, by default it is 2000 milliseconds.
"""
try:
_dsc.run(loop,looptime,matchdb)
except Exception as e:
warning("Exception while discovering devices:",str(e))
@alias.command("put", help="assign an unique alias to a device. \n\n Arguments: \n\n UID: device uid. \n\n ALIAS: device alias. \n\n TARGET: device target.")
@click.argument("uid")
@click.argument("alias")
@click.argument("target")
@click.option("--name",default=False,help="Set device name.")
@click.option("--chipid",default="")
@click.option("--remote_id",default="")
@click.option("--classname",default="",help="Set device classname.")
def alias_put(uid,alias,name,target,chipid,remote_id,classname):
"""
.. _ztc-cmd-device-alias_put:
Device configuration
--------------------
Before usage a device must be configured. The configuration consists in linking a physical device identified by its :samp:`uid` to a logical device identified by its :samp:`alias` and :samp:`target` attributes. Additional attributes can be optionally set.
The configuration command is: ::
ztc device alias put uid alias target
    where :samp:`uid` is the device hardware identifier (as reported by the discovery algorithm), :samp:`alias` is the user defined device name (no spaces allowed) and :samp:`target` is one of the :ref:`supported <ztc-cmd-device-supported>` device targets. A :samp:`target` specifies what kind of microcontroller, pin routing and additional peripherals can be found on the device. For example, the :samp:`target` for the NodeMCU2 development board is :samp:`nodemcu2`, which informs the ZTC that the configured device is a NodeMCU2, implying an esp8266 microcontroller, a certain pin routing and an onboard FTDI controller.
    There is no need to write the whole :samp:`uid` in the command; just a few initial characters suffice, as the list of known uids is scanned and compared to the given partial :samp:`uid` (this may fail if the given partial :samp:`uid` matches more than one uid).
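    For example, with a hypothetical device whose uid starts with :samp:`3f2a`: ::

        ztc device alias put 3f2a mynode nodemcu2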
Additional options can be given to set other device attributes:
* :option:`--name name` set the human readable device name to :samp:`name` (enclose in double quotes if the name contains spaces)
* :option:`--chipid chipid` used by external tools to set the device :samp:`chipid` manually
* :option:`--remote_id remote_id` used by external tools to set device :samp:`remote_id` manually
* :option:`--classname classname` used to set the device :samp:`classname` in case of ambiguity.
Aliases can be also removed from the known device list with the command: ::
ztc device alias del alias
"""
#if not re.match("^[A-Za-z0-9_:-]{4,}$",alias):
# fatal("Malformed alias")
devs = _dsc.run_one(True)
#print(devs)
uids=_dsc.matching_uids(devs, uid)
#print(uids)
if len(uids)<=0:
fatal("No devices with uid",uid)
else:
uid = uids[0]
dd = [dev for uu,dev in devs.items() if dev.uid==uid]
dd = dd[0]
if not classname and len(dd["classes"])>1:
fatal("Multiclass device! Must specify --classname option")
if not classname:
classname = dd["classes"][0].split(".")[1]
aliaskey = alias
aliases = env.get_dev(uid)
aliasuid = aliases[alias].uid if alias in aliases else None
if not _target_exists(target):
fatal("No such target",target)
###TODO to define chipid and remote_id if needed ... related option are not documented
deventry = {
"alias":alias,
"uid":uid,
"name": aliases[alias].name if not name and aliasuid!=None else "",
"target": target,
"chipid":chipid,
"remote_id":remote_id,
"classname":classname
}
env.put_dev(deventry)
@alias.command("del", help="Delete a device from the known device list. \n\n Arguments: \n\n ALIAS: The alias of the device to remove.")
@click.argument("alias")
def alias_del(alias):
env.del_dev(Var({"alias":alias}))
#TODO: remove
def _target_exists(target):
if not target: return False
for k,v in _dsc.device_cls.items():
if "target" in v and v["target"]==target:
return True
return False
@device.command(help="Register a new device. \n\n Arguments: \n\n ALIAS: device alias")
@click.argument("alias")
@click.option("--skip_burn",flag_value=True, default=False,help="bootloader is not flashed on the device (must be flashed manually!)")
def register(alias,skip_burn):
"""
.. _ztc-cmd-device-register:
Device Registration
-------------------
To obtain a virtual machine a device must be registered first. The registration process consists in flashing a registration firmware on the device, obtaining the microcontroller unique identifier and communicating it to the Zerynth backend.
    The process is almost completely automated; it may simply require the user to put the device in a mode compatible with burning firmware.
Device registration is performed by issuing the command: ::
ztc device register alias
where :samp:`alias` is the device alias previously set (or just the initial part of it).
The result of a correct registration is a device with the registration firmware on it, the device :samp:`chipid` and the device :samp:`remote_id`. Such attributes are automatically added to the device entry in the known device list.
    The option :option:`--skip_burn` avoids flashing the registration firmware onto the device (it must then be flashed manually!); it can be helpful in contexts where the device is not recognized correctly.
.. note:: Devices with multiple modes can be registered one at a time only!
"""
tgt = _dsc.search_for_device(alias)
if not tgt:
fatal("Can't find device",alias)
elif isinstance(tgt,list):
fatal("Ambiguous alias",[x.alias for x in tgt])
if not tgt.virtualizable:
fatal("Device is not virtualizable! Try to put it in a virtualizable mode...")
if tgt.virtualizable != tgt.classname:
fatal("Device must be put in virtualizable mode!")
# open register.vm
reg = fs.get_json(fs.path(tgt.path,"register.vm"))
info("Starting device registration")
# burn register.vm
if not skip_burn:
info("Burning bootloader...")
if isinstance(reg["bin"],str):
res,out = tgt.burn(bytearray(base64.standard_b64decode(reg["bin"])),info)
else:
res,out = tgt.burn([ base64.standard_b64decode(x) for x in reg["bin"]],info)
if not res:
fatal("Can't burn bootloader! -->",out)
else:
info("Skipping bootloader burning...")
alter_ego = None
if tgt.has_alter_ego:
alter_ego = tgt
clsname = tgt.has_alter_ego
uids,devs = _dsc.wait_for_classname(clsname)
if not uids:
fatal("Can't find this device alter ego!")
elif len(uids)>1:
fatal("Too many devices matching this device alter ego! Please unplug them all and retry...")
tgt = devs[uids[0]]
else:
# virtualizable device is the same as uplinkable device :)
# search for dev again and open serial
tgt = _dsc.find_again(tgt)
if not tgt:
fatal("Can't find device",alias)
if tgt.reset_after_register:
info("Please reset the device!")
if tgt.sw_reset_after_register is True:
tgt.reset()
conn = ConnectionInfo()
conn.set_serial(tgt.port,**tgt.connection)
ch = Channel(conn)
try:
ch.open(timeout=2)
except:
fatal("Can't open serial port!")
lines = []
for x in range(30):
line=ch.readline()
lines.append(line.strip("\n"))
ch.close()
cnt = [lines.count(x) for x in lines]
pos = cnt.index(max(cnt))
if pos>=0 and cnt[pos]>3 and len(lines[pos])>=8:
info("Found chipid:",lines[pos])
else:
fatal("Can't find chipid")
chipid=lines[pos]
# call api to register device
dinfo = {
"name": tgt.custom_name or tgt.name,
"on_chip_id": chipid,
"type": tgt.target,
"category": tgt.family_name
}
try:
res = zpost(url=env.api.devices, data=dinfo)
rj = res.json()
if rj["status"] == "success":
info("Device",tgt.custom_name or tgt.name,"registered with uid:", rj["data"]["uid"])
else:
fatal("Remote device registration failed with:", rj["message"])
except Exception as e:
critical("Error during remote registration",exc=e)
tgt = tgt.to_dict()
tgt["chipid"]=chipid
tgt["remote_id"]=rj["data"]["uid"]
env.put_dev(tgt,linked=tgt["sid"]=="no_sid")
if alter_ego:
alter_ego = alter_ego.to_dict()
alter_ego["chipid"]=chipid
alter_ego["remote_id"]=rj["data"]["uid"]
env.put_dev(alter_ego)
@device.command(help="Virtualize a device. \n\n Arguments: \n\n ALIAS: device alias. \n\n VMUID: Virtual Mahine identifier.")
@click.argument("alias")
@click.argument("vmuid")
def virtualize(alias,vmuid):
"""
.. _ztc-cmd-device-virtualize:
Virtualization
--------------
Device virtualization consists in flashing a Zerynth virtual machine on a registered device. One or more virtual machines for a device can be obtained with specific ZTC :ref:`commands <ztc-cmd-vm-create>`.
Virtualization is started by: ::
ztc device virtualize alias vmuid
where :samp:`alias` is the device alias and :samp:`vmuid` is the unique identifier of the chosen vm. :samp:`vmuid` can be typed partially, ZTC will try to match it against known identifiers. :samp:`vmuid` is obtained during virtual machine :ref:`creation <ztc-cmd-vm-create>`.
The virtualization process is automated, no user interaction is required.
"""
tgt = _dsc.search_for_device(alias)
if not tgt:
fatal("Can't find device",alias)
elif isinstance(tgt,list):
fatal("Ambiguous alias",[x.alias for x in tgt])
if tgt.virtualizable!=tgt.classname:
fatal("Device not virtualizable")
vms=tools.get_vms(tgt.target)
if vmuid not in vms:
vuids = []
for vuid in vms:
if vuid.startswith(vmuid):
vuids.append(vuid)
if len(vuids)==1:
vmuid=vuids[0]
elif len(vuids)>1:
fatal("Ambiguous VM uid",vuids)
else:
fatal("VM",vmuid,"does not exist")
vm = fs.get_json(vms[vmuid])
info("Starting Virtualization...")
if isinstance(vm["bin"],str):
res,out = tgt.burn(bytearray(base64.standard_b64decode(vm["bin"])),info)
else:
res,out = tgt.burn([ base64.standard_b64decode(x) for x in vm["bin"]],info)
if not res:
fatal("Error in virtualization",out)
else:
info("Virtualization Ok")
@device.command(help="Open device serial. \n\n Arguments: \n\n ALIAS: device alias.")
@click.argument("alias")
@click.option("--echo","__echo",flag_value=True, default=False,help="print typed characters to stdin")
@click.option("--baud","__baud", default=0,type=int,help="open with a specific baudrate")
def open(alias,__echo,__baud):
"""
.. _ztc-cmd-device-open:
Serial Console
--------------
Each virtual machine provides a default serial port where the output of the program is printed. Such port can be opened in full duplex mode allowing bidirectional communication between the device and the terminal.
The command: ::
ztc device open alias
tries to open the default serial port with the correct parameters for the device. Output from the device is printed to stdout while stdin is redirected to the serial port. Adding the option :option:`--echo` to the command echoes back the characters from stdin to stdout.
"""
tgt = _dsc.search_for_device(alias)
if not tgt:
fatal("Can't find device",alias)
elif isinstance(tgt,list):
fatal("Ambiguous alias",[x.alias for x in tgt])
conn = ConnectionInfo()
if __baud:
tgt.connection["baudrate"]=__baud
conn.set_serial(tgt.port,**tgt.connection)
ch = Channel(conn,__echo)
ch.open()
ch.run()
# import serial
# ser = serial.Serial(tgt.port,115200)
# while True:
# data = ser.read()
# log(data.decode("ascii","replace"),sep="",end="")
# #print(,sep="",end="")
@device.command(help="List of supported devices.")
@click.option("--type",default="board",type=click.Choice(["board","jtag","usbtoserial"]),help="type of device [board, jtag,usbtoserial]")
def supported(type):
"""
.. _ztc-cmd-device-supported:
Supported Devices
-----------------
    Different versions of the ZTC may have a different set of supported devices. To list the devices supported by the current installation, type: ::
ztc device supported
and a table of :samp:`target` names and paths to device support packages will be printed.
Supported devices can be filtered by type with the :option:`--type type` option where :samp:`type` can be one of:
* :samp:`board` for development boards
* :samp:`jtag` for JTAG tools
* :samp:`usbtoserial` for USB to Serial converters
"""
table = []
for k,v in _dsc.device_cls.items():
if v["type"]==type:
if env.human:
table.append([v["target"],v["path"]])
else:
log_json({
"target":v["target"],
"path":v["path"]
})
if env.human:
log_table(table,headers=["Target","Path"])
|
[
"g.baldi@zerynth.com"
] |
g.baldi@zerynth.com
|
b08ed496f35ec6171b123e2018f270299ebd55fd
|
4eb1f72e04ed31fef5bbf2623ad1a2ecd03aa0ad
|
/migrations/versions/1530db5e0641_.py
|
1005050fcf7bf13410793d7540cafc7ecdd58c7d
|
[] |
no_license
|
tiennt-no1/flask_lab
|
8f73515fee0d08a8e94aff83a9af003fb1c9d8a6
|
8775b32194ace24637ec1454f72eceeec88e2ffe
|
refs/heads/master
| 2023-03-09T01:18:02.547261
| 2021-03-03T17:40:05
| 2021-03-03T17:40:05
| 342,493,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
"""empty message
Revision ID: 1530db5e0641
Revises: 3b0df184a708
Create Date: 2021-03-03 23:15:24.214261
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1530db5e0641'
down_revision = '3b0df184a708'
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
pass
|
[
"tiennt.june.20@gmail.com"
] |
tiennt.june.20@gmail.com
|
2e11929c3c04682e85b99c70e0c443440673a539
|
057149a24c55be96e02ae76ab4e59fc916f720a3
|
/backend/today.py
|
e0ef8edec772776006a22b05ca56d66356c65261
|
[] |
no_license
|
nikkird/personal-desktop-voice-assistant
|
f56cd9a80b2aaec744b63fe44a014120ce2aa1a1
|
8d3d010404e47ac0fb91a56f4b084d8a096cb14e
|
refs/heads/main
| 2023-08-13T21:40:40.691335
| 2021-10-05T13:22:32
| 2021-10-05T13:22:32
| 401,955,102
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 888
|
py
|
import random
from datetime import datetime
import holidays
from backend.tts import Speak
def call_time():
strTime = datetime.now().strftime("%H:%M:%S")
Speak(strTime)
def call_day():
strTime = datetime.now().strftime("%A")
print("in today.py:" + strTime)
Speak(strTime)
def date_today():
strTime = datetime.now().strftime("%B %d %Y")
Speak(strTime)
def call_month():
strTime = datetime.now().strftime("%B")
Speak(strTime)
is_not_holiday = ["Today is not a holiday", "There is no holiday today", "There is no occasion today"]
def is_holiday():
# getting India holidays
india_holidays = holidays.India()
    # use a date object directly; a "%d-%m-%Y" string is parsed ambiguously
    date = datetime.now().date()
if date in india_holidays:
Speak(f"today is {india_holidays.get(date)}")
else:
Speak(random.choice(is_not_holiday))
|
[
"noreply@github.com"
] |
nikkird.noreply@github.com
|
20f6fd6a8ac0a73f028f205121af5d9d93420d81
|
ecab535afdf192df992e1148b2b5230e5385d36b
|
/website/bookshelf/urls.py
|
fec00c0b48ec7dd7f67a33a2acc66720ea14146d
|
[] |
no_license
|
Shalmali1271/Bookshelf
|
30caf635da3656d8bb8360aa43a161b4662be502
|
d0faed136fc0c99aae5bbe41c73253efe66c892c
|
refs/heads/main
| 2023-06-25T07:15:02.945947
| 2021-07-11T07:18:53
| 2021-07-11T07:18:53
| 384,760,249
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
from django.urls import path
from .views import CreateBook, BookList, BookDetailview
urlpatterns = [
path('create-book/', CreateBook.as_view(), name = "create-book"),
path('list-book/', BookList.as_view(), name = "list-book"),
path('detail-book/<int:pk>/', BookDetailview.as_view(), name = "detail-book"),
]
|
[
"59409625+Shalmali1271@users.noreply.github.com"
] |
59409625+Shalmali1271@users.noreply.github.com
|
78958c12ca78facf308e9b73852c1197af318fac
|
33fcda8821796595dc510f7157664db5b30c4310
|
/vim/vimfiles/plugged/jupyter-vim/pythonx/jupyter_vim.py
|
7177500b38a75769330b48d0ef5505b511f42fa2
|
[
"MIT"
] |
permissive
|
ChrisGVE/win_config
|
d41f38be5042917583f9f299c199b4af90531fb6
|
f2b3afec14a52b1cfded42c8385fdafc5e12cb8c
|
refs/heads/main
| 2023-03-04T17:12:35.993949
| 2021-02-15T14:24:18
| 2021-02-15T14:24:18
| 338,777,353
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,217
|
py
|
##############################################################################
# File: pythonx/jupyter_vim.py
# Created: 07/28/11 22:14:58
# Author: Paul Ivanov (http://pirsquared.org)
# Updated: [11/13/2017] Marijn van Vliet
# Updated: [02/14/2018, 12:31] Bernie Roesler
# Updated: [15/12/2019] Tinmarino
#
# Description:
# Python code for ftplugin/python/jupyter.vim.
##############################################################################
"""
Jupyter-Vim interface; permits sending code to a jupyter kernel from a vim client
Install:
You *must* install the jupyter package into the
Python that your vim is linked against. If you are seeing this message, this
usually means one of the following:
(1) configuring vim to automatically load a virtualenv that has Jupyter
installed and whose Python interpreter is the same version that your
vim is compiled against
(2) installing Jupyter using the system Python that vim is using, or
(3) recompiling Vim against the Python where you already have Jupyter
installed.
This is only a requirement to allow Vim to speak with a Jupyter kernel using
Jupyter's own machinery. It does *not* mean that the Jupyter instance with
which you communicate via jupyter-vim needs to be running the same version of
Python.
"""
try:
# pylint: disable=unused-import
import jupyter # noqa
except ImportError as e:
raise ImportError("Could not import jupyter.\n(The original ImportError: {})\n{}"
.format(e, __doc__))
try:
import vim
except ImportError as e:
raise ImportError('vim module only available within vim! The original ImportError: ' + str(e))
# Standard
import functools
from os import kill, remove
from os.path import splitext
from platform import system
import signal
from jupyter_client import find_connection_file
# Local
from jupyter_util import str_to_py, echom, is_integer
from language import get_language
from message_parser import VimMessenger, JupyterMessenger, Sync
from monitor_console import Monitor, monitor_decorator
# TODO
# * Rename `Monitor` -> 'JupyterMonitor`
# * Rename `Sync` -> 'JupyterSync`
# * docstrings!!
class JupyterVimSession():
"""Object containing jupyter <-> vim session info.
This object is created in lieu of individual functions so that a single vim
session can connect to multiple Jupyter kernels at once. Each connection
gets a new JupyterVimSession object.
Attributes
----------
sync : :obj:`Sync`
Object to support asynchronous operations.
kernel_client : :obj:`JupyterMessenger`
Object to handle primitive messaging between vim and the jupyter kernel.
vim_client : :obj:`VimMessenger`
Object to handle messaging between python and vim.
monitor : :obj:`Monitor`
Jupyter kernel monitor buffer and message line.
lang : :obj:`Language`
User-defined Language object corresponding to desired kernel type.
"""
def __init__(self):
self.sync = Sync()
self.kernel_client = JupyterMessenger(self.sync)
self.vim_client = VimMessenger(self.sync)
self.monitor = Monitor(self)
self.lang = get_language('')
def if_connected(fct):
"""Decorator, fail if not connected."""
# pylint: disable=no-self-argument, not-callable, no-member
@functools.wraps(fct)
def wrapper(self, *args, **kwargs):
if not self.kernel_client.check_connection_or_warn():
echom(f"Pythonx _jupyter_session.{fct.__name__}() needs a connected client",
style='Error')
return None
return fct(self, *args, **kwargs)
return wrapper
def connect_to_kernel(self, kernel_type, filename=''):
"""Establish a connection with the specified kernel type.
.. note:: vim command `:JupyterConnect`
Parameters
----------
kernel_type : str
Type of kernel, i.e. `python3` with which to connect.
filename : str, optional, default=''
Specific kernel connection filename, i.e.
``$(jupyter --runtime)/kernel-123.json``
"""
self.kernel_client.kernel_info['kernel_type'] = kernel_type
self.kernel_client.kernel_info['cfile_user'] = filename
self.lang = get_language(kernel_type)
# Create thread
self.sync.start_thread(target=self.thread_connect_to_kernel)
# Launch timers: update echom
for sleep_ms in self.vim_client.get_timer_intervals():
vim_cmd = ('let timer = timer_start(' + str(sleep_ms) +
', "jupyter#UpdateEchom")')
vim.command(vim_cmd)
@if_connected
def disconnect_from_kernel(self):
"""Disconnect from the kernel client (Sync).
.. note:: vim command `:JupyterDisconnect`.
"""
self.kernel_client.disconnnect()
echom(f"Disconnected: {self.kernel_client.kernel_info['id']}", style='Directory')
@if_connected
def signal_kernel(self, sig=signal.SIGTERM):
"""Send a signal to the remote kernel via the kill command.
        This command sidesteps the non-functional jupyter interrupt.
Only works on posix.
.. note:: vim command `:JupyterTerminateKernel`
Parameters
----------
sig : :obj:`signal`, optional, default=signal.SIGTERM
Signal to send to the kernel.
"""
# Clause: valid signal
if isinstance(sig, str):
try:
sig = getattr(signal, sig)
except Exception as e:
echom(f"Cannot send signal {sig} on this OS: {e}", style='Error')
return
# Clause: valid pid
pid = self.kernel_client.kernel_info['pid']
if not is_integer(pid):
echom(f"Cannot kill kernel: pid is not a number {pid}", style='Error')
return
pid = int(pid)
if pid < 1:
echom(f"Cannot kill kernel: unknown pid retrieved {pid}", style='Error')
return
# Kill process
try:
kill(pid, int(sig))
echom("kill pid {p:d} with signal #{v:d}, {n:s}"
.format(p=pid, v=sig.value, n=sig.name), style='WarningMsg')
except ProcessLookupError:
echom(("pid {p:d} does not exist! " +
"Kernel may have been terminated by outside process")
.format(p=pid, style='Error'))
except OSError as err:
echom("signal #{v:d}, {n:s} failed to kill pid {p:d}"
.format(v=sig.value, n=sig.name, p=pid), style='Error')
raise err
# Delete connection file
sig_list = [signal.SIGTERM]
if system() != 'Windows':
sig_list.append(signal.SIGKILL)
if sig in sig_list:
try:
remove(self.kernel_client.cfile)
except OSError:
pass
@if_connected
def run_file(self, flags='', filename=''):
"""Run an entire file in the kernel.
.. note:: vim command `:JupyterRunFile`.
Parameters
----------
flags : str, optional, default=''
Flags to pass with language-specific `run` command.
filename : str, optional, default=''
Specific filename to run.
"""
# Special cpython cases
if self.kernel_client.kernel_info['kernel_type'] == 'python':
return self.run_file_in_ipython(flags=flags, filename=filename)
# Message warning to user
if flags != '':
            echom('RunFile in kernels other than "python" does not support flags.'
' All arguments except the file location will be ignored.',
style='Error')
# Get command and read file if not implemented
cmd_run = self.lang.run_file.format(filename)
if cmd_run == '-1':
with open(filename, 'r') as file_run:
cmd_run = file_run.read()
# Run it
return self.run_command(cmd_run)
# -----------------------------------------------------------------------------
# Thread Functions: vim function forbidden here:
# could lead to segmentation fault
# -----------------------------------------------------------------------------
def thread_connect_to_kernel(self):
"""Create kernel manager from existing connection file (Async)."""
if self.sync.check_stop():
return
# Check if connection is alive
connected = self.kernel_client.check_connection()
# Try to connect
MAX_ATTEMPTS = 3
for attempt in range(MAX_ATTEMPTS):
# NOTE if user tries to :JupyterConnect <new_pid>, this check will ignore
# the requested new pid.
if connected:
break
# Check if thread want to return
if self.sync.check_stop():
return
# Find connection file
try:
self.kernel_client.cfile = find_connection_file(
filename=self.kernel_client.kernel_info['cfile_user'])
except IOError:
self.vim_client.thread_echom(
"kernel connection attempt {:d}/{:d} failed - no kernel file"
.format(attempt, MAX_ATTEMPTS), style="Error")
continue
# Connect
connected = self.kernel_client.create_kernel_manager()
# Early return if failed
if not connected:
self.kernel_client.disconnnect()
self.vim_client.thread_echom('kernel connection attempt timed out', style='Error')
return
# Pre-message the user
self.vim_client.thread_echom('Connected! ', style='Question')
# Collect and echom kernel info
self.vim_client.thread_echom_kernel_info(self.kernel_client.get_kernel_info(self.lang))
# TODO only if verbose
# Print vim connected -> client
# cmd_hi = self.lang.print_string.format(self.vim_client.string_hi())
# self.kernel_client.send(cmd_hi)
# -----------------------------------------------------------------------------
# Communicate with Kernel
# -----------------------------------------------------------------------------
@if_connected
def update_monitor_msgs(self):
"""Update monitor buffer if present"""
self.monitor.update_msgs()
@if_connected
@monitor_decorator
def change_directory(self, directory):
"""Change current working directory in kernel.
.. note:: vim command `:JupyterCd`.
Parameters
----------
directory : str
Directory into which to change.
"""
msg = self.lang.cd.format(directory)
msg_id = self.kernel_client.send(msg)
# Print cwd
try:
cwd = self.kernel_client.send_code_and_get_reply(self.lang.cwd)
echom('CWD: ', style='Question')
vim.command("echon \"{}\"".format(cwd))
except Exception:
pass
# Return to decorators
return (msg, msg_id)
@if_connected
@monitor_decorator
def run_command(self, cmd):
"""Send a single command to the kernel.
.. note:: vim command `:JupyterSendCode`.
Parameters
----------
cmd : str
Lines of code to send to the kernel.
"""
self.kernel_client.update_meta_messages()
msg_id = self.kernel_client.send(cmd)
return (cmd, msg_id)
@if_connected
@monitor_decorator
def run_file_in_ipython(self, flags='', filename=''):
"""Run a given python file using ipython's %run magic.
.. note:: vim command `:JupyterRunFile`.
Parameters
----------
flags : str, optional, default=''
Flags to pass with language-specific `run` command.
filename : str, optional, default=''
Specific filename to run.
"""
ext = splitext(filename)[-1][1:]
if ext in ('pxd', 'pxi', 'pyx', 'pyxbld'):
run_cmd = '%run_cython'
params = str_to_py(vim.vars.get('cython_run_flags', ''))
else:
run_cmd = '%run'
params = flags or str_to_py(vim.current.buffer.vars['ipython_run_flags'])
cmd = '{run_cmd} {params} "{filename}"'.format(
run_cmd=run_cmd, params=params, filename=filename)
msg_id = self.run_command(cmd)
return (cmd, msg_id)
@if_connected
@monitor_decorator
def send_range(self):
"""Send a range of lines from the current vim buffer to the kernel.
.. note:: vim command `:JupyterSendRange`.
"""
rang = vim.current.range
lines = "\n".join(vim.current.buffer[rang.start:rang.end+1])
msg_id = self.run_command(lines)
prompt = "range {:d}-{:d} ".format(rang.start+1, rang.end+1)
return (prompt, msg_id)
@if_connected
@monitor_decorator
def run_cell(self):
"""Run all the code between two cell separators.
.. note:: vim command `:JupyterSendCell`.
"""
# Get line and buffer and cellseparators
cur_buf = vim.current.buffer
cur_line = vim.current.window.cursor[0] - 1
self.vim_client.set_cell_separators()
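        # Illustration (assuming separators such as "# %%" are configured):
        # the cell is the span of buffer lines between the nearest separator
        # above the cursor and the nearest one below it, found by the two
        # scans that follow.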
# Search upwards for cell separator
upper_bound = cur_line
while upper_bound > 0 and not self.vim_client.is_cell_separator(cur_buf[upper_bound]):
upper_bound -= 1
# Skip past the first cell separator if it exists
if self.vim_client.is_cell_separator(cur_buf[upper_bound]):
upper_bound += 1
# Search downwards for cell separator
lower_bound = min(upper_bound+1, len(cur_buf)-1)
while lower_bound < len(cur_buf)-1 and \
not self.vim_client.is_cell_separator(cur_buf[lower_bound]):
lower_bound += 1
# Move before the last cell separator if it exists
if self.vim_client.is_cell_separator(cur_buf[lower_bound]):
lower_bound -= 1
# Make sure bounds are within buffer limits
upper_bound = max(0, min(upper_bound, len(cur_buf)-1))
lower_bound = max(0, min(lower_bound, len(cur_buf)-1))
# Make sure of proper ordering of bounds
lower_bound = max(upper_bound, lower_bound)
# Execute cell
lines = "\n".join(cur_buf[upper_bound:lower_bound+1])
msg_id = self.run_command(lines)
prompt = "execute lines {:d}-{:d} ".format(upper_bound+1, lower_bound+1)
return (prompt, msg_id)
|
[
"christian.berclaz@mac.com"
] |
christian.berclaz@mac.com
|
7556c744dc5c73ce75e5506f1c680c0c6067a4be
|
f40a4c0a9acbcd659a7c528ebe5f78494515a75b
|
/Python studying/Codes of examples/2.2-boolean_expression.py
|
3bd40869bb89f8f58aa816c5042c6c5891759acf
|
[
"Apache-2.0"
] |
permissive
|
BoyangSheng/Skill-studying
|
834d5b5b4c4ab0aea21a4f0e2abea22039b961b8
|
974c37365fff72e2c7b1e27ae52cb267c7070c9e
|
refs/heads/main
| 2023-04-09T11:11:47.856727
| 2021-04-19T06:54:41
| 2021-04-19T06:54:41
| 318,980,279
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 119
|
py
|
x = 6
y = x in [3,5,8,9]
print(y)
x = 6
y = (x == 3) or (x == 5) or (x == 8) or (x == 9)
print(y)
a = 's' in 'science'
print(a)
|
[
"summernights@stu.xjtu.edu.cn"
] |
summernights@stu.xjtu.edu.cn
|
fadcb0ce44b6124de4c47191551238a9847ac862
|
bac99fd938258ad068851b7c90e6ef81f8feb3e9
|
/setup.py
|
82694131e4feeeb21d979ac7a1ce19e9729cf6a5
|
[] |
no_license
|
yunstanford/attrs-schema
|
9e0ef87302fd62c84d7ed9ade98799eed493545f
|
0b423231429af15856e8f1f10612eef16da5044a
|
refs/heads/master
| 2021-06-21T03:28:14.783336
| 2017-06-27T15:12:02
| 2017-06-27T15:12:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,475
|
py
|
#!/usr/bin/env python
import os
import sys
from setuptools import setup, find_packages
is_release = False
if "--release" in sys.argv:
is_release = True
sys.argv.remove("--release")
base = os.path.dirname(os.path.abspath(__file__))
README_PATH = os.path.join(base, "README.rst")
install_requires = ['attrs']
tests_require = []
setup(name='attrs-schema',
setup_requires=["vcver"],
vcver={
"is_release": is_release,
"path": base
},
description=(
"a set of utilities to use attrs as a schema library."
),
long_description=open(README_PATH).read(),
author='Yusuke Tsutsumi',
author_email='yusuke@tsutsumi.io',
url='https://github.com/toumorokoshi/attrs-jsonschema',
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Operating System :: MacOS',
'Operating System :: POSIX :: Linux',
'Topic :: System :: Software Distribution',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
tests_require=tests_require
)
|
[
"yusuke@tsutsumi.io"
] |
yusuke@tsutsumi.io
|
b0f65270883773f3df80bcab170272bced29458e
|
409ed6910b4554b6fb5c4aeed472004cf26d3c4e
|
/Sisonke Gaming/Inyoka noga.py
|
bdbe0804a90e59fc59b35a5d027b2970acdf20f7
|
[] |
no_license
|
vuyisile/Sisonke-Gaming
|
13165dffbcf4623cb8a51bee16fb1189c75d37c6
|
dcf1fa4659fd3c0205843c1f986f6dfb0562d8d4
|
refs/heads/master
| 2022-09-01T23:57:35.234981
| 2016-02-06T08:47:51
| 2016-02-06T08:47:51
| 51,194,765
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,122
|
py
|
# Wormy (a Nibbles clone)
# By Al Sweigart al@inventwithpython.com
# http://inventwithpython.com/pygame
# Released under a "Simplified BSD" license
#KRT 14/06/2012 modified Start Screen and Game Over screen to cope with mouse events
#KRT 14/06/2012 Added a non-busy wait to Game Over screen to reduce processor loading from near 100%
import random, pygame, sys
from pygame.locals import *
FPS = 15
WINDOWWIDTH = 1200
WINDOWHEIGHT = 800
CELLSIZE = 20
assert WINDOWWIDTH % CELLSIZE == 0, "Window width must be a multiple of cell size."
assert WINDOWHEIGHT % CELLSIZE == 0, "Window height must be a multiple of cell size."
CELLWIDTH = int(WINDOWWIDTH / CELLSIZE)
CELLHEIGHT = int(WINDOWHEIGHT / CELLSIZE)
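# Worked example with the constants above: CELLWIDTH = 1200 / 20 = 60 cells
# and CELLHEIGHT = 800 / 20 = 40 cells, so the playing grid is 60 x 40.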
# R G B
WHITE = (255, 255, 255)
BLACK = ( 0, 0, 0)
RED = (255, 0, 0)
GREEN = ( 211, 221, 0)
DARKGREEN = ( 0, 155, 0)
DARKGRAY = ( 40, 40, 40)
BGCOLOR = BLACK
UP = 'up'
DOWN = 'down'
LEFT = 'left'
RIGHT = 'right'
HEAD = 0 # syntactic sugar: index of the worm's head
def main():
global FPSCLOCK, DISPLAYSURF, BASICFONT
pygame.init()
FPSCLOCK = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
BASICFONT = pygame.font.Font('freesansbold.ttf', 18)
pygame.display.set_caption('Wormy')
showStartScreen()
while True:
runGame()
showGameOverScreen()
def runGame():
# Set a random start point.
startx = random.randint(5, CELLWIDTH - 6)
starty = random.randint(5, CELLHEIGHT - 6)
wormCoords = [{'x': startx, 'y': starty},
{'x': startx - 1, 'y': starty},
{'x': startx - 2, 'y': starty}]
direction = RIGHT
# Start the apple in a random place.
apple = getRandomLocation()
while True: # main game loop
for event in pygame.event.get(): # event handling loop
if event.type == QUIT:
terminate()
elif event.type == KEYDOWN:
if (event.key == K_LEFT or event.key == K_a) and direction != RIGHT:
direction = LEFT
elif (event.key == K_RIGHT or event.key == K_d) and direction != LEFT:
direction = RIGHT
elif (event.key == K_UP or event.key == K_w) and direction != DOWN:
direction = UP
elif (event.key == K_DOWN or event.key == K_s) and direction != UP:
direction = DOWN
elif event.key == K_ESCAPE:
terminate()
# check if the worm has hit itself or the edge
if wormCoords[HEAD]['x'] == -1 or wormCoords[HEAD]['x'] == CELLWIDTH or wormCoords[HEAD]['y'] == -1 or wormCoords[HEAD]['y'] == CELLHEIGHT:
return # game over
for wormBody in wormCoords[1:]:
if wormBody['x'] == wormCoords[HEAD]['x'] and wormBody['y'] == wormCoords[HEAD]['y']:
return # game over
        # check if worm has eaten an apple
if wormCoords[HEAD]['x'] == apple['x'] and wormCoords[HEAD]['y'] == apple['y']:
# don't remove worm's tail segment
apple = getRandomLocation() # set a new apple somewhere
else:
del wormCoords[-1] # remove worm's tail segment
# move the worm by adding a segment in the direction it is moving
if direction == UP:
newHead = {'x': wormCoords[HEAD]['x'], 'y': wormCoords[HEAD]['y'] - 1}
elif direction == DOWN:
newHead = {'x': wormCoords[HEAD]['x'], 'y': wormCoords[HEAD]['y'] + 1}
elif direction == LEFT:
newHead = {'x': wormCoords[HEAD]['x'] - 1, 'y': wormCoords[HEAD]['y']}
elif direction == RIGHT:
newHead = {'x': wormCoords[HEAD]['x'] + 1, 'y': wormCoords[HEAD]['y']}
wormCoords.insert(0, newHead)
DISPLAYSURF.fill(BGCOLOR)
drawGrid()
drawWorm(wormCoords)
drawApple(apple)
drawScore(len(wormCoords) - 3)
pygame.display.update()
FPSCLOCK.tick(FPS)
def drawPressKeyMsg():
pressKeySurf = BASICFONT.render('Press a key to play.', True, DARKGRAY)
pressKeyRect = pressKeySurf.get_rect()
pressKeyRect.topleft = (WINDOWWIDTH - 200, WINDOWHEIGHT - 30)
DISPLAYSURF.blit(pressKeySurf, pressKeyRect)
# KRT 14/06/2012 rewrite event detection to deal with mouse use
def checkForKeyPress():
for event in pygame.event.get():
if event.type == QUIT: #event is quit
terminate()
elif event.type == KEYDOWN:
if event.key == K_ESCAPE: #event is escape key
terminate()
else:
return event.key #key found return with it
# no quit or key events in queue so return None
return None
def showStartScreen():
titleFont = pygame.font.Font('freesansbold.ttf', 100)
titleSurf1 = titleFont.render('inyoka!', True, WHITE, DARKGREEN)
titleSurf2 = titleFont.render('noga!', True, GREEN)
degrees1 = 0
degrees2 = 0
#KRT 14/06/2012 rewrite event detection to deal with mouse use
pygame.event.get() #clear out event queue
while True:
DISPLAYSURF.fill(BGCOLOR)
rotatedSurf1 = pygame.transform.rotate(titleSurf1, degrees1)
rotatedRect1 = rotatedSurf1.get_rect()
rotatedRect1.center = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2)
DISPLAYSURF.blit(rotatedSurf1, rotatedRect1)
rotatedSurf2 = pygame.transform.rotate(titleSurf2, degrees2)
rotatedRect2 = rotatedSurf2.get_rect()
rotatedRect2.center = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2)
DISPLAYSURF.blit(rotatedSurf2, rotatedRect2)
drawPressKeyMsg()
#KRT 14/06/2012 rewrite event detection to deal with mouse use
if checkForKeyPress():
return
pygame.display.update()
FPSCLOCK.tick(FPS)
degrees1 += 3 # rotate by 3 degrees each frame
degrees2 += 7 # rotate by 7 degrees each frame
def terminate():
pygame.quit()
sys.exit()
def getRandomLocation():
return {'x': random.randint(0, CELLWIDTH - 1), 'y': random.randint(0, CELLHEIGHT - 1)}
def showGameOverScreen():
gameOverFont = pygame.font.Font('freesansbold.ttf', 150)
gameSurf = gameOverFont.render('Game', True, WHITE)
overSurf = gameOverFont.render('Over', True, WHITE)
gameRect = gameSurf.get_rect()
overRect = overSurf.get_rect()
gameRect.midtop = (WINDOWWIDTH / 2, 10)
overRect.midtop = (WINDOWWIDTH / 2, gameRect.height + 10 + 25)
DISPLAYSURF.blit(gameSurf, gameRect)
DISPLAYSURF.blit(overSurf, overRect)
drawPressKeyMsg()
pygame.display.update()
pygame.time.wait(500)
#KRT 14/06/2012 rewrite event detection to deal with mouse use
pygame.event.get() #clear out event queue
while True:
if checkForKeyPress():
return
#KRT 12/06/2012 reduce processor loading in gameover screen.
pygame.time.wait(100)
def drawScore(score):
scoreSurf = BASICFONT.render('Score: %s' % (score), True, WHITE)
scoreRect = scoreSurf.get_rect()
scoreRect.topleft = (WINDOWWIDTH - 120, 10)
DISPLAYSURF.blit(scoreSurf, scoreRect)
def drawWorm(wormCoords):
for coord in wormCoords:
x = coord['x'] * CELLSIZE
y = coord['y'] * CELLSIZE
wormSegmentRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
pygame.draw.rect(DISPLAYSURF, DARKGREEN, wormSegmentRect)
wormInnerSegmentRect = pygame.Rect(x + 4, y + 4, CELLSIZE - 8, CELLSIZE - 8)
pygame.draw.rect(DISPLAYSURF, GREEN, wormInnerSegmentRect)
def drawApple(coord):
x = coord['x'] * CELLSIZE
y = coord['y'] * CELLSIZE
appleRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
pygame.draw.rect(DISPLAYSURF, RED, appleRect)
def drawGrid():
for x in range(0, WINDOWWIDTH, CELLSIZE): # draw vertical lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (x, 0), (x, WINDOWHEIGHT))
for y in range(0, WINDOWHEIGHT, CELLSIZE): # draw horizontal lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (0, y), (WINDOWWIDTH, y))
if __name__ == '__main__':
main()
|
[
"f.n.weni@gmail.com"
] |
f.n.weni@gmail.com
|
cd5cc7e02f3ce003640bef345ad599f7d7d2dd48
|
fa2d5ea9fd8eefdab0f1dd35b40e77600bdeb69f
|
/data/views.py
|
23a9215d663b22401043a2691120fa67c37795a7
|
[
"MIT"
] |
permissive
|
bobcosc/masteringmastery
|
ec81ccabf6edbfbc55ed068190c5fac0775161c3
|
a02d67441a1a7d5ff2a125e40197469145bef6db
|
refs/heads/master
| 2020-12-24T20:24:12.694958
| 2016-05-05T10:36:14
| 2016-05-05T10:36:14
| 57,078,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,178
|
py
|
from django.shortcuts import render
from data.models import Player, Champion, PlayerChampionMastery
from data.helper import *
from django.views.generic import TemplateView
import collections
from data.serializers import *
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import mixins
from rest_framework import generics
# Create your views here.
class IndexView (TemplateView):
template_name = "index.html"
def get_context_data(self, **kwargs):
context = super(IndexView, self).get_context_data(**kwargs)
#get_masters('na')
#for summoner in Player.objects.all():
# get_mastery_points(summoner.summoner_id, summoner.summoner_name, summoner.region)
#get_champions()
#sortedData = sorted(data,key=lambda riotplayer: riotplayer.sortPoints, reverse=True )
# context['summoners'] = Champion.objects.all()
#context['champions'] = Champion.objects.all().extra(order_by = ['champion_name'])
#data = {}
#for champion in Champion.objects.all():
# print(champion.champion_id)
# data[champion.champion_name.replace(" ", "").replace("'", "")] = PlayerChampionMastery.objects.filter(champion__champion_id=champion.champion_id).order_by('-points')[:5]
# champ = data[champion.champion_name.replace(" ", "").replace("'", "")]
# print(champ)
# ordered = collections.OrderedDict(sorted(data.items()))
# context['data'] = ordered
players = Player.objects.all()
serializer = PlayerSerializer(players, many=True)
context['json'] = serializer
return context
class PlayerList(generics.ListCreateAPIView):
queryset = Player.objects.all()
serializer_class = PlayerSerializer
class ChampionList(generics.ListCreateAPIView):
queryset = Champion.objects.all()
serializer_class = ChampionSerializer
class MasteryList(generics.ListCreateAPIView):
queryset = PlayerChampionMastery.objects.all()
serializer_class = PlayerChampionMasterySerializer
|
[
"bobsnewgit@gmail.com"
] |
bobsnewgit@gmail.com
|
c39a69512fefe44b27094bf002dede8450c57b79
|
38836427823c6b26bec3c70aa89982f2a20b70ec
|
/Awurama - userinput.py
|
560aaa00f7588fda7a21611941d73f20ec927e25
|
[] |
no_license
|
oawurama/Python-Assignment
|
3210912e6f1faabbf6aa4cd5354ee7dfc5ee3560
|
4d3b352e090ea9b56292b31893985388240e2f83
|
refs/heads/master
| 2021-03-01T09:52:16.972089
| 2020-03-08T08:11:08
| 2020-03-08T08:11:08
| 245,774,561
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
Firstname = input("Enter your first name: ")
Lastname = input("Enter your last name: ")
month = input("Which month were you born? ")
day = input("Which day were you born? ")
year = input("Which year were you born? ")
a = "was born on"
print("{} {} {} {} {} {}".format(Firstname, Lastname, a, month, day, year))
|
[
"oawurama94@gmail.com"
] |
oawurama94@gmail.com
|
e9df49caebaa296fae94bb091ddfc4025ed2252a
|
922f688710308a0f3498789467e87f31f8b40656
|
/100_nokku/chap07/60.py
|
5e436dbb9158a769c0276c9e598431ffac0a8a21
|
[] |
no_license
|
noritake41/100_nokku
|
f7905d3463a9baeaa90e6cf7b2daa639d6a95cdf
|
94d7b3ca95486be8d6c05a699712695c29f65068
|
refs/heads/master
| 2020-04-12T08:39:08.112037
| 2018-12-19T06:32:42
| 2018-12-19T06:32:42
| 162,391,396
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
# coding: utf-8
import gzip
import json
import leveldb
fname = 'artist.json.gz'
fname_db = 'test_db'
# Open the LevelDB database, creating it if it does not exist
db = leveldb.LevelDB(fname_db)
# Read and parse the gzipped JSON file line by line
with gzip.open(fname, 'rt') as data_file:
    for line in data_file:
        data_json = json.loads(line)
        # Add to the DB with key = name + id and value = area
        key = data_json['name'] + '\t' + str(data_json['id'])
        value = data_json.get('area', '')  # 'area' may be missing
        db.Put(key.encode(), value.encode())
# Print the number of registered entries as a sanity check
print('Registered {} entries.'.format(len(list(db.RangeIter(include_value=False)))))
|
[
"administrator@KM-S09.local"
] |
administrator@KM-S09.local
|
d6de3e62458bf8481eb0d81b0d7787b5841f7c06
|
9f4604a47472ab4fb3a521d86f2dd28d9067d96e
|
/Python/Livro Introdução à programação com Python/Capitulo3/exercicio 3.13.py
|
b5cdc338d641337d598f4483596eb5a737a95330
|
[] |
no_license
|
LuBonani/Projetos
|
4039f4f38113cbc5b29bdd08a324a9a4b4dae9cf
|
59ab35a9ea2e0a07d24e0640266be4e0ee21e0bd
|
refs/heads/master
| 2023-01-21T00:51:21.279387
| 2020-12-07T18:20:31
| 2020-12-07T18:20:31
| 286,602,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 110
|
py
|
c = int(input("Enter the temperature in Celsius: "))
f = ((9 * c) / 5) + 32
print("the temperature in °F is %d" % f)
|
[
"bonani.luciana@gmail.com"
] |
bonani.luciana@gmail.com
|
e31b80a8e3828892709f5407520a2915c3eb7e35
|
8a0c059415d8d10bca4084e3d766850f62e4403e
|
/leapYear.py
|
7ede68ed2004ac3dbca6cc52c2e8299447085d79
|
[] |
no_license
|
junanyeap/ncnu_1072_python_oldfish
|
087bc1beae73005b951cfeadbeb0c85396b9f89c
|
4e01adc0bac5c607dc92c3d0b3a75701a72dfbc3
|
refs/heads/master
| 2020-04-25T05:03:45.472003
| 2019-06-24T03:37:45
| 2019-06-24T03:37:45
| 172,530,496
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 823
|
py
|
# Student ID: 104213070
# Name: Junan Yeap
def leapYearJudge(year):
    # Apply the two checks below to the given year to decide whether it is a leap year
    # If either condition holds, it is a leap year
    # return True if so, False otherwise
    return (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0 and year % 3200 != 0)
def main():
    # [WRONG] declare a variable year to store the year the user enters
    # define and link year with input()
    # int() converts the input value from the default str type to int
    year = int(input("Please input a year to judge if leapyear : "))
    # Print the corresponding message based on the function's return value
    # print(leapYearJudge(year))
    if leapYearJudge(year):
        print("Pay raise!!")
    else:
        print("No pay raise QQ")
# Call main
main()
|
[
"junanyeap@gmail.com"
] |
junanyeap@gmail.com
|
3c87e5773d851cc122f29d4aa600b2547b1dad80
|
7a86aabeae1071c09573dde886d7d31b472cbd35
|
/intro_to_cs/pycharm/exercise1_default.py
|
dbc7088a1b194e973323594e99733c1431707b02
|
[] |
no_license
|
kkrugler/codecademy-validator
|
7453f1f82e6488aecb959af0f9d3a8b05eca2ee2
|
fe5749cfb12705e0c16f7060111dd4e9b4ffc9ba
|
refs/heads/master
| 2021-01-21T21:40:01.819302
| 2016-05-17T15:43:43
| 2016-05-17T15:43:43
| 29,620,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
# Replace the values in the assignment statements below with
# your answers to questions 1-20 (see the directions).
answer_1 = 0
answer_2 = 0
answer_3 = ''
answer_4 = ''
answer_5 = ''
answer_6 = ''
answer_7 = 0
answer_8_name = ''
answer_8_value = 0
answer_9_name = ''
answer_9_value = 0
answer_10 = 0
answer_11 = ''
answer_12 = ''
answer_13 = 0
answer_14_name = ''
answer_14_value = 0
answer_15 = 0
answer_16 = 0
answer_17 = ''
answer_18 = 0
answer_19 = ''
answer_20 = 0
|
[
"Schmed@TransPac.com"
] |
Schmed@TransPac.com
|
85913214afde9535603ff14a932b07d7558b026e
|
b5ac646f40aa42af35da50f49e71a688e1ed0096
|
/2_scrape_re.py
|
32fd722267b6ebcd46cec682b7a544c83fb77ab9
|
[] |
no_license
|
apotree/Web-crawling
|
66b7381ee48fb966e0b9fe25f0cc56434e27a5ca
|
fcb5ae27a59aff793d212d128a22c0a9c41f94b1
|
refs/heads/master
| 2020-11-26T13:26:02.193744
| 2020-02-04T06:07:27
| 2020-02-04T06:07:27
| 229,085,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
import re
from html import unescape
# Open the file downloaded in the previous section and store it in a variable named html
with open('dp.html') as f:
    html = f.read()
# Use re.findall() to extract the HTML fragment for each book
for partial_html in re.findall(r'<td class="left"><a.*?</td>', html, re.DOTALL):
    # Extract the book's URL
    url = re.search(r'<a href="(.*?)">', partial_html).group(1)
    url = 'http://www.hanbit.co.kr' + url
    # Strip the tags to extract the book's title.
    title = re.sub(r'<.*?>', '', partial_html)
    title = unescape(title)
print('url : ', url)
print('title : ', title)
print('----')
|
[
"noreply@github.com"
] |
apotree.noreply@github.com
|
528eee062ae34fe37c4faef7476ca86ea33b715a
|
bb32566f0c4688292b8f37d29630e0b7a18be24b
|
/work2017/leetcode/twosum.py
|
a1b66f92b5ab8c02b712271029e60ec2b150ce80
|
[] |
no_license
|
Chencheng78/python-learning
|
4c7dd3a5ad39ac2e96b9ff0a1b9aabb56e863f7f
|
63eb7ee9e547f899eafa698556f6adeb518795bb
|
refs/heads/master
| 2022-07-27T02:40:03.067820
| 2022-07-12T09:05:47
| 2022-07-12T09:05:47
| 55,224,905
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
class Solution(object):
def twoSum(self, nums, target):
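        # Pop items off the end of nums; if the popped item's complement is still
        # in nums, the answer is the complement's first index plus the popped
        # item's original index (which equals the shrunken length of nums).
        # The repeated 'in'/index scans make this O(n^2) in the worst case.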
        while nums:
i = nums.pop()
if target-i in nums:
return [nums.index(target-i), len(nums)]
if __name__ == '__main__':
a = Solution()
b = a.twoSum([2, 7, 11, 15], 26)
print b
|
[
"geniuscc7@163.com"
] |
geniuscc7@163.com
|
a24bae27bb6b0cff5762be20e58aa3f3f8f23c9c
|
f471614ec6bea8d3da056f19c47e1f85964102af
|
/mF6W.py
|
cb71a2f08b8e4dce44f6ada0e146709f408cffc6
|
[] |
no_license
|
mashmatt/techgym_ai
|
0e8d8075397b495fc4e4d122ba6b2b07f2788cbc
|
67e405a1427525ac766f56eeaac54238570791e6
|
refs/heads/master
| 2022-12-01T09:56:28.515789
| 2020-08-21T12:38:01
| 2020-08-21T12:38:01
| 262,057,198
| 0
| 0
| null | 2020-05-07T13:25:46
| 2020-05-07T13:25:45
| null |
SHIFT_JIS
|
Python
| false
| false
| 2,761
|
py
|
#Tech-Gym-13-16-Q
# Deep learning image classifier: CNN
# Handwritten digit data
### Hyperparameters ###
# activation function
# number of hidden layers, channels per hidden layer
# dropout rate
# learning rate (lr)
# optimizer ★
# loss function (loss)
# batch size (batch_size)
# number of epochs (epochs)
# Required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# %matplotlib inline  (IPython magic; only valid inside a notebook)
# keras
from keras.models import Sequential
from keras.utils.np_utils import to_categorical
from keras.layers import Dense, Activation
from keras import optimizers
# Performance evaluation
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
# MNIST data
from keras.datasets import mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Check array shapes
#print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
# Flatten to 1-D vectors and keep the first 6000 train / 1000 test samples
X_train = X_train.reshape(X_train.shape[0], 784)[:6000]
X_test = X_test.reshape(X_test.shape[0], 784)[:1000]
y_train = to_categorical(y_train)[:6000]
y_test = to_categorical(y_test)[:1000]
# Check array shapes
#print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
# Create the model instance
model = Sequential()
# 784 input units; the first dense layer outputs 256 units
model.add(Dense(256, input_dim=784))
model.add(Activation("sigmoid"))
# the second dense layer outputs 128 units
model.add(Dense(128))
model.add(Activation("relu"))
# the third dense layer (output layer) outputs 10 units
model.add(Dense(10))
model.add(Activation("softmax"))
# Vary the optimizer and observe how the accuracy changes
OPT_list = ["sgd", "adadelta", "adam", "adamax", "nadam"]  # assumed: compare the optimizers keyed in OPT_color (the loop needs a non-empty list)
OPT_color = {"sgd":"blue","adadelta":"red","adam":"orange","adamax":"green","nadam":"black"}
# empty list for collecting accuracies
acc = []
for OPT in OPT_list:
    # Compile the model
    model.compile(optimizer=OPT, loss="categorical_crossentropy", metrics=["accuracy"])
    # Train
    history = model.fit(X_train, y_train, verbose=0, epochs=10)
    # Evaluate
    score = model.evaluate(X_test, y_test, verbose=0)
    # Print if needed
    #print("evaluate loss: {0[0]}\nevaluate acc: {0[1]}".format(score))
    acc.append(score[1])
    # Plot acc per optimizer
    plt.plot(history.history["acc"], label="acc_"+OPT, ls="-", marker="o", color=OPT_color[OPT])
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(loc="best")
plt.show()
|
[
"noreply@github.com"
] |
mashmatt.noreply@github.com
|
1a512a86d440af4af8f017b2d2487561beb88245
|
f2cbd7496abb1eee9054ff709d159d52f70f22dc
|
/spectral_cluster_sol.py
|
96adf4c2bbdd40d655350073a6306bc485d4d788
|
[] |
no_license
|
fhung2/hophacksspring2018
|
a69c89d4a6911264bbc2967d972b7b80a39bd59a
|
4f18a956f66001b4ccdf8ab9d9995a6330aed131
|
refs/heads/master
| 2021-04-29T11:10:03.133719
| 2018-02-18T00:51:02
| 2018-02-18T00:51:02
| 121,825,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,503
|
py
|
from scipy.io import loadmat
from sklearn.cluster import SpectralClustering
import numpy as np
from sklearn.metrics.cluster import supervised
from scipy.optimize import linear_sum_assignment
data = loadmat('../digits/digits-train.mat')
# data = loadmat('../objects/objects-train.mat')
#fea_hog = np.array(data['fea_hog_train'])
fea_hog = np.array(data['fea_scat_train'])
labels = np.array(data['labels_train']).flatten()
print(labels.shape)
print(fea_hog.transpose().shape)
#test_gamma = [.5,.55,.6,.65,.7,.75,.8,.85,.9,.95,1]
test_gamma = [.1,.15,.2,.25,.3,.35,.4,.45]
#test = [1,2,3,4,5,6,7,8,9,10]
#for x in test_gamma:
spec_clust = SpectralClustering(n_clusters=5,affinity='nearest_neighbors',n_neighbors=3).fit(fea_hog.transpose())
#print(spec_clust.get_params())
labels_pred = np.array(spec_clust.labels_)
#print(labels_pred.shape)
labels, labels_pred = supervised.check_clusterings(labels, labels_pred)
# labels_true : int array with ground truth labels, shape = [n_samples]
# labels_pred : int array with estimated labels, shape = [n_samples]
value = supervised.contingency_matrix(labels, labels_pred)
# value : array of shape [n, n] whose (i, j)-th entry is the number of samples in true class i and in predicted class j
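# Hungarian assignment on the negated contingency matrix picks the cluster-to-class
# matching that maximizes the number of correctly grouped samples, giving the accuracy.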
[r, c] = linear_sum_assignment(-value)
accr = value[r, c].sum() / len(labels)
print(accr)
#print('gamma: ' + str(x) + ' accr: ' + str(accr))
# for x in range(0,20):
# print(kmeans.labels_[x])
# for x in range(0,10):
# print(data['fea_hog_train'][x])
|
[
"jrusso15@jhu.edu"
] |
jrusso15@jhu.edu
|
3961e6e425d4740afb32b711fe98adf73250d297
|
25fe650d23a4ea5c485b72c42434b64f5036fa82
|
/wrapper/pract.py
|
d549c87a71c34b399bd10915fef4a410b44e8895
|
[] |
no_license
|
kyu-chan/P_R
|
ba6cdb8bea0b262c4b151c73dd370d54b3b0e0ed
|
493e20a69a8830f8ed36c0029e873bb7b97c5cb8
|
refs/heads/master
| 2023-04-05T13:13:06.596611
| 2021-03-30T13:38:52
| 2021-03-30T13:38:52
| 352,969,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
import pandas as pd
import numpy as np
from wrapper import common
class Pract:
    def DDM(self, d, r, g):
        # Dividend discount (Gordon growth) model: price = dividend / (required return - growth)
        p = d / (r - g)
        return p
    def DCF(self, r, *cf):  # *cf collects an arbitrary number of cash-flow arguments
        n = 1  # year-end convention, so discounting starts at n = 1
        p = 0  # accumulator for the present value
        for c in cf:
            p = p + (c / (1 + r) ** n)
            n = n + 1
        return p
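# Illustrative usage (hypothetical numbers):
#   Pract().DDM(5, 0.08, 0.03)       # -> 100.0: a 5-unit dividend growing 3%, discounted at 8%
#   Pract().DCF(0.10, 100, 100, 100) # present value of three year-end cash flows of 100 at 10%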
|
[
"iopoh0203@gmail.com"
] |
iopoh0203@gmail.com
|
0fc52acc1436737d689acd9f26d8be0c207d3ec1
|
58d9951c8533132276b674b9653aedcaf1f2096d
|
/final_project_main.py
|
1423dfe0dad1e1c1d15c157323729f9e85838460
|
[] |
no_license
|
ajp619/IS602_Final_Project
|
d55a3b01fd702fb5ff5c2483f6ded105e77aa243
|
6a84572006d694d9cecc4378fff95142debeb4eb
|
refs/heads/master
| 2021-01-19T11:33:04.514533
| 2013-12-23T19:56:08
| 2013-12-23T19:56:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,662
|
py
|
"""
IS602 Final Project
Do the themes we see as a child show up later in political speeches
"""
__author__ = 'Aaron'
import create_speech_corpus
import create_tv_corpus
from projectutils import create_heat_map, plot_heat_map
import matplotlib.pyplot as plt
def get_speech_seed():
"""
Links to list of speeches on americanrhetoric. Starting point for speech crawl.
"""
return ["http://www.americanrhetoric.com/speechbanka-f.htm",
"http://www.americanrhetoric.com/speechbankg-l.htm",
"http://www.americanrhetoric.com/speechbankm-r.htm]",
"http://www.americanrhetoric.com/speechbanks-z.htm"]
def visualization(tv_summary, speech_summary, start, stop, mode='interactive'):
"""
Create final output
Two modes, save and interactive.
Use interactive to explore small batches of output
Use save to output a large number of years to output/outputYEAR.png
"""
# There was a problem with unicode to ascii errors cropping up again in matplotlib
# TODO fix encoding errors for the following years
skip_years = [1941, 1942, 1945, 1995, 2005, 2006, 2010, 2011]
for start_year in [year for year in range(start, stop) if year not in skip_years]:
print "Creating figure for " + str(start_year)
heat_map, keywords = create_heat_map(source=tv_summary,
response=speech_summary,
max_keywords=45,
start_year=start_year,
interval=50)
fig = plot_heat_map(heat_map, keywords, start_year)
if mode == 'save':
# Save fig to file
fig.set_size_inches(11, 7.5)
fig.savefig('output/output' + str(start_year) + '.png', dpi=100)
else:
plt.draw()
if mode != 'save':
plt.show()
def main():
# Get seed links for speech crawl
speech_seeds = get_speech_seed()
# Get summary of speech corpus
speech_summary = create_speech_corpus.get_corpus(speech_seeds)
# Get summary of tv corpus
tv_summary = create_tv_corpus.create_tv_corpus()
#Visualization with Heat Map:
# Create output for years from start to stop
# should be able to set this to limits, but I will save
# this with a smaller range to begin
start = 1970 # data starts at 1940
stop = 1972 # data stops at 2013
mode = 'interactive' # mode = 'interactive' or 'save'
visualization(tv_summary, speech_summary, start, stop, mode)
if __name__ == "__main__":
main()
|
[
"ajp619@outlook.com"
] |
ajp619@outlook.com
|
866fe4478abd3813a0c973bc7b480b7c5189ece0
|
a69503fd428c61419118090f45cc7b51f7c48c44
|
/main.py
|
92075f27bd188893609d6dd1ef9f2bf76df8285e
|
[] |
no_license
|
ianish07/invisible-cloak
|
a4459d32f377b97fd1582b58623611eb0672a844
|
1f3b193d221c7d247e42fbf4e2e566ce26a4271a
|
refs/heads/master
| 2022-11-21T11:45:04.237876
| 2020-07-17T09:14:36
| 2020-07-17T09:14:36
| 280,377,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,301
|
py
|
import numpy as np
import time
import cv2
cap = cv2.VideoCapture(0) # '0' refers to the primary camera, will change (eg. '1'/'2'') for any external ones
## Allow the system to sleep for 2 seconds before the webcam starts
time.sleep(2)
background = 0
for i in range(30):
#capturing the background for initial few secs(here in range 30)
ret,background = cap.read()
while (cap.isOpened()):
# capturing every frame till the webcam is open
ret, img = cap.read()
if not ret:
break
## Convert the color space from BGR to HSV
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
    ## Generate masks to detect red color (color may vary depending upon your choice)
##values [0,120,70] represents Hue(Color Portion/which color you want to identify), Saturation(amt of grey), Value(Brightness)
lower_red = np.array([0,120,70])
higher_red = np.array([10,255,255])
#separating our cloak part,i.e. looking for cloak in hsv in range of lower & higher red
mask1 = cv2.inRange(hsv,lower_red,higher_red)
lower_red = np.array([170, 120, 60])
higher_red = np.array([180, 255, 255])
mask2 = cv2.inRange(hsv, lower_red, higher_red)
mask1 = mask1 + mask2 #segmenting any shade of red (from 0-10 or 170-180) and storing it in mask1
## morph_open basically removes any noise from the image
mask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8) ,iterations = 2) #iterations for better result
    ## morph_dilate smooths out the image
mask1 = cv2.morphologyEx(mask1, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8), iterations = 1)
    ## Create an inverted mask to segment out the red color from the frame (i.e., the cloak)
mask2 = cv2.bitwise_not((mask1))
## Segment the red color part out of the frame
res1 = cv2.bitwise_and(img,img, mask=mask2)
## Create image showing static background frame pixels only for the masked region i.e, Subsituting the cloak part
res2 = cv2.bitwise_and(background,background, mask=mask1)
    ## Linearly add both images for the final output
final_output = cv2.addWeighted(res1, 1, res2, 1, 0)
cv2.imshow("magic", final_output)
k = cv2.waitKey(10)
    ## Close the window when Esc is pressed
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
|
[
"ianish07@gmail.com"
] |
ianish07@gmail.com
|
ec285e46a757036c22792f80a49af5ea90265538
|
89d4bdfc8c05067a9d0f1917d5ab135c285dc959
|
/Apriori/apriori.py
|
c18019094d198b21c60033d6e1fe9b01da4b7bf5
|
[] |
no_license
|
rliu054/machine-learning-from-scratch
|
06c1a726055db251dc92ac75b7f516a96eec97ef
|
fc5ca0430aaa03f032a3319384d116e7c0ec7644
|
refs/heads/master
| 2020-03-19T10:22:49.076779
| 2018-10-19T20:36:17
| 2018-10-19T20:36:17
| 136,365,119
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,921
|
py
|
def load_data_set():
return [[1, 3, 4], [2, 3, 5], [1, 2, 3, 5], [2, 5]]
def gen_set_list_size_one(data_set):
""" Get a list of itemsets with length 1. """
itemsets = []
for trans in data_set:
for item in trans:
if [item] not in itemsets:
itemsets.append([item])
itemsets.sort()
return list(map(frozenset, itemsets))
def gen_set_list_size_k(set_list, k):
""" Grow set size by 1 each time. """
set_list_size_k = []
list_size = len(set_list)
for i in range(list_size):
for j in range(i + 1, list_size):
list_1 = list(set_list[i])[:k - 2]
list_2 = list(set_list[j])[:k - 2]
list_1.sort()
list_2.sort()
if list_1 == list_2:
set_list_size_k.append(set_list[i] | set_list[j])
return set_list_size_k
def filter(data_set, set_list, min_support):
""" Filter out sets that doesn't meet min support. """
st_dict = {}
for transaction in data_set:
for st in set_list:
if st.issubset(transaction):
if st not in st_dict:
st_dict[st] = 1
else:
st_dict[st] += 1
num_items = float(len(data_set))
filtered_set_list = []
filtered_set_dict = {}
for st in st_dict:
support = st_dict[st] / num_items
if support >= min_support:
filtered_set_list.insert(0, st)
filtered_set_dict[st] = support
return filtered_set_list, filtered_set_dict
def apriori(data_set, min_support=0.5):
set_list_size_one = gen_set_list_size_one(data_set)
    d = list(map(set, data_set))  # convert each transaction to a set for fast subset tests
f_set_list, f_set_dict = filter(d, set_list_size_one, min_support)
set_list = [f_set_list]
k = 2
while (len(set_list[k - 2]) > 0):
set_list_size_k = gen_set_list_size_k(set_list[k - 2], k)
f_set_list_k, f_set_dict_k = filter(d, set_list_size_k, min_support)
f_set_dict.update(f_set_dict_k)
set_list.append(f_set_list_k)
k += 1
return set_list, f_set_dict
def generate_rules(set_list, set_dict, min_conf=0.7):
rules_list = []
for i in range(1, len(set_list)): # only for sets with two or more items
for freq_set in set_list[i]:
print("freq_set: {}".format(freq_set))
h1 = [frozenset([item]) for item in freq_set]
if i > 1:
rules_from_conseq(freq_set, h1, set_dict, rules_list, min_conf)
else:
calc_conf(freq_set, h1, set_dict, rules_list, min_conf)
def calc_conf(freq_set, h, set_dict, rules_list, min_conf=0.7):
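    # Confidence of the rule (freq_set - conseq) -> conseq is
    # support(freq_set) / support(freq_set - conseq); rules below min_conf are dropped.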
print("in calc_conf")
pruned_h = []
for conseq in h:
print("freq_set: {}, conseq: {}\n".format(freq_set, conseq))
conf = set_dict[freq_set] / set_dict[freq_set - conseq]
if conf >= min_conf:
print(freq_set - conseq, '-->', conseq, 'conf', conf)
rules_list.append((freq_set - conseq, conseq, conf))
pruned_h.append(conseq)
return pruned_h
def rules_from_conseq(freq_set, h, set_dict, rules_list, min_conf=0.7):
m = len(h[0])
print("in rules from conseq, h={}".format(h))
print("freq_set={}, m={}".format(freq_set, m))
if len(freq_set) > m + 1:
hmp1 = gen_set_list_size_k(h, m + 1)
print("before, hmp1={}".format(hmp1))
hmp1 = calc_conf(freq_set, hmp1, set_dict, rules_list, min_conf)
print("after, hmp1={}".format(hmp1))
if len(hmp1) > 1:
rules_from_conseq(freq_set, hmp1, set_dict, rules_list, min_conf)
def print_rules(rules_list, item_meaning):
for rule in rules_list:
for item in rule[0]:
print(item_meaning[item])
print(" ---->")
for item in rule[1]:
print(item_meaning[item])
print("confidence: %f" % rule[2])
print()
|
[
"iamliurui@gmail.com"
] |
iamliurui@gmail.com
|
b9978baea9f85c3b6621e5e94ed47e99c6a2e315
|
d9b620102da4a3632250da29b4b39d533a58686d
|
/nsePrediction/BarChart/apps.py
|
ea1c7f97cfc1da044462c1c07b5cff476e0c325b
|
[] |
no_license
|
SwagataRoy98/NseToolsPredictionAndCharts
|
ca9839652905c068a023a16e56e3decdc7e0337c
|
65dd1dcbe433842224a6d8369e2b9a1d671aa4d9
|
refs/heads/master
| 2023-07-10T17:12:27.314119
| 2021-07-24T06:11:28
| 2021-07-24T06:11:28
| 385,275,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
from django.apps import AppConfig
class BarchartConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'BarChart'
|
[
"r.swagata2016@gmail.com"
] |
r.swagata2016@gmail.com
|
008ef7ca47cef56af92cf66086f5e616c21552e5
|
dbca6a3e9e1f80649e7f26dc76e53c26508e8288
|
/survaeflow/transform/bijection/shift.py
|
ec7e59e6489911ca6d826f18275295dd9ea98944
|
[
"MIT"
] |
permissive
|
revsic/tf-survae-flows
|
95e6cbef5f03cfc02db4c5da3e6e07519bd73a7a
|
950a06c5e85ffedec6a024e81dc5fae557e2aae8
|
refs/heads/main
| 2023-04-18T05:24:11.086108
| 2021-05-05T14:44:08
| 2021-05-05T14:44:08
| 326,727,004
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,189
|
py
|
from .. import Transform
class Shift(Transform):
"""Constant Shifter.
"""
def __init__(self, shift):
"""Initializer.
Args:
shift: tf.Tensor, [tf.float32; [B, ...]], shift bias,
broadcastable to the inputs.
"""
super(Shift, self).__init__()
self.shift = shift
def call(self, inputs):
"""Shift inputs.
Args:
inputs: tf.Tensor, [tf.float32; [B, ...]], inputs.
Returns:
z: tf.Tensor, [tf.float32; [B, ...]], shifted.
ldj: float, zero.
"""
# [B, ...]
z = inputs + self.shift
return z, 0.
def forward(self, inputs):
"""Shift inputs.
Args:
            inputs: tf.Tensor, [tf.float32; [B, ...]], inputs.
Returns:
z: tf.Tensor, [tf.float32; [B, ...]], shifted.
"""
return inputs + self.shift
def inverse(self, inputs):
"""Unshift inputs.
Args:
inputs: tf.Tensor, [tf.float32; [B, ...]], latent.
Returns:
tf.Tensor, [tf.float32; [B, ...]], recovered.
"""
return inputs - self.shift
|
[
"revsic99@gmail.com"
] |
revsic99@gmail.com
|
1b0474675f8594688ac332cef6d772f875546ee4
|
d2bb3512d9be21beabaa168b9458dc3953fe2061
|
/The Messanger Project/MessangerBeta (GUI Rec) (One Way) (With Dynamic Canvas)/MessengerTest.py
|
c80550b9354d269294a4b5c091999ca01d95256b
|
[] |
no_license
|
arelyx/Messenger-Project
|
0da19c4adf201039a9d16e48afdec6cfafd797f9
|
dfa48bb2cb2ab6980c4d08df390ec6bac0fdf0c4
|
refs/heads/master
| 2022-06-11T03:22:20.777112
| 2020-05-02T07:25:11
| 2020-05-02T07:25:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
import time
import os
while True:
text = input('What Do You Wanna Say? ---->')
message = open('MessageData.txt', 'w')
#appendfile.write('\n')
message.write(text)
message.close()
readMessage = open('MessageData.txt','r')
print (readMessage.read())
readMessage.close()
time.sleep(0.05)
os.remove("MessageData.txt")
|
[
"noreply@github.com"
] |
arelyx.noreply@github.com
|
2539bf36d6a792a9aed22d6687c73d6dc1d1aba0
|
b9dedd970eae03cea283188bba63d89463428f4e
|
/detailviewadvance/detailviewadvance/asgi.py
|
76141c558d4fdc52e923fc314a1937bb916ccffe
|
[] |
no_license
|
Firoz-Thakur/Django-game
|
08cabb9fed2e61a14d9d1848a1253b757bd98d2c
|
ec2689ecef9c8037de96a2271dc8d34fdf9bf405
|
refs/heads/master
| 2023-08-02T11:45:48.895901
| 2021-10-08T17:22:37
| 2021-10-08T17:22:37
| 349,045,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
"""
ASGI config for detailviewadvance project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'detailviewadvance.settings')
application = get_asgi_application()
|
[
"firozbhaikardar21@gmail.com"
] |
firozbhaikardar21@gmail.com
|
5f8935d15ae86cd6bd9e37647685e21ba388b52b
|
473a9043010f47aaf4c101c3a22c96152a1da50d
|
/interface/shortcuts/dialog.py
|
bae511a2b36ffc55289888181c55a44d23072115
|
[] |
no_license
|
bmartins95/TagApp
|
ad057fca1ffa4a3f49f3d9a00afdafe0753fbf69
|
4c0cf67ad440f5cd1f0f1aa1b9fcc89a283deec2
|
refs/heads/main
| 2023-04-19T00:11:32.809919
| 2021-04-23T10:53:18
| 2021-04-23T10:53:18
| 358,332,202
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,298
|
py
|
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QDialogButtonBox
from server.server import Server
class Dialog(QDialog):
"""A template dialog class that defines default functions such as,
createProjectDict, createForm, createButtonBox, setMainLayout and
moveToCenter.
"""
def __init__(self):
super(Dialog, self).__init__()
self.setWindowTitle("Default Title")
self.setGeometry(100, 100, 300, 50)
self.createProjectDict()
self.createForm()
self.createButtonBox()
self.setMainLayout()
def createProjectDict(self):
"""Creates the dictionary projectDict that uses the names of the
projects as keys and their ids as values. Only the projects currently
available on the database are added to projectDict.
"""
server = Server()
table = server.getTable("projects")
self.projectIds = [project[0] for project in table]
self.projectNames = [project[1] for project in table]
self.projectDict = dict(zip(self.projectNames, self.projectIds))
def createForm(self):
"""Creates a form widget named formLayout."""
layout = QtWidgets.QFormLayout()
self.formLayout = QtWidgets.QWidget()
self.formLayout.setLayout(layout)
def createButtonBox(self):
"""Creates a widget named buttonBox that contains an OK and a Cancel
button."""
buttons = QDialogButtonBox.Ok | QDialogButtonBox.Cancel
self.buttonBox = QDialogButtonBox(buttons)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
def setMainLayout(self):
"""Joins formLayout and buttonBox in a single widget and sets it as the
dialog layout."""
mainLayout = QtWidgets.QVBoxLayout()
mainLayout.addWidget(self.formLayout)
mainLayout.addWidget(self.buttonBox)
self.setLayout(mainLayout)
def moveToCenter(self):
"""Moves the dialog window to the center of the screen."""
centerPoint = QtWidgets.QDesktopWidget().availableGeometry().center()
frame = self.frameGeometry()
frame.moveCenter(centerPoint)
self.move(frame.topLeft())
|
[
"bruno.martins.cesfi@gmail.com"
] |
bruno.martins.cesfi@gmail.com
|
e3a628780e884be8146c3183c1ac6262517c3b3f
|
54e9c9f74c79fa47e4f492fa60bbb799f28652c5
|
/satc_front/headless/ref2sink_cmdi.py
|
159eb2d13935f53df3c65294db59835d30e3f844
|
[] |
no_license
|
Cossack9989/SaTC
|
9830a91c6c67e547a86e940bf78f95f0206afaa5
|
2adc0f8e154b3b63e6b9968922437fe3532b6975
|
refs/heads/main
| 2023-07-06T04:10:32.274818
| 2021-07-08T14:02:51
| 2021-07-08T14:02:51
| 395,564,622
| 2
| 0
| null | 2021-08-13T07:55:57
| 2021-08-13T07:55:56
| null |
UTF-8
|
Python
| false
| false
| 12,361
|
py
|
# Find path to sink functions from references of given strings. Different output format for the function calling certain check functions. Find more params heuristically.
# @author tkmk
# @category Analysis
import time
import sys
from ghidra.util.classfinder import ClassSearcher
from ghidra.app.plugin.core.analysis import ConstantPropagationAnalyzer
from ghidra.program.util import SymbolicPropogator
from ghidra.program.model.mem import MemoryAccessException
from ghidra.util.exception import CancelledException
from collections import Counter, defaultdict
import re
DEBUG = False
heuristicMin = 4
sinks = ['system', '___system', 'bstar_system', 'popen',
'doSystemCmd', 'doShell', 'twsystem', 'CsteSystem', 'cgi_deal_popen',
'ExeCmd', 'ExecShell', 'exec_shell_popen', 'exec_shell_popen_str'
]
digest = ['strcpy', 'sprintf', 'memcpy', 'strcat']
heuristicIgnoreFunctions = ['strcpy', 'strncpy', 'strcat', 'memcpy']
needCheckConstantStr = {
'system': 0,
'fwrite': 0,
'___system': 0,
'bstar_system': 0,
'popen': 0,
'execve': 0,
'strcpy': 1,
'strcat': 1,
'strncpy': 1,
'memcpy': 1,
'twsystem': 0,
'cgi_deal_popen': 0,
'ExeCmd': 1,
'ExecShell': 0,
'exec_shell_popen': 0,
'exec_shell_popen_str': 0,
}
needCheckFormat = {
'sprintf': 1,
'doSystemCmd': 0,
'doShell': 0
}
syms = {}
newParam = defaultdict(set)
analyzer = None
def a2h(address):
return '0x' + str(address)
def getAnalyzer():
global analyzer
for a in ClassSearcher.getInstances(ConstantPropagationAnalyzer):
if a.canAnalyze(currentProgram):
analyzer = a
break
else:
assert 0
def getCallingArgs(addr, pos):
if not 0 <= pos <= 3:
return
arch = str(currentProgram.language.processor)
if arch == 'ARM':
reg = currentProgram.getRegister('r%d' % pos)
elif arch == 'MIPS':
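        # MIPS: sample the register 8 bytes past the call site so the branch delay
        # slot has taken effect, unless the delay slot is a NOP (empty pcode).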
nextInst = getInstructionAt(addr).next
if len(nextInst.pcode): # not NOP
addr = addr.add(8)
reg = currentProgram.getRegister('a%d' % pos)
elif arch == 'x86' and str(currentProgram.language.getProgramCounter()) == 'RIP':
        # don't know how to tell 32-bit and 64-bit apart qwq
if pos == 3:
return
reg = currentProgram.getRegister(['RDI', 'RSI', 'RDX'][pos])
else:
return
return getRegister(addr, reg)
def getRegister(addr, reg):
if analyzer is None:
getAnalyzer()
func = getFunctionContaining(addr)
if func is None:
return
if func in syms:
symEval = syms[func]
else:
symEval = SymbolicPropogator(currentProgram)
symEval.setParamRefCheck(True)
symEval.setReturnRefCheck(True)
symEval.setStoredRefCheck(True)
analyzer.flowConstants(currentProgram, func.entryPoint, func.body, symEval, monitor)
syms[func] = symEval
return symEval.getRegisterValue(addr, reg)
def getStr(addr):
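    # Read a NUL-terminated C string byte by byte from program memory;
    # returns None if the read runs off mapped memory.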
ad = addr
ret = ''
try:
while not ret.endswith('\0'):
ret += chr(getByte(ad) % 256)
ad = ad.add(1)
except MemoryAccessException:
return
return ret[:-1]
def getStrArg(addr, argpos=0):
rv = getCallingArgs(addr, argpos)
if rv is None:
return
return getStr(toAddr(rv.value))
def checkConstantStr(addr, argpos=0):
    # an empty string is not considered constant, since it may be an uninitialized global variable
return bool(getStrArg(addr, argpos))
def checkSafeFormat(addr, offset=0):
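    # A format call is treated as safe only when every '%s' conversion maps to a
    # constant string argument within the first four register-passed parameters.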
data = getStrArg(addr, offset)
if data is None:
return False
fmtIndex = offset
for i in range(len(data) - 1):
if data[i] == '%' and data[i + 1] != '%':
fmtIndex += 1
if data[i + 1] == 's':
if fmtIndex > 3:
return False
if not checkConstantStr(addr, fmtIndex):
return False
return True
def getCallee(inst):
callee = None
if len(inst.pcode):
if inst.pcode[-1].mnemonic == 'CALL':
callee = getFunctionAt(inst.getOpObjects(0)[0])
elif inst.pcode[-1].mnemonic == 'CALLIND':
regval = getRegister(inst.address, inst.getOpObjects(0)[0])
if regval is not None:
callee = getFunctionAt(toAddr(regval.value))
return callee
searchStrArgDone = set()
def searchStrArg(func):
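    # Heuristic: if a callee receives one of the known parameter strings at the same
    # argument position at least heuristicMin times, other constant strings passed at
    # that position are collected as new candidate parameters.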
if func in searchStrArgDone:
return
if DEBUG:
print 'start search', func, '(heuristic)'
searchStrArgDone.add(func)
start = func.entryPoint
end = func.body.maxAddress
funcPosCounter = Counter()
inst = getInstructionAt(start)
while inst is not None and inst.address < end:
callee = getCallee(inst)
if callee is not None:
maxpos = 4
if callee.parameterCount > 0:
maxpos = min(maxpos, callee.parameterCount)
for pos in range(maxpos):
if getStrArg(inst.address, pos) in paramTargets:
funcPosCounter[callee, pos] += 1
inst = inst.next
# newParamCount = 0
inst = getInstructionAt(start)
while inst is not None and inst.address < end:
callee = getCallee(inst)
if callee is not None and callee.name not in heuristicIgnoreFunctions:
for pos in range(4):
if funcPosCounter[callee, pos] >= heuristicMin:
s = getStrArg(inst.address, pos)
if s and re.search(r'[a-zA-Z_]{4}', s) and s not in paramTargets:
if DEBUG:
print 'new param', s
newParam[s].add(func)
# newParamCount += 1
inst = inst.next
if DEBUG:
print 'finish search', func, '(heuristic)'
return
callMap = {}
safeFuncs = set()
referenced = set()
def findSinkPath(refaddr, stringaddr, stringval):
pending = []
def search(func, start=None):
if func in callMap:
return
callMap[func] = {}
start = start or func.entryPoint
end = func.body.maxAddress
inst = getInstructionAt(start)
while inst is not None and inst.address < end:
callee = getCallee(inst)
if callee is not None:
callMap[func][inst.address] = callee
if callee not in callMap:
pending.append(callee)
inst = inst.next
def printpath(path):
print >>f, '[Param "%s"(%s), Referenced at %s : %s]' % (stringval, a2h(stringaddr), startFunc, a2h(refaddr)),
for i in range(len(path)):
addr, callee = path[i][:2]
if i == len(path) - 1:
print >>f, '>>', a2h(addr), '->', callee,
else:
calleeCallDigestFunc = path[i + 1][-1]
if calleeCallDigestFunc:
print >>f, '>>', a2h(addr), '>>', callee,
else:
print >>f, '>>', a2h(addr), '->', callee,
print >>f
def dfs(func, path, start=None):
'''path: list of (addr of call, callee, callDigestFunc)'''
if func.name in sinks and len(path):
if func.name in needCheckConstantStr and checkConstantStr(path[-1][0], needCheckConstantStr[func.name]):
return False
if func.name in needCheckFormat and checkSafeFormat(path[-1][0], needCheckFormat[func.name]):
return False
printpath(path)
return True
callDigestFunc = False
vulnerable = False
for addr, callee in sorted(callMap[func].items()):
if start is not None and addr < start:
continue
if not callDigestFunc and callee.name in digest:
if callee.name in needCheckConstantStr and checkConstantStr(addr, needCheckConstantStr[callee.name]):
pass
elif callee.name in needCheckFormat and checkSafeFormat(addr, needCheckFormat[callee.name]):
pass
else:
callDigestFunc = True
if callee in [x[1] for x in path] + [startFunc] or callee in safeFuncs:
continue
vulnerable = dfs(callee, path + [(addr, callee, callDigestFunc)]) or vulnerable
if not vulnerable and func != startFunc:
safeFuncs.add(func)
return vulnerable
startFunc = getFunctionContaining(refaddr)
assert startFunc is not None
pending.append(startFunc)
while len(pending):
search(pending.pop())
vulnerable = dfs(startFunc, [], refaddr)
if vulnerable:
searchStrArg(startFunc)
return vulnerable
def searchParam(target, refstart=None, refend=None):
if DEBUG:
print 'start searching "%s" ...' % target
curAddr = currentProgram.minAddress
end = currentProgram.maxAddress
haveWayToSink = False
checkedRefAddr = set()
while curAddr < end:
curAddr = find(curAddr, target)
if curAddr is None:
break
if getByte(curAddr.add(len(target))) != 0:
curAddr = curAddr.add(1)
continue
for ref in getReferencesTo(curAddr):
if refstart is not None and refstart > ref.fromAddress:
continue
if refend is not None and refend < ref.fromAddress:
continue
if target not in newParam:
referenced.add(target)
caller = getFunctionContaining(ref.fromAddress)
if caller is not None:
if DEBUG:
print 'Reference From', a2h(ref.fromAddress), '(%s)' % caller,
print 'To', a2h(curAddr), '("%s")' % target
if ref.fromAddress in checkedRefAddr:
continue
haveWayToSink = findSinkPath(ref.fromAddress, curAddr, target) or haveWayToSink
checkedRefAddr.add(ref.fromAddress)
else:
for ref2 in getReferencesTo(ref.fromAddress):
caller = getFunctionContaining(ref2.fromAddress)
if caller is None:
if DEBUG:
print 'Ignore', getSymbolAt(ref2.fromAddress), 'at', a2h(ref2.fromAddress)
continue
if DEBUG:
print 'Reference From', a2h(ref2.fromAddress), '(%s)' % caller,
print 'To', a2h(ref.fromAddress), '(%s)' % getSymbolAt(ref.fromAddress),
print 'To', a2h(curAddr), '("%s")' % target
if ref2.fromAddress in checkedRefAddr:
continue
haveWayToSink = findSinkPath(ref2.fromAddress, curAddr, target) or haveWayToSink
checkedRefAddr.add(ref2.fromAddress)
curAddr = curAddr.add(1)
if DEBUG:
print 'finish searching "%s"' % target
return haveWayToSink
if __name__ == '__main__':
args = getScriptArgs()
paramTargets = set(open(args[0]).read().strip().split())
f = None
if len(args) > 1:
f = open(args[1], 'w')
numOfParam = len(paramTargets)
t = time.time()
cnt = 0
for i, param in enumerate(paramTargets):
monitor.setMessage('Searching for "%s": %d of %d' % (param, i + 1, numOfParam))
cnt += searchParam(param)
for i, param in enumerate(newParam):
monitor.setMessage('Searching for "%s": %d of %d' % (param, i + 1, len(newParam)))
for func in newParam[param]:
searchParam(param, func.body.minAddress, func.body.maxAddress)
t = time.time() - t
print 'Time Elapsed:', t
print '%d of %d parameters are referenced' % (len(referenced), numOfParam)
print '%d of %d parameters have way to sink function' % (cnt, numOfParam)
    print 'Found %d new params heuristically:' % len(newParam)
print ', '.join(newParam)
if f is not None:
print >>f, 'Time Elapsed:', t
print >>f, '%d of %d parameters are referenced' % (len(referenced), numOfParam)
print >>f, '%d of %d parameters have way to sink function' % (cnt, numOfParam)
        print >>f, 'Found %d new params heuristically:' % len(newParam)
print >>f, ', '.join(newParam)
f.close()
|
[
"tt.jiaqi@gmail.com"
] |
tt.jiaqi@gmail.com
|
609c7b5dab24d54b1bb8b651774f5ea99e7e3c1a
|
9695983ca3fd5b742255dc7c27c2d97451110dba
|
/miller/models/document.py
|
10f8d50840372ddc2053e52760a8fd1a509b9f93
|
[] |
no_license
|
inmagik/miller
|
b94f5fcf7454616495d4adef20598b333be3c342
|
40217fc6c01ec18de6a264ea096dfddf4ad84c7a
|
refs/heads/master
| 2021-01-22T22:34:21.738286
| 2017-12-12T12:07:46
| 2017-12-12T12:07:46
| 92,777,842
| 0
| 0
| null | 2017-05-29T21:50:06
| 2017-05-29T21:50:06
| null |
UTF-8
|
Python
| false
| false
| 20,349
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import shutil,os,codecs, mimetypes, json, requests, tempfile, logging, PyPDF2, bibtexparser, errno
from actstream import action
from actstream.actions import follow
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.postgres.fields import JSONField
from django.contrib.postgres.search import SearchVectorField
from django.core import files
from django.core.cache import cache
from django.db import models
from django.db.models.signals import pre_delete, post_save, pre_save
from django.dispatch import receiver, Signal
from django.utils.text import slugify
from miller import helpers
from pydash import py_
from wand.image import Image, Color
logger = logging.getLogger('miller.commands')
document_ready = Signal(providing_args=["instance", "created"])
def attachment_file_name(instance, filename):
return os.path.join(instance.type, filename)
def private_attachment_file_name(instance, filename):
return os.path.join(settings.MEDIA_PRIVATE_ROOT, instance.type, filename)
def snapshot_attachment_file_name(instance, filename):
return os.path.join(instance.type, 'snapshots', filename)
class Document(models.Model):
BIBLIOGRAPHIC_REFERENCE = 'bibtex'
CROSSREF_REFERENCE = 'crossref'
VIDEO_COVER = 'video-cover'
PICTURE = 'picture'
IMAGE = 'image'
PHOTO = 'photo'
VIDEO = 'video'
AUDIO = 'audio'
TEXT = 'text'
PDF = 'pdf'
RICH = 'rich'
LINK = 'link'
AV = 'audiovisual'
ENTITY = 'entity'
TYPE_CHOICES = (
(BIBLIOGRAPHIC_REFERENCE, 'bibtex'),
(CROSSREF_REFERENCE, 'bibtex'),
(VIDEO_COVER, 'video interview'),
(VIDEO, 'video'),
(TEXT, 'text'),
(PICTURE, 'picture'),
(PDF, 'pdf'),
(IMAGE, 'image'),
(PHOTO, 'photo'),
(RICH, 'rich'),
(LINK, 'link'),
(AV, 'audiovisual'),
(ENTITY, 'entity: see data type property'), # use the type field inside data JsonField.
) + settings.MILLER_DOCUMENT_TYPE_CHOICES
DEFAULT_OEMBED = {
'provider_name': '',
'provider_url': '',
'type': 'rich',
'title': '',
'description': '',
'html': '',
'details':{}
}
type = models.CharField(max_length=24, choices=TYPE_CHOICES)
short_url = models.CharField(max_length=22, db_index=True, default=helpers.create_short_url, unique=True, blank=True)
title = models.CharField(max_length=500)
slug = models.CharField(max_length=150, unique=True, blank=True, db_index=True)
contents = models.TextField(null=True, blank=True, default=json.dumps(DEFAULT_OEMBED, indent=1)) # OEMBED (JSON) metadata field, in different languages if available.
data = JSONField(default=dict)
copyrights = models.TextField(null=True, blank=True, default='')
url = models.URLField(max_length=500, null=True, blank=True)
  owner = models.ForeignKey(User)  # at least the first author, the one who owns the file.
attachment = models.FileField(upload_to=attachment_file_name, null=True, blank=True, max_length=200)
snapshot = models.FileField(upload_to=snapshot_attachment_file_name, null=True, blank=True, max_length=200)
mimetype = models.CharField(max_length=127, blank=True, default='')
locked = models.BooleanField(default=False) # prevent accidental override when it is not needed.
# add search field
search_vector = SearchVectorField(null=True, blank=True)
# add last modified date
# undirected
documents = models.ManyToManyField("self", blank=True)
# documents = models.ManyToManyField("self", through='Mention', symmetrical=False, related_name='mentioned_with')
def download(self, outputFormat='iiif'):
"""
write/rewrite metadata file according to outputformat, then add attachment.
Return the zippath, or raise an exception.
"""
import zipfile
zf = os.path.join(settings.ZIP_ROOT, '%s.zip' % self.slug)
if not os.path.exists(settings.ZIP_ROOT):
try:
os.makedirs(settings.ZIP_ROOT)
except OSError as e:
if e.errno != errno.EEXIST:
raise e
# write/rewrite data file according to outputformat
# add attachment (if allowed) and data file
with zipfile.ZipFile(zf, 'w') as z:
if self.data.get('downloadable', False) and self.attachment: #getattr(self.attachment, 'path', None) is not None:
z.write(self.attachment.path)
# write zip file
return zf
@property
def dmetadata(self):
if not hasattr(self, '_dmetadata'):
try:
self._dmetadata = json.loads(self.contents)
except Exception as e:
self._dmetadata = {}
logger.exception(e)
return {}
else:
return self._dmetadata
else:
return self._dmetadata
class Meta:
ordering = ['-id']
def __unicode__(self):
return '%s (%s)' % (self.slug, self.type)
@staticmethod
def get_search_Q(query):
"""
Return search queryset for this model. No ranking for the moment.
"""
from miller.postgres import RawSearchQuery
search_query = RawSearchQuery(query, config='simple')
logger.debug('search query: %s - parsed: %s' %(
query,
search_query.parsed_query
))
return models.Q(search_vector=search_query)
def update_search_vector(self):
"""
Fill the search_vector using self.data:
e.g. get data['title'] if is a basestring or data['title']['en_US'] according to the values contained into settings.LANGUAGES
Note that a language configuration can be done as well, in this case consider the last value in settings.LANGUAGES (e.g. 'english')
"""
from django.db import connection
fields = (('title', 'A'), ('description', 'B'))
contents = []
for _field, _weight in fields:
default_value = self.data.get(_field, None)
value = u"\n".join(filter(None,[
default_value if isinstance(default_value, basestring) else None
] + list(
set(
py_.get(self.data, '%s.%s' % (_field, lang[2]), None) for lang in settings.LANGUAGES)
)
))
contents.append((value, _weight, 'simple'))
q = ' || '.join(["setweight(to_tsvector('simple', COALESCE(%%s,'')), '%s')" % weight for value, weight, _config in contents])
with connection.cursor() as cursor:
cursor.execute(''.join(["""
UPDATE miller_document SET search_vector = x.weighted_tsv FROM (
SELECT id,""",
q,
"""
AS weighted_tsv
FROM miller_document
WHERE miller_document.id=%s
) AS x
WHERE x.id = miller_document.id
"""]), [value for value, _w, _c in contents] + [self.id])
logger.debug('document {pk:%s, slug:%s} search_vector updated.'%(self.pk, self.slug))
return contents
# this is searchable as SELECT id FROM miller_document WHERE search_vector @@ to_tsquery('simple', 'descript:*')
# store into the whoosh index
def store(self, ix=None):
if ix is None:
ix = helpers.get_whoosh_index()
writer = ix.writer()
_fields = {}
# get title and description in different languages
for k in ['title', 'description', 'details.caption']:
_fields[k] = [ self.data[k] if k in self.data and isinstance(self.data[k], basestring) else '']
for lang in settings.LANGUAGES:
_fields[k].append(py_.get(self.data, '%s.%s' % (k,lang[2]), ''))
_fields[k] = ' '.join(_fields[k]).strip()
# create multilanguage content by squashing stuff
writer.update_document(
title = _fields['title'],
path = u"%s"% self.short_url,
content = u"\n".join(filter(None,[
self.url,
self.data.get('url', None),
self.data.get('provider_name', None),
self.data.get('provider_url', None),
_fields['description'],
_fields['details.caption'],
])),
classname = u"document")
writer.commit()
# download remote pdfs allowing to produce snapshots. This should be followed by save() :)
def fill_from_url(self):
    logger.debug('fill_from_url on document {pk:%s}' % self.pk)
if self.url:
logger.debug('url: %s for document {pk:%s}' % (self.url, self.pk))
try:
res = requests.get(self.url, timeout=settings.MILLER_URL_REQUEST_TIMEOUT, stream=True)
if res.status_code == requests.codes.ok:
self.mimetype = res.headers['content-type'].split(';')[0].lower()
logger.debug('mimetype found: %s for document {pk:%s}' % (self.mimetype, self.pk))
if self.mimetype == 'application/pdf':
# Create a temporary file
filename = self.url.split('/')[-1]
filename = filename[:80]
lf = tempfile.NamedTemporaryFile()
# Read the streamed image in sections
for block in res.iter_content(1024 * 8):
if not block: # If no more file then stop
break
lf.write(block) # Write image block to temporary file
# complete writing.
lf.flush()
logger.debug('saving attachment: %s for document {pk:%s}' % (filename, self.pk))
outfile = os.path.join(settings.MEDIA_PRIVATE_ROOT, self.type, self.short_url)
try:
os.makedirs(os.path.dirname(outfile))
except OSError:
pass
shutil.copy(lf.name, outfile)
self.attachment = os.path.join(settings.MEDIA_PRIVATE_RELATIVE_PATH, self.type, self.short_url)
self.save()
# clean tempfile
lf.close()
except requests.exceptions.Timeout:
logger.debug('url: %s for document {pk:%s} TIMEOUT...' % (self.url, self.pk))
def fill_from_metadata(self):
    if 'error' in self.data:  # simply ignore filling from erroneous data.
return
if 'bibtex' in self.data:
self.data['details'] = self.data['details'] if 'details' in self.data else {}
try:
self.data['details']['bibtex'] = bibtexparser.loads(self.data['bibtex']).entries[0]
    except Exception as e:
logger.exception(e)
return
if not self.title and 'title' in self.data['details']['bibtex']:
self.title = self.data['details']['bibtex']['title']
# complete self.data section with title
if not 'title' in self.data or not self.data['title']:
self.data['title'] = self.title
# complete with rough reference
if not 'reference' in self.data or not self.data['reference']:
self.data['reference'] = self.data['title']
# dep. brew install ghostscript, brew install imagemagick
def create_snapshot(self):
logger.debug('document {pk:%s, mimetype:%s, type:%s} init snapshot' % (self.pk, self.mimetype, self.type))
if not self.attachment or not getattr(self.attachment, 'path', None):
logger.debug('document {pk:%s} snapshot cannot be generated.' % self.pk)
return
if not os.path.exists(self.attachment.path):
logger.debug('document {pk:%s} snapshot cannot be generated, attached file does not exist.' % self.pk)
return
# reconsider mimetype
mimetype, encoding = mimetypes.guess_type(self.attachment.path, strict=True)
if mimetype:
self.mimetype = mimetype
logger.debug('document {pk:%s, mimetype:%s, type:%s} snapshot can be generated' % (self.pk, self.mimetype, self.type))
filename = '%s.snapshot.png' % self.short_url
outfile = os.path.join(settings.MEDIA_ROOT, snapshot_attachment_file_name(self, filename))
# generate dir if there is none
try:
os.makedirs(os.path.dirname(outfile))
except OSError:
logger.debug('document {pk:%s, mimetype:%s, type:%s} creating folder for snapshot' % (self.pk, self.mimetype, self.type))
pass
# generate thumbnail
if self.mimetype.split('/')[0] == 'image' or self.type == Document.IMAGE or self.type == Document.PHOTO:
logger.debug('document {pk:%s, mimetype:%s, type:%s} generating IMAGE thumbnail...' % (self.pk, self.mimetype, self.type))
# generate snapshot
d = helpers.generate_snapshot(filename=self.attachment.path, output=outfile, width=settings.MILLER_SNAPSHOT_WIDTH, height=settings.MILLER_SNAPSHOT_HEIGHT)
if d:
self.data.update(d)
self.snapshot = snapshot_attachment_file_name(self, filename)#outfile# .save(os.path.basename(outfile), files.images.ImageFile(f), save=False)
self._dirty = True
logger.debug('document {pk:%s, mimetype:%s, type:%s} IMAGE thumbnail done.' % (self.pk, self.mimetype, self.type))
# remove tempfile
# print mimetype
elif self.mimetype == 'application/pdf':
logger.debug('document {pk:%s, mimetype:%s, type:%s} generating PDF snapshot...' % (self.pk, self.mimetype, self.type))
pdffile = self.attachment.path
pdf_im = PyPDF2.PdfFileReader(pdffile)
# get page
page = 0
try:
metadata = json.loads(self.contents)
page = int( metadata['thumbnail_page']) if 'thumbnail_page' in metadata else 0
except Exception as e:
logger.exception(e)
try:
# Converting first page into JPG
with Image(filename='%s[%s]'%(pdffile,page), resolution=150) as img:
img.format = 'png'
img.background_color = Color('white') # Set white background.
img.alpha_channel = 'remove'
img.save(filename=outfile)
self.snapshot = snapshot_attachment_file_name(self, filename)#outfile# .save(os.path.basename(outfile), files.images.ImageFile(f), save=False)
self._dirty = True
# with open(self.attachment.path + '.png') as f:
# self.snapshot.save(os.path.basename(self.attachment.path)[:100] + '.png', files.images.ImageFile(f), save=False)
# self._dirty = True
# logger.debug('document {pk:%s, type:%s} PDF snapshot done.' % (self.pk,self.type))
except Exception as e:
logger.exception(e)
print 'could not save snapshot of the required resource', self.pk
else:
logger.debug('snapshot generated for document {pk:%s}, page %s' % (self.pk, page))
def noembed(self):
"""
use noembed MILLER_EMBEDLY_API_KEY to get videos from url
"""
if self.url:
logger.debug('document {pk:%s, url:%s} init embedly' % (self.pk, self.url))
from embedly import Embedly
client = Embedly(settings.MILLER_EMBEDLY_API_KEY)
embed = client.oembed(self.url, raw=True)
self.contents = embed['raw']
# print json.embed
#else:
# logger.warn('document {pk:%s, url:%s} cannot embedly, it is not a recognized provider.' % (self.pk, self.url))
def create_oembed(self):
"""
Create a rich oembed for uploaded document, if needed.
"""
logger.debug('document {pk:%s, mimetype:%s} init oembed' % (self.pk, self.mimetype))
if self.mimetype == 'application/pdf' and self.attachment and hasattr(self.attachment, 'path'):
url = '%s%s' %(settings.MILLER_SETTINGS['host'], self.attachment.url)
self.data['html'] = "<iframe src='https://drive.google.com/viewerng/viewer?url=%s&embedded=true' width='300' height='200' style='border: none;'></iframe>" % url
self.data['type'] = 'rich'
self.type = Document.RICH # yep so that client can use the oembed correctly (rich, video, photo, image).
self._dirty=True
logger.debug('document {pk:%s} oembed done.' % self.pk)
else:
logger.debug('document {pk:%s, mimetype:%s} cannot create oembed.' % (self.pk, self.mimetype))
def save(self, *args, **kwargs):
"""
Override ortodox save method. Check for duplicates on OPTIONAL fields (url in this case)
"""
if not hasattr(self, '_saved'):
self._saved = 1
else:
self._saved = self._saved + 1
logger.debug('document {pk:%s} init save, time=%s' % (self.pk, self._saved))
if not self.pk:
# get the missing fields from metadata bibtex if any.
self.fill_from_metadata()
if self.url:
#print 'verify the url:', self.url
try:
doc = Document.objects.get(url=self.url)
self.pk = doc.pk
self.title = doc.title
self.slug = doc.slug
self.type = doc.type
self.short_url = doc.short_url
self.copyrights = doc.copyrights
self.url = doc.url
self.owner = doc.owner
self.attachment = doc.attachment
self.snapshot = doc.snapshot
self.mimetype = doc.mimetype
# update contents only
if not doc.locked and self.contents != doc.contents:
# print "updating the content", self.contents, doc.contents
super(Document, self).save(force_update=True, update_fields=['contents'])
# print "done, now:", self.contents
else:
# print "do not update the content"
self.contents = doc.contents
except Document.DoesNotExist:
logger.debug('document {pk:%s,url:%s} from url' % (self.pk, self.url[:10]))
super(Document, self).save(*args, **kwargs)
action.send(self.owner, verb='created', target=self)
else:
super(Document, self).save(*args, **kwargs)
action.send(self.owner, verb='created', target=self)
else:
super(Document, self).save(*args, **kwargs)
@receiver(pre_save, sender=Document)
def complete_instance(sender, instance, **kwargs):
logger.debug('document {pk:%s} @pre_save' % instance.pk)
if not instance.slug:
instance.slug = helpers.get_unique_slug(instance, instance.title, max_length=68)
logger.debug('document {pk:%s, slug:%s} @pre_save slug generated' % (instance.pk, instance.slug))
@receiver(post_save, sender=Document)
def dispatcher(sender, instance, created, **kwargs):
"""
Generic post_save handler. Dispatch a document_ready signal.
If receivers need to update the instance, they just need to set the property `_dirty`.
"""
if getattr(instance, '_dispatched', None) is None:
instance._dispatched = True
else:
logger.debug('document@post_save {pk:%s} dispatching already dispatched. Skipping.' % instance.pk)
# done already.
return
logger.debug('document@post_save {pk:%s} dispatching @document_ready...' % instance.pk)
document_ready.send(sender=sender, instance=instance, created=created)
if getattr(instance, '_dirty', None) is not None:
logger.debug('document@post_save {pk:%s} dirty instance. Need to call instance.save()..' % instance.pk)
instance.save()
else:
logger.debug('document@post_save {pk:%s} no need to save the instance again.' % instance.pk)
if created:
follow(instance.owner, instance)
from miller.tasks import document_update_search_vectors
document_update_search_vectors.delay(instance.pk)
@receiver(document_ready, sender=Document)
def create_snapshot(sender, instance, created, **kwargs):
if created and instance.attachment and hasattr(instance.attachment, 'path'):
logger.debug('document@document_ready {pk:%s} need to create snapshot' % instance.pk)
instance.create_snapshot()
else:
logger.debug('document@document_ready {pk:%s} NO need to create snapshot.' % instance.pk)
@receiver(document_ready, sender=Document)
def create_oembed(sender, instance, created, **kwargs):
if created:
try:
logger.debug('document@document_ready {pk:%s}: need to create oembed' % instance.pk)
instance.create_oembed()
except Exception as e:
logger.exception(e)
else:
logger.debug('document@document_ready {pk:%s}: NO need to create oembed.' % instance.pk)
@receiver(document_ready, sender=Document)
def clean_related_documents_cache(sender, instance, created, **kwargs):
# list of affected stories
affected = set(instance.stories.values_list('short_url', flat=True))
for key in affected:
ckey = 'story.%s' % key
cache.delete(ckey)
logger.debug('document@document_ready {pk:%s}: clean cache of %s related docs.' % (instance.pk, len(affected)))
|
[
"gui.daniele@gmail.com"
] |
gui.daniele@gmail.com
|
754ae18913fa53c3f6491ed3c9f8dd6add8f7112
|
65a0f03a21bb30b271a724a327d2312b230fb8a3
|
/freeze_model.py
|
608d1ab23965c1217c385822b9a5b912b3600154
|
[] |
no_license
|
pod3275/Deep_Knowledge_Tracing
|
86cc759b97b6b3efe0c6f2d95e6e12d2faad70a6
|
acb899c5606fa6d9bd0596108f573ad097971c41
|
refs/heads/master
| 2023-03-10T18:09:29.148918
| 2021-02-26T08:44:32
| 2021-02-26T08:44:32
| 342,514,469
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 27 16:50:02 2021
@author: LSH
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow.python.tools import freeze_graph
def generate_freezed_graph():
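    # Assumed mapping of the positional arguments below, based on the classic
    # freeze_graph signature (not confirmed by this repo): input_graph,
    # input_saver, input_binary, input_checkpoint, output_node_names,
    # restore_op_name, filename_tensor_name, output_graph, clear_devices,
    # initializer_nodes.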
freeze_graph.freeze_graph('./results/model/model.pb',"", True, './results/model/LSTM-102',
'output_layer/preds', "save/restore_all", "save/Const",
'./results/model/frozen_model.pb', True, "")
if __name__ == "__main__":
generate_freezed_graph()
|
[
"sangheon_lee@tmax.co.kr"
] |
sangheon_lee@tmax.co.kr
|
3365499c58315b1ab7f490a845e64dd33914485c
|
9dedde0a8e77f85106e11b64803853733f5744b5
|
/lib/relay.py
|
03e5b0f2147bef285077c16d16ac61a02e8ab715
|
[] |
no_license
|
mdegrazia/piWarmer
|
46466866c5ebb68234afc085496e1f886a8ba357
|
54b30ad56cf55c1676044eb488128f093ccf9529
|
refs/heads/master
| 2021-01-12T06:09:10.854687
| 2017-12-27T20:49:26
| 2017-12-27T20:49:26
| 77,316,716
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
import RPi.GPIO as GPIO
import subprocess
import time
class relay(object):
"""Class that controls an AC/DC control relay
Attributes:
name: Relay name (e.g. Heater, Light, etc.)
GPIO_PIN: BCM GPIO pin on the Raspberry Pi that the AC/DC control relay is plugged into
"""
def __init__(self,name="relay",GPIO_PIN=18,type="always_off"):
"""Return a relay object whose name is *name*."""
self.name = name
self.GPIO_PIN = GPIO_PIN
self.type = type
#setup GPIO Pins
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(GPIO_PIN, GPIO.OUT)
def switchHigh(self):
try:
GPIO.output(self.GPIO_PIN,GPIO.HIGH)
time.sleep(3)
except:
return False
return True
def switchLow(self):
try:
GPIO.output(self.GPIO_PIN,GPIO.LOW)
time.sleep(3)
except:
return False
return True
def status(self):
#return current status of switch, 0 or 1
try:
p = subprocess.Popen(["gpio -g read " + str(self.GPIO_PIN)],shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
message = p.communicate()
return message[0].rstrip()
except:
return False
'''
heater = relay()
print(heater.status())
heater.switchHigh()
heater.switchLow()
'''
|
[
"noreply@github.com"
] |
mdegrazia.noreply@github.com
|
ab5921e4841a4ea9e40abc6143c666860fb6e899
|
e4117e535c8f0d5d93d6bf17382eb25f63930eef
|
/initLBP.py
|
f8b7d6655fd4b5fbcc96e6f1cbfa5b60f2fb7801
|
[] |
no_license
|
UpCoder/MedicalLBP
|
2f1e933f3a2671e59015856155b008f8ef0b3076
|
a1f98f31240932abf0bc84ed33174c2ac79d920c
|
refs/heads/master
| 2021-01-18T18:40:07.717801
| 2017-04-10T03:03:59
| 2017-04-10T03:03:59
| 86,870,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,428
|
py
|
from extractFeature import extractFeature as EF
import numpy as np
import kCrossValidation as KCV
import KCrossValidationPR as KCVPR
def initLBP():
dirPath = 'D:\\MedicalImageAll'
allData,allCountArr = EF(dirPath)
label1_1 = (np.ones((18, 1)))
label1_2 = (np.ones((18, 1)))
label2_1 = (np.ones((10, 1))*2)
label2_2 = (np.ones((12, 1)) * 2)
label3_1 = (np.ones((10, 1))*3)
label3_2 = (np.ones((17, 1)) * 3)
label4_1 = (np.ones((10, 1))*4)
label4_2 = (np.ones((17, 1))*4)
label5_1 = (np.ones((10, 1))*5)
label5_2 = (np.ones((10, 1)) * 5)
label = np.concatenate((label1_1, label2_1, label3_1, label4_1, label5_1,\
label1_2, label2_2, label3_2, label4_2, label5_2))
print 'first line is ', np.shape(allData[0][:])
print allData[0][:]
allLabel = getAllLabel(label,allCountArr)
print 'allCountArr is ',allCountArr
print 'allCountArr len is ',len(allCountArr)
print 'allCountArr is ',np.sum(allCountArr)
# KCV.kCrossValidation(allData,allLabel,label,countArr=allCountArr)
KCVPR.kCrossValidation(allData, allLabel, label, countArr=allCountArr)
def getAllLabel(label,allCountArr):
result = []
for i in range(np.shape(label)[0]):
curNum = allCountArr[i]
for j in range(curNum):
result.append(label[i][0])
result = np.array(result)
print type(result)
return result
initLBP()
|
[
"546043882@qq.com"
] |
546043882@qq.com
|
f7220d766059c398ce9ddce4db0d786c79982507
|
6d569717e6e2b7ff70b810b9210b1ec753477b38
|
/examples/nod.py
|
2616d68573a5381dc443c6d189b9ad8fa29013e9
|
[
"MIT"
] |
permissive
|
unton3ton/In_Rust_We_Trust
|
fc85191e47e2bb41bb2c90b68b22ea266fd727f5
|
43513b4a34b2d7e20950db9a0ac811721db06a1a
|
refs/heads/master
| 2023-08-16T18:25:39.658698
| 2020-06-19T12:34:07
| 2020-06-19T12:34:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
def nod(x,y):
if y != 0:
return nod(y, x % y)
else:
return x
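# Quick check of the recursion (illustrative): nod(12, 18) -> 6, so the LCM
# printed below for those inputs would be 216 / 6 = 36.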
n1 = int(input("Enter n1: "))
n2 = int(input("Enter n2: "))
print("GCD = ", nod(n1,n2))
print("LCM = ", int((n1*n2)/nod(n1,n2)))
|
[
"noreply@github.com"
] |
unton3ton.noreply@github.com
|
39c5ec8e7c2341d971c5187271dee557b5788d86
|
1b84b1cfed163cff380d13f428ce77ccaf22c9b7
|
/generate.py
|
b7465fa87e7cc4daec9fffc0144a0d25ae80d243
|
[] |
no_license
|
Sagiaj/Data-Mining
|
b4cb8fb709c291270effc10f78f814d7c8337ebc
|
6f6a9bf2ec111061ddd803ef01e954448906278d
|
refs/heads/master
| 2021-01-23T02:10:25.883457
| 2017-10-30T21:48:44
| 2017-10-30T21:48:44
| 102,435,149
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
def gen(dim, numberOfPoints):
import random
return [[random.randint(1,100) for i in range(dim)] for j in range(numberOfPoints)]
|
[
"sagiajaj@gmail.com"
] |
sagiajaj@gmail.com
|
9192d014f6dccd5e8794d066a7d0b63fa47e8c57
|
950b1cc985544b2f8e4aed85865043058ab15f82
|
/caixa/admin.py
|
a9a095f72a4b107ed4ebb9f995648f62afe66efa
|
[] |
no_license
|
rubensrojas/Django-api-internship-test-
|
088e79b64698ce1f4688b7412f6b79775b4b09a0
|
c2e859542b59fd2ab3f7ea5441501ed49d618063
|
refs/heads/master
| 2021-03-13T06:19:01.273962
| 2020-03-25T13:00:03
| 2020-03-25T13:00:03
| 246,647,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
from django.contrib import admin
# local
from .models import Product
from account.models import Account
# Register your models here.
admin.site.register(Product)
admin.site.register(Account)
|
[
"troy.rubens@gmail.com"
] |
troy.rubens@gmail.com
|
f198dc27184c102bb69f4454494fc0748e1a4a64
|
1e1376342cc8256de400857500ad6784a50514a7
|
/opentelemetry-api/src/opentelemetry/configuration/__init__.py
|
d0fc11dc594f99cb51ccf05255b54b71f8f7b71b
|
[
"Apache-2.0"
] |
permissive
|
nirsky/opentelemetry-python
|
7f62abb123a640c4b7e9d773714e1c55976c974f
|
8d09319c43a24b05d14128361de2c9afe8c856b6
|
refs/heads/master
| 2022-07-17T23:24:52.654516
| 2020-05-12T23:37:01
| 2020-05-12T23:37:01
| 260,860,682
| 0
| 0
|
Apache-2.0
| 2020-05-03T08:36:35
| 2020-05-03T08:36:34
| null |
UTF-8
|
Python
| false
| false
| 5,291
|
py
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# FIXME find a better way to avoid all those "Expression has type "Any"" errors
# type: ignore
"""
Simple configuration manager
This is a configuration manager for OpenTelemetry. It reads configuration
values from environment variables prefixed with ``OPENTELEMETRY_PYTHON_`` whose
characters are only alphanumeric characters and underscores, except for the
first character after ``OPENTELEMETRY_PYTHON_`` which must not be a number.
For example, these environment variables will be read:
1. ``OPENTELEMETRY_PYTHON_SOMETHING``
2. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``
3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``
4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else``
5. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else2``
These won't:
1. ``OPENTELEMETRY_PYTH_SOMETHING``
2. ``OPENTELEMETRY_PYTHON_2_SOMETHING_AND__ELSE``
3. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``
The values stored in the environment variables can be found in an instance of
``opentelemetry.configuration.Configuration``. This class can be instantiated
freely because instantiating it always returns the same object.
For example, if the environment variable
``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then
``Configuration().meter_provider == "my_meter_provider"`` would be ``True``.
Attributes that are not defined will always return ``None``. This is intended to make it
easier to use the ``Configuration`` object in actual code, because it won't be
necessary to check for the attribute to be defined first.
Environment variables used by OpenTelemetry
-------------------------------------------
1. OPENTELEMETRY_PYTHON_METER_PROVIDER
2. OPENTELEMETRY_PYTHON_TRACER_PROVIDER
The value of these environment variables should be the name of the entry point
that points to the class that implements either provider. This OpenTelemetry
API package provides one entry point for each, which can be found in the
setup.py file::
entry_points={
...
"opentelemetry_meter_provider": [
"default_meter_provider = "
"opentelemetry.metrics:DefaultMeterProvider"
],
"opentelemetry_tracer_provider": [
"default_tracer_provider = "
"opentelemetry.trace:DefaultTracerProvider"
],
}
To use the meter provider above, then the
``OPENTELEMETRY_PYTHON_METER_PROVIDER`` should be set to
``"default_meter_provider"`` (this is not actually necessary since the
OpenTelemetry API provided providers are the default ones used if no
configuration is found in the environment variables).
This object can be used by any OpenTelemetry component, native or external.
For that reason, the ``Configuration`` object is designed to be immutable.
If a component would change the value of one of the ``Configuration`` object
attributes then another component that relied on that value may break, leading
to bugs that are very hard to debug. To avoid this situation, the preferred
approach for components that need a different value than the one provided by
the ``Configuration`` object is to implement a mechanism that allows the user
to override this value instead of changing it.
"""
from os import environ
from re import fullmatch
class Configuration:
_instance = None
__slots__ = []
def __new__(cls) -> "Configuration":
if Configuration._instance is None:
for key, value in environ.items():
match = fullmatch(
r"OPENTELEMETRY_PYTHON_([A-Za-z_][\w_]*)", key
)
if match is not None:
key = match.group(1)
setattr(Configuration, "_{}".format(key), value)
setattr(
Configuration,
key,
property(
fget=lambda cls, key=key: getattr(
cls, "_{}".format(key)
)
),
)
Configuration.__slots__.append(key)
Configuration.__slots__ = tuple(Configuration.__slots__)
Configuration._instance = object.__new__(cls)
return cls._instance
def __getattr__(self, name):
return None
@classmethod
def _reset(cls):
"""
This method "resets" the global configuration attributes
It is not intended to be used by production code but by testing code
only.
"""
for slot in cls.__slots__:
if slot in cls.__dict__.keys():
delattr(cls, slot)
delattr(cls, "_{}".format(slot))
cls.__slots__ = []
cls._instance = None
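# Illustrative usage (a sketch; attribute names mirror the group captured by
# the regex above, and any name never captured simply resolves to None):
#
#     config = Configuration()
#     config is Configuration()   # -> True, instantiation always returns the same object
#     config.anything_undefined   # -> None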
|
[
"noreply@github.com"
] |
nirsky.noreply@github.com
|
997d622013234e1c3c077b2ab2c3f4ce830f7c02
|
8c8e7f6038ca689623c3a6330a7231b0f5ac2c56
|
/stats.py
|
45a79bdd0993160b831cc7380e6f988bead89c9f
|
[
"MIT"
] |
permissive
|
numediart/multi-rnn-wordlevel
|
68a420ff680d4768cf963965f15d047d8119736c
|
214e43f6c6f9de3a610389881ffbe4521c9df635
|
refs/heads/main
| 2023-08-14T03:48:19.490074
| 2021-09-24T09:55:04
| 2021-09-24T09:55:04
| 409,272,545
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,634
|
py
|
# The MIT License (MIT)
#
# Copyright (c) 2018 UMONS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from collections import Counter
def occurrences(words, nbr_words=None):
word_counts = Counter()
word_counts.update(words)
top_occurrences = []
total = 0
for x in word_counts.most_common(nbr_words):
percentage = x[1] / len(words) * 100
top_occurrences.append((x[0], percentage))
total += percentage
return total, top_occurrences
def variation(words, data, nbr_words=None):
word_counts = Counter()
word_counts.update(words)
data_counts = Counter()
data_counts.update(data)
for x in data_counts.most_common():
data_counts[x[0]] = x[1] / len(data) * 100
diff = Counter()
for x in word_counts.most_common():
word_counts[x[0]] = x[1] / len(words) * 100
diff[x[0]] = abs(word_counts[x[0]] - data_counts[x[0]])
resolution = 100 / len(words)
variations = []
for word, _ in diff.most_common(nbr_words):
variations.append((word, word_counts[word], data_counts[word]))
return resolution, variations
def recurse_longest_sequence(words, data, rev_data, data_index, window=4, usual_word_occurrence=100):
if len(words) > 0:
value, _, sample_seq, index, orig_seq = longest_original_sequence(words, data, rev_data, data_index, window,
usual_word_occurrence)
if sample_seq != '':
size = len(sample_seq.split(" "))
prev_values, prev_len_sample, prev_len_data = recurse_longest_sequence(words[:index], data, rev_data, data_index, window,
usual_word_occurrence)
post_values, post_len_sample, post_len_data = recurse_longest_sequence(words[index + size:], data, rev_data, data_index, window,
usual_word_occurrence)
return prev_values + [value] + post_values, prev_len_sample + [size] + post_len_sample, \
prev_len_data + [len(orig_seq.split(" "))] + post_len_data
return [], [], []
def longest_original_sequence(words, data, rev_data, data_index, window=4, usual_word_occurrence=100):
# sample and data are array with each entry corresponding to a word in the vocabulary
max_value = 0
max_words_index = 0
max_data_index = 0
max_words_len = 0
max_data_len = 0
rev_words = list(reversed(words))
for i, word in enumerate(words):
indices = data_index[word]
if len(indices) > usual_word_occurrence:
# Discard starting algorithm on usual words
# They will not help us and will improve computing time
continue
for index in indices:
rev_value, rev_words_len, rev_data_len = match_sequences(rev_words[len(rev_words)-i:],
rev_data[len(rev_data)-index:],
window)
value, words_len, data_len = match_sequences(words[i:],
data[index:],
window)
if rev_value + value > max_value:
max_value = rev_value + value
max_words_index = i - rev_words_len
max_data_index = index - rev_data_len
max_words_len = words_len + rev_words_len
max_data_len = data_len + rev_data_len
percent = max_value / len(words) * 100
sample_seq = ' '.join(words[max_words_index:max_words_index + max_words_len])
orig_seq = ' '.join(data[max_data_index:max_data_index + max_data_len])
return max_value, percent, sample_seq, max_words_index, orig_seq
def match_sequences(words, data, window):
# First element in words and data are matching
# Return the size of the longest sequence found with a flexibility window
if len(words) == 0:
return 0, 0, 0
seq = 1
data_pointer = 1
words_pointer = 1
while data_pointer < len(data) and words_pointer < len(words):
real_window = min(len(data) - data_pointer, len(words) - words_pointer, window)
words_window = words[words_pointer:words_pointer + real_window]
data_window = data[data_pointer:data_pointer + real_window]
found, i, j = match_window(words_window, data_window)
if not found:
break
seq += 1
words_pointer += i + 1
data_pointer += j + 1
return seq, words_pointer, data_pointer
def match_window(data_1, data_2):
# Find index of first corresponding element
for i in range(len(data_1)):
for j in range(len(data_2)):
if data_1[i] == data_2[j]:
return True, i, j
return False, -1, -1
def vocab_distribution(words, vocabs):
use = {}
sample_vocabs = {}
for name in vocabs:
use[name] = 0
sample_vocabs[name] = {}
for word in words:
for name in vocabs:
if word in vocabs[name].keys():
use[name] += 1
sample_vocabs[name][word] = 1
for name in vocabs:
use[name] = use[name] / len(words) * 100
return use, sample_vocabs
def merge_common_vocab(vocabs, common_name='Common'):
new_vocabs = {}
for name in vocabs:
new_vocabs[name] = vocabs[name].copy()
if name == common_name:
print("Error : Common name is also a vocab name.")
return None
new_vocabs[common_name] = {}
# Iterate over words of one random vocab
ref = dict(next(iter(vocabs.values())))
for word in ref:
common = True
for name in vocabs:
if word not in vocabs[name]:
common = False
if common:
new_vocabs[common_name][word] = 1
for name in vocabs:
new_vocabs[name].pop(word)
return new_vocabs
def detect_pattern(words, max_size=None):
best_pattern = None
# Don't consider these tokens in patterns detection
escape_words = ["\n", '"', ".", "'", ",", ";"]
for word in escape_words:
words = [x for x in words if x != word]
if max_size is None or max_size > int(len(words) / 2):
max_size = int(len(words) / 2)
# Checking 1 word for patterns is not very interesting
# counter = Counter()
# counter.update(words)
# result = counter.most_common(1)[0]
# best_pattern = ([result[0]], result[1])
for size in range(2, max_size + 1):
counter = {}
grep = (words[i:] for i in range(size))
patterns = list(zip(*grep))
# Avoid overlapping patterns
for i, pattern in enumerate(patterns):
if pattern not in counter:
counter[pattern] = {'count': 1, 'index': i}
elif counter[pattern]['index'] + size <= i:
counter[pattern]['count'] += 1
counter[pattern]['index'] = i
result = None
for pattern in counter:
if result is None or result[1] < counter[pattern]['count']:
result = (pattern, counter[pattern]['count'])
if result[1] > 1 and (
best_pattern is None or result[1] * len(result[0]) > best_pattern[1] * len(best_pattern[0])):
best_pattern = result
if best_pattern is None:
return [], 0
return best_pattern
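# Illustrative call (hypothetical input):
#     detect_pattern("the cat sat the cat sat".split())  # -> (('the', 'cat', 'sat'), 2)
# i.e. the longer repeated, non-overlapping pattern wins over the equally
# frequent two-word one, since length * count is what is maximized.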
|
[
"loic.vandenbemden@umons.ac.be"
] |
loic.vandenbemden@umons.ac.be
|
677c6baf6c50f3cb9bbffa21f022ff7a933218cb
|
da1848ea034a426378fec386739c9230065d73cb
|
/NOx_2020/lowess.py
|
4ffee5c96f93f9f54c1c673f31ae60bb8f791470
|
[] |
no_license
|
matt-rowlinson/cvao
|
54ab4b9cf09d407f54e5c1596e6fc6bc71d6334e
|
8a95850885b0059936a671c24c2918c9ca76dda0
|
refs/heads/main
| 2023-05-15T00:27:44.951038
| 2021-06-04T08:16:09
| 2021-06-04T08:16:09
| 349,418,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,372
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import pandas as pd # Pandas handles dataframes
import numpy as np
import scipy
import matplotlib # Numpy handles lots of basic maths operations
import matplotlib.pyplot as plt # Matplotlib for plotting
import seaborn as sns # Seaborn for beautiful plots
import statsmodels
sys.path.append('/users/mjr583/python_lib/')
import CVAO_tools as CV
from CVAO_dict import CVAO_dict as d
import RowPy as rp
variable='NO'
timestep='M'
alpha=.75 ; poly=2
df=pd.read_csv('/mnt/lustre/users/mjr583/NCAS_CVAO/CVAO_datasets/NOx_Jan_2014-Dec_2020_with_flags_and_LOD_ppt.csv', index_col=0)
df=pd.DataFrame(df)
df.index=pd.to_datetime(df.index)
df['NO']=df['NO_pptV']
df['NO2']=df['NO2_pptV']
var='NO'
dff=df[df['%s_Flag' %var] < .200 ]
temp=dff[dff[var] >= 0. ]
temp = pd.DataFrame( temp[var] )
temp.columns = [var]
df=temp[:'2020']
df=df.resample(timestep).mean()
df.index.name = 'Xvalue'
df.columns = ['Yvalue']
df=pd.DataFrame({'Xvalue':np.arange(len(df.Yvalue)), 'Yvalue':df.Yvalue }, index=df.index )
idx=np.isfinite(df.Yvalue)
Yvalue=df.Yvalue[idx]
Xvalue=df.Xvalue[idx]
index=df.index[idx]
df=pd.DataFrame({'Xvalue':np.arange(len(Yvalue)), 'Yvalue':Yvalue }, index=index )
cv=df
monmean=cv.groupby(cv.index.month).mean()
anom = []
for n in range(len(cv.Yvalue)):
nmonth=cv.index[n].month
anom.append( cv.Yvalue[n] - monmean.Yvalue[nmonth] )
df = pd.DataFrame({'Xvalue':np.arange(len(Yvalue)),'Yvalue' : anom}, index=cv.index)
# Scatterplot
plt.scatter(df.index, df["Yvalue"], color="grey", marker="o", s=5)
plt.xlabel("X"), plt.ylabel("Y")
plt.title('(N = 100)')
plt.savefig('plots/scatterplot.png')
plt.close()
# Create linear trend line
sns.regplot("Xvalue", "Yvalue", data=df, color="grey", scatter_kws={"s": 10},
line_kws={"color":"r","alpha":1,"lw":1} ,fit_reg=True)
plt.xlabel("X"), plt.ylabel("Y")
plt.title('Anomaly data - with linear trend line')
plt.savefig('plots/linear_trendline.png')
plt.close()
def loc_eval(x, b):
loc_est = 0
for i in enumerate(b): loc_est+=i[1]*(x**i[0])
return(loc_est)
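# e.g. loc_eval(2, [1, 3]) evaluates the local polynomial 1 + 3*x at x=2 -> 7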
def loess(xvals, yvals, data, alpha, poly_degree=1):
all_data = sorted(zip(data[xvals].tolist(), data[yvals].tolist()), key=lambda x: x[0])
xvals, yvals = zip(*all_data)
evalDF = pd.DataFrame(columns=['v','g'])
n = len(xvals)
m = n + 1
q = int(np.floor(n * alpha) if alpha <= 1.0 else n)
avg_interval = ((max(xvals)-min(xvals))/len(xvals))
v_lb = min(xvals)-(.5*avg_interval)
v_ub = (max(xvals)+(.5*avg_interval))
v = enumerate(np.linspace(start=v_lb, stop=v_ub, num=m), start=1)
xcols = [np.ones_like(xvals)]
for j in range(1, (poly_degree + 1)):
xcols.append([i ** j for i in xvals])
X = np.vstack(xcols).T
for i in v:
#print(i)
iterpos = i[0]
iterval = i[1]
iterdists = sorted([(j, np.abs(j-iterval)) for j in xvals], key=lambda x: x[1])
_, raw_dists = zip(*iterdists)
scale_fact = raw_dists[q-1]
scaled_dists = [(j[0],(j[1]/scale_fact)) for j in iterdists]
weights = [(j[0],((1-np.abs(j[1]**3))**3 if j[1]<=1 else 0)) for j in scaled_dists]
_, weights = zip(*sorted(weights, key=lambda x: x[0]))
_, raw_dists = zip(*sorted(iterdists, key=lambda x: x[0]))
_, scaled_dists = zip(*sorted(scaled_dists,key=lambda x: x[0]))
W = np.diag(weights)
b = np.linalg.inv(X.T @ W @ X) @ (X.T @ W @ yvals)
local_est = loc_eval(iterval, b)
iterDF2 = pd.DataFrame({
'v' :[iterval],
'g' :[local_est]
})
evalDF = pd.concat([evalDF, iterDF2])
evalDF = evalDF[['v','g']]
return(evalDF)
evalDF = loess("Xvalue", "Yvalue", data = df, alpha=alpha, poly_degree=poly)
print(len(evalDF['v']))
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(df.index, df["Yvalue"], color="grey", marker="o", alpha=0.5,s=5, label="_nolegend_")
ax1.plot(df.index, evalDF['g'][1:], color='red', linewidth= 3, label="Test")
plt.title('LOWESS regression ( alpha = %s, polynomial degree = %s )' %(alpha, poly))
plt.xlabel(None), plt.ylabel("NO anomaly, pptv")
plt.legend()
plt.tight_layout()
plt.savefig('plots/cv_%s%sm_lowess-a%s-p%s.png' %(variable, timestep, alpha, poly))
|
[
"matthew.rowlinson@york.ac.uk"
] |
matthew.rowlinson@york.ac.uk
|
cac1686c7e8af01ee5e5486215adb5c2c9d50c4c
|
025d7484c52b204bc286dfb9d17fc08e8e03604e
|
/hr_applicant_portal/models/shiping.py
|
0a3620e6fdc5223a8e8c4598e0c276483afd8d41
|
[] |
no_license
|
gotorishab/stpi
|
3e2d2393a3b64f313c688bfcb4855052ea5e62b4
|
a548e923f80e124ea5f90f4559ec727193c70528
|
refs/heads/master
| 2021-07-05T16:19:39.932782
| 2021-04-30T03:58:05
| 2021-04-30T03:58:05
| 236,436,956
| 0
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 586
|
py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See COPYRIGHT & LICENSE files for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.http import request
class StockPicking(models.Model):
_inherit = "stock.picking"
def _compute_access_url(self):
for move in self:
move.access_url = '/my/shiping/%s' % (move.id)
def _get_report_base_filename(self):
self.ensure_one()
return '%s' % (self.name)
access_url = fields.Char('Portal Access URL', compute='_compute_access_url', help='Stock Order Portal URL')
|
[
"rgupta@dexciss.com"
] |
rgupta@dexciss.com
|
8d94abc692154699bbb4efe0f1971261ee8b2409
|
62033cc33fb92fc5c4e9d6741ad41a82bd17c2ae
|
/portfolio.py
|
012895b1d27b84c6dc0d5821c1a999d5b8e8646b
|
[] |
no_license
|
diarts/portfolio
|
5ea8fe9ef0adcff729694930915496b6f8b03e2f
|
666632ee3d05a8c4e1e099548e55e307ad229f53
|
refs/heads/develop
| 2023-05-10T20:22:34.122127
| 2019-07-29T10:06:35
| 2019-07-29T10:06:35
| 185,640,484
| 0
| 0
| null | 2023-05-01T20:58:13
| 2019-05-08T16:12:03
|
CSS
|
UTF-8
|
Python
| false
| false
| 249
|
py
|
from application import create_app
from database import db
from model import Buttons, Location
app = create_app('development')
@app.shell_context_processor
def make_shell_context():
return {'db': db, 'Buttons': Buttons, 'Location': Location}
|
[
"diarts@mail.ru"
] |
diarts@mail.ru
|
868b1bd8d68f7c2eea08fdb19e308f7c8df2d6ed
|
9e06b7e744b83ae05de1bed4a1f4936526dee79c
|
/deepspeech_recognizer.py
|
538837ab45198f54fac248eba0d660b4baf2b656
|
[] |
no_license
|
dimasKaskader/SpeechRecognitionTesting
|
f18a31943fdb4b5844dcdae669c8206bf59eb9af
|
f70a956798b15dd0af897bcd67d716dc9d86acb5
|
refs/heads/master
| 2020-06-02T23:04:01.102324
| 2019-06-11T09:56:43
| 2019-06-11T09:56:43
| 191,337,158
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,631
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import argparse
import numpy as np
import shlex
import subprocess
import sys
import wave
from deepspeech import Model
from timeit import default_timer as timer
try:
from shlex import quote
except ImportError:
from pipes import quote
# These constants control the beam search decoder
# Beam width used in the CTC decoder when building candidate transcriptions
BEAM_WIDTH = 500
# The alpha hyperparameter of the CTC decoder. Language Model weight
LM_ALPHA = 0.75
# The beta hyperparameter of the CTC decoder. Word insertion bonus.
LM_BETA = 1.85
# These constants are tied to the shape of the graph used (changing them changes
# the geometry of the first layer), so make sure you use the same constants that
# were used during training
# Number of MFCC features to use
N_FEATURES = 26
# Size of the context window used for producing timesteps in the input vector
N_CONTEXT = 9
def convert_samplerate(audio_path):
sox_cmd = 'sox {} --type raw --bits 16 --channels 1 --rate 16000 --encoding signed-integer --endian little --compression 0.0 --no-dither - '.format(quote(audio_path))
try:
output = subprocess.check_output(shlex.split(sox_cmd), stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
raise RuntimeError('SoX returned non-zero status: {}'.format(e.stderr))
except OSError as e:
raise OSError(e.errno, 'SoX not found, use 16kHz files or install it: {}'.format(e.strerror))
return 16000, np.frombuffer(output, np.int16)
class VersionAction(argparse.Action):
def __init__(self, *args, **kwargs):
super(VersionAction, self).__init__(nargs=0, *args, **kwargs)
def __call__(self, *args, **kwargs):
#printVersions()
exit(0)
class DeepSpeech:
def __init__(self, model_path):
self.model = model_path + '/output_graph.pbmm'
self.alphabet = model_path + '/alphabet.txt'
self.lm = model_path + '/lm.binary'
self.trie = model_path + '/trie'
#print('Loading model from file {}'.format(self.model), file=sys.stderr)
#model_load_start = timer()
self.ds = Model(self.model, N_FEATURES, N_CONTEXT, self.alphabet, BEAM_WIDTH)
#model_load_end = timer() - model_load_start
#print('Loaded model in {:.3}s.'.format(model_load_end), file=sys.stderr)
if self.lm and self.trie:
#print('Loading language model from files {} {}'.format(self.lm, self.trie), file=sys.stderr)
#lm_load_start = timer()
self.ds.enableDecoderWithLM(self.alphabet, self.lm, self.trie, LM_ALPHA, LM_BETA)
#lm_load_end = timer() - lm_load_start
#print('Loaded language model in {:.3}s.'.format(lm_load_end), file=sys.stderr)
def recognize(self, wav_file):
'''parser = argparse.ArgumentParser(description='Running DeepSpeech inference.')
parser.add_argument('--model', required=True,
help='Path to the model (protocol buffer binary file)')
parser.add_argument('--alphabet', required=True,
help='Path to the configuration file specifying the alphabet used by the network')
parser.add_argument('--lm', nargs='?',
help='Path to the language model binary file')
parser.add_argument('--trie', nargs='?',
help='Path to the language model trie file created with native_client/generate_trie')
parser.add_argument('--audio', required=True,
help='Path to the audio file to run (WAV format)')
parser.add_argument('--version', action=VersionAction,
help='Print version and exits')
args = parser.parse_args()'''
fin = wave.open(wav_file, 'rb')
fs = fin.getframerate()
if fs != 16000:
#print('Warning: original sample rate ({}) is different than 16kHz. Resampling might produce erratic speech recognition.'.format(fs), file=sys.stderr)
fs, audio = convert_samplerate(wav_file)
else:
audio = np.frombuffer(fin.readframes(fin.getnframes()), np.int16)
#audio_length = fin.getnframes() * (1/16000)
fin.close()
#print('Running inference.', file=sys.stderr)
#inference_start = timer()
return self.ds.stt(audio, fs)
#inference_end = timer() - inference_start
#print('Inference took %0.3fs for %0.3fs audio file.' % (inference_end, audio_length), file=sys.stderr)
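# Illustrative usage (hypothetical paths; the model directory is expected to
# contain output_graph.pbmm, alphabet.txt, lm.binary and trie):
#     ds = DeepSpeech('./models')
#     print(ds.recognize('sample.wav'))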
|
[
"diantigers@gmail.com"
] |
diantigers@gmail.com
|
2cff1c703e744c7315bcc829d41e5720e0a69b84
|
13ad405784e51417fe73bf0d023d362fba7c0cdf
|
/web/polls/migrations/0011_auto_20181028_2328.py
|
dc9dac31a82350a766b28ea55e3c419badc8f6fa
|
[] |
no_license
|
skkulsc/skku
|
f37d7e49cc7039aa814f0da294f430e52aa81b81
|
1195e6fb5f9e05f8f873917eb7b42c997de84b02
|
refs/heads/master
| 2021-04-09T15:18:55.890165
| 2019-03-19T00:48:52
| 2019-03-19T00:48:52
| 125,811,634
| 0
| 0
| null | 2018-04-14T16:12:05
| 2018-03-19T06:29:44
|
Python
|
UTF-8
|
Python
| false
| false
| 345
|
py
|
# Generated by Django 2.1.2 on 2018-10-28 14:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('polls', '0010_auto_20181028_2232'),
]
operations = [
migrations.AlterModelOptions(
name='usernewstable',
options={'managed': True},
),
]
|
[
"arc3577@gmail.com"
] |
arc3577@gmail.com
|
687156af1feb55adfd093213a26ed0719f652c5d
|
444d5554ee2b1a0ecfb88bb8d037de763088718c
|
/app.py
|
821593aa49e9d1fad9fbddac03b5f0293e3e0280
|
[] |
no_license
|
LeOric32/tripsearch32
|
9542a3bece4c2de8976b7456cfcea0c6b3575bd5
|
e0ae3c8dfc1eb7eef2366f65bc5490a8d725ffde
|
refs/heads/main
| 2023-05-06T16:44:20.796658
| 2021-05-31T03:10:25
| 2021-05-31T03:10:25
| 372,363,212
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,171
|
py
|
import os
from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_object(os.environ['APP_SETTINGS'])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
from models import Book
@app.route("/")
def hello():
return "Hello World!"
@app.route("/add")
def add_book():
name=request.args.get('name')
author=request.args.get('author')
published=request.args.get('published')
try:
book=Book(
name=name,
author=author,
published=published
)
db.session.add(book)
db.session.commit()
return "Book added. book id={}".format(book.id)
except Exception as e:
return(str(e))
@app.route("/getall")
def get_all():
try:
books=Book.query.all()
return jsonify([e.serialize() for e in books])
except Exception as e:
return(str(e))
@app.route("/get/<id_>")
def get_by_id(id_):
try:
book=Book.query.filter_by(id=id_).first()
return jsonify(book.serialize())
except Exception as e:
return(str(e))
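# Illustrative requests against a local run (hypothetical values):
#     GET /add?name=Dune&author=Herbert&published=1965
#     GET /getall
#     GET /get/1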
if __name__ == '__main__':
app.run()
|
[
"dmitryzhuk99@gmail.com"
] |
dmitryzhuk99@gmail.com
|
098fa385ab05531a49d9ff3730c6664c729553f4
|
c5857b50862bf56397739e7c2f524bb7b233e929
|
/homeassistant/components/device_tracker/xiaomi_miio.py
|
615688923888eaf4d38fb73814b4d3ad043c637d
|
[
"Apache-2.0"
] |
permissive
|
nickw444/home-assistant
|
6668beaf1b1cc0dbcf0d3246b5bb57766cd66a2c
|
8d48164f25f3b7f272ee486ecdeb6e1e8e4c6174
|
refs/heads/dev
| 2023-08-23T11:26:32.812468
| 2018-04-11T01:38:23
| 2018-04-11T01:38:23
| 129,087,456
| 0
| 1
|
NOASSERTION
| 2021-08-31T22:42:32
| 2018-04-11T12:05:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,434
|
py
|
"""
Support for Xiaomi Mi WiFi Repeater 2.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/device_tracker.xiaomi_miio/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.device_tracker import (DOMAIN, PLATFORM_SCHEMA,
DeviceScanner)
from homeassistant.const import (CONF_HOST, CONF_TOKEN)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
})
REQUIREMENTS = ['python-miio==0.3.9']
def get_scanner(hass, config):
"""Return a Xiaomi MiIO device scanner."""
from miio import WifiRepeater, DeviceException
scanner = None
host = config[DOMAIN].get(CONF_HOST)
token = config[DOMAIN].get(CONF_TOKEN)
_LOGGER.info(
"Initializing with host %s (token %s...)", host, token[:5])
try:
device = WifiRepeater(host, token)
device_info = device.info()
_LOGGER.info("%s %s %s detected",
device_info.model,
device_info.firmware_version,
device_info.hardware_version)
scanner = XiaomiMiioDeviceScanner(hass, device)
except DeviceException as ex:
_LOGGER.error("Device unavailable or token incorrect: %s", ex)
return scanner
class XiaomiMiioDeviceScanner(DeviceScanner):
"""This class queries a Xiaomi Mi WiFi Repeater."""
def __init__(self, hass, device):
"""Initialize the scanner."""
self.device = device
async def async_scan_devices(self):
"""Scan for devices and return a list containing found device ids."""
from miio import DeviceException
devices = []
try:
station_info = await self.hass.async_add_job(self.device.status)
_LOGGER.debug("Got new station info: %s", station_info)
for device in station_info['mat']:
devices.append(device['mac'])
except DeviceException as ex:
_LOGGER.error("Got exception while fetching the state: %s", ex)
return devices
async def async_get_device_name(self, device):
"""The repeater doesn't provide the name of the associated device."""
return None
|
[
"tpr@iki.fi"
] |
tpr@iki.fi
|
225efff7388866e6b7c5dfe7f5e55e2a5358b8ab
|
81dc2411ea6e9b59e80aad01a30e11ab8d581fff
|
/python-problems/problem9.py
|
2cef7a55f80b4839e0325b78e413221a857ad475
|
[] |
no_license
|
arnav13081994/python-deepdive
|
fb9445e3b5536af34093e993772fa0569df520a1
|
7b906291d044f4ca7387de6e1833a61c9583c966
|
refs/heads/master
| 2023-07-07T14:05:08.688553
| 2021-08-16T02:45:33
| 2021-08-16T02:45:33
| 254,629,266
| 1
| 0
| null | 2020-04-10T12:29:16
| 2020-04-10T12:29:15
| null |
UTF-8
|
Python
| false
| false
| 1,325
|
py
|
"""
Given a list of numbers and a target sum, return all quadruples that add up to that sum
"""
def return_unique_members(lst):
""" Given an iterable of tuples, return only unique tuples.
Uniqueness is defined if no 2 tuples have all the same elements. Order doesn't matter"""
ans = []
set_unique = set()
for i in lst:
if not set(i).issubset(set_unique):
# Add all elements of that tuple to ans
set_unique.update(i)
ans.append(i)
return ans
def twosum(lst, target_sum):
""" Given a list of numbers and a target sum, return all doubles that add up to that sum"""
ans = [
[num, target_sum - num]
for num in lst
if (target_sum - num) in lst and num != (target_sum - num)
]
return ans
def threesum(lst, target_sum):
"""Given a list of numbers and a target sum, return all triples that add up to that sum"""
ans = [
i + [num]
for num in lst
for i in twosum(lst, target_sum - num)
]
return ans
def foursum(lst, target_sum):
"""Given a list of numbers and a target sum, return all quadruples that add up to that sum"""
ans = [
i + [num]
for num in lst
for i in threesum(lst, target_sum - num)
]
ans = return_unique_members(ans) # Remove duplicate iterables in the list
ans = [tuple(lst) for lst in ans] # Convert the iterables in the list to tuples
return ans
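# Illustrative call: foursum([1, 2, 3, 4], 10) returns [(3, 4, 2, 1)], the
# single unique quadruple (element order reflects the search, not the input).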
|
[
"arnav13@gmail.com"
] |
arnav13@gmail.com
|
e7055c9eb9ec3adfdd75c281b6197d9323c37786
|
5fe72bb13baf3649058ebe11aa86ad4fc56c69ed
|
/hard-gists/badd18a84b236c9e6c34057512cb569a/snippet.py
|
5c0a7a521adb4a2054174cb1ea11fc02afb8bbc5
|
[
"Apache-2.0"
] |
permissive
|
dockerizeme/dockerizeme
|
8825fed45ff0ce8fb1dbe34959237e8048900a29
|
408f3fa3d36542d8fc1236ba1cac804de6f14b0c
|
refs/heads/master
| 2022-12-10T09:30:51.029846
| 2020-09-02T13:34:49
| 2020-09-02T13:34:49
| 144,501,661
| 24
| 20
|
Apache-2.0
| 2022-11-21T12:34:29
| 2018-08-12T21:21:04
|
Python
|
UTF-8
|
Python
| false
| false
| 13,006
|
py
|
#######################################################
#### NOW AT A GITHUB REPO ####
#### https://github.com/tusing/unicorn_phat ####
#######################################################
#!/usr/bin/env python
# Display a list of user-defined color bars;
# fill the remaining area with sparkles.
# Designed for the Unicorn pHAT.
# By tusing.
import unicornhat as unicorn
from random import randint
import time, math, colorsys
import os, sys, subprocess, threading
# Initialization
unicorn.set_layout(unicorn.AUTO)
unicorn.rotation(0)
unicorn.brightness(0.35) # Tune to your preferences.
width,height=unicorn.get_shape()
# Line number for where each function will begin
function_pos = {}
# Store values for multithreaded fetching functions.
function_values = {}
def main(display_function, bar_functions, time_limit=None):
""" The main display function. Uses function_pos to assign parts of the display to
bar functions and display functions.
Args:
display_function (func): A function intended to take up the majority of the HAT's
display. Should limit display area with the use of function_pos.
bar_functions (func): A list of single-row "bars". Again, assign position with the
use of function_pos.
time_limit (int): How long to wait before quitting (in seconds).
"""
if bar_functions is not None:
for index, bar_function in enumerate(bar_functions):
function_pos[bar_function] = width - index - 1
if display_function is not None:
function_pos[display_function] = width - len(bar_functions) - 1
else:
function_pos[display_function] = width - 1
threads = [threading.Thread(target=function) for function in function_pos.keys()]
for thread in threads:
thread.start()
if time_limit is not None:
time.sleep(time_limit)
print("Time limit reached!")
os._exit(3)
######################################################################
####################### ##########################
####################### BAR FUNCTIONS ##########################
####################### ##########################
######################################################################
####################### INTERNET BAR ##########################
def internet_color(update_rate=5):
""" Color bar - tests internet connectivity. Displays white if connected;
orange if not.
Args:
update_rate (float): seconds to wait before checking connectivity again
"""
# Ping a Google DNS server to check for internet connectivity.
while True:
ping_response = subprocess.Popen(["/bin/ping", "-c1", "-w100", "8.8.8.8"], stdout=subprocess.PIPE).stdout.read()
if "1 received" in str(ping_response):
moving_pixel(function_pos[internet_color], (0, 255, 255), (255, 255, 255))
else:
moving_pixel(function_pos[internet_color], (255, 255, 255), (255, 127, 80))
unicorn.show()
time.sleep(update_rate)
def color_bar(position, color):
""" Display a single, static bar of ```color``` in ```position```.
Args:
position (int): the width index at which to display the bar
color (int tuple): (R, G, B) tuple of the RGB color to be displayed
"""
for height_index in range(height):
unicorn.set_pixel(position, height_index, *color)
return
def moving_pixel(position, color, background, speed=0.1, direction="right"):
""" Display a right-moving pixel of color ```color``` on a color bar with
color ```background``` in position ```position.```
Args:
position (int): The width index at which to display the bar animation
color (int tuple): (R, G, B) tuple of the moving pixel's color
background (int tuple): (R, G, B) tuple of the background color
speed (float): how often to wait between pixel movements
direction (string, "left" or "right"): the direction the pixel
should move, with "right" being towards the USB ports
"""
for height_index in range(height):
color_bar(position, background)
if direction == "right":
unicorn.set_pixel(position, height_index, *color)
if direction == "left":
unicorn.set_pixel(position, height - height_index - 1, *color)
unicorn.show()
time.sleep(speed)
color_bar(position, background)
unicorn.show()
######################################################################
####################### ##########################
####################### FETCHER FUNCTIONS ##########################
####################### ##########################
######################################################################
def load_fetcher(update_rate=5):
""" Get the load of the system and modify the relevant dictionary
with the new load value.
Args:
update_rate (float): seconds to wait before fetching load value
"""
while True:
function_values[load_fetcher] = os.getloadavg()[0]
time.sleep(update_rate)
def random_color():
""" Generate a random RGB color.
Returns:
int tuple: (R, G, B) values
"""
r, g, b = randint(0, 255), randint(0, 255), randint(0, 255)
return (r, g, b)
######################################################################
####################### ##########################
####################### DISPLAY FUNCTIONS ##########################
####################### ##########################
######################################################################
####################### LOAD SPARKLES ##########################
def load_sparkles(color_function=None, update_rate=5):
""" Fill the rest of the area with randomly-positioned sparkles.
Frequency of sparkling increases with load^2 (for load>1).
Args:
color_function (func): Define a custom function for the
sparkles' color, instead of a random rainbow.
update_rate (float): How often to refresh system load value (seconds).
"""
color_function = random_color if color_function is None else color_function
def random_pixel(color_function):
""" Generate a randomly positioned pixel with the color returned
by color_function.
Args:
color_function (func): Should return a (R,G,B) color value.
"""
color = color_function()
def random_position():
""" Get the position of a random pixel bound by
function_pos. """
x = randint(0, function_pos[load_sparkles])
y = randint(0, (height-1))
return (x,y)
selected_pixel = random_position()
''' Aesthetic: If the randomly generated pixel is currently lit,
turn it off and try with a new pixel. Also works as sort of a
population control on how many pixels will be lit. '''
while sum(unicorn.get_pixel(*selected_pixel)) > 0:
unicorn.set_pixel(*(selected_pixel + (0, 0, 0)))
selected_pixel = random_position()
unicorn.set_pixel(*(selected_pixel + color))
return
''' Sparkle with a frequency based off of the computer's current
load. Fetch load value every update_rate seconds.'''
function_values[load_fetcher] = 1
threading.Thread(target=load_fetcher).start()
while True:
tick = 1
if function_values[load_fetcher] > 1:
tick = 1/(function_values[load_fetcher]**2) if function_values[load_fetcher] < 12 else 1/144
for i in range(int(update_rate/tick)):
random_pixel(color_function)
unicorn.show()
time.sleep(tick)
####################### LOAD RAINBOW ##########################
def load_rainbow(update_rate=5):
""" A lightly modified version of Pimeroni's "rainbow" example.
Displays a moving rainbow of colors that increases with load.
Args:
update_rate (float): How often to update the load value (seconds).
"""
i = 0.0
offset = 30
function_values[load_fetcher] = 1
threading.Thread(target=load_fetcher).start()
while True:
load_function = function_values[load_fetcher]/10 if function_values[load_fetcher] <= 10 else 10
for w in range(int(update_rate/0.01)):
i = i + load_function
for y in range(height):
for x in range(function_pos[load_rainbow] + 1):
r = 0#x * 32
g = 0#y * 32
xy = x + y / 4
r = (math.cos((x+i)/2.0) + math.cos((y+i)/2.0)) * 64.0 + 128.0
g = (math.sin((x+i)/1.5) + math.sin((y+i)/2.0)) * 64.0 + 128.0
b = (math.sin((x+i)/2.0) + math.cos((y+i)/1.5)) * 64.0 + 128.0
r = max(0, min(255, r + offset))
g = max(0, min(255, g + offset))
b = max(0, min(255, b + offset))
unicorn.set_pixel(x,y,int(r),int(g),int(b))
unicorn.show()
time.sleep(0.01)
####################### LOAD MATRIX ##########################
def load_matrix(update_rate=5):
""" A heavily modified version of Pimeroni's "cross" example.
Speed increases with n*load^2.
Args:
update_rate (float): seconds to wait before updating load value
"""
points = []
edge_pixels = []
class LightPoint:
def __init__(self):
self.direction = randint(1, 4)
if self.direction == 1:
self.x = randint(0, function_pos[load_matrix])
self.y = 0
elif self.direction == 2:
self.x = 0
self.y = randint(0, height - 1)
elif self.direction == 3:
self.x = randint(0, function_pos[load_matrix])
self.y = height - 1
else:
self.x = function_pos[load_matrix] - 1
self.y = randint(0, height - 1)
self.colour = []
for i in range(0, 3):
self.colour.append(randint(100, 255))
self.oldxy = (self.x, self.y)
def update_positions():
for point in points:
# Any point already at an edge has been in display at this
# edge for ```tick``` seconds already, so we delete it.
check_edges(point)
point.oldxy = (point.x, point.y)
if point.direction == 1:
point.y += 1
elif point.direction == 2:
point.x += 1
elif point.direction == 3:
point.y -= 1
else:
point.x -= 1
# Delete points that would cause a boundary violation after
# the above coordinate update.
check_boundaries(point)
unicorn.show()
def plot_points():
for point in points:
unicorn.set_pixel(point.x, point.y, point.colour[0], point.colour[1], point.colour[2])
unicorn.set_pixel(*(point.oldxy + (0, 0, 0)))
unicorn.show()
def check_edges(point):
""" Deletes points that have reached an edge.
Args:
point (LightPoint): The point that has reached an edge.
"""
if (point.x == function_pos[load_matrix] and point.direction == 2) \
or (point.x == 0 and point.direction == 4) \
or (point.y == 0 and point.direction == 3) \
or (point.y == height - 1 and point.direction == 1):
unicorn.set_pixel(point.x, point.y, 0, 0, 0)
points.remove(point)
def check_boundaries(point):
""" Deletes points beyond allowed boundaries.
Args:
point (LightPoint): The point to check for boundary violations.
"""
if (point.x > function_pos[load_matrix] and point.direction == 2) \
or (point.x < 0 and point.direction == 4) \
or (point.y < 0 and point.direction == 3) \
or (point.y > height - 1 and point.direction == 1):
if point in points:
points.remove(point)
function_values[load_fetcher] = 1
threading.Thread(target=load_fetcher).start()
tick_func = lambda load: 0.5/(load**2) if load > 1 else 1/4
max_points = function_pos[load_matrix]*height/3
while True:
tick = tick_func(function_values[load_fetcher]) if function_values[load_fetcher] < 12 else tick_func(12)
for w in range(int(update_rate/tick)):
if len(points) < max_points and randint(0, 5) > 1:
points.append(LightPoint())
update_positions()
plot_points()
time.sleep(tick)
if __name__ == "__main__":
main(load_matrix, (internet_color,))
|
[
"42325807+dockerizeme@users.noreply.github.com"
] |
42325807+dockerizeme@users.noreply.github.com
|
2301af008976fde42176705242bac2e43f6e3771
|
6f73053e9c5a8bb40f34dd12a9a000ad3ca84f1f
|
/exchange/libs/event_emitter.py
|
cd93d0424e6ee51ed63efd71d65c43c385e0d644
|
[] |
no_license
|
Bizilizi/python-crypto-exchange
|
9610857580b5c0239a9cbd778246c65e693a8d31
|
9eef1641e8cc04a7b3922447b05c04e37268187b
|
refs/heads/master
| 2022-12-01T14:28:28.819215
| 2020-08-14T06:26:37
| 2020-08-14T06:26:37
| 287,306,992
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,733
|
py
|
from __future__ import annotations
import abc
import asyncio
import inspect
import typing as t
from .flow import in_memory_flow
if t.TYPE_CHECKING:
from .flow import Flow, Fork
class Comparable(t.Protocol):
def __eq__(self, other: t.Any) -> bool:
...
EType = t.TypeVar("EType", bound=Comparable)
Event = t.Tuple[EType, t.Dict[str, t.Any]]
class Dispatcher(t.Generic[EType]):
_event_fork: Fork[Event[EType]]
_trigger: EType
_is_async_callback: bool
def __init__(
self,
event_stream: "Flow[Event[EType]]",
trigger_event: EType,
callback: t.Callable[..., t.Any],
) -> None:
self._event_fork = event_stream.fork()
self._callback = callback
self._trigger = trigger_event
self._is_async_callback = inspect.iscoroutinefunction(callback)
@property
async def events(self) -> t.AsyncGenerator[Event[EType], None]:
async for event, kwargs in self._event_fork:
if event == self._trigger:
yield event, kwargs
def dispatch_forever(self) -> asyncio.Task[t.Any]:
async def handle() -> None:
async for event, kwargs in self.events:
if self._is_async_callback:
    await self._callback(**kwargs)
else:
    self._callback(**kwargs)
task = asyncio.create_task(handle())
return task
def stop(self) -> None:
self._event_fork.stop()
class Subscriptable(t.Generic[EType]):
@abc.abstractmethod
async def events(self) -> t.AsyncGenerator[Event[EType], None]:
yield # type: ignore
@abc.abstractmethod
def subscribe(
self, event: EType, handler: t.Callable[..., t.Any]
) -> Dispatcher[EType]:
pass
class EventEmitter(Subscriptable[EType]):
_event_stream: "Flow[Event[EType]]"
def __init__(self) -> None:
self._event_stream = in_memory_flow.InMemoryFlow[Event[EType]]()
@property
async def events(self) -> t.AsyncGenerator[Event[EType], None]:
async for event, kwargs in self._event_stream:
yield event, kwargs
async def filter(self, trigger: EType) -> t.AsyncGenerator[Event[EType], None]:
async for event, kwargs in self._event_stream:
if event == trigger:
yield event, kwargs
def subscribe(
self, event: EType, handler: t.Callable[..., t.Any]
) -> Dispatcher[EType]:
return Dispatcher(self._event_stream, event, handler)
async def emit(self, event: EType, **kwargs: t.Any) -> None:
await self._event_stream.send((event, kwargs))
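# Illustrative usage (a sketch, assuming it runs inside an asyncio event loop;
# "tick" and the callback are hypothetical):
#
#     emitter = EventEmitter[str]()
#     dispatcher = emitter.subscribe("tick", lambda **kw: print(kw))
#     task = dispatcher.dispatch_forever()
#     await emitter.emit("tick", count=1)   # callback prints {'count': 1}
#     dispatcher.stop()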
|
[
"ewriji@airlab.team"
] |
ewriji@airlab.team
|
12ad1a02b2450bb3663044ef6246c0d0947bec5f
|
0ed7fa8dda9d692f8c85bf6c93f4e63124fcf90b
|
/speg/position.py
|
f9222d738c138b8367b90d168584ff937f13f4ff
|
[
"MIT-0"
] |
permissive
|
avakar/speg
|
95866e580a5627ac4c710dbbbf3eaeabda317192
|
877acddfd5ac5ae8b4a4592d045e74e108477643
|
refs/heads/master
| 2023-03-17T12:01:34.766183
| 2018-08-19T20:16:41
| 2018-08-19T20:16:41
| 46,519,449
| 2
| 2
| null | 2018-06-20T18:44:10
| 2015-11-19T20:48:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,360
|
py
|
from functools import total_ordering
@total_ordering
class Position(object):
def __init__(self, offset=0, line=1, col=1):
self.offset = offset
self.line = line
self.col = col
def advanced_by(self, text):
text_len = len(text)
offset = self.offset + text_len
nl_pos = text.rfind('\n')
if nl_pos < 0:
line = self.line
col = self.col + text_len
else:
line = self.line + text[:nl_pos].count('\n') + 1
col = text_len - nl_pos
return Position(offset, line, col)
def __eq__(self, other):
if not isinstance(other, Position):
return NotImplemented
return self.offset == other.offset
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.offset)
def __lt__(self, other):
if not isinstance(other, Position):
return NotImplemented
return self.offset < other.offset
def __repr__(self):
return '{}({!r}, {!r}, {!r})'.format(Position.__name__,
self.offset, self.line, self.col)
def get_line_at_position(text, pos):
suffix = text[pos.offset - pos.col + 1:]
stop = suffix.find('\n')
if stop == -1:
return suffix, pos.col - 1
else:
return suffix[:stop], pos.col - 1
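# e.g. Position().advanced_by('ab\ncd') -> Position(5, 2, 3): offset 5, line 2,
# column 3 (one past the two characters that follow the last newline).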
|
[
"vejnar.martin@gmail.com"
] |
vejnar.martin@gmail.com
|
0d91ac265f2efc25902758f309817b33515a272f
|
ddd466457316662a1455bae429740eb3c8411444
|
/python_bbq/Project_Modules/my_modules/vsearch.py
|
967205eb40ea9f80675e763743fca62f37bad6f8
|
[] |
no_license
|
fingerman/python_fundamentals
|
9ef46e51d6e9b8328e9c949fa0f807f30bd6e482
|
1fb604220922530d1171200a3cf3a927c028a6ed
|
refs/heads/master
| 2023-01-09T12:02:26.712810
| 2020-01-22T16:12:32
| 2020-01-22T16:12:32
| 151,728,846
| 0
| 0
| null | 2022-12-27T15:34:12
| 2018-10-05T13:58:10
|
Python
|
UTF-8
|
Python
| false
| false
| 318
|
py
|
#################
def search4vowels(phrase: str) -> set:
"""Return any vowels in phrase"""
vowels = set('aeiouAEIOU')
return vowels.intersection(set(phrase))
def search4letters(phrase: str, letters: str) -> set:
"""Return letters found in string"""
return set(letters).intersection(set(phrase))
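# Examples:
#     search4vowels('Hello World')           # -> {'e', 'o'}
#     search4letters('life of brian', 'of')  # -> {'o', 'f'}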
|
[
"adamov.george@gmail.com"
] |
adamov.george@gmail.com
|
822df2d0e10c6ae5a245ed3b8a4fbb0b6b242884
|
71fec15a7b758c05f6749081d636a4a70ec159dd
|
/server.py
|
a5d443dd66c147ed8c3126f6ce957f3bd96207b2
|
[] |
no_license
|
clauxx/terminal_chat
|
e5b9f128d3e44f76c90fd6a00fb6f54ea533f32f
|
820a1eb9af75879237236eeb1efd37442918ac5d
|
refs/heads/master
| 2021-01-13T05:17:47.207350
| 2017-02-15T15:21:34
| 2017-02-15T15:21:34
| 81,321,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,819
|
py
|
import re
import socket
import time
class Server:
def __init__(self, port=8888):
self.port = port
self.users = {}
self.socket = None
def init_socket(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
host = self.socket.getsockname()[0]
self.socket.bind((host, self.port))
self.socket.setblocking(0)
print("Socket initialized on port %s" % self.port)
def close_socket(self):
self.socket.close()
def receive(self):
return self.socket.recvfrom(1024)
def parse(self, data):
return re.split(':+', data.decode())
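# e.g. self.parse(b'alice:bob:hi there') -> ['alice', 'bob', 'hi there']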
def set_db(self, parsed, addr):
        # parsed[0] = name of source
        # parsed[1] = name of recipient
        # parsed[2] = message
        msg_source = parsed[0]
        # register the sender, refreshing the stored address if it changed
        self.users[msg_source] = addr
def display(self, parsed, addr):
print(parsed)
print(addr)
print('***')
print(self.users)
print('***')
    def check_quit(self, parsed):
        # the third field of a parsed datagram carries the message text
        return parsed[2] == "!q"
def send(self, parsed, data):
self.socket.sendto(data, self.users[parsed[1]])
print(data, self.users[parsed[1]])
def start(self):
stop = False
while not stop:
try:
data, addr = self.receive()
parsed = self.parse(data)
self.set_db(parsed, addr)
self.display(parsed, addr)
self.send(parsed, data)
                stop = self.check_quit(parsed)
            except Exception:
                # no datagram ready on the non-blocking socket (or an
                # unknown recipient); keep polling
                pass
self.close_socket()
myServ = Server(8888)
myServ.init_socket()
myServ.start()
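# Client sketch (an assumption, not part of the original file): datagrams
# follow the "source:recipient:message" format that parse() splits on ':'.
#   import socket
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   sock.sendto(b'alice:bob:hello', ('127.0.0.1', 8888))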
|
[
"cristian.lungu@hsrw.org"
] |
cristian.lungu@hsrw.org
|
fbf3880115b9c73fd35049960a201e9665ec6096
|
34ea39dbc91e339a61b9b301a127e34f29e559cd
|
/선형 자료구조/배열/Problem10_array-partion-i.py
|
6ccd4582a060bef9b32aa3c704ecafec91d3aa27
|
[] |
no_license
|
miniii222/PythonAlgorithm-Interview
|
402ed56df7e6c090451d49d3629f4927073d40d5
|
a6aee1a7d00e2d428b7a8d8d2e83d492de269359
|
refs/heads/master
| 2022-11-29T20:08:55.767986
| 2020-08-17T08:33:15
| 2020-08-17T08:33:15
| 284,401,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
#https://leetcode.com/problems/array-partition-i/
#출처 : https://github.com/onlybooks/algorithm-interview
#Solution : Pythonic Code
class Solution:
    def arrayPairSum(self, nums: list[int]) -> int:
        # after sorting, every even index holds the smaller element of a pair
        return sum(sorted(nums)[::2])
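# Usage sketch: pairs (1, 2) and (3, 4) contribute min sums 1 + 3 = 4.
#   Solution().arrayPairSum([1, 4, 3, 2])  # -> 4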
|
[
"noreply@github.com"
] |
miniii222.noreply@github.com
|
27b818820f40ca578641b9ba891ac3ab278efebf
|
22af28d4b78599b6d105aec6b6e1c4fde3eadac7
|
/demo/settings.py
|
b702359d6a09562976d93813bda320879db1c5d5
|
[] |
no_license
|
JohnnyFang/celery_django
|
fe1a11885fab2b5cd4fcadf8be873d48268ce9b7
|
1bb79ef87c1bc3d6611c5372b3aa5f14fb5ecec6
|
refs/heads/master
| 2021-08-08T05:21:05.048664
| 2017-11-09T16:45:29
| 2017-11-09T16:45:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,628
|
py
|
"""
Django settings for demo project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yfo7kd!uvn3uz1mg9siizej@0hrc=4v1&cs2l_z6rg8i5=gaqb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_celery_beat',
'django_celery_results',
'example_app_celery',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'demo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.get('POSTGRES_DB', ''),
'USER': os.environ.get('POSTGRES_USER', ''),
'PASSWORD': os.environ.get('POSTGRES_PASSWORD', ''),
'HOST': 'postgres',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'static'
# CELERY SETTINGS
CELERY_BROKER_URL = 'redis://redis:6379'
CELERY_RESULT_BACKEND = 'django-db'
#: Only add pickle to this list if your broker is secured
#: from unwanted access (see userguide/security.html)
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
|
[
"formatcomvinicio@gmail.com"
] |
formatcomvinicio@gmail.com
|
1a379c9bd7da175c9e0947adbcc3615b16118880
|
8a5ab31bc017747607847b36a409cb0e8b74fdbb
|
/downloadImage.py
|
c3e31e6ef48b97d74cfb245022b733f6d13e8e9f
|
[] |
no_license
|
talentneptuner/weibo_tr_clawer
|
7b341ae243749810a4148169f3ee39b995c03101
|
ebe1d6065791793a295bfd2315406983ff6529d4
|
refs/heads/master
| 2021-07-07T09:44:51.753047
| 2020-10-28T13:00:05
| 2020-10-28T13:00:05
| 193,014,701
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,828
|
py
|
import pymysql
import time
import urllib.request  # "import urllib" alone does not expose urllib.request.urlretrieve
if __name__ == '__main__':
conn = pymysql.connect(host='192.168.22.241', port=3306, user='my_admin', passwd='Alex19900807.',
db='yibin_earthquake',
charset='utf8mb4') # 基本的本机MYSQL配置
sql = 'select id,mid,img_list from weibo where img_list != ""'
cursor_s = conn.cursor()
cursor_s.execute(sql)
data = cursor_s.fetchall()
all_img = []
save_sus = []
for item in data:
img_list = set(item[2].split('|'))
for index,img in enumerate(img_list):
all_img.append(dict(id=item[0], name=str(item[1])+'-'+str(index), img_url=img, mid=item[1]))
print(len(all_img))
for item in all_img:
print(item)
if item['img_url'].startswith("http://") or item['img_url'].startswith("https://"):
src = item['img_url']
else:
src = 'https://{}'.format(item['img_url'].lstrip('//'))
try:
src = src.replace('thumb150', 'mw690')
img_local = r'C:\D\Files\data\weiboImageh\{}.jpg'.format(item['name'])
urllib.request.urlretrieve(src, img_local)
sql = 'replace into image VALUES (%s, %s) '
cursor_s.execute(sql,(item['mid'],item['name']))
conn.commit()
        except Exception:
try:
time.sleep(2)
img_local = r'C:\D\Files\data\weiboImageh\{}.jpg'.format(item['name'])
urllib.request.urlretrieve(src, img_local)
sql = 'replace into weibo_img VALUES (%s, %s) '
cursor_s.execute(sql, (item['mid'], item['name']))
conn.commit()
            except Exception:
print(item['img_url'])
print(src)
print('下载失败')
|
[
"talentneptuner@163.com"
] |
talentneptuner@163.com
|
db7831fb24e67bf4245437ef891c3154a6b6f4ce
|
856728b53544deb7b7f6c10e1570b601c1a2ba46
|
/pages/views.py
|
95f58c59fbf469dd59bcd22773f4c069a97353bb
|
[] |
no_license
|
mangakid/django-real-estate-tutorial
|
ccfd8b173276f39d7c7f32d96ee4372bb4fdc707
|
e2cbc1cc08dc974a09cf8d9ba73f68806a9a7300
|
refs/heads/master
| 2020-04-20T22:21:26.839603
| 2019-02-04T19:42:07
| 2019-02-04T19:42:07
| 169,137,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 877
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from listings.choices import price_choices, bedroom_choices, state_choices
from listings.models import Listing
from realtors.models import Realtor
def index(request):
listings = Listing.objects.order_by('-list_date').filter(is_published=True)[:3]
context = {
'listings': listings,
'state_choices': state_choices,
'bedroom_choices': bedroom_choices,
'price_choices': price_choices
}
return render(request, 'pages/index.html', context)
def about(request):
# Get all realtors
realtors = Realtor.objects.order_by('-hire_date')
# Get MVP
mvp_realtors = Realtor.objects.all().filter(is_mvp=True)
context = {
'realtors': realtors,
'mvp_realtors': mvp_realtors
}
return render(request, 'pages/about.html', context)
|
[
"8886696+mangakid@users.noreply.github.com"
] |
8886696+mangakid@users.noreply.github.com
|
4e43a1bf78ffedfac56d93899a6ca1fc7ca7e535
|
59db7f70000486c72e9fb6f6e67102f64e028022
|
/replace_to_make_regular_bracket.py
|
d95382c41cbb4cb72ab8f3b0e3c7eb855321c2ba
|
[] |
no_license
|
PritiGiramkar/Code-Forces-Coding
|
77e9abdea634747b1a72cb501081db4f5a7c7f7c
|
daf4e1db03ae0af0fb955e3c0866c0f97267bfdf
|
refs/heads/main
| 2023-05-31T10:34:05.698149
| 2021-06-23T16:46:04
| 2021-06-23T16:46:04
| 379,667,034
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
str1 = input()
# map each opening bracket to its matching closing bracket
ls = {'(': ')', '{': '}', '[': ']', '<': '>'}
str2 = []  # stack of currently unmatched opening brackets
cnt = 0    # number of closing brackets that had to be replaced
flag = 0   # set when a closing bracket arrives on an empty stack
for i in str1:
if i in ls:
str2.append(i)
else:
if len(str2)!=0:
if ls[str2[-1]]==i:
str2.pop()
else:
cnt+=1
str2.pop()
else:
flag=1
break
#print(cnt)
if len(str2)==0 and flag==0:
print(cnt)
else:
print('Impossible')
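# Example run: for the input "[<}){}" the scan replaces '}' (expected '>')
# and ')' (expected ']'), so the script prints 2; an input such as "[" leaves
# the stack non-empty and prints Impossible.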
|
[
"noreply@github.com"
] |
PritiGiramkar.noreply@github.com
|
96705adc78d0df97195fbd2bec5f447c45cab3aa
|
a6b685845e5db8fd3ec476fabff2928378e291a1
|
/modulos_sopa/Configurar_juego.py
|
4c4b9a6785b1b9221971256b80194321b24357b9
|
[] |
no_license
|
KaitoKurogami/TrabajoPython
|
befc5d8e4797edd636d6d91676a1990d330a673d
|
2a87424cb8a0867d66334b4a646e6c7eb5103ae3
|
refs/heads/master
| 2020-06-06T15:34:04.452046
| 2019-07-29T00:00:59
| 2019-07-29T00:00:59
| 192,779,446
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,254
|
py
|
import PySimpleGUI as sg
import os
import json
ruta_app = os.getcwd()
from modulos_configurar import modulo_colores as mc
from modulos_configurar import modulo_palabras as mp
def Configurar(reporte_pantalla,dic_config):
    '''
    receives a dictionary used as the base structure for the on-screen reports
    and a dictionary with the previous configuration
    returns a dictionary with the word data,
    the colors to use for each type of word,
    whether help will be shown on screen,
    the orientation of the words,
    whether the letters will be upper case or lower case,
    the typography for the titles and bodies of the report shown on screen,
    and whether the game's color scheme will depend on the offices or use the neutral one
    '''
if not os.path.exists(os.path.join(ruta_app,'palabras anteriores')):
os.mkdir(os.path.join(ruta_app,'palabras anteriores'))
try:
archpalabras=open(os.path.join(ruta_app,'palabras anteriores','palabras.json'),'r+')
except FileNotFoundError:
archpalabras=open(os.path.join(ruta_app,'palabras anteriores','palabras.json'),'w+')
try:
dic_palabras=json.load(archpalabras)
except json.decoder.JSONDecodeError:
dic_palabras={}
archpalabras.close()
    if dic_config == {} or dic_config is None:
config_estandar={"ayuda": "sin", "orientacion": "horizontal", "cant_sustantivos": "3",
"cant_adjetivos": "3", "cant_verbos": "3", "MayusOMinus": "mayusculas",
"tipografia titulo": "Times ", "tipografia cuerpo": "Times ", "elegir estilo": "Sin",
"oficina": "Elegir","colores": {"Sustantivos": "red", "Adjetivos": "green", "Verbos": "yellow"}}
else:
config_estandar=dic_config
    if dic_palabras == {}:
        # the default config may not include 'palabras'; fall back to an empty dict
        dic_palabras = config_estandar.get('palabras', {})
if not os.path.exists(os.path.join(ruta_app,'oficinas')):
dic_oficinas={}
else:
try:
archofice=open(os.path.join(ruta_app,'oficinas','oficinas.json'),'r+')
dic_oficinas=json.load(archofice)
archofice.close()
except FileNotFoundError:
dic_oficinas={}
lista_oficinas=['Elegir']
lista_oficinas.extend(list(dic_oficinas.keys()))
fontsize='12'
tipografia=config_estandar["tipografia titulo"]
tipografia2=config_estandar["tipografia cuerpo"]
colores={'Sustantivos':config_estandar['colores']["Sustantivos"],'Adjetivos':config_estandar['colores']["Adjetivos"]
,'Verbos':config_estandar['colores']["Verbos"]}
columna=[
[sg.Text('Sustantivos',size=(11,1)),sg.InputText(default_text=config_estandar["cant_sustantivos"],key='cant_sustantivos',size=(4,1))],
[sg.Text('Adjetivos',size=(11,1)),sg.InputText(default_text=config_estandar["cant_adjetivos"],key='cant_adjetivos',size=(4,1))],
[sg.Text('Verbos',size=(11,1)),sg.InputText(default_text=config_estandar["cant_verbos"],key='cant_verbos',size=(4,1))],
]
layout_config=[
[sg.Button('Ingresar Palabras',key='B1',size=(50,2))],
[sg.Button('Elegir Colores',key='B2',size=(50,2))],
[sg.Text('Ayuda ',size=(35,1)),sg.InputCombo(['sin','palabra','definicion','ambas'],
default_value=config_estandar['ayuda'],key='ayuda',size=(10,1))],
[sg.Text('Orientacion ',size=(35,1)),sg.InputCombo(['vertical','horizontal'],
default_value=config_estandar['orientacion'],key='orientacion',size=(10,1))],
[sg.Text('Cantidad de palabras',size=(28,1)),sg.Column(columna)],
[sg.Text('Mayusculas/minusculas',size=(35,1)),sg.InputCombo(['mayusculas','minusculas'],
default_value=config_estandar['MayusOMinus'],key='MayusOMinus',size=(10,1))],
[sg.Text('Tipografia titulo',size=(11,2)),sg.InputCombo(['Courier ','Helvetica ','Times '],
change_submits=True,key='tipografia titulo',size=(10,1),default_value=config_estandar['tipografia titulo']),
sg.Text('Texto de Ejemplo',font=tipografia+fontsize,size=(20,1),key='ejemplo')],
[sg.Text('Tipografia texto',size=(11,2)),sg.InputCombo(['Courier ','Helvetica ','Times '],
change_submits=True,key='tipografia cuerpo',size=(10,1),default_value=config_estandar['tipografia cuerpo']),
sg.Text('Texto de Ejemplo',font=tipografia2+fontsize,size=(20,1),key='ejemplo2')],
[sg.Text('Estilo',size=(11,1)),sg.InputCombo(['normal','oficinas']
,disabled=len(lista_oficinas)==1,change_submits=True,key='elegir estilo',size=(10,1)),
sg.InputCombo(lista_oficinas,disabled=True,key='oficina',size=(10,1))],
[sg.Button('Guardar y salir',key='Guardar',size=(24,2)),sg.Button('Salir',key='Salir',size=(24,2))],
]
ventana_config=sg.Window('Configuración',margins=(10,30)).Layout(layout_config)
fin=False
while True:
configurado=False
event_config,values_config=ventana_config.Read()
if event_config==None:
return None
if event_config=='Salir':
if sg.PopupYesNo('¿Salir sin guardar?')=='Yes':
break
if tipografia!=values_config['tipografia titulo']:
tipografia=values_config['tipografia titulo']
ventana_config.FindElement('ejemplo').Update(font=tipografia+fontsize)
if tipografia2!=values_config['tipografia cuerpo']:
tipografia2=values_config['tipografia cuerpo']
ventana_config.FindElement('ejemplo2').Update(font=tipografia2+fontsize)
if event_config=='Guardar':
if(not values_config['cant_adjetivos'].isdigit() or not values_config['cant_sustantivos'].isdigit()
or not values_config['cant_verbos'].isdigit()):
sg.Popup('Revise que las cantidades ingresadas para cada tipo de palabra\nSean numeros adecuados\n(Tenga cuidado con espacios que no se vean)')
else:
configurado=True
break
if 'oficinas'==values_config['elegir estilo']:
ventana_config.FindElement('oficina').Update(disabled=False)
if 'normal'==values_config['elegir estilo']:
ventana_config.FindElement('oficina').Update(disabled=True)
if event_config=='B1':
ventana_config.Hide()
mp.Ingresar_palabras(dic_palabras,reporte_pantalla)
elif event_config=='B2':
ventana_config.Hide()
mc.elegir_colores(colores)
ventana_config.UnHide()
if configurado:
configuracion=values_config
configuracion['palabras']=dic_palabras
configuracion['colores']=colores
if not os.path.exists(os.path.join(ruta_app,'palabras anteriores')):
os.mkdir(os.path.join(ruta_app,'palabras anteriores'))
archpalabras=open(os.path.join(ruta_app,'palabras anteriores','palabras.json'),'w+')
json.dump(dic_palabras,archpalabras)
archpalabras.close()
ventana_config.Close()
return configuracion
ventana_config.Close()
return None
|
[
"andresmgr@gmail.com"
] |
andresmgr@gmail.com
|
c226692fab8359ef902f651b3a8ebccade940686
|
f8acc7aa41f0eea25ae4f157c3cd5e6990ce98d9
|
/tests/metrics/test_stats.py
|
71260c0b3405feefbe08169ab4bb3b2fb97290ce
|
[] |
no_license
|
lucarios1701/confluent_kafka_helpers
|
4878cb7b0657c19b1db245819dd317bd25416db1
|
f9a66e6d9f2bd905310db7d4be9187337a141fd5
|
refs/heads/master
| 2022-04-27T07:51:04.302004
| 2020-04-28T10:03:17
| 2020-04-28T10:03:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 897
|
py
|
import json
from confluent_kafka_helpers.metrics.stats import StatsParser
stats = {
'brokers': {
'b1': {
'name': 'b1'
}
},
'topics': {
't1': {
'topic': 't1',
'partitions': {
'p1': {
'partition': 'p1'
}
}
}
},
'cgrp': {
'foo': 'bar'
}
}
class StatsParserTests:
def test_get_top_level_attrs_should_return_str(self):
parsed = StatsParser(json.dumps(stats))
assert parsed.brokers[0].name == 'b1'
assert parsed.topics[0].topic == 't1'
assert parsed.topics[0].partitions[0].partition == 'p1'
assert parsed.cgrp['foo'] == 'bar'
def test_get_cgrp_attr_should_return_none(self):
stats.pop('cgrp')
parsed = StatsParser(json.dumps(stats))
assert parsed.cgrp is None
|
[
"simon.wahlgren@fyndiq.se"
] |
simon.wahlgren@fyndiq.se
|
00fd5b9ab4abff6d0f54ce71b3a70d669846407c
|
2297cd589424cb2326b7d919d2899382e7e1b7e2
|
/setup.py
|
7d48f94fcf00abe4487b119042015b0a630536e9
|
[] |
no_license
|
charlmert/restify
|
2492fd2781def6b6c4b6a71dc05e4918e23eda5b
|
850c272f93efdcba8750bd13de8b8684e8f18428
|
refs/heads/master
| 2021-01-10T19:10:46.467986
| 2013-01-29T16:38:46
| 2013-01-29T16:38:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,539
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from distutils.core import setup
import restify as _restify
import os
import sys
SETUP_ARGS = dict(
name="restify",
version=_restify.VERSION,
    description="Python module to produce RESTful interfaces "
                "from existing classes for a target framework e.g. django",
long_description = _restify.__doc__,
author="Charl Mert",
author_email="charl.mert -at- gmail.com",
url="http://git@github.com/charlmert/restify",
license="BSD",
platforms=['POSIX', 'Windows'],
keywords=['restify', 'rest', 'introspection', 'marshal',
'serialization', 'django'],
classifiers=[
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Development Status :: 1 - Alpha",
"Intended Audience :: Developers",
"Programming Language :: Python"
],
options={'clean': {'all': 1}},
packages=["restify"],
)
def main():
    # guard against bare "python setup.py" invocations with no subcommand
    if len(sys.argv) > 1 and sys.argv[1] in ('install', 'build'):
        os.system('cp bin/restify /usr/bin/restify')
        os.system('chmod 775 /usr/bin/restify')
setup(**SETUP_ARGS)
return 0
if __name__ == '__main__':
sys.exit(main())
|
[
"charl.mert@gmail.com"
] |
charl.mert@gmail.com
|
e6ee973cf07efc51bd8ba452a6628918bba1b744
|
eb9549901447a2a990afd496feefe187ab943fe5
|
/diofant/tensor/array/tests/test_arrayop.py
|
e1566ca469838d33635250127cf31c04a0f4300b
|
[
"BSD-3-Clause"
] |
permissive
|
Blendify/diofant
|
9d81891a0ade4fb2f19ef0462d1932d09800e746
|
4f88083494cbe04a02ae92ec1a6b66d29f57b06d
|
refs/heads/master
| 2020-04-01T02:30:07.942059
| 2018-10-12T10:13:04
| 2018-10-12T10:13:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,205
|
py
|
import random
import pytest
from diofant import (Derivative, Permutation, adjoint, conjugate, cos, exp,
log, sin, symbols, transpose)
from diofant.abc import (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r,
s, t, u, v, w, x, y, z)
from diofant.combinatorics.permutations import _af_invert
from diofant.tensor.array import (Array, NDimArray, derive_by_array,
permutedims, tensorcontraction,
tensorproduct)
__all__ = ()
def test_tensorproduct():
assert tensorproduct() == 1
assert tensorproduct([x]) == Array([x])
assert tensorproduct([x], [y]) == Array([[x*y]])
assert tensorproduct([x], [y], [z]) == Array([[[x*y*z]]])
assert tensorproduct([x], [y], [z], [t]) == Array([[[[x*y*z*t]]]])
assert tensorproduct(x) == x
assert tensorproduct(x, y) == x*y
assert tensorproduct(x, y, z) == x*y*z
assert tensorproduct(x, y, z, t) == x*y*z*t
A = Array([x, y])
B = Array([1, 2, 3])
C = Array([a, b, c, d])
assert tensorproduct(A, B, C) == Array([[[a*x, b*x, c*x, d*x],
[2*a*x, 2*b*x, 2*c*x, 2*d*x],
[3*a*x, 3*b*x, 3*c*x, 3*d*x]],
[[a*y, b*y, c*y, d*y],
[2*a*y, 2*b*y, 2*c*y, 2*d*y],
[3*a*y, 3*b*y, 3*c*y, 3*d*y]]])
assert tensorproduct([x, y], [1, 2, 3]) == tensorproduct(A, B)
assert tensorproduct(A, 2) == Array([2*x, 2*y])
assert tensorproduct(A, [2]) == Array([[2*x], [2*y]])
assert tensorproduct([2], A) == Array([[2*x, 2*y]])
assert tensorproduct(a, A) == Array([a*x, a*y])
assert tensorproduct(a, A, B) == Array([[a*x, 2*a*x, 3*a*x],
[a*y, 2*a*y, 3*a*y]])
assert tensorproduct(A, B, a) == Array([[a*x, 2*a*x, 3*a*x],
[a*y, 2*a*y, 3*a*y]])
assert tensorproduct(B, a, A) == Array([[a*x, a*y], [2*a*x, 2*a*y],
[3*a*x, 3*a*y]])
def test_tensorcontraction():
B = Array(range(18), (2, 3, 3))
pytest.raises(ValueError, lambda: tensorcontraction(B, 1))
pytest.raises(ValueError, lambda: tensorcontraction(B, (2, 2)))
assert tensorcontraction(B, (1, 2)) == Array([12, 39])
C1 = Array([a, b, c, d, e, f, g, h, i, j, k, l, m,
n, o, p, q, r, s, t, u, v, w, x], (2, 3, 2, 2))
assert tensorcontraction(C1, (0, 2)) == Array([[a + o, b + p],
[e + s, f + t],
[i + w, j + x]])
pytest.raises(ValueError, lambda: tensorcontraction(C1, (0, 1)))
assert tensorcontraction(C1, (0, 2, 3)) == Array([a + p, e + t, i + x])
assert tensorcontraction(C1, (2, 3)) == Array([[a + d, e + h, i + l],
[m + p, q + t, u + x]])
def test_derivative_by_array():
bexpr = x*y**2*exp(z)*log(t)
sexpr = sin(bexpr)
cexpr = cos(bexpr)
a = Array([sexpr])
assert derive_by_array(sexpr, t) == x*y**2*exp(z)*cos(x*y**2*exp(z)*log(t))/t
assert derive_by_array(sexpr, [x, y, z]) == Array([bexpr/x*cexpr, 2*y*bexpr/y**2*cexpr, bexpr*cexpr])
assert derive_by_array(a, [x, y, z]) == Array([[bexpr/x*cexpr], [2*y*bexpr/y**2*cexpr], [bexpr*cexpr]])
assert derive_by_array(sexpr, [[x, y], [z, t]]) == Array([[bexpr/x*cexpr, 2*y*bexpr/y**2*cexpr], [bexpr*cexpr, bexpr/log(t)/t*cexpr]])
assert derive_by_array(a, [[x, y], [z, t]]) == Array([[[bexpr/x*cexpr], [2*y*bexpr/y**2*cexpr]], [[bexpr*cexpr], [bexpr/log(t)/t*cexpr]]])
assert derive_by_array([[x, y], [z, t]], [x, y]) == Array([[[1, 0], [0, 0]], [[0, 1], [0, 0]]])
assert derive_by_array([[x, y], [z, t]], [[x, y], [z, t]]) == Array([[[[1, 0], [0, 0]], [[0, 1], [0, 0]]],
[[[0, 0], [1, 0]], [[0, 0], [0, 1]]]])
pytest.raises(ValueError, lambda: derive_by_array(x, [Derivative(x**2, x)]))
def test_issue_emerged_while_discussing_sympyissue_10972():
ua = Array([-1, 0])
Fa = Array([[0, 1], [-1, 0]])
po = tensorproduct(Fa, ua, Fa, ua)
assert tensorcontraction(po, (1, 2), (4, 5)) == Array([[0, 0], [0, 1]])
sa = symbols('a0:144')
po = Array(sa, [2, 2, 3, 3, 2, 2])
assert tensorcontraction(po, (0, 1), (2, 3), (4, 5)) == sa[0] + sa[108] + sa[111] + sa[124] + sa[127] + sa[140] + sa[143] + sa[16] + sa[19] + sa[3] + sa[32] + sa[35]
assert tensorcontraction(po, (0, 1, 4, 5), (2, 3)) == sa[0] + sa[111] + sa[127] + sa[143] + sa[16] + sa[32]
assert tensorcontraction(po, (0, 1), (4, 5)) == Array([[sa[0] + sa[108] + sa[111] + sa[3], sa[112] + sa[115] + sa[4] + sa[7],
sa[11] + sa[116] + sa[119] + sa[8]],
[sa[12] + sa[120] + sa[123] + sa[15],
sa[124] + sa[127] + sa[16] + sa[19], sa[128] + sa[131] + sa[20] + sa[23]],
[sa[132] + sa[135] + sa[24] + sa[27], sa[136] + sa[139] + sa[28] + sa[31],
sa[140] + sa[143] + sa[32] + sa[35]]])
assert tensorcontraction(po, (0, 1), (2, 3)) == Array([[sa[0] + sa[108] + sa[124] + sa[140] + sa[16] + sa[32], sa[1] + sa[109] + sa[125] + sa[141] + sa[17] + sa[33]],
[sa[110] + sa[126] + sa[142] + sa[18] + sa[2] + sa[34], sa[111] + sa[127] + sa[143] + sa[19] + sa[3] + sa[35]]])
def test_array_permutedims():
pytest.raises(TypeError, lambda: permutedims(1, (1, 1)))
sa = symbols('a0:144')
m1 = Array(sa[:6], (2, 3))
assert permutedims(m1, (1, 0)) == transpose(m1)
assert m1.tomatrix().T == permutedims(m1, (1, 0)).tomatrix()
assert m1.tomatrix().T == transpose(m1).tomatrix()
assert m1.tomatrix().C == conjugate(m1).tomatrix()
assert m1.tomatrix().H == adjoint(m1).tomatrix()
assert m1.tomatrix().T == m1.transpose().tomatrix()
assert m1.tomatrix().C == m1.conjugate().tomatrix()
assert m1.tomatrix().H == m1.adjoint().tomatrix()
pytest.raises(ValueError, lambda: permutedims(m1, (0,)))
pytest.raises(ValueError, lambda: permutedims(m1, (0, 0)))
pytest.raises(ValueError, lambda: permutedims(m1, (1, 2, 0)))
# Some tests with random arrays:
dims = 6
shape = [random.randint(1, 5) for i in range(dims)]
elems = [random.random() for i in range(tensorproduct(*shape))]
ra = Array(elems, shape)
perm = list(range(dims))
# Randomize the permutation:
random.shuffle(perm)
# Test inverse permutation:
assert permutedims(permutedims(ra, perm), _af_invert(perm)) == ra
# Test that permuted shape corresponds to action by `Permutation`:
assert permutedims(ra, perm).shape == tuple(Permutation(perm)(shape))
z = NDimArray.zeros(4, 5, 6, 7)
assert permutedims(z, (2, 3, 1, 0)).shape == (6, 7, 5, 4)
assert permutedims(z, [2, 3, 1, 0]).shape == (6, 7, 5, 4)
assert permutedims(z, Permutation([2, 3, 1, 0])).shape == (6, 7, 5, 4)
po = Array(sa, [2, 2, 3, 3, 2, 2])
pytest.raises(ValueError, lambda: permutedims(po, (1, 1)))
pytest.raises(ValueError, lambda: po.transpose())
pytest.raises(ValueError, lambda: po.adjoint())
assert permutedims(po, reversed(range(po.rank()))) == Array(
[[[[[[sa[0], sa[72]], [sa[36], sa[108]]], [[sa[12], sa[84]], [sa[48], sa[120]]], [[sa[24],
sa[96]], [sa[60], sa[132]]]],
[[[sa[4], sa[76]], [sa[40], sa[112]]], [[sa[16],
sa[88]], [sa[52], sa[124]]],
[[sa[28], sa[100]], [sa[64], sa[136]]]],
[[[sa[8],
sa[80]], [sa[44], sa[116]]], [[sa[20], sa[92]], [sa[56], sa[128]]], [[sa[32],
sa[104]], [sa[68], sa[140]]]]],
[[[[sa[2], sa[74]], [sa[38], sa[110]]], [[sa[14],
sa[86]], [sa[50], sa[122]]], [[sa[26], sa[98]], [sa[62], sa[134]]]],
[[[sa[6],
sa[78]], [sa[42], sa[114]]], [[sa[18], sa[90]], [sa[54], sa[126]]], [[sa[30],
sa[102]], [sa[66], sa[138]]]],
[[[sa[10], sa[82]], [sa[46], sa[118]]], [[sa[22],
sa[94]], [sa[58], sa[130]]],
[[sa[34], sa[106]], [sa[70], sa[142]]]]]],
[[[[[sa[1],
sa[73]], [sa[37], sa[109]]], [[sa[13], sa[85]], [sa[49], sa[121]]], [[sa[25],
sa[97]], [sa[61], sa[133]]]],
[[[sa[5], sa[77]], [sa[41], sa[113]]], [[sa[17],
sa[89]], [sa[53], sa[125]]],
[[sa[29], sa[101]], [sa[65], sa[137]]]],
[[[sa[9],
sa[81]], [sa[45], sa[117]]], [[sa[21], sa[93]], [sa[57], sa[129]]], [[sa[33],
sa[105]], [sa[69], sa[141]]]]],
[[[[sa[3], sa[75]], [sa[39], sa[111]]], [[sa[15],
sa[87]], [sa[51], sa[123]]], [[sa[27], sa[99]], [sa[63], sa[135]]]],
[[[sa[7],
sa[79]], [sa[43], sa[115]]], [[sa[19], sa[91]], [sa[55], sa[127]]], [[sa[31],
sa[103]], [sa[67], sa[139]]]],
[[[sa[11], sa[83]], [sa[47], sa[119]]], [[sa[23],
sa[95]], [sa[59], sa[131]]],
[[sa[35], sa[107]], [sa[71], sa[143]]]]]]])
assert permutedims(po, (1, 0, 2, 3, 4, 5)) == Array(
[[[[[[sa[0], sa[1]], [sa[2], sa[3]]], [[sa[4], sa[5]], [sa[6], sa[7]]], [[sa[8], sa[9]], [sa[10],
sa[11]]]],
[[[sa[12], sa[13]], [sa[14], sa[15]]], [[sa[16], sa[17]], [sa[18],
sa[19]]], [[sa[20], sa[21]], [sa[22], sa[23]]]],
[[[sa[24], sa[25]], [sa[26],
sa[27]]], [[sa[28], sa[29]], [sa[30], sa[31]]], [[sa[32], sa[33]], [sa[34],
sa[35]]]]],
[[[[sa[72], sa[73]], [sa[74], sa[75]]], [[sa[76], sa[77]], [sa[78],
sa[79]]], [[sa[80], sa[81]], [sa[82], sa[83]]]],
[[[sa[84], sa[85]], [sa[86],
sa[87]]], [[sa[88], sa[89]], [sa[90], sa[91]]], [[sa[92], sa[93]], [sa[94],
sa[95]]]],
[[[sa[96], sa[97]], [sa[98], sa[99]]], [[sa[100], sa[101]], [sa[102],
sa[103]]],
[[sa[104], sa[105]], [sa[106], sa[107]]]]]],
[[[[[sa[36], sa[37]], [sa[38], sa[39]]], [[sa[40], sa[41]], [sa[42], sa[43]]],
[[sa[44], sa[45]], [sa[46], sa[47]]]],
[[[sa[48], sa[49]], [sa[50], sa[51]]], [[sa[52], sa[53]], [sa[54], sa[55]]],
[[sa[56], sa[57]], [sa[58], sa[59]]]],
[[[sa[60], sa[61]], [sa[62], sa[63]]], [[sa[64], sa[65]], [sa[66], sa[67]]],
[[sa[68], sa[69]], [sa[70], sa[71]]]]],
[[[[sa[108], sa[109]], [sa[110], sa[111]]],
[[sa[112], sa[113]], [sa[114], sa[115]]], [[sa[116], sa[117]], [sa[118], sa[119]]]],
[[[sa[120], sa[121]], [sa[122], sa[123]]], [[sa[124], sa[125]], [sa[126], sa[127]]],
[[sa[128], sa[129]], [sa[130], sa[131]]]],
[[[sa[132], sa[133]], [sa[134], sa[135]]], [[sa[136], sa[137]], [sa[138], sa[139]]],
[[sa[140], sa[141]], [sa[142], sa[143]]]]]]])
assert permutedims(po, (0, 2, 1, 4, 3, 5)) == Array(
[[[[[[sa[0], sa[1]], [sa[4], sa[5]], [sa[8], sa[9]]], [[sa[2], sa[3]], [sa[6], sa[7]], [sa[10],
sa[11]]]],
[[[sa[36], sa[37]], [sa[40], sa[41]], [sa[44], sa[45]]], [[sa[38],
sa[39]], [sa[42], sa[43]], [sa[46], sa[47]]]]],
[[[[sa[12], sa[13]], [sa[16],
sa[17]], [sa[20], sa[21]]], [[sa[14], sa[15]], [sa[18], sa[19]], [sa[22],
sa[23]]]],
[[[sa[48], sa[49]], [sa[52], sa[53]], [sa[56], sa[57]]], [[sa[50],
sa[51]], [sa[54], sa[55]], [sa[58], sa[59]]]]],
[[[[sa[24], sa[25]], [sa[28],
sa[29]], [sa[32], sa[33]]], [[sa[26], sa[27]], [sa[30], sa[31]], [sa[34],
sa[35]]]],
[[[sa[60], sa[61]], [sa[64], sa[65]], [sa[68], sa[69]]], [[sa[62],
sa[63]], [sa[66], sa[67]], [sa[70], sa[71]]]]]],
[[[[[sa[72], sa[73]], [sa[76],
sa[77]], [sa[80], sa[81]]], [[sa[74], sa[75]], [sa[78], sa[79]], [sa[82],
sa[83]]]],
[[[sa[108], sa[109]], [sa[112], sa[113]], [sa[116], sa[117]]], [[sa[110],
sa[111]], [sa[114], sa[115]],
[sa[118], sa[119]]]]],
[[[[sa[84], sa[85]], [sa[88],
sa[89]], [sa[92], sa[93]]], [[sa[86], sa[87]], [sa[90], sa[91]], [sa[94],
sa[95]]]],
[[[sa[120], sa[121]], [sa[124], sa[125]], [sa[128], sa[129]]], [[sa[122],
sa[123]], [sa[126], sa[127]],
[sa[130], sa[131]]]]],
[[[[sa[96], sa[97]], [sa[100],
sa[101]], [sa[104], sa[105]]], [[sa[98], sa[99]], [sa[102], sa[103]], [sa[106],
sa[107]]]],
[[[sa[132], sa[133]], [sa[136], sa[137]], [sa[140], sa[141]]], [[sa[134],
sa[135]], [sa[138], sa[139]],
[sa[142], sa[143]]]]]]])
po2 = po.reshape(4, 9, 2, 2)
assert po2 == Array([[[[sa[0], sa[1]], [sa[2], sa[3]]], [[sa[4], sa[5]], [sa[6], sa[7]]], [[sa[8], sa[9]], [sa[10], sa[11]]], [[sa[12], sa[13]], [sa[14], sa[15]]], [[sa[16], sa[17]], [sa[18], sa[19]]], [[sa[20], sa[21]], [sa[22], sa[23]]], [[sa[24], sa[25]], [sa[26], sa[27]]], [[sa[28], sa[29]], [sa[30], sa[31]]], [[sa[32], sa[33]], [sa[34], sa[35]]]], [[[sa[36], sa[37]], [sa[38], sa[39]]], [[sa[40], sa[41]], [sa[42], sa[43]]], [[sa[44], sa[45]], [sa[46], sa[47]]], [[sa[48], sa[49]], [sa[50], sa[51]]], [[sa[52], sa[53]], [sa[54], sa[55]]], [[sa[56], sa[57]], [sa[58], sa[59]]], [[sa[60], sa[61]], [sa[62], sa[63]]], [[sa[64], sa[65]], [sa[66], sa[67]]], [[sa[68], sa[69]], [sa[70], sa[71]]]], [[[sa[72], sa[73]], [sa[74], sa[75]]], [[sa[76], sa[77]], [sa[78], sa[79]]], [[sa[80], sa[81]], [sa[82], sa[83]]], [[sa[84], sa[85]], [sa[86], sa[87]]], [[sa[88], sa[89]], [sa[90], sa[91]]], [[sa[92], sa[93]], [sa[94], sa[95]]], [[sa[96], sa[97]], [sa[98], sa[99]]], [[sa[100], sa[101]], [sa[102], sa[103]]], [[sa[104], sa[105]], [sa[106], sa[107]]]], [[[sa[108], sa[109]], [sa[110], sa[111]]], [[sa[112], sa[113]], [sa[114], sa[115]]], [[sa[116], sa[117]], [sa[118], sa[119]]], [[sa[120], sa[121]], [sa[122], sa[123]]], [[sa[124], sa[125]], [sa[126], sa[127]]], [[sa[128], sa[129]], [sa[130], sa[131]]], [[sa[132], sa[133]], [sa[134], sa[135]]], [[sa[136], sa[137]], [sa[138], sa[139]]], [[sa[140], sa[141]], [sa[142], sa[143]]]]])
assert permutedims(po2, (3, 2, 0, 1)) == Array([[[[sa[0], sa[4], sa[8], sa[12], sa[16], sa[20], sa[24], sa[28], sa[32]], [sa[36], sa[40], sa[44], sa[48], sa[52], sa[56], sa[60], sa[64], sa[68]], [sa[72], sa[76], sa[80], sa[84], sa[88], sa[92], sa[96], sa[100], sa[104]], [sa[108], sa[112], sa[116], sa[120], sa[124], sa[128], sa[132], sa[136], sa[140]]], [[sa[2], sa[6], sa[10], sa[14], sa[18], sa[22], sa[26], sa[30], sa[34]], [sa[38], sa[42], sa[46], sa[50], sa[54], sa[58], sa[62], sa[66], sa[70]], [sa[74], sa[78], sa[82], sa[86], sa[90], sa[94], sa[98], sa[102], sa[106]], [sa[110], sa[114], sa[118], sa[122], sa[126], sa[130], sa[134], sa[138], sa[142]]]], [[[sa[1], sa[5], sa[9], sa[13], sa[17], sa[21], sa[25], sa[29], sa[33]], [sa[37], sa[41], sa[45], sa[49], sa[53], sa[57], sa[61], sa[65], sa[69]], [sa[73], sa[77], sa[81], sa[85], sa[89], sa[93], sa[97], sa[101], sa[105]], [sa[109], sa[113], sa[117], sa[121], sa[125], sa[129], sa[133], sa[137], sa[141]]], [[sa[3], sa[7], sa[11], sa[15], sa[19], sa[23], sa[27], sa[31], sa[35]], [sa[39], sa[43], sa[47], sa[51], sa[55], sa[59], sa[63], sa[67], sa[71]], [sa[75], sa[79], sa[83], sa[87], sa[91], sa[95], sa[99], sa[103], sa[107]], [sa[111], sa[115], sa[119], sa[123], sa[127], sa[131], sa[135], sa[139], sa[143]]]]])
|
[
"skirpichev@gmail.com"
] |
skirpichev@gmail.com
|
34cb64dc2d013efe0f40e5007b684dc7c638e017
|
fe059d3433d5d5b7d12521d51903c3472c9cac76
|
/source/show and tell- raghava/scrap.py
|
0ee8037fc9941b0ebf518efa199e722ce30d3851
|
[] |
no_license
|
gt784/BDA_Project
|
a32487ba131cbb5e826ae8e9e5d1f7e926d2ee30
|
5ec76340473218847258c4c9e8257c16f19383e9
|
refs/heads/master
| 2021-01-22T21:32:54.460355
| 2017-05-10T21:38:48
| 2017-05-10T21:38:48
| 85,436,282
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 64
|
py
|
import nltk
from nltk.corpus import brown
# the Brown corpus must be downloaded once beforehand, e.g. nltk.download('brown')
print(brown.words())
|
[
"noreply@github.com"
] |
gt784.noreply@github.com
|
df2be2d4f3a8c1d67dcb29bf830dcc3f5089e1ce
|
bfe48084578811546c1a7ca57f5efe25876eb329
|
/nlb_private_ips_finder/nlb_private_ips_finder.py
|
019dfedbcb6edb9cb1cf36c868bf0cb3b3e701bc
|
[] |
no_license
|
juanma-cvega/aws-nlb-alb-bridge
|
7709a572c57449582db084cd8f871737895ed676
|
d04cbc4ba0e277b7cc0ccf61384bb4bcb2a7fb7b
|
refs/heads/master
| 2020-06-01T10:41:33.997858
| 2019-06-17T09:28:36
| 2019-06-17T09:28:36
| 190,752,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,702
|
py
|
import boto3
import cfnresponse
DEFAULT_LISTENER_PORT = 80
def lambda_handler(event, context):
success = True
data = {}
try:
if event['RequestType'] == 'Create' or event['RequestType'] == 'Update':
print(event['RequestType'] + " resource using NLB private IPs")
nlb_description = event['ResourceProperties']['nlbDescription']
listener_port = event['ResourceProperties']['listenerPort'] or DEFAULT_LISTENER_PORT
print("Resource properties: listenerPort={}, nlbDescription={}".format(listener_port, nlb_description))
client = boto3.client('ec2')
nlb_nis = client.describe_network_interfaces(Filters=[
{
'Name': 'description',
'Values': ['*' + nlb_description + '*']
}
], MaxResults=100)
data = {
'privateIps': [{
'IpProtocol': 'tcp',
'CidrIp': ni['PrivateIpAddress'] + '/32',
'FromPort': listener_port,
'ToPort': listener_port
} for ni in nlb_nis['NetworkInterfaces']]
}
        else:
            print('Deleting resource, nothing to do')
        # the single cfnresponse.send in the finally block reports the outcome
except Exception as exception:
print("Exception finding the private IPs of the NLB", str(exception))
data = {}
success = False
finally:
status_response = cfnresponse.SUCCESS if success else cfnresponse.FAILED
print("Cloudformation status response: " + status_response)
cfnresponse.send(event, context, status_response, data)
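# Local test sketch (hypothetical values; the real event and context come
# from CloudFormation and the Lambda runtime):
#   event = {'RequestType': 'Create',
#            'ResourceProperties': {'nlbDescription': 'my-nlb', 'listenerPort': 80}}
#   lambda_handler(event, context)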
|
[
"jmcarnicero@ippon.fr"
] |
jmcarnicero@ippon.fr
|
4ae945987b421ac692ec0423dac58906391d3383
|
35d69a44d0cecdd54c4ae92c721d0363b09481b5
|
/Preprocessing.py
|
77025db5655c1b5658b3daea7d3508dbfec4df36
|
[] |
no_license
|
CleverCracker/Quantum_Image_Based_Search_Engine
|
c9ac5da4a4ae2b38afc1b01e74323fd271f5b384
|
42baeb62714d685a011213fe209cab208154e301
|
refs/heads/main
| 2023-09-05T13:03:36.715114
| 2021-11-15T07:10:55
| 2021-11-15T07:10:55
| 428,158,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
import numpy as np
from PIL import Image
def image_normalization(imagePath, w=32, h=32):
image = Image.open(imagePath).convert('LA').resize((w, h), Image.ANTIALIAS)
image = np.array([[image.getpixel((x, y))[0]
for x in range(w)] for y in range(h)])
    # flatten the 2-dimensional pixel grid into a 1-dimensional array
    image = image.flatten()
    # change type
    image = image.astype('float64')
    # normalize to [0, 1]; arcsin then maps the values into [0, pi/2]
    image /= 255.0
    generated_image = np.arcsin(image)
return generated_image
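# Usage sketch ('sample.png' is a placeholder): a 32x32 grayscale image
# becomes a flat vector of 1024 angles in [0, pi/2].
#   angles = image_normalization('sample.png')
#   angles.shape  # -> (1024,)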
|
[
"clavercracker.gul@gmail.com"
] |
clavercracker.gul@gmail.com
|
07e306d29db8b59bc2a35ebb4973c20087b9075d
|
daad6e0af15f4fd0171808fb068e5d8e3faab42a
|
/examples/examples/settings.py
|
90538f8d7647a6ec0b52643764b431d6e4b3d1e2
|
[
"MIT"
] |
permissive
|
Captricity/drf_openapi
|
0f40dd45890f5db952bd9300e90e3ca0720a9a7b
|
55788a816638a08315d62ac81f045330404e7baf
|
refs/heads/master
| 2021-01-21T04:25:26.529516
| 2017-09-26T03:54:19
| 2017-09-26T04:21:43
| 101,911,666
| 0
| 0
|
MIT
| 2020-11-19T20:26:08
| 2017-08-30T17:47:34
|
Python
|
UTF-8
|
Python
| false
| false
| 3,255
|
py
|
"""
Django settings for examples project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!z1yj(9uz)zk0gg@5--j)bc4h^i!8))r^dezco8glf190e0&#p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'drf_openapi',
'snippets.apps.SnippetsConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'examples.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'examples.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning'
}
|
[
"limdauto@gmail.com"
] |
limdauto@gmail.com
|
bc2bc3ac824de42c2f0dedfef8bd25dec0d3f0b1
|
ed3a6c06014a8ef47a19eec2679cbae541685308
|
/bot1.py
|
3c59c7e5c7a3b7a3a6cac31a1c966c832cda6472
|
[] |
no_license
|
brianpaul-99/CPS847_Group10_Assignment1
|
594f253b1ab658554f90da16be82cfad9411985a
|
4a5528b5a9e0949253ed53be006b9c376595bfb0
|
refs/heads/main
| 2023-02-27T15:09:55.328603
| 2021-01-31T21:24:39
| 2021-01-31T21:24:39
| 333,554,616
| 0
| 0
| null | 2021-01-31T21:24:40
| 2021-01-27T20:42:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,265
|
py
|
import slack
import os
from pathlib import Path
from dotenv import load_dotenv
# Import Flask
from flask import Flask
# Handles events from Slack
from slackeventsapi import SlackEventAdapter
# Load the Token from .env file
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
# Configure your flask application
app = Flask(__name__)
# Configure SlackEventAdapter to handle events
slack_event_adapter = SlackEventAdapter(os.environ['SIGNING_SECRET'],'/slack/events',app)
# Use the WebClient from slack; other clients are built in as well
client = slack.WebClient(token=os.environ['SLACK_TOKEN'])
# Post a test message to the #assignment-1 channel to confirm the bot is connected
client.chat_postMessage(channel='#assignment-1', text='Send Message Demo')
# Get Bot ID
BOT_ID = client.api_call("auth.test")['user_id']
# handling Message Events
@slack_event_adapter.on('message')
def message(payload):
print(payload)
event = payload.get('event',{})
channel_id = event.get('channel')
user_id = event.get('user')
text2 = event.get('text')
if BOT_ID !=user_id:
client.chat_postMessage(channel='#assignment-1', text=text2)
# Run the webserver micro-service
if __name__ == "__main__":
app.run(debug=True)
|
[
"noreply@github.com"
] |
brianpaul-99.noreply@github.com
|
92ff7182a4b2fd9d5bde25f42bbb76dbbf40fc54
|
9a61fc626d17994d77da478cfbde0d2c34ae2be7
|
/03_Django/03_django_crud/articles/urls.py
|
9773dfc02c72df49bd61813f940a144c6a89fa57
|
[] |
no_license
|
EUNS00/TIL
|
96679db3344d6bd6de60c118dc9de51d4b6b28ea
|
71c523406b856f68e10a32be06f837c20e3b3a06
|
refs/heads/master
| 2023-01-12T21:10:30.192150
| 2020-07-29T00:29:00
| 2020-07-29T00:29:00
| 195,937,989
| 1
| 1
| null | 2023-01-07T11:37:10
| 2019-07-09T05:22:44
|
Python
|
UTF-8
|
Python
| false
| false
| 162
|
py
|
from django.urls import path
from . import views
#6.
urlpatterns = [
path('', views.index),
path('new/', views.new),
path('create/', views.create),
]
|
[
"leeus5629@naver.com"
] |
leeus5629@naver.com
|
76419b2d98032140f8459c4ccf1b355a8b3e1356
|
71cb5d016dcb34f975dbea4c28ad08d2ec80e297
|
/your order, please.py
|
89d5cb6321ed06cc6fee2646db1ae2632ec0794e
|
[] |
no_license
|
viliam-gago/codewars
|
87e3eb5bee2eddcef041a2d17e6089eb006c395d
|
58a3d6ad406ff3693e2fd188dddbdf754a35aba3
|
refs/heads/master
| 2020-12-19T18:24:14.150932
| 2020-06-19T12:56:45
| 2020-06-19T12:56:45
| 235,813,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
def order(sentence):
if sentence:
sentence = sentence.split(' ')
ordered = list(range(len(sentence)))
for word in sentence:
for letter in word:
if letter.isnumeric():
ordered[int(letter) - 1] = word
return ' '.join(ordered)
return ''
print(order(""))
|
[
"viliam.gago@seznam.cz"
] |
viliam.gago@seznam.cz
|
12885ff9e784ef9753b4fc2fb746d103ddf42031
|
fd55bb02402691c90bc39ad4b1f8785e7f3a95ba
|
/4. Ingestion/PyCompute/main.py
|
f541469292448b092bc66dc701a0455c42466c08
|
[] |
no_license
|
Manju2012/bootcamp
|
5c3e4ee06ba279a1efb57501b8647f1cee0c37ea
|
4faaeb0493fa5300a28e397478e196432995c761
|
refs/heads/main
| 2023-09-06T09:18:06.708130
| 2021-11-19T06:08:58
| 2021-11-19T06:08:58
| 429,685,056
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,485
|
py
|
import psycopg2
import datetime
import os
from google.cloud import bigquery
import pandas as pd
import pandas_gbq
import pytz
import logging
import google.cloud.logging
def use_logging_handler():
clientlogging = google.cloud.logging.Client()
clientlogging.setup_logging()
text = "DAtaframe Generated"
logging.info(text)
print("Logged: {}".format(text))
bq_path = "/home/fagcpdebc1_09/ServiceAccountKeys/BigQuery/btcmp-1-569673e95ac5.json"
# bq token path
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]= bq_path
def query(q):
conn = psycopg2.connect(host="35.225.145.213",port="5432", database="myorg", user="postgres", password="Manju@2012")
return pd.read_sql(q, conn)
project = 'btcmp-1'
client = bigquery.Client(project=project)
job_config = bigquery.LoadJobConfig()
job_config.autodetect = True
dataset= 'pgdataset'
table=['project','employee','project_staff','department']
# array for list of tables to be created and input data
schema = [
[
{'name':'proj_id', 'type': 'INT64'},
{'name':'proj_name', 'type': 'STRING'},
{'name':'dept_id', 'type': 'INT64'},
{'name':'proj_start_date', 'type': 'DATE'},
{'name':'proj_end_date', 'type': 'DATE'}
],
[
{'name':'emp_id', 'type':'INT64'},
{'name':'proj_name', 'type': 'STRING'},
{'name':'name', 'type':'STRING'},
{'name':'dept_id', 'type':'INT64'},
{'name':'salary', 'type':'INT64'},
{'name':'joining_date', 'type':'DATE'},
{'name':'leaving_date', 'type':'DATE'},
{'name':'is_active', 'type':'BOOLEAN'}
],
[
{'name':'proj_id', 'type': 'INT64'},
{'name':'emp_id', 'type': 'INT64'},
{'name':'role_name', 'type': 'STRING'},
{'name':'start_date', 'type': 'DATE'},
{'name':'end_date', 'type': 'DATE'}
],
[
{'name':'dept_id', 'type': 'INT64'},
{'name':'dept_name', 'type': 'STRING'},
{'name':'dept_head_id', 'type': 'INT64'}
]
]
# for n,i in zip(table,[0,1,2,3]):
# print(n)
st='SELECT * FROM ' + 'project'
df = query (st)
#df2=pd.DataFrame(df)
#print(df2)
#print(df2.dtypes)
table_id="btcmp-1.{}.{}".format(dataset,'project_staff')
table = bigquery.Table(table_id)
job1 = pandas_gbq.to_gbq(df, table_id, table_schema=schema[0], if_exists="replace")
print("Loaded {}".format( table_id ))
st='SELECT * FROM ' + 'employee'
df = query (st)
#df2=pd.DataFrame(df)
#print(df2)
#print(df2.dtypes)
table_id="btcmp-1.{}.{}".format(dataset,'employee')
table = bigquery.Table(table_id)
job2 = pandas_gbq.to_gbq(df, table_id, table_schema=schema[1], if_exists="replace")
print("Loaded {}".format( table_id ))
st='SELECT * FROM ' + 'project_staff'
df = query (st)
#df2=pd.DataFrame(df)
#print(df2)
#print(df2.dtypes)
table_id="btcmp-1.{}.{}".format(dataset,'project_staff')
table = bigquery.Table(table_id)
job3 = pandas_gbq.to_gbq(df, table_id, table_schema=schema[2], if_exists="replace")
print("Loaded {}".format( table_id ))
st='SELECT * FROM ' + 'department'
df = query (st)
#df2=pd.DataFrame(df)
#print(df2)
#print(df2.dtypes)
table_id="btcmp-1.{}.{}".format(dataset,'department')
table = bigquery.Table(table_id)
job4 = pandas_gbq.to_gbq(df, table_id, table_schema=schema[3], if_exists="replace")
print("Loaded {}".format( table_id ))
if __name__ == "__main__":
use_logging_handler()
|
[
"naganurprasad@yahooo.in"
] |
naganurprasad@yahooo.in
|
b298ca299bd394996aa7361a70c57d92b2062222
|
eb42bf62df4d668eb0c509f026888f1010998afd
|
/src_clean/util_clean.py
|
aab119580f0aebc540d6502f81b02f4cab898bf4
|
[] |
no_license
|
Tar-Telperien/Spring2020_LowResourceNLP
|
04c6d652e1d26297273c771598701230041690de
|
b6f364d7e1485f45e1d5ab20ac1bf8becf352959
|
refs/heads/master
| 2023-02-06T18:54:21.508746
| 2020-12-22T17:46:31
| 2020-12-22T17:46:31
| 274,702,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,317
|
py
|
import logging
import math
import os
import random
import string
import sys
import time
from collections import defaultdict
from dataclasses import dataclass
from datetime import timedelta
from enum import Enum
from functools import partial
from typing import List
import numpy as np
from torch.optim.lr_scheduler import LambdaLR
from tqdm import tqdm
from dataloader_clean import BOS_IDX, EOS_IDX, STEP_IDX
tqdm = partial(tqdm, bar_format='{l_bar}{r_bar}')
class NamedEnum(Enum):
def __str__(self):
return self.value
def log_grad_norm(self, grad_input, grad_output, logger=None):
try:
logger.debug('')
logger.debug('Inside %r backward', self.__class__.__name__)
logger.debug('grad_input size: %r', grad_input[0].size())
logger.debug('grad_output size: %r', grad_output[0].size())
logger.debug('grad_input norm: %r', grad_input[0].detach().norm())
    except Exception:
        # the logger may be None when the hook is attached without one
        pass
def grad_norm(parameters, norm_type=2):
parameters = list(filter(lambda p: p.grad is not None, parameters))
norm_type = float(norm_type)
if norm_type == float('inf'):
total_norm = max(p.grad.detach().abs().max() for p in parameters)
else:
total_norm = 0
for p in parameters:
param_norm = p.grad.detach().norm(norm_type)
total_norm += param_norm**norm_type
total_norm = total_norm**(1. / norm_type)
return total_norm
class WarmupInverseSquareRootSchedule(LambdaLR):
""" Linear warmup and then inverse square root decay.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
Inverse square root decreases learning rate from 1. to 0. over remaining steps.
"""
def __init__(self, optimizer, warmup_steps, last_epoch=-1):
self.warmup_steps = warmup_steps
self.decay_factor = warmup_steps**0.5
super(WarmupInverseSquareRootSchedule,
self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1, self.warmup_steps))
return self.decay_factor * step**-0.5
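# Usage sketch (not from the original repo): attach the schedule to any torch
# optimizer and call scheduler.step() once per training step.
#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#   scheduler = WarmupInverseSquareRootSchedule(optimizer, warmup_steps=4000)
#   for batch in loader:
#       ...               # forward/backward
#       optimizer.step()
#       scheduler.step()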
def maybe_mkdir(filename):
'''
maybe mkdir
'''
path = os.path.dirname(filename)
if not os.path.isdir(path):
try:
os.makedirs(path)
except FileExistsError:
pass
class LogFormatter():
def __init__(self):
self.start_time = time.time()
def format(self, record):
elapsed_seconds = round(record.created - self.start_time)
prefix = "%s - %s - %s" % (record.levelname, time.strftime('%x %X'),
timedelta(seconds=elapsed_seconds))
message = record.getMessage()
message = message.replace('\n', '\n' + ' ' * (len(prefix) + 3))
return "%s - %s" % (prefix, message) if message else ''
def get_logger(log_file, log_level='info'):
'''
create logger and output to file and stdout
'''
assert log_level in ['info', 'debug']
log_formatter = LogFormatter()
logger = logging.getLogger()
log_level = {'info': logging.INFO, 'debug': logging.DEBUG}[log_level]
logger.setLevel(log_level)
stream = logging.StreamHandler(sys.stdout)
stream.setFormatter(log_formatter)
logger.addHandler(stream)
filep = logging.FileHandler(log_file, mode='a')
filep.setFormatter(log_formatter)
logger.addHandler(filep)
return logger
def get_temp_log_filename(prefix='exp', dir='scratch/explog'):
id = id_generator()
fp = f'{dir}/{prefix}-{id}'
maybe_mkdir(fp)
return fp
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
@dataclass
class Eval:
desc: str
long_desc: str
res: float
class Evaluator(object):
def __init__(self):
pass
def evaluate_all(self, data_iter, nb_data, model, decode_fn) -> List[Eval]:
raise NotImplementedError
class BasicEvaluator(Evaluator):
'''docstring for BasicEvaluator'''
def evaluate(self, predict, ground_truth):
'''
evaluate single instance
'''
correct = 1
if len(predict) == len(ground_truth):
for elem1, elem2 in zip(predict, ground_truth):
if elem1 != elem2:
correct = 0
break
else:
correct = 0
dist = edit_distance(predict, ground_truth)
return correct, dist
def evaluate_all(self, data_iter, nb_data, model, decode_fn):
'''
evaluate all instances
'''
correct, distance, nb_sample = 0, 0, 0
for src, trg in tqdm(data_iter(), total=nb_data):
pred, _ = decode_fn(model, src)
nb_sample += 1
trg = trg.view(-1).tolist()
trg = [x for x in trg if x != BOS_IDX and x != EOS_IDX]
corr, dist = self.evaluate(pred, trg)
correct += corr
distance += dist
acc = round(correct / nb_sample * 100, 4)
distance = round(distance / nb_sample, 4)
return [
Eval('acc', 'accuracy', acc),
Eval('dist', 'average edit distance', distance)
]
class PairBasicEvaluator(BasicEvaluator):
'''docstring for PairBasicEvaluator'''
def evaluate(self, predict, ground_truth):
'''
evaluate single instance
'''
predict = [x for x in predict if x != STEP_IDX]
ground_truth = [x for x in ground_truth if x != STEP_IDX]
return super().evaluate(predict, ground_truth)
def edit_distance(str1, str2):
'''Simple Levenshtein implementation for evalm.'''
table = np.zeros([len(str2) + 1, len(str1) + 1])
for i in range(1, len(str2) + 1):
table[i][0] = table[i - 1][0] + 1
for j in range(1, len(str1) + 1):
table[0][j] = table[0][j - 1] + 1
for i in range(1, len(str2) + 1):
for j in range(1, len(str1) + 1):
if str1[j - 1] == str2[i - 1]:
dg = 0
else:
dg = 1
table[i][j] = min(table[i - 1][j] + 1, table[i][j - 1] + 1,
table[i - 1][j - 1] + dg)
return int(table[len(str2)][len(str1)])
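# Usage sketch: classic Levenshtein distance on plain sequences.
#   edit_distance('kitten', 'sitting')  # -> 3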
|
[
"venicapiea@verizon.net"
] |
venicapiea@verizon.net
|
ff173730fa93fe66232ab20004d4e2e0af9d20d3
|
8dc7902014a01cce4220cde2b7bd5dd8044251b4
|
/app/models.py
|
b4f8dd82fb46659177f42c08b0b3a77e54bb5cc2
|
[
"MIT"
] |
permissive
|
parkerws/FishTips_Flask
|
a3817743deaf29a9e6581da828f469b4f6dd8e5d
|
481faea8f507999c534cd791cd43744ddb6c3c55
|
refs/heads/master
| 2023-02-15T05:18:54.125627
| 2020-12-23T03:40:48
| 2020-12-23T03:40:48
| 322,951,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,852
|
py
|
from app import db, login
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
password_hash = db.Column(db.String(128))
location = db.Column(db.String(64))
about_me = db.Column(db.String(140))
def __repr__(self):
return '<User {}>'.format(self.username)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
class Station(db.Model):
id = db.Column(db.Integer, primary_key=True)
station_id = db.Column(db.String(8), index=True, unique=True)
state = db.Column(db.String(3))
name = db.Column(db.String(64))
timezone = db.Column(db.String(5))
lat = db.Column(db.Float)
long = db.Column(db.Float)
def __init__(self, station_id, state, name, timezone, lat, long):
self.station_id = station_id
self.state = state
self.name = name
self.timezone = timezone
self.lat = lat
self.long = long
def __repr__(self):
return '<Station {}, location {}>'.format(self.station_id, str(self.lat) +' '+ str(self.long))
def serialize(self):
return {
"station_id": self.station_id,
"state": self.state,
"name": self.name,
"timezone": self.timezone,
"lat": self.lat,
"long": self.long
}
@login.user_loader
def load_user(id):
return User.query.get(int(id))
|
[
"parkerws3824@gmail.com"
] |
parkerws3824@gmail.com
|
2a13aeb2fd4c1a9cb591b2a62d8e3e9831e5503e
|
82884dc7208e9c0a25eb1211daeda9c944cfd331
|
/handlers/users/ten_minutes_meet.py
|
5afd55bdca4b896545426e34a069aa99b7db713a
|
[] |
no_license
|
gradinarnn/telegram_bot
|
081100e22b6775f56d5fea8e88a21dac4640f15e
|
19d1cc20450cafc58d03239cf7263088543585d1
|
refs/heads/master
| 2023-08-06T01:07:25.665283
| 2021-09-14T07:06:50
| 2021-09-14T07:06:50
| 380,561,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 936
|
py
|
from aiogram import types
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters.builtin import Command
from keyboards.inline.callback_data import change_meeting_status_callback
from keyboards.inline.inline_buttons import one_button
from loader import dp
@dp.message_handler(Command("fast_meet"))
async def bot_start(message: types.Message, state: FSMContext):
await message.answer("🧙♀️ В тот же миг Алиса юркнула за ним следом, не думая о том, как же она будет выбираться обратно.",
reply_markup=one_button(text_btn=" перейти к чат-боту с 10 минутными почтами",
callback_data=change_meeting_status_callback.new(status="10"),
url='t.me/fast_10_minute_meet_bot?start=666'))
|
[
"gradinarnn@gmail.com"
] |
gradinarnn@gmail.com
|