blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9b1e0c1c235c51767a012be75e7b2a6729897c0d
|
e9da16d2d1468a47de78c66e4397216038e7ba88
|
/zscdumin/谷歌翻译/python代码/getICAPSUrlList.py
|
e8dee91a1bf115825a8623c2154bb110ff77687d
|
[] |
no_license
|
ZSCDumin/Spider
|
9b28beb5d5166365c3b954c32d28a409a79fb085
|
ba63f7122ce8e530cdfaaee56670dd5624d4c864
|
refs/heads/master
| 2021-06-27T02:04:29.622633
| 2020-09-13T01:20:18
| 2020-09-13T01:20:18
| 137,719,217
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,269
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-01-28 20:30:54
# @Author : 杜敏 (2712220318@qq.com)
# @Link : https://github.com/ZSCDumin
# @Version : $Id$
import requests
from bs4 import BeautifulSoup
def getHTMLText(url):
    """Fetch *url* and return the decoded page text.

    On any request failure returns the literal string "爬取失败"
    ("crawl failed"), preserving the original best-effort contract
    (callers never see an exception from this function).
    """
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        # Prefer the content-sniffed encoding over the (often wrong) header one.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # and programming errors are no longer silently swallowed.
        return "爬取失败"
def getUrlList(urlList, downLoadList, url):
    """Scrape an ICAPS presentations listing page for paper links.

    Appends the paper-info URLs to *urlList* and the matching PDF
    download URLs to *downLoadList* (both mutated in place).
    """
    html = getHTMLText(url)
    soup = BeautifulSoup(html, 'html.parser')
    # Loop variable renamed: the original reused (and shadowed) the
    # `url` parameter for each anchor tag.
    for anchor in soup.find_all('a'):
        href = anchor.attrs.get('href')  # explicit check replaces bare except/continue
        if href is None:
            continue
        href = str(href)
        # Paper links look like ".../paper/view/<id>"; the length filter
        # drops short navigation links (threshold kept from the original).
        if "view" in href and "paper" in href and len(href) > 66:
            paperInfoUrl = href.replace("view", "viewPaper")
            print(paperInfoUrl)
            paperDownloadUrl = href.replace("view", "download")
            print(paperDownloadUrl)
            urlList.append(paperInfoUrl)
            downLoadList.append(paperDownloadUrl)
def saveUrlAsFile(urlList, downLoadList, fPath, num):
    """Save both URL lists as text files under *fPath*.

    num: two-digit year suffix (e.g. "18") used in the file names.
    Writes one URL per line; echoes each URL and a success message,
    matching the original console output.
    """
    _writeUrlFile(fPath + "\\" + "ICAPS_20" + num + "_PaperInfoUrl.txt", urlList)
    print("Url列表文件保存成功")
    _writeUrlFile(fPath + "\\" + "ICAPS_20" + num + "_DownloadUrl.txt", downLoadList)
    print("Download列表文件保存成功")


def _writeUrlFile(path, urls):
    # Helper: write one URL per line (deduplicates the two copy-pasted
    # loops of the original; the redundant f.close() inside `with` is gone).
    with open(path, 'w') as f:
        for url in urls:
            print(url)
            f.write(url + "\n")
def main():
    """Crawl the ICAPS 2009-2018 listing pages and save per-year URL files."""
    urlList = []       # paper-info URLs collected for the current year
    downLoadList = []  # download URLs collected for the current year
    fPath = "F:\\接单项目\\谷歌翻译\\论文数据\\ICAPS"  # output directory
    for year in range(9, 19):
        num = "%02d" % year  # zero-padded two-digit year suffix ("09".."18")
        url = "https://www.aaai.org/ocs/index.php/ICAPS/ICAPS" + num + "/schedConf/presentations"
        print(url)
        getUrlList(urlList, downLoadList, url)
        saveUrlAsFile(urlList, downLoadList, fPath, num)
        # Reset the accumulators so each year's files contain only that year.
        urlList.clear()
        downLoadList.clear()


main()
|
[
"2712220318@qq.com"
] |
2712220318@qq.com
|
070fec483cc8b3618847116669bce6bd58c2f158
|
b290100dc3f40cc7867e21080c92135a75bca06b
|
/labwork/labwork/urls.py
|
6b1f3561f1545c492a3cecbffcdb95dbef43ecd2
|
[] |
no_license
|
Kunduzha/labwork
|
a13d69eaff5ca3cad8a17bb038f85683648cc82f
|
366af69bbbffcdf0422fe0ea83021e52ca82a61e
|
refs/heads/master
| 2023-04-05T01:43:17.305601
| 2021-03-31T10:55:05
| 2021-03-31T10:55:05
| 353,324,619
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 749
|
py
|
"""labwork URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# Only the Django admin is routed; no app-specific URLconfs are included yet.
urlpatterns = [
    path('admin/', admin.site.urls),
]
|
[
"kuzanai@mail.ru"
] |
kuzanai@mail.ru
|
0d1f8ac232ce4709e84ae40a42d8f4df1accaf4b
|
d6b4f2a17eacfda27a2823e180c45ae783cea43b
|
/introducao-python/knn/iris-knn.py
|
6778b550102d6ea70867ead5f0fa9ce434993cc0
|
[] |
no_license
|
leonardoFiedler/data-science-course
|
ba31d9c4b0cced259d554b00786981c8b4a6f1c9
|
fe7f576a66091bc9d2db4e28e9368d05575315cd
|
refs/heads/master
| 2020-09-04T19:30:03.941010
| 2019-11-22T21:56:13
| 2019-11-22T21:56:13
| 219,868,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 791
|
py
|
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
# Hand-rolled k-nearest-neighbours classifier on the iris dataset,
# evaluated against a fixed 20% hold-out split.
(X, y) = load_iris(return_X_y=True)
data = train_test_split(X, y, test_size=0.2, random_state=1)
(X_train, X_test, y_train, y_test) = data

# k: number of nearest training points consulted per test sample.
k = 5

labelsResults = []
for i in range(len(X_test)):
    x = X_test[i, :]
    # Squared Euclidean distance from the query point to every training row
    # (monotonic in the true distance, so the ranking is identical).
    d = X_train - x
    d = np.square(d).sum(axis=1)
    sortedMatDis = np.argsort(d)
    labels = [y_train[sortedMatDis[j]] for j in range(k)]
    # Majority vote. The original called the module-level pd.value_counts(),
    # which was deprecated and removed in pandas 2.0; Series.value_counts()
    # is the supported equivalent and returns the same count ordering.
    labelsResults.append(pd.Series(labels).value_counts().idxmax())

print(labelsResults)
print('Score:', accuracy_score(y_test, labelsResults))
|
[
"leonardo.fiedler.96@gmail.com"
] |
leonardo.fiedler.96@gmail.com
|
337238a653f2c421c1f017238cbef58842b56a43
|
567ecf4ea5afbd7eb3003f7e14e00c7b9289b9c6
|
/ax/storage/json_store/decoders.py
|
7a586e03ddb3b32b0a5780c941e67e791e29d11a
|
[
"MIT"
] |
permissive
|
danielrjiang/Ax
|
f55ef168a59381b5a03c6d51bc394f6c72ed0f39
|
43014b28683b3037b5c7307869cb9b75ca31ffb6
|
refs/heads/master
| 2023-03-31T12:19:47.118558
| 2019-12-02T16:47:39
| 2019-12-02T16:49:36
| 225,493,047
| 0
| 0
|
MIT
| 2019-12-03T00:09:52
| 2019-12-03T00:09:51
| null |
UTF-8
|
Python
| false
| false
| 3,501
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from datetime import datetime
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from ax.core.arm import Arm
from ax.core.base_trial import TrialStatus
from ax.core.batch_trial import AbandonedArm, BatchTrial, GeneratorRunStruct
from ax.core.generator_run import GeneratorRun
from ax.core.runner import Runner
from ax.core.trial import Trial
if TYPE_CHECKING:
# import as module to make sphinx-autodoc-typehints happy
from ax import core # noqa F401 # pragma: no cover
def batch_trial_from_json(
    experiment: "core.experiment.Experiment",
    index: int,
    trial_type: Optional[str],
    status: TrialStatus,
    time_created: datetime,
    time_completed: Optional[datetime],
    time_staged: Optional[datetime],
    time_run_started: Optional[datetime],
    abandoned_reason: Optional[str],
    run_metadata: Optional[Dict[str, Any]],
    generator_run_structs: List[GeneratorRunStruct],
    runner: Optional[Runner],
    abandoned_arms_metadata: Dict[str, AbandonedArm],
    num_arms_created: int,
    status_quo: Optional[Arm],
    status_quo_weight_override: float,
    optimize_for_power: Optional[bool],
) -> BatchTrial:
    """Load Ax BatchTrial from JSON.

    Other classes don't need explicit deserializers, because we can just use
    their constructors (see decoder.py). However, the constructor for Batch
    does not allow us to exactly recreate an existing object, so the saved
    state is restored by writing the private attributes directly.
    """
    restored = BatchTrial(experiment=experiment)
    # Private attributes, restored in the same order as the original code.
    private_state = {
        "_index": index,
        "_trial_type": trial_type,
        "_status": status,
        "_time_created": time_created,
        "_time_completed": time_completed,
        "_time_staged": time_staged,
        "_time_run_started": time_run_started,
        "_abandoned_reason": abandoned_reason,
        "_run_metadata": run_metadata or {},
        "_generator_run_structs": generator_run_structs,
        "_runner": runner,
        "_abandoned_arms_metadata": abandoned_arms_metadata,
        "_num_arms_created": num_arms_created,
        "_status_quo": status_quo,
        "_status_quo_weight_override": status_quo_weight_override,
    }
    for attr_name, attr_value in private_state.items():
        setattr(restored, attr_name, attr_value)
    # optimize_for_power is a public attribute, set directly.
    restored.optimize_for_power = optimize_for_power
    return restored
def trial_from_json(
    experiment: "core.experiment.Experiment",
    index: int,
    trial_type: Optional[str],
    status: TrialStatus,
    time_created: datetime,
    time_completed: Optional[datetime],
    time_staged: Optional[datetime],
    time_run_started: Optional[datetime],
    abandoned_reason: Optional[str],
    run_metadata: Optional[Dict[str, Any]],
    generator_run: GeneratorRun,
    runner: Optional[Runner],
    num_arms_created: int,
) -> Trial:
    """Load Ax trial from JSON.

    Other classes don't need explicit deserializers, because we can just use
    their constructors (see decoder.py). However, the constructor for Trial
    does not allow us to exactly recreate an existing object, so the saved
    state is restored by writing the private attributes directly.
    """
    restored = Trial(experiment=experiment, generator_run=generator_run)
    # Private attributes, restored in the same order as the original code.
    private_state = {
        "_index": index,
        "_trial_type": trial_type,
        "_status": status,
        "_time_created": time_created,
        "_time_completed": time_completed,
        "_time_staged": time_staged,
        "_time_run_started": time_run_started,
        "_abandoned_reason": abandoned_reason,
        "_run_metadata": run_metadata or {},
        "_runner": runner,
        "_num_arms_created": num_arms_created,
    }
    for attr_name, attr_value in private_state.items():
        setattr(restored, attr_name, attr_value)
    return restored
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
62e62e7af49f8e9e5000474f3e2369b29eac4b01
|
76fd9a2d3b732a73b688a0c227bfe07219ca1ace
|
/wp3db/__init__.py
|
951dc19a9d4263b1eba80b26bb6f304589845284
|
[] |
no_license
|
wp3-wearable/dbmodels
|
61d1add4b09eae1f7be9d36a8db73df8bf9b240d
|
f881f98e6091649d8b8a79b5b9bc895792f23d54
|
refs/heads/master
| 2020-04-13T14:25:25.887785
| 2018-12-27T11:14:51
| 2018-12-27T11:14:51
| 163,262,545
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24
|
py
|
from .db import Session
|
[
"dev.trk.9001@gmail.com"
] |
dev.trk.9001@gmail.com
|
cac26fe53126cbc8d9f1890a5eb47aa89c59b35b
|
d998988cb9f6f73be4f5b3cc302424119f66c856
|
/MGITGatePass/basic_app/migrations/0005_auto_20191217_2052.py
|
78882130aea3599e56d61a033f94c7640406fed3
|
[] |
no_license
|
sai-sambhu/MGITGatePass
|
cab9ea545f8b8c5fd959613b364dd161c416fab5
|
a14ad7fb79163c1c53d81a5732d2ac0e1ec5544e
|
refs/heads/master
| 2020-11-26T00:51:49.287696
| 2019-12-18T20:02:49
| 2019-12-18T20:02:49
| 228,912,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
# Generated by Django 2.2.5 on 2019-12-17 15:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a ``roll`` number to StudentProfileInfo and default its picture."""

    dependencies = [
        ('basic_app', '0004_auto_20191217_2031'),
    ]

    operations = [
        # New roll-number column; existing rows are back-filled with the
        # placeholder '17261A0551' (a sample roll number matching the format).
        migrations.AddField(
            model_name='studentprofileinfo',
            name='roll',
            field=models.CharField(default='17261A0551', max_length=10),
        ),
        # profile_pic becomes optional, falling back to a stock default image.
        migrations.AlterField(
            model_name='studentprofileinfo',
            name='profile_pic',
            field=models.ImageField(blank=True, default='default.jpeg', upload_to='profile_pics_students'),
        ),
    ]
|
[
"saisambhuprasad@gmail.com"
] |
saisambhuprasad@gmail.com
|
278d818322d05275fb24dbbd4ce90fbb73f4aad5
|
cbd347d69f4ae9725ec479795e21ef45d6ccf41a
|
/tests/constants.py
|
968375f731522300035d26c8d7a6828c743fad50
|
[
"Apache-2.0"
] |
permissive
|
pyni/perception
|
9d1e398964312d8ebdef0374e1e089fa7ff28397
|
81262bd05524e9d28568d55107718783023ae14c
|
refs/heads/master
| 2020-03-24T02:23:21.647095
| 2018-07-19T20:05:00
| 2018-07-19T20:05:00
| 142,373,747
| 1
| 0
|
Apache-2.0
| 2018-07-26T01:46:36
| 2018-07-26T01:46:36
| null |
UTF-8
|
Python
| false
| false
| 122
|
py
|
# Shared fixture parameters for the test suite.
IM_HEIGHT = 100  # test image height in pixels
IM_WIDTH = 100  # test image width in pixels
NUM_POINTS = 100  # number of sample points — presumably per point set; confirm in tests
NUM_ITERS = 500  # iteration budget for randomized/iterative tests
BINARY_THRESH = 127  # mid-range threshold, consistent with 8-bit image binarization
COLOR_IM_FILEROOT = 'data/test_color'  # path root (no extension) for the color test image
|
[
"jmahler@berkeley.edu"
] |
jmahler@berkeley.edu
|
db1facd386d242b1b380c5534537494db308c88d
|
002aca3621afbe787f5f2133d3dbe8af6e5e9e33
|
/contraClientes.py
|
dfaa170150fe8c98060e42d61179b22d6a65867c
|
[] |
no_license
|
Toti848/Joselyn
|
5ce55f38ac3092b5f283ddad47a0351ad78eefe3
|
735e123ef7a80770d54a26afe8ef9f112b37e7be
|
refs/heads/master
| 2022-12-02T06:59:42.999170
| 2020-08-20T03:35:21
| 2020-08-20T03:35:21
| 284,575,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,978
|
py
|
from tkinter import *
from tkinter import font
from tkinter import messagebox as msg
from tkinter import ttk
class Main:
    """Tkinter login window for client access.

    Presents user/password fields; valid credentials close this window
    and open the client chat (``Chat_C``).
    """

    def __init__(self):
        # Window
        self.raiz = Tk()
        self.raiz.title ("Acceso a los Clientes")
        self.raiz.geometry('600x200')
        # Font
        self.fuente = font.Font(weight="bold")
        self.user = StringVar()
        self.pasw = StringVar()
        # Title label
        self.lb_tituloPantalla = Label(self.raiz, text = "ACCESO DEL LOS CLIENTES", font = self.fuente)
        self.lb_tituloPantalla.place(x = 180, y = 20)
        # User field
        self.lb_User = Label(self.raiz, text = "User:")
        self.lb_User.place(x = 100, y = 60)
        self.txt_User = Entry(self.raiz, textvariable=self.user, justify="right", width = 30)
        self.txt_User.place(x = 230, y = 60)
        # Password field (NOTE(review): no show="*" — input is visible on screen)
        self.lb_Password = Label(self.raiz, text = "Password:")
        self.lb_Password.place(x = 100, y = 90)
        self.txt_Password = Entry(self.raiz, textvariable=self.pasw, justify="right", width = 30)
        self.txt_Password.place(x = 230, y = 90)
        # Clear button
        self.bt_borrar = Button(self.raiz, text="Limpiar", width=15, command = self.Limpiar)
        self.bt_borrar.place(x = 190, y = 130)
        # Login button
        self.bt_enviar = Button(self.raiz, text="Acceder", width=15, command = self.Acceder)
        self.bt_enviar.place(x = 310, y = 130)
        # Blocks here until the window is closed.
        self.raiz.mainloop()

    def Acceder(self):
        """Check the hard-coded credentials; on success open the client chat."""
        if(self.user.get() == "Cliente" and self.pasw.get() == "p34"):
            # Imported lazily so the chat module is only loaded after login.
            from Cliente_Socket import Chat_C
            self.raiz.destroy()
            Chat_C()
        else:
            msg.showinfo("Error", "La contrasena o ususario es incorrecta")

    def Limpiar(self):
        """Clear both input fields."""
        self.user.set("")
        self.pasw.set("")
def main():
    """Entry point: open the client-access login window (blocks in mainloop)."""
    Main()
    return 0


if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
4b6acdbc898c6ac10a32ac3ebabe2a347cac037f
|
a5846332d42c8705d054792bdac219560d77d32f
|
/Test/test.py
|
5dfe4d8d72022058c8dfbfa854f2b08bdaab2bc1
|
[] |
no_license
|
Jinsung-Jeon/Deep_Learning_Code
|
2927a65adc9c7ef125603d186d4d5f38cebcfade
|
a87658c4beb823f9c34019e28cad736b259a7747
|
refs/heads/master
| 2020-12-13T02:35:52.306209
| 2020-03-06T07:01:11
| 2020-03-06T07:01:11
| 234,288,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,159
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 23 14:32:43 2020

@author: Jinsung

Driver script that runs each chapter's example from the Deep_Learning_Code
repository. ``runfile`` is a Spyder IDE builtin, so this script is meant to
be executed interactively inside Spyder, one section at a time.
"""
# Chap1 test: regression — estimating abalone ring counts.
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap1-Regression analysis Estimation of the number of rings in abalone/abalone.py',
        wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap1-Regression analysis Estimation of the number of rings in abalone')
abalone_exec()
# Chap2 test: binary classification — pulsar star detection.
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap2-Binary Classification predicting a pulsar star/pulsar.py',
        wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap2-Binary Classification predicting a pulsar star')
pulsar_exec()
# Extended variant; adjust_ratio=True presumably rebalances the skewed
# classes — confirm against pulsar_ext.py.
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap2-Binary Classification predicting a pulsar star/pulsar_ext.py',
        wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap2-Binary Classification predicting a pulsar star')
pulsar_exec(adjust_ratio=True)
# Chap3 test: multi-class classification on the steel-plates dataset.
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap3-Multi Classification/steel_test.py',
        wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap3-Multi Classification')
steel_exec()
# Chap4 test: re-runs the earlier chapter models through the MLP framework.
# Each runfile of mlp.py re-loads the framework; set_hidden configures the
# hidden-layer widths for the following *_exec call.
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap4-MLP based structure/mlp.py',
        wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap4-MLP based structure')
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap1-Regression analysis Estimation of the number of rings in abalone/abalone.py',
        wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap1-Regression analysis Estimation of the number of rings in abalone')
set_hidden([])  # no hidden layers: plain linear regression
abalone_exec(epoch_count=50, report=10)
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap4-MLP based structure/mlp.py',
        wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap4-MLP based structure')
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap2-Binary Classification predicting a pulsar star/pulsar.py',
        wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap2-Binary Classification predicting a pulsar star')
set_hidden(6)  # single hidden layer of width 6
pulsar_exec(epoch_count=50, report=10)
set_hidden([12,6])  # two hidden layers
pulsar_exec(epoch_count=50, report=10)
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap4-MLP based structure/mlp.py',
        wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap4-MLP based structure')
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap2-Binary Classification predicting a pulsar star/pulsar_ext.py',
        wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap2-Binary Classification predicting a pulsar star')
set_hidden([12,6])
pulsar_exec(epoch_count=50, report=10, adjust_ratio=True)
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap4-MLP based structure/mlp.py',
        wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap4-MLP based structure')
runfile('C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap3-Multi Classification/steel_test.py',
        wdir='C:/Users/Jinsung/Documents/Deep_Learning_Code/Chap3-Multi Classification')
LEARNING_RATE = 0.0001  # lowered LR for the deeper steel model
set_hidden([12,6,4])
steel_exec(epoch_count=50, report=10)
# Chap5 test: the object-oriented MlpModel/Dataset framework.
ad = AbaloneDataset()
am = MlpModel('abalone_model',ad,[])
am.exec_all(epoch_count=10, report=2)
pd = PulsarDataset()
pm = MlpModel('pulsar_model', pd, [4])
pm.exec_all()
pm.visualize(5)
sd = SteelDataset()
sm = MlpModel('steel_model', sd, [12,7])
sm.exec_all(epoch_count=50, report=10)
psd = PulsarSelectDataset()
psm = MlpModel('pulsar_select_model', psd, [4])
psm.exec_all()
fd = FlowersDataset()
fm = MlpModel('flowers_model_1', fd, [10])  # original note: collapsed to one distribution, always answering "dandelion"
fm.exec_all(epoch_count=10, report=2)
fm2 = MlpModel('flowers_model_2', fd, [30,10])
fm2.exec_all(epoch_count=10, report=2)
# Chap6 test: Office31 multi-domain dataset with deeper MLPs; the third run
# switches the optimizer to Adam.
od = Office31Dataset()
om1 = MlpModel('office31_model_1', od, [10])
om1.exec_all(epoch_count=20, report=10)
om2 = MlpModel('office31_model_2', od, [64,32,10])
om2.exec_all(epoch_count=20, report=10, learning_rate=0.0001)
om3 = MlpModel('office31_model_3', od, [64,32,10])
om3.use_adam = True
om3.exec_all(epoch_count=50, report=10, learning_rate=0.0001)
# Chap7 test: basic CNN layers. Layer specs are [kind, {params}] pairs;
# 'full' = dense, 'conv' = convolution, 'max'/'avg' = pooling.
fd = FlowersDataset([96, 96], [96, 96, 3])
od = Office31Dataset([96, 96], [96, 96, 3])
fm1 = CnnBasicModel('flowers_model_1', fd, [30, 10])
fm1.exec_all(epoch_count=10, report=2)
fm2 = CnnBasicModel('flowers_model_2', fd, [['full', {'width':30}],['full', {'width':10}]])
fm2.use_adam=False
fm2.exec_all(epoch_count = 10, report = 2)
fm3 = CnnBasicModel('flowers_model_3', fd, [['conv', {'ksize':5, 'chn':6}],
                                            ['max', {'stride':4}],
                                            ['conv', {'ksize':3, 'chn':12}],
                                            ['avg', {'stride':2}]],
                    True)
fm3.exec_all(epoch_count = 10, report=2)
# Same conv stack with three activation functions (default, sigmoid, tanh).
# NOTE(review): 'officie31' typos below are the original model names — they
# are just labels, so they were left as-is.
om1 = CnnBasicModel('officie31_model_1', od,
                    [['conv', {'ksize':3, 'chn':6}],
                     ['max', {'stride':2}],
                     ['conv', {'ksize':3, 'chn':12}],
                     ['max', {'stride':2}],
                     ['conv', {'ksize':3, 'chn':24}],
                     ['avg', {'stride':3}]])
om1.exec_all(epoch_count=10, report =2)
om2 = CnnBasicModel('officie31_model_2', od,
                    [['conv', {'ksize':3, 'chn':6, 'actfunc':'sigmoid'}],
                     ['max', {'stride':2}],
                     ['conv', {'ksize':3, 'chn':12, 'actfunc':'sigmoid'}],
                     ['max', {'stride':2}],
                     ['conv', {'ksize':3, 'chn':24, 'actfunc':'sigmoid'}],
                     ['avg', {'stride':3}]])
om2.exec_all(epoch_count=10, report =2)
om3 = CnnBasicModel('officie31_model_3', od,
                    [['conv', {'ksize':3, 'chn':6, 'actfunc':'tanh'}],
                     ['max', {'stride':2}],
                     ['conv', {'ksize':3, 'chn':12, 'actfunc':'tanh'}],
                     ['max', {'stride':2}],
                     ['conv', {'ksize':3, 'chn':24, 'actfunc':'tanh'}],
                     ['avg', {'stride':3}]])
om3.exec_all(epoch_count=10, report =2)
# Chap8 test: regularization (L1/L2 decay, dropout, noise, batch-norm).
fd = FlowersDataset([96, 96], [96, 96, 3])
od = Office31Dataset([96, 96], [96, 96, 3])
fm1 = CnnRegModel('flowers_model_1', fd, [30, 10])
fm1.exec_all(epoch_count=10, report=2, show_params=True)
fm2 = CnnRegModel('flowers_model_2', fd, [30, 10], l2_decay=0.1)
fm2.exec_all(epoch_count=10, show_cnt=0, show_params=True)
fm3 = CnnRegModel('flowers_model_3', fd, [30, 10], l1_decay=0.1)
fm3.exec_all(epoch_count=10, show_cnt=0, show_params=True)
# Baseline conv stack, reused by the office31 models below.
cnn1 = [['conv', {'ksize':3, 'chn':6}],
        ['max', {'stride':2}],
        ['conv', {'ksize':3, 'chn':12}],
        ['max', {'stride':2}],
        ['conv', {'ksize':3, 'chn':24}],
        ['avg', {'stride':3}]]
# FIX: the original called CnnRegModel('flowers_cnn_1') with no dataset or
# layer config, unlike every sibling construction; cnn1 was also otherwise
# unused here. Pass (fd, cnn1) as clearly intended.
fcnn1 = CnnRegModel('flowers_cnn_1', fd, cnn1)
fcnn1.exec_all(epoch_count=10, report=2)
# Same stack with dropout after each pooling stage.
cnn2 = [['conv', {'ksize':3, 'chn':6}],
        ['max', {'stride':2}],
        ['dropout', {'keep_prob':0.6}],
        ['conv', {'ksize':3, 'chn':12}],
        ['max', {'stride':2}],
        ['dropout', {'keep_prob':0.6}],
        ['conv', {'ksize':3, 'chn':24}],
        ['avg', {'stride':3}],
        ['dropout', {'keep_prob':0.6}]]
fcnn2 = CnnRegModel('flowers_cnn_2',fd, cnn2)
fcnn2.exec_all(epoch_count=10, report=2, show_cnt=0)
# Same stack with Gaussian input noise before each conv.
cnn3 = [['noise', {'type':'normal','mean':0,'std':0.01}],
        ['conv', {'ksize':3, 'chn':6}],
        ['max', {'stride':2}],
        ['noise', {'type':'normal','mean':0,'std':0.01}],
        ['conv', {'ksize':3, 'chn':12}],
        ['max', {'stride':2}],
        ['noise', {'type':'normal','mean':0,'std':0.01}],
        ['conv', {'ksize':3, 'chn':24}],
        ['avg', {'stride':3}]]
fcnn3 = CnnRegModel('flowers_cnn_3',fd, cnn3)
fcnn3.exec_all(epoch_count=10, report=2, show_cnt=0)
# Same stack with batch normalization before each conv.
cnn4 = [['batch_normal'],
        ['conv', {'ksize':3, 'chn':6}],
        ['max', {'stride':2}],
        ['batch_normal'],
        ['conv', {'ksize':3, 'chn':12}],
        ['max', {'stride':2}],
        ['batch_normal'],
        ['conv', {'ksize':3, 'chn':24}],
        ['avg', {'stride':3}]]
fcnn4 = CnnRegModel('flowers_cnn_4',fd, cnn4)
fcnn4.exec_all(epoch_count=10, report=2, show_cnt=0)
od = Office31Dataset([96, 96], [96, 96, 3])
ocnn1 = CnnRegModel('office31_cnn_1', od, cnn1)
ocnn2 = CnnRegModel('office31_cnn_2', od, cnn1)
ocnn3 = CnnRegModel('office31_cnn_3', od, cnn1)
ocnn4 = CnnRegModel('office31_cnn_4', od, cnn1)
ocnn1.exec_all(epoch_count=10, show_cnt=0)
ocnn2.exec_all(epoch_count=10, show_cnt=0)
ocnn3.exec_all(epoch_count=10, show_cnt=0)
ocnn4.exec_all(epoch_count=10, show_cnt=0)
# Chap9: macro-based model construction.
# A macro is a named layer-graph template; ['serial', ...] chains branches,
# ['parallel', ...] runs them side by side, and ['custom', {'name': ...}]
# expands a previously registered macro. '#chn' is a substitution argument
# filled in via 'args' at expansion time.
# Full Inception-v3 structure check on a dummy 299x299x3 dataset.
imagenet = DummyDataset('imagenet', 'select', [299,299,3], 200)
CnnExtModel.set_macro('v3_preproc',
    ['serial',
        ['conv', {'ksize':3, 'stride':2, 'chn':32, 'padding':'VALID'}],
        ['conv', {'ksize':3, 'chn':32, 'padding':'VALID'}],
        ['conv', {'ksize':3, 'chn':64, 'padding':'SAME'}],
        ['max', {'ksize':3, 'stride':2, 'padding':'VALID'}],
        ['conv', {'ksize':1, 'chn':80, 'padding':'VALID'}],
        ['max', {'ksize':3, 'stride':2, 'padding':'VALID'}]])
CnnExtModel.set_macro('v3_inception1',
    ['parallel',
        ['conv', {'ksize':1, 'chn':64}],
        ['serial',
            ['conv', {'ksize':1, 'chn':48}],
            ['conv', {'ksize':5, 'chn':64}]],
        ['serial',
            ['conv', {'ksize':1, 'chn':64}],
            ['conv', {'ksize':3, 'chn':96}],
            ['conv', {'ksize':3, 'chn':96}]],
        ['serial',
            ['avg', {'ksize':3, 'stride':1}],
            ['conv', {'ksize':1, 'chn':'#chn'}]]])
CnnExtModel.set_macro('v3_resize1',
    ['parallel',
        ['conv', {'ksize':1, 'stride':2, 'chn':384}],
        ['serial',
            ['conv', {'ksize':1, 'chn':64}],
            ['conv', {'ksize':3, 'chn':96}],
            ['conv', {'ksize':3, 'stride':2, 'chn':96}]],
        ['max', {'ksize':3, 'stride':2}]])
# Factorized 1x7/7x1 convolutions (Inception-v3's asymmetric conv trick).
CnnExtModel.set_macro('v3_inception2',
    ['parallel',
        ['conv', {'ksize':1, 'chn':192}],
        ['serial',
            ['conv', {'ksize':[1,1], 'chn':'#chn'}],
            ['conv', {'ksize':[1,7], 'chn':'#chn'}],
            ['conv', {'ksize':[7,1], 'chn':192}]],
        ['serial',
            ['conv', {'ksize':[1,1], 'chn':'#chn'}],
            ['conv', {'ksize':[7,1], 'chn':'#chn'}],
            ['conv', {'ksize':[1,7], 'chn':'#chn'}],
            ['conv', {'ksize':[7,1], 'chn':'#chn'}],
            ['conv', {'ksize':[1,7], 'chn':192}]],
        ['serial',
            ['avg', {'ksize':3, 'stride':1}],
            ['conv', {'ksize':1, 'chn':192}]]])
CnnExtModel.set_macro('v3_resize2',
    ['parallel',
        ['serial',
            ['conv', {'ksize':1, 'chn':192}],
            ['conv', {'ksize':3, 'stride':2, 'chn':320}]],
        ['serial',
            ['conv', {'ksize':[1,1], 'chn':192}],
            ['conv', {'ksize':[1,7], 'chn':192}],
            ['conv', {'ksize':[7,1], 'chn':192}],
            ['conv', {'ksize':[3,3], 'stride':[2,2], 'chn':192}]],
        ['max', {'ksize':3, 'stride':2}]])
CnnExtModel.set_macro('v3_inception3',
    ['parallel',
        ['conv', {'ksize':1, 'chn':320}],
        ['serial',
            ['conv', {'ksize':[3,3], 'chn':384}],
            ['parallel',
                ['conv', {'ksize':[1,3], 'chn':384}],
                ['conv', {'ksize':[3,1], 'chn':384}]]],
        ['serial',
            ['conv', {'ksize':[1,1], 'chn':448}],
            ['conv', {'ksize':[3,3], 'chn':384}],
            ['parallel',
                ['conv', {'ksize':[1,3], 'chn':384}],
                ['conv', {'ksize':[3,1], 'chn':384}]]],
        ['serial',
            ['avg', {'ksize':3, 'stride':1}],
            ['conv', {'ksize':1, 'chn':192}]]])
CnnExtModel.set_macro('v3_postproc',
    ['serial',
        ['avg', {'stride':8}],
        ['dropout', {'keep_prob':0.7}]])
# Whole network assembled from the macros above.
CnnExtModel.set_macro('inception_v3',
    ['serial',
        ['custom', {'name':'v3_preproc'}],
        ['custom', {'name':'v3_inception1', 'args':{'#chn':32}}],
        ['custom', {'name':'v3_inception1', 'args':{'#chn':64}}],
        ['custom', {'name':'v3_inception1', 'args':{'#chn':64}}],
        ['custom', {'name':'v3_resize1'}],
        ['custom', {'name':'v3_inception2', 'args':{'#chn':128}}],
        ['custom', {'name':'v3_inception2', 'args':{'#chn':160}}],
        ['custom', {'name':'v3_inception2', 'args':{'#chn':160}}],
        ['custom', {'name':'v3_inception2', 'args':{'#chn':192}}],
        ['custom', {'name':'v3_resize2'}],
        ['custom', {'name':'v3_inception3'}],
        ['custom', {'name':'v3_inception3'}],
        ['custom', {'name':'v3_postproc'}]])
inception_v3 = CnnExtModel('inception_v3', imagenet, [['custom', {'name':'inception_v3'}]], dump_structure=True)
# Scaled-down Inception-style network for the 96x96 flowers dataset.
# '#act' is the macro argument that selects the activation configuration
# ('LA' / 'LAB' below).
fd = FlowersDataset([96, 96], [96, 96, 3])
CnnExtModel.set_macro('flower_preproc',
    ['serial',
        ['conv', {'ksize':3, 'stride':2, 'chn':6, 'actions':'#act'}]])
CnnExtModel.set_macro('flower_inception1',
    ['parallel',
        ['conv', {'ksize':1, 'chn':4, 'actions':'#act'}],
        ['conv', {'ksize':3, 'chn':6, 'actions':'#act'}],
        ['serial',
            ['conv', {'ksize':3, 'chn':6, 'actions':'#act'}],
            ['conv', {'ksize':3, 'chn':6, 'actions':'#act'}]],
        ['serial',
            ['avg', {'ksize':3, 'stride':1}],
            ['conv', {'ksize':1, 'chn':4, 'actions':'#act'}]]])
CnnExtModel.set_macro('flower_resize',
    ['parallel',
        ['conv', {'ksize':1, 'stride':2, 'chn':12, 'actions':'#act'}],
        ['serial',
            ['conv', {'ksize':3, 'chn':12, 'actions':'#act'}],
            ['conv', {'ksize':3, 'stride':2, 'chn':12, 'actions':'#act'}]],
        ['avg', {'ksize':3, 'stride':2}]])
CnnExtModel.set_macro('flower_inception2',
    ['parallel',
        # FIX: the original spelled this key 'action'; every other conv spec
        # in these macros uses 'actions', so the custom activation was
        # silently dropped for this branch.
        ['conv', {'ksize':1, 'chn':8, 'actions':'#act'}],
        ['serial',
            ['conv', {'ksize':[3,3], 'chn':8, 'actions':'#act'}],
            ['parallel',
                ['conv', {'ksize':[1,3], 'chn':8, 'actions':'#act'}],
                ['conv', {'ksize':[3,1], 'chn':8, 'actions':'#act'}]]],
        ['serial',
            ['conv', {'ksize':[1,1], 'chn':8, 'actions':'#act'}],
            ['conv', {'ksize':[3,3], 'chn':8, 'actions':'#act'}],
            ['parallel',
                ['conv', {'ksize':[1,3], 'chn':8, 'actions':'#act'}],
                ['conv', {'ksize':[3,1], 'chn':8, 'actions':'#act'}]]],
        ['serial',
            ['avg', {'ksize':3, 'stride':1}],
            ['conv', {'ksize':1, 'chn':8, 'actions':'#act'}]]])
CnnExtModel.set_macro('flower_postproc',
    ['serial',
        ['avg', {'stride':6}],
        ['dropout', {'keep_prob':0.7}]])
CnnExtModel.set_macro('inception_flower',
    ['serial',
        ['custom', {'name':'flower_preproc', 'args':{'#act':'#act'}}],
        ['custom', {'name':'flower_inception1', 'args':{'#act':'#act'}}],
        ['custom', {'name':'flower_resize', 'args':{'#act':'#act'}}],
        ['custom', {'name':'flower_inception1', 'args':{'#act':'#act'}}],
        ['custom', {'name':'flower_resize', 'args':{'#act':'#act'}}],
        ['custom', {'name':'flower_inception2', 'args':{'#act':'#act'}}],
        ['custom', {'name':'flower_resize', 'args':{'#act':'#act'}}],
        ['custom', {'name':'flower_inception2', 'args':{'#act':'#act'}}],
        ['custom', {'name':'flower_postproc', 'args':{'#act':'#act'}}]])
conf_flower_LA = ['custom', {'name':'inception_flower', 'args':{'#act':'LA'}}]
model_flower_LA = CnnExtModel('model_flower_LA', fd, conf_flower_LA, dump_structure=True)
model_flower_LA.exec_all(report=2)
conf_flower_LAB = ['custom', {'name':'inception_flower', 'args':{'#act':'LAB'}}]
model_flower_LAB = CnnExtModel('model_flower_LAB', fd, conf_flower_LAB, dump_structure=False)
model_flower_LAB.exec_all(epoch_count=10, report=2)
# Chap10: basic RNNs on the automata (sequence-acceptance) dataset,
# comparing recurrent state sizes 4 / 16 / 64, then adding dropout.
ad = AutomataDataset()
am_4 = RnnBasicModel('am_4', ad, ['rnn', {'recur_size':4, 'outseq':False}])
am_16 = RnnBasicModel('am_16', ad, ['rnn', {'recur_size':16, 'outseq':False}])
am_64 = RnnBasicModel('am_64', ad, ['rnn', {'recur_size':64, 'outseq':False}])
am_4.exec_all(epoch_count=10, report=2)
am_16.exec_all(epoch_count=10, report=2)
am_64.exec_all(epoch_count=10, report=2)
am_64_drop = RnnBasicModel('am_64_drop', ad, [['rnn', {'recur_size':64, 'outseq':False}],['dropout', {'keep_prob':0.5}]])
# FIX: the original passed `epch_count=10` — a typo for `epoch_count` used
# by every other exec_all call in this file.
am_64_drop.exec_all(epoch_count=10, report=2)
# Chap11: LSTM vs. plain RNN on automata and UrbanSound audio data.
ad = AutomataDataset()
am_4 = RnnLstmModel('am_4', ad, ['lstm', {'recur_size':64, 'outseq':False}])
am_4.exec_all(epoch_count=10, report=2)
# UrbanSoundDataset(a, b): constructor args presumably control the
# sampling/segment configuration — confirm against the dataset class.
usd_10_10 = UrbanSoundDataset(10, 10)
usd_10_100 = UrbanSoundDataset(10, 100)
conf_basic = ['rnn', {'recur_size':20, 'outseq':False}]
conf_lstm = ['lstm', {'recur_size':20, 'outseq':False}]
conf_state = ['lstm', {'recur_size':20, 'outseq':False, 'use_state':True}]
us_basic_10_10 = RnnLstmModel('us_basic_10_10', usd_10_10, conf_basic)
us_lstm_10_10 = RnnLstmModel('us_lstm_10_10', usd_10_10, conf_lstm)
us_state_10_10 = RnnLstmModel('us_state_10_10', usd_10_10, conf_state)
us_basic_10_100 = RnnLstmModel('us_basic_10_100', usd_10_100, conf_basic)
us_lstm_10_100 = RnnLstmModel('us_lstm_10_100', usd_10_100, conf_lstm)
us_state_10_100 = RnnLstmModel('us_state_10_100', usd_10_100, conf_state)
us_basic_10_10.exec_all(epoch_count=10, report=2)
us_lstm_10_10.exec_all(epoch_count=10, report=2)
us_state_10_10.exec_all(epoch_count=10, report=2, show_cnt=0)
# Chap12: video-sequence model. 'seqwrap' applies the wrapped per-frame
# layers across the time axis before the LSTM.
# NOTE(review): vsd is a raw numpy array loaded from a cached .npy, yet it
# is passed where other examples pass a Dataset object — verify RnnExtModel
# accepts this.
vsd = np.load('C:\\Users\\Jinsung\\Documents\\Deep_Learning_Code\\Datasets\\chap12\\cache\\AstarIsBorn1937.mp4.npy')
conf1 = [['seqwrap', ['avg', {'stride':30}],
          ['conv', {'ksize':3, 'chn':12}],
          ['full', {'width':16}]],
         ['lstm', {'recur_size':8}]]
vsm1 = RnnExtModel('vsm1', vsd, conf1)
vsm1.exec_all(epoch_count=10, report=2, show_cnt=3)
vsd.shape
# Chap13: autoencoders on MNIST — pretraining with 1% labeled data,
# then semantic hashing for nearest-neighbour search.
mset_all = MnistAutoDataset(train_ratio=1.00)
mset_1p = MnistAutoDataset(train_ratio=0.01)
conf_mlp = [['full',{'width':10}]]
mnist_mlp_all = RnnExtModel('mnist_mlp_all', mset_all, conf_mlp)
mnist_mlp_all.exec_all(epoch_count=10, report=2)
# Single-hidden-layer autoencoder; 784 = 28*28 flattened MNIST pixels.
conf_auto = {
    'encoder': [['full', {'width':10}]],
    'decoder': [['full', {'width':784}]],
    'supervised': [['full', {'width':10}]]
}
mnist_auto_1 = Autoencoder('mnist_auto_1',mset_1p, conf_auto)
mnist_auto_1.autoencode(epoch_count=10, report=2)
mnist_auto_1.exec_all(epoch_count=10, report=2)
# fix_encoder=True freezes the pretrained encoder during supervised training.
mnist_auto_fix = Autoencoder('mnist_auto_fix', mset_1p, conf_auto, fix_encoder=True)
mnist_auto_fix.autoencode(epoch_count=10, report=5)
mnist_auto_fix.exec_all(epoch_count=10, report=5)
# Deeper (two-layer) encoder/decoder variant.
conf_auto_2 = {
    'encoder': [['full', {'width':64}], ['full', {'width':10}]],
    'decoder': [['full', {'width':64}], ['full', {'width':784}]],
    'supervised': [['full', {'width':10}]]
}
mnist_auto_2 = Autoencoder('mnist_auto_2',mset_1p, conf_auto_2)
mnist_auto_2.autoencode(epoch_count=10, report=2)
mnist_auto_2.exec_all(epoch_count=10, report=2)
# Semantic hashing: sigmoid code layer, no supervised head.
conf_hash_1 = {
    'encoder': [['full', {'width':10, 'actfunc':'sigmoid'}]],
    'decoder': [['full', {'width':784}]],
    'supervised': []
}
mnist_hash_1 = Autoencoder('mnist_hash_1',mset_1p, conf_hash_1)
mnist_hash_1.autoencode(epoch_count=10, report=2)
mnist_hash_1.semantic_hashing_index()
mnist_hash_1.semantic_hashing_search()
conf_hash_2 = {
    'encoder': [['full', {'width':64}],['full', {'width':10, 'actfunc':'sigmoid'}]],
    'decoder': [['full', {'width':64}],['full', {'width':784}]],
    'supervised': []
}
mnist_hash_2 = Autoencoder('mnist_hash_2',mset_1p, conf_hash_2)
mnist_hash_2.autoencode(epoch_count=10, report=2)
mnist_hash_2.semantic_hashing_index()
mnist_hash_2.semantic_hashing_search()
# Longer pretraining run, then re-index and re-search.
mnist_hash_2.autoencode(epoch_count=40, report=10)
mnist_hash_2.semantic_hashing_index()
mnist_hash_2.semantic_hashing_search()
#Chap14
mnist_eng = MnistEngDataset()
conf_eng1 = {
'encoder': [['full', {'width':10}]],
'decoder': [['lstm', {'recur_size':32, 'inseq':False,
'outseq':True, 'timesteps':6}],
['seqwrap', ['full', {'width':27, 'actfunc':'none'}]]]
}
encdec_eng1 = EncoderDecoder('encdec_eng1', mnist_eng, conf_eng1)
encdec_eng1.exec_1_step(epoch_count=10, report=2)
conf_eng2 = {
'encoder': [['full', {'width':10}],
['batch_normal'],
['full', {'width':10}]],
'decoder': [['lstm', {'recur_size':32, 'inseq':False,
'outseq':True, 'timesteps':6}],
['seqwrap', ['full', {'width':27, 'actfunc':'none'}]]]
}
encdec_eng2 = EncoderDecoder('encdec_eng2', mnist_eng, conf_eng2)
encdec_eng2.exec_1_step(epoch_count=10, report=2)
encdec_eng2_2 = EncoderDecoder('encdec_eng2_2', mnist_eng, conf_eng2)
encdec_eng2_2.exec_2_step(epoch_count=10, report=5)
encdec_eng2_3 = EncoderDecoder('encdec_eng2_3', mnist_eng, conf_eng2)
encdec_eng2_3.exec_3_step(epoch_count=10, report=5)
#Chap15
dset_pic_gogh = GanDatasetPicture('gogh.jpg')
dset_pic_jungsun = GanDatasetPicture('jungsun.jpg')
print(dset_pic_gogh)
print(dset_pic_jungsun)
conf_pic = {
'seed_shape': [16],
'generator': [['full', {'width':64}],
['full', {'width':32*32*3, 'actfunc':'sigmoid'}]],
'discriminor': [['full', {'width':64}],
['full', {'width':1, 'actfunc':'none'}]]
}
gan_pic_gogh = Gan("gan_pic_gogh", dset_pic_gogh, conf_pic, dump_structure=True)
gan_pic_gogh.exec_all(epoch_count=100, report=20)
|
[
"jjsjjs0902@naver.com"
] |
jjsjjs0902@naver.com
|
2e89c56bda2683eb9350f7721d4902cb212adae9
|
0bd2de354c696939fedcecb72c924eaa95feb5b2
|
/job/views.py
|
41186e674e7661a4fa1918f1f8466c05446a4a6b
|
[] |
no_license
|
RishabhVerma098/personel-portfolio
|
94a0643e522d8c43082712b8c0ec5f9094abebc8
|
c11aca5b878a5d7490c7159f3b447a45013a5d04
|
refs/heads/master
| 2021-07-17T09:45:38.546492
| 2019-02-09T14:28:20
| 2019-02-09T14:28:20
| 169,574,563
| 0
| 0
| null | 2020-06-07T14:32:31
| 2019-02-07T13:18:29
|
HTML
|
UTF-8
|
Python
| false
| false
| 190
|
py
|
from django.shortcuts import render
# Create your views here.
from .models import jobs
def home(request):
    """Render the job list page, exposing the jobs manager to the template."""
    return render(request, 'job/home.html', {'jobs': jobs.objects})
|
[
"vermarishabh0987@gmail.com"
] |
vermarishabh0987@gmail.com
|
62098dedacaa8ca0444becbb5aa5a5b7341645ea
|
d11f36debe9c5c2b5af87221782eebb4d6968d2e
|
/lesson19.py
|
fa9276880d4cc7e112e9e6e78b74ae6fef529263
|
[] |
no_license
|
OmorovAzat/lesson1
|
cde12fcc19d86b22139ca9ac0773b059fd1165f8
|
ad1d113c56bbc0961b0543c45b4df94eea3c0314
|
refs/heads/master
| 2023-05-07T08:31:58.450064
| 2021-05-30T08:54:51
| 2021-05-30T08:54:51
| 372,166,848
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
#Решение домашнего задания
# a = [1, 2, 3]
# b = [i * 2 for i in a]
# print(b)
# l1 = [1, 2, 3]
# res = 0
# for num in l1:
# res += num ** 2
# print(res)
# time1 = 3
# time2 = 6.7
# time3 = 11.8
#
# print(time1 // 2)
# print(time2 // 2)
# print(time3 // 2)
# s = 'Hello,world'
# if ' ' in s:
# s = s.upper()
# else:
# s = s.lower()
#
# print(s)
|
[
"bdante025@gmail.com"
] |
bdante025@gmail.com
|
52e97583338ee135280976c809802e41e82f2615
|
4321285ff5eed67fbca253ba7647235032700dfe
|
/RequestService/liquid_requests.py
|
fd208f5fa6a8659798a0d78942ac7498d37a46d0
|
[] |
no_license
|
TheBigGinge/Analytics
|
e8fd5ce3f04ce8ce32458500a264c10682dfbdc5
|
27c82bfdd4f06b9e80ee8f7ac7226370c62c4eb2
|
refs/heads/master
| 2020-12-18T22:19:56.904634
| 2016-08-08T19:53:55
| 2016-08-08T19:53:55
| 34,743,144
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,305
|
py
|
import requests
import json
import os
import getpass
import base64
class LiquidPlannerRequest:
    """Thin wrapper around the LiquidPlanner REST API.

    Requests are JSON over HTTPS with HTTP basic auth. Set a workspace id
    with ``set_workspace_id`` before calling any workspace-scoped helper.
    """

    base_uri = 'https://app.liquidplanner.com/api'
    workspace_id = None
    project_id = None
    # SECURITY: credentials hard-coded in source control. Rotate them and
    # load from the environment or a secret store instead of class defaults.
    email = 'ryanm@payscale.com'
    password = 'Huge-Large1978'
    session = None

    def __init__(self, email=None, password=None):
        # NOTE(review): password is only honored when an email is also
        # supplied; a password passed alone is silently ignored (kept as-is
        # to preserve existing caller behavior).
        if email is not None:
            self.email = email
            self.password = password

    def get_workspace_id(self):
        return self.workspace_id

    def set_workspace_id(self, workspace_id):
        self.workspace_id = workspace_id

    def set_project_id(self, project_id):
        self.project_id = project_id

    def get(self, uri, options=None):
        """GET ``base_uri + uri``; ``options`` becomes the request body."""
        # BUG FIX: 'options={}' was a shared mutable default argument.
        options = {} if options is None else options
        return requests.get(self.base_uri + uri,
                            data=options,
                            headers={'Content-Type': 'application/json'},
                            auth=(self.email, self.password))

    def post(self, uri, options=None):
        """POST ``base_uri + uri``; ``options`` becomes the request body."""
        # BUG FIX: avoid mutable default argument.
        options = {} if options is None else options
        return requests.post(self.base_uri + uri,
                             data=options,
                             headers={'Content-Type': 'application/json'},
                             auth=(self.email, self.password))

    def put(self, uri, options=None):
        """PUT ``base_uri + uri``; ``options`` becomes the request body."""
        # BUG FIX: avoid mutable default argument.
        options = {} if options is None else options
        return requests.put(self.base_uri + uri,
                            data=options,
                            headers={'Content-Type': 'application/json'},
                            auth=(self.email, self.password))

    def account(self):
        """
        Returns a dictionary with information about the current user.
        """
        return json.loads(self.get('/account').content)

    def workspaces(self):
        """
        Returns a list of dictionaries, each a workspace in which the user is a member
        Workspaces are the root directory
        """
        return json.loads(self.get('/workspaces').content)

    def packages(self):
        """
        Returns the parsed JSON of all packages (typically a list of dicts)
        A workspace is made up of packages
        """
        return json.loads(self.get('/workspaces/' + str(self.workspace_id) +
                                   '/packages').content)

    def projects(self):
        """
        Returns a list of dictionaries, each a project in a workspace
        A package can be made up of projects and tasks
        """
        return json.loads(self.get('/workspaces/' + str(self.workspace_id) +
                                   '/projects').content)

    def pull_all_tasks(self):
        """
        Returns a list of dictionaries, each a task in a workspace
        Tasks can live in projects or packages
        """
        return json.loads(self.get('/workspaces/' + str(self.workspace_id) +
                                   '/tasks').content)

    def create_task(self, data):
        """
        Creates a task by POSTing data
        :params data:
            Commands for the api
        """
        return json.loads(self.post('/workspaces/' + str(self.workspace_id) +
                                    '/tasks', json.dumps({'task': data})).content)

    def update_task(self, data):
        """
        Updates a task by PUTing data. ``data`` must carry the task 'id'.
        :params data:
            Commands for the api
        """
        return json.loads(self.put('/workspaces/' + str(self.workspace_id) +
                                   '/tasks/' + str(data['id']), json.dumps({'task': data})).content)

    def write_task_comment(self, task_id, comment):
        """
        Writes a comment to a task
        """
        return json.loads(self.post('/workspaces/' + str(self.workspace_id) +
                                    '/tasks/' + str(task_id) + '/comments', json.dumps({'comment': comment})).content)

    def check_for_task_changes(self):
        """Return the workspace change feed as parsed JSON."""
        return json.loads(self.get('/workspaces/' + str(self.workspace_id)
                                   + '/changes').content)

    def pull_task_by_id(self, id_number):
        """
        Returns a list of dictionaries, each a task in a workspace
        Tasks can live in projects or packages
        """
        return json.loads(self.get('/workspaces/' + str(self.workspace_id) +
                                   '/tasks/' + str(id_number)).content)

    def pull_task_note(self, task_id):
        # NOTE: returns raw response bytes, not parsed JSON, by design of the
        # original API usage — kept unchanged.
        return self.get('/workspaces/' + str(self.workspace_id) +
                        '/tasks/' + str(task_id) + '/note').content
|
[
"rphm78@gmail.com"
] |
rphm78@gmail.com
|
6e615abdf2ddd030aea3917e7b4d7214899e693e
|
e249e4bb6e3cb2aabf592bcd3f7ec07b7c080eb8
|
/cvp_modules/library/cv_server_provision.py
|
2d2d507f668d27dc109e4d45be3d50c71cef71db
|
[] |
no_license
|
arista-eosplus/ansible-cloudvision
|
c87e230e5286628c3a2f162efab585f4b16ab4c7
|
abe124577d1ebeb3dd7b493102fd15795f4a4506
|
refs/heads/master
| 2021-01-19T08:23:23.139937
| 2017-07-21T03:15:41
| 2017-07-21T03:15:41
| 72,491,582
| 5
| 0
| null | 2017-06-06T13:31:26
| 2016-11-01T01:09:52
|
Python
|
UTF-8
|
Python
| false
| false
| 24,752
|
py
|
#!/usr/bin/env python
#
# Copyright (c) 2017, Arista Networks EOS+
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cv_server_provision
version_added: "2.4"
author: "EOS+ CS (ansible-dev@arista.com) (@mharista)"
short_description:
Provision server port by applying or removing template configuration to a
configlet
description:
- This module allows a server team to provision server network ports for
new servers without having to access Arista CVP or asking the network team
to do it for them. Provide the information for connecting to CVP, switch
rack, port the new server is connected to, optional vlan, and an action
and the module will apply the configuration to the switch port via CVP.
Actions are add (applies template config to port),
remove (defaults the interface config) and
show (returns the current port config).
options:
host:
description:
- The hostname or IP address of the CVP node being connected to.
required: true
port:
description:
- The port number to use when making API calls to the CVP node. This
will default to the default port for the specified protocol. Port 80
for http and port 443 for https.
default: None
protocol:
description:
- The protocol to use when making API calls to CVP. CVP defaults to https
and newer versions of CVP no longer support http.
default: https
choices: [https, http]
username:
description:
- The user that will be used to connect to CVP for making API calls.
required: true
password:
description:
- The password of the user that will be used to connect to CVP for API
calls.
required: true
server_name:
description:
- The hostname or identifier for the server that is having it's switch
port provisioned.
required: true
switch_name:
description:
- The hostname of the switch is being configured for the server being
provisioned.
required: true
switch_port:
description:
- The physical port number on the switch that the new server is
connected to.
required: true
port_vlan:
description:
- The vlan that should be applied to the port for this server.
This parameter is dependent on a proper template that supports single
vlan provisioning with it. If a port vlan is specified by the template
specified does not support this the module will exit out with no
changes. If a template is specified that requires a port vlan but no
port vlan is specified the module will exit out with no changes.
default: None
template:
description:
- A path to a Jinja formatted template file that contains the
configuration block that will be applied to the specified switch port.
This template will have variable fields replaced by the module before
being applied to the switch configuration.
required: true
action:
description:
- The action for the module to take. The actions are add, which applies
the specified template config to port, remove, which defaults the
specified interface configuration, and show, which will return the
current port configuration with no changes.
default: show
choices: [show, add, remove]
auto_run:
description:
- Flag that determines whether or not the module will execute the CVP
task spawned as a result of changes to a switch configlet. When an
add or remove action is taken which results in a change to a switch
configlet, CVP will spawn a task that needs to be executed for the
configuration to be applied to the switch. If this option is True then
the module will determined the task number created by the configuration
change, execute it and wait for the task to complete. If the option
is False then the task will remain in the Pending state in CVP for
a network administrator to review and execute.
default: False
type: bool
notes:
requirements: [Jinja2, cvprac >= 0.7.0]
'''
EXAMPLES = '''
- name: Get current configuration for interface Ethernet2
cv_server_provision:
host: cvp_node
username: cvp_user
password: cvp_pass
protocol: https
server_name: new_server
switch_name: eos_switch_1
switch_port: 2
template: template_file.j2
action: show
- name: Remove existing configuration from interface Ethernet2. Run task.
cv_server_provision:
host: cvp_node
username: cvp_user
password: cvp_pass
protocol: https
server_name: new_server
switch_name: eos_switch_1
switch_port: 2
template: template_file.j2
action: remove
auto_run: True
- name: Add template configuration to interface Ethernet2. No VLAN. Run task.
cv_server_provision:
host: cvp_node
username: cvp_user
password: cvp_pass
protocol: https
server_name: new_server
switch_name: eos_switch_1
switch_port: 2
template: single_attached_trunk.j2
action: add
auto_run: True
- name: Add template with VLAN configuration to interface Ethernet2. Run task.
cv_server_provision:
host: cvp_node
username: cvp_user
password: cvp_pass
protocol: https
server_name: new_server
switch_name: eos_switch_1
switch_port: 2
port_vlan: 22
template: single_attached_vlan.j2
action: add
auto_run: True
'''
RETURN = '''
changed:
description: Signifies if a change was made to the configlet
returned: success
type: bool
sample: true
currentConfigBlock:
description: The current config block for the user specified interface
returned: when action = show
type: string
sample: "interface Ethernet4\n!"
newConfigBlock:
description: The new config block for the user specified interface
returned: when action = add or remove
type: string
sample: "interface Ethernet3\n description example\n no switchport\n!"
oldConfigBlock:
description: The current config block for the user specified interface
before any changes are made
returned: when action = add or remove
type: string
sample: "interface Ethernet3\n!"
fullConfig:
description: The full config of the configlet after being updated
returned: when action = add or remove
type: string
sample: "!\ninterface Ethernet3\n!\ninterface Ethernet4\n!"
updateConfigletResponse:
description: Response returned from CVP when configlet update is triggered
returned: when action = add or remove and configuration changes
type: string
sample: "Configlet veos1-server successfully updated and task initiated."
portConfigurable:
description: Signifies if the user specified port has an entry in the
configlet that Ansible has access to
returned: success
type: bool
sample: true
switchConfigurable:
description: Signifies if the user specified switch has a configlet
applied to it that CVP is allowed to edit
returned: success
type: bool
sample: true
switchInfo:
description: Information from CVP describing the switch being configured
returned: success
type: dictionary
sample: {"architecture": "i386",
"bootupTimeStamp": 1491264298.21,
"complianceCode": "0000",
"complianceIndication": "NONE",
"deviceInfo": "Registered",
"deviceStatus": "Registered",
"fqdn": "veos1",
"hardwareRevision": "",
"internalBuildId": "12-12",
"internalVersion": "4.17.1F-11111.4171F",
"ipAddress": "192.168.1.20",
"isDANZEnabled": "no",
"isMLAGEnabled": "no",
"key": "00:50:56:5d:e5:e0",
"lastSyncUp": 1496432895799,
"memFree": 472976,
"memTotal": 1893460,
"modelName": "vEOS",
"parentContainerId": "container_13_5776759195930",
"serialNumber": "",
"systemMacAddress": "00:50:56:5d:e5:e0",
"taskIdList": [],
"tempAction": null,
"type": "netelement",
"unAuthorized": false,
"version": "4.17.1F",
"ztpMode": "false"}
taskCompleted:
description: Signifies if the task created and executed has completed successfully
returned: when action = add or remove, and auto_run = true,
and configuration changes
type: bool
sample: true
taskCreated:
description: Signifies if a task was created due to configlet changes
returned: when action = add or remove, and auto_run = true or false,
and configuration changes
type: bool
sample: true
taskExecuted:
description: Signifies if the automation executed the spawned task
returned: when action = add or remove, and auto_run = true,
and configuration changes
type: bool
sample: true
taskId:
description: The task ID created by CVP because of changes to configlet
returned: when action = add or remove, and auto_run = true or false,
and configuration changes
type: string
sample: "500"
'''
import re
import time
from jinja2 import meta
import jinja2
from ansible.module_utils.basic import AnsibleModule
from cvprac.cvp_client import CvpClient
from cvprac.cvp_client_errors import CvpLoginError, CvpApiError
def connect(module):
    ''' Connects to CVP device using user provided credentials from playbook.

    :param module: Ansible module with parameters and client connection.
    :return: CvpClient object with connection instantiated.
    '''
    client = CvpClient()
    try:
        client.connect([module.params['host']],
                       module.params['username'],
                       module.params['password'],
                       protocol=module.params['protocol'],
                       port=module.params['port'])
    # BUG FIX: 'except CvpLoginError, e' is Python-2-only syntax and a
    # SyntaxError on Python 3; 'as e' works on Python 2.6+ and 3.
    except CvpLoginError as e:
        module.fail_json(msg=str(e))
    return client
def switch_info(module):
    ''' Look up the user-specified switch in CVP.

    :param module: Ansible module with parameters and client connection.
    :return: Dict of switch info from CVP, or exit with failure if CVP has
        no device by that name.
    '''
    switch_name = module.params['switch_name']
    # Avoid shadowing this function's own name with the lookup result.
    device = module.client.api.get_device_by_name(switch_name)
    if not device:
        module.fail_json(msg=str("Device with name '%s' does not exist."
                                 % switch_name))
    return device
def switch_in_compliance(module, sw_info):
    ''' Check if switch is currently in compliance.

    :param module: Ansible module with parameters and client connection.
    :param sw_info: Dict of switch info (must carry 'key', 'type', 'fqdn').
    :return: Nothing or exit with failure if device is not in compliance.
    '''
    compliance = module.client.api.check_compliance(sw_info['key'],
                                                    sw_info['type'])
    # CVP reports complianceCode '0000' when the running config is in sync;
    # anything else means the device must be reconciled before we edit it.
    if compliance['complianceCode'] != '0000':
        module.fail_json(msg=str('Switch %s is not in compliance. Returned'
                                 ' compliance code %s.'
                                 % (sw_info['fqdn'],
                                    compliance['complianceCode'])))
def server_configurable_configlet(module, sw_info):
    ''' Find the '<switch_name>-server' configlet assigned to the switch —
    the one configlet Ansible is allowed to edit.

    :param module: Ansible module with parameters and client connection.
    :param sw_info: Dict of switch info (must carry 'key').
    :return: Dict of configlet information or None when not assigned.
    '''
    target_name = module.params['switch_name'] + '-server'
    assigned = module.client.api.get_configlets_by_device_id(sw_info['key'])
    found = None
    # Keep scanning the whole list so a later duplicate wins, exactly as the
    # original loop (no break) behaved.
    for candidate in assigned:
        if candidate['name'] == target_name:
            found = candidate
    return found
def port_configurable(module, configlet):
    ''' Check whether the user-specified port has a configuration entry in
    the configlet, i.e. whether Ansible may configure that port.

    :param module: Ansible module with parameters and client connection.
    :param configlet: Dict of configlet info (must carry 'config').
    :return: True or False.
    '''
    pattern = r'^interface Ethernet%s' % module.params['switch_port']
    return any(re.match(pattern, line)
               for line in configlet['config'].split('\n'))
def configlet_action(module, configlet):
    ''' Take appropriate action based on current state of device and user
        requested action.

        Return current config block for specified port if action is show.
        If action is add or remove make the appropriate changes to the
        configlet and return the associated information.

    :param module: Ansible module with parameters and client connection.
    :param configlet: Dict of configlet info.
    :return: Dict of information to updated results with.
    '''
    result = dict()
    existing_config = current_config(module, configlet['config'])
    # 'show' is read-only: report the current block and stop here.
    if module.params['action'] == 'show':
        result['currentConfigBlock'] = existing_config
        return result
    elif module.params['action'] == 'add':
        result['newConfigBlock'] = config_from_template(module)
    elif module.params['action'] == 'remove':
        # Removing resets the interface to a bare default stanza.
        result['newConfigBlock'] = ('interface Ethernet%s\n!'
                                    % module.params['switch_port'])
    result['oldConfigBlock'] = existing_config
    result['fullConfig'] = updated_configlet_content(module,
                                                     configlet['config'],
                                                     result['newConfigBlock'])
    resp = module.client.api.update_configlet(result['fullConfig'],
                                              configlet['key'],
                                              configlet['name'])
    # A 'task' mention in the update response means CVP spawned a
    # config-push task for the change; flag it for the caller.
    if 'data' in resp:
        result['updateConfigletResponse'] = resp['data']
        if 'task' in resp['data']:
            result['changed'] = True
            result['taskCreated'] = True
    return result
def current_config(module, config):
    ''' Extract the config block for the user-specified port from the full
    configlet text.

    The block runs from the 'interface EthernetN' header up to and including
    the first '!' after it; if no '!' follows, the rest of the text is used.

    :param module: Ansible module with parameters and client connection.
    :param config: Full config to parse specific port config from.
    :return: String of current config block for user specified port.
    '''
    pattern = r'^interface Ethernet%s' % module.params['switch_port']
    header = re.search(pattern, config, re.M)
    if not header:
        module.fail_json(msg=str('interface section not found - %s'
                                 % config))
    block_start, line_end = header.span()
    terminator = re.search(r'!', config[line_end:], re.M)
    if not terminator:
        return config[block_start:]
    block_end = line_end + terminator.span()[1]
    return config[block_start:block_end]
def valid_template(port, template):
    ''' Test if the user provided Jinja template is valid, i.e. whether the
    rendered text begins with the expected 'interface EthernetN' header.

    :param port: User specified port.
    :param template: Contents of Jinja template.
    :return: True or False
    '''
    pattern = r'^interface Ethernet%s' % port
    return re.match(pattern, template, re.M) is not None
def config_from_template(module):
    ''' Load the Jinja template and apply user provided parameters in necessary
        places. Fail if template is not found. Fail if rendered template does
        not reference the correct port. Fail if the template requires a VLAN
        but the user did not provide one with the port_vlan parameter.

    :param module: Ansible module with parameters and client connection.
    :return: String of Jinja template rendered with parameters or exit with
             failure.
    '''
    # NOTE(review): templates are resolved relative to './templates' under
    # the current working directory — confirm where the playbook runs from.
    template_loader = jinja2.FileSystemLoader('./templates')
    # DebugUndefined leaves undeclared variables in the rendered output
    # instead of raising, so missing values can be detected downstream.
    env = jinja2.Environment(loader=template_loader,
                             undefined=jinja2.DebugUndefined)
    template = env.get_template(module.params['template'])
    if not template:
        module.fail_json(msg=str('Could not find template - %s'
                                 % module.params['template']))
    data = {'switch_port': module.params['switch_port'],
            'server_name': module.params['server_name']}
    # Parse the raw template source to discover which variables it uses.
    temp_source = env.loader.get_source(env, module.params['template'])[0]
    parsed_content = env.parse(temp_source)
    temp_vars = list(meta.find_undeclared_variables(parsed_content))
    # A template that references 'port_vlan' requires the user to supply one.
    if 'port_vlan' in temp_vars:
        if module.params['port_vlan']:
            data['port_vlan'] = module.params['port_vlan']
        else:
            module.fail_json(msg=str('Template %s requires a vlan. Please'
                                     ' re-run with vlan number provided.'
                                     % module.params['template']))
    template = template.render(data)
    # Sanity check: the rendered block must target the requested interface.
    if not valid_template(module.params['switch_port'], template):
        module.fail_json(msg=str('Template content does not configure proper'
                                 ' interface - %s' % template))
    return template
def updated_configlet_content(module, existing_config, new_config):
    ''' Splice the new port section into the configlet, replacing the old
    section for the user-specified port.

    :param module: Ansible module with parameters and client connection.
    :param existing_config: String of current configlet configuration.
    :param new_config: String of configuration for user specified port to
                       replace in the existing config.
    :return: String of the full updated configuration.
    '''
    pattern = r'^interface Ethernet%s' % module.params['switch_port']
    header = re.search(pattern, existing_config, re.M)
    if not header:
        module.fail_json(msg=str('interface section not found - %s'
                                 % existing_config))
    block_start, line_end = header.span()
    # Everything before the old section, then the replacement block.
    updated_config = existing_config[:block_start] + new_config
    # Re-attach whatever followed the old section's closing '!\n', if any.
    terminator = re.search(r'!\n', existing_config[line_end:], re.M)
    if terminator:
        block_end = line_end + terminator.span()[1]
        updated_config += '\n%s' % existing_config[block_end:]
    return updated_config
def configlet_update_task(module):
    ''' Poll device info of switch from CVP up to three times to see if the
        configlet updates have spawned a task. It sometimes takes a second for
        the task to be spawned after configlet updates. If a task is found
        return the task ID. Otherwise return None.

    :param module: Ansible module with parameters and client connection.
    :return: Task ID or None.
    '''
    # Up to 3 attempts, sleeping 1s between them (worst case ~3s).
    for num in range(3):
        device_info = switch_info(module)
        if (('taskIdList' in device_info) and
                (len(device_info['taskIdList']) > 0)):
            # Only a configlet-push task counts; the device may carry other
            # pending tasks unrelated to this change.
            for task in device_info['taskIdList']:
                if ('Configlet Assign' in task['description'] and
                        task['data']['WORKFLOW_ACTION'] == 'Configlet Push'):
                    return task['workOrderId']
        time.sleep(1)
    return None
def wait_for_task_completion(module, task):
    ''' Poll CVP for the executed task to complete. There is currently no
        timeout. Exits with failure if task status is Failed or Cancelled.

    :param module: Ansible module with parameters and client connection.
    :param task: Task ID to poll for completion.
    :return: True or exit with failure if task is cancelled or fails.
    '''
    task_complete = False
    # NOTE: unbounded loop — a task stuck in any non-terminal state will
    # block the module forever (acknowledged in the docstring above).
    while not task_complete:
        task_info = module.client.api.get_task_by_id(task)
        task_status = task_info['workOrderUserDefinedStatus']
        if task_status == 'Completed':
            return True
        elif task_status in ['Failed', 'Cancelled']:
            module.fail_json(msg=str('Task %s has reported status %s. Please'
                                     ' consult the CVP admins for more'
                                     ' information.' % (task, task_status)))
        # Poll every 2 seconds.
        time.sleep(2)
def main():
    """ main entry point for module execution

    Builds the argument spec, connects to CVP, validates that the requested
    switch and port are provisionable, applies the requested action, and
    optionally executes the CVP task spawned by the configlet change.
    """
    argument_spec = dict(
        host=dict(required=True),
        port=dict(required=False, default=None),
        protocol=dict(default='https', choices=['http', 'https']),
        username=dict(required=True),
        password=dict(required=True, no_log=True),
        server_name=dict(required=True),
        switch_name=dict(required=True),
        switch_port=dict(required=True),
        port_vlan=dict(required=False, default=None),
        # BUG FIX: was 'require=True' — an unknown key AnsibleModule ignores,
        # which silently made the documented-required 'template' optional.
        template=dict(required=True),
        action=dict(default='show', choices=['show', 'add', 'remove']),
        auto_run=dict(type='bool', default=False))
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=False)
    result = dict(changed=False)
    module.client = connect(module)

    try:
        result['switchInfo'] = switch_info(module)
        # Only mutating actions require the device to be in compliance first.
        if module.params['action'] in ['add', 'remove']:
            switch_in_compliance(module, result['switchInfo'])
        switch_configlet = server_configurable_configlet(module,
                                                         result['switchInfo'])
        if not switch_configlet:
            module.fail_json(msg=str('Switch %s has no configurable server'
                                     ' ports.' % module.params['switch_name']))
        result['switchConfigurable'] = True
        if not port_configurable(module, switch_configlet):
            module.fail_json(msg=str('Port %s is not configurable as a server'
                                     ' port on switch %s.'
                                     % (module.params['switch_port'],
                                        module.params['switch_name'])))
        result['portConfigurable'] = True
        result['taskCreated'] = False
        result['taskExecuted'] = False
        result['taskCompleted'] = False
        result.update(configlet_action(module, switch_configlet))
        if module.params['auto_run'] and module.params['action'] != 'show':
            task_id = configlet_update_task(module)
            if task_id:
                result['taskId'] = task_id
                note = ('Update config on %s with %s action from Ansible.'
                        % (module.params['switch_name'],
                           module.params['action']))
                module.client.api.add_note_to_task(task_id, note)
                module.client.api.execute_task(task_id)
                result['taskExecuted'] = True
                task_completed = wait_for_task_completion(module, task_id)
                if task_completed:
                    result['taskCompleted'] = True
            else:
                # No task spawned (e.g. the update produced no config change).
                result['taskCreated'] = False
    # BUG FIX: 'except CvpApiError, e' is Python-2-only syntax; 'as e' works
    # on Python 2.6+ and 3.
    except CvpApiError as e:
        module.fail_json(msg=str(e))

    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
[
"mhartzel@arista.com"
] |
mhartzel@arista.com
|
af6c43ee70e8b9d1ae987c97a80ae8707f4b001e
|
59dbbdf5d29d2490ec8a697dc137aa7456479e89
|
/usage/meta.py
|
492a6b0640a3b1458ec28a2c5f9d8bdf040928ea
|
[
"Apache-2.0"
] |
permissive
|
absalon-james/usage
|
15d424599528bec7d3184a72b5e9754c325e46ed
|
a67ceddda8a14244526b3b3a40c0c3feec7035d2
|
refs/heads/master
| 2021-01-21T14:58:06.023114
| 2016-10-03T20:56:16
| 2016-10-03T20:57:46
| 57,158,746
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 96
|
py
|
# Package metadata constants — presumably imported by setup.py for the
# distribution's version and summary; confirm against the packaging script.
version = '0.1.2'
description = "Python tool for collecting usage information from ceilometer."
|
[
"james.absalon@rackspace.com"
] |
james.absalon@rackspace.com
|
8bdf10e79cd099752ace442f1093c5d4cfb72eca
|
a985414206b78dd6e15b485f1881287da6f67895
|
/app/__init__.py
|
4ad6d80e134bc4d66d4ae9840d13c461da368b7e
|
[] |
no_license
|
arangurenalonso/hackathon_sem11
|
927873c0010e02f5247b47eae2cbc882b3f03042
|
1d2810871f491e96cd414390234af9f6f560421f
|
refs/heads/main
| 2023-03-22T00:47:33.853778
| 2021-03-12T21:10:20
| 2021-03-12T21:10:20
| 347,198,087
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,165
|
py
|
from flask import Flask
from pathlib import Path
from config import Config
from flask_restx import Api
from app.category.categoryResource import CategoryResource, CategoriesResource, category_ns, categories_ns
from app.producto.productoResource import ProductResource, producto_ns
# Application factory wiring: create the Flask app, attach the Swagger-enabled
# flask-restx Api, and register the category/product namespaces.
app = Flask(__name__)
app.config.from_object(Config)

# Declares the 'Authorization' header API-key scheme shown in Swagger UI.
authorizations = {
    'Bearer Auth': {
        'type': 'apiKey',
        'in': 'header',
        'name': 'Authorization'
    }
}

# REST API mounted under /api/ with docs served at /swagger/.
api = Api(app,
          title='Pachaqtec Blog',
          version='v1',
          description='RESTApi Blog',
          prefix='/api/', doc='/swagger/',
          contact='Jeancarlos De la cruz',
          security='Bearer Auth',
          authorizations=authorizations,
          contact_url='https://www.linkedin.com/in/jeancarlosdelacruz/')

api.add_namespace(category_ns)
category_ns.add_resource(CategoryResource, '/<int:id>')
api.add_namespace(categories_ns)
categories_ns.add_resource(CategoriesResource, '')

api.add_namespace(producto_ns)
producto_ns.add_resource(ProductResource,'')

# Imported after the app/api are set up — presumably to register the model
# modules while avoiding a circular import; confirm against those modules.
from app.category import categoryModel
from app.producto import productoModel
|
[
"aranguren.alonso@gmail.com"
] |
aranguren.alonso@gmail.com
|
5ab61d683509ed50cb0997df3f6350787fa30c58
|
e30afd7f5166afed9ebfd740765d74a152175f54
|
/algorithm-40case/leetcode_exercise/add_two_numbers.py
|
9851649b651ca875717c0399a4c9ef6a092d3e53
|
[] |
no_license
|
wqdchn/geektime
|
214c4ee627023723585ddbc66f3066f816a4fc76
|
d953f631d93a4493aed26392954ca1f18a440938
|
refs/heads/master
| 2021-12-27T16:19:03.680654
| 2021-12-26T11:21:35
| 2021-12-26T11:21:35
| 174,132,464
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 977
|
py
|
# @program: PyDemo
# @description: https://leetcode.com/problems/add-two-numbers/
# @author: wqdong
# @create: 2019-10-01 14:31
class ListNode:
    """Singly linked list node holding one digit."""

    def __init__(self, x):
        self.val = x
        self.next = None


class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode):
        """Add two non-negative integers stored as reversed-digit lists.

        Each input list holds one digit per node, least significant first;
        the sum is returned in the same representation.
        """
        dummy = ListNode(0)
        tail = dummy
        carry = 0
        while l1 is not None or l2 is not None or carry:
            total = carry
            if l1 is not None:
                total += l1.val
                l1 = l1.next
            if l2 is not None:
                total += l2.val
                l2 = l2.next
            carry, digit = divmod(total, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        return dummy.next
# Demo driver: 342 and 465 stored least-significant-digit-first.
s = Solution()
l1 = ListNode(2)
l1.next = ListNode(4)
l1.next.next = ListNode(3)
l2 = ListNode(5)
l2.next = ListNode(6)
l2.next.next = ListNode(4)
res = s.addTwoNumbers(l1, l2)
# Prints the sum's digits one per line: 7, 0, 8 (i.e. 342 + 465 = 807).
while res:
    print(res.val)
    res = res.next
|
[
"wqdong.chn@gmail.com"
] |
wqdong.chn@gmail.com
|
63d15beb9622fc5048da342646160ea270b446d8
|
c4d379713ad8133c61d427c07b29f4121dcd86c5
|
/workspace/root/topology/tani_utils.py
|
1d93da9c1c42e8e0c77a07b025ee1c6ebc8e4cf3
|
[] |
no_license
|
agiulianomirabella/melanoma-detector
|
80fe02d4ca18034ee0119e2c797df886a00ceb9e
|
33ab38e3e559505f0225c86ba455a02636d3b839
|
refs/heads/master
| 2022-12-25T04:54:27.443543
| 2020-09-23T16:23:23
| 2020-09-23T16:23:23
| 292,315,326
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,546
|
py
|
from root.utils import * # pylint: disable= unused-wildcard-import
from root.utils import makeUnique, getGrayValueCoordinates
import numpy as np
from copy import copy
from scipy.ndimage.morphology import generate_binary_structure
'''
A helpful auxiliary permutation list for subcells computation
'''
#Maximum value for spaceDimension:
# Highest space dimension supported by the permutation table below.
maximumSpaceDimension = 2
# permutations1[d] lists the +/-0.5 offsets of a d-dimensional cell's
# neighbouring subcells (the 8 surrounding offsets in 2D, the 2 in 1D).
permutations1 = [[], [[-0.5], [0.5]], [[-0.5, -0.5], [-0.5, 0], [-0.5, 0.5], [0, -0.5], [0, 0.5], [0.5, -0.5], [0.5, 0], [0.5, 0.5]]]
'''
This module will compute cell's features, such as dimension, or subcells.
- A cell is a numpy array
'''
def dim(cell):
    """Return the dimension of a cell: the count of integer coordinates.

    Coordinates that are whole numbers (x % 1 == 0) contribute to the
    dimension; half-integer coordinates do not.
    """
    # Counting values directly replaces the original index-based list build.
    return sum(1 for coord in cell if coord % 1 == 0)
def getRationalIndices(cell):
    """Return the indices of the non-integer (half-integer) coordinates of a cell."""
    # enumerate replaces the range(len(...)) indexing anti-idiom.
    return [i for i, coord in enumerate(cell) if coord % 1 != 0]
def getSubCells(cell):  # return a list of subCells
    """Return the (deduplicated) subcells of *cell*.

    An offset from the permutation table is applied only when it leaves
    every half-integer coordinate of the cell untouched.
    """
    rational = getRationalIndices(cell)  # invariant over the loop; hoisted
    faces = []
    for offset in permutations1[len(cell)]:
        if all(offset[i] == 0 for i in rational):
            faces.append(cell + np.array(offset))
    return makeUnique(faces)
'''
This module will define functions to extract CCs eulerchar feature.
- A CC is a list of arrays (coordinates of cells belonging to the CC)
'''
def getAllCells(cc):
    """Return the cells of connected component *cc* plus all their subcells.

    Duplicates are removed via makeUnique.
    """
    out = list(cc)  # shallow copy, as copy(cc) did for a list input
    for cell in cc:
        # extend() avoids the original's repeated "out = out + ..." list
        # concatenation, which is quadratic in the number of subcells.
        out.extend(getSubCells(cell))
    return makeUnique(out)
def euler(cc):
    """Euler characteristic of a connected component.

    Alternating sum over dimensions d of the number of cells of dimension d
    among the component's cells and all of their subcells.
    """
    if not cc:
        return 0
    all_cells = getAllCells(cc)
    total = 0
    for d in range(len(cc[0]) + 1):
        count = sum(1 for c in all_cells if dim(c) == d)
        total += ((-1) ** d) * count
    return total
|
[
"giulianomirabella@gmail.com"
] |
giulianomirabella@gmail.com
|
191a36449075d84cb10df8758f93f23b02e0ef64
|
cfb33b4a1216387c8417af9d04058b87e5d557a9
|
/Neutrons/Fast/1SourceMLEM.py
|
0d28aff7eb59e63f5c07e39fb4835a1fb576910b
|
[] |
no_license
|
loomisdevon/DRRSMask
|
8c3428acbab7095e8df29008d73d5ab4e7d17b0c
|
181955f68b0043a02b38e14b29654779a220b40a
|
refs/heads/master
| 2023-01-15T03:46:43.478169
| 2020-11-26T06:37:36
| 2020-11-26T06:37:36
| 277,006,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,054
|
py
|
# Dual Rotating Radiation Scattering Mask MCNP Code
# Rotate neutron and gamma flux from 0 to 2PI around mask and return signal received from detector
# Authors: Ivan Novikov and Devon Loomis
import os
import os.path
import math
import fileinput
import shutil
import subprocess
import io
import string
import numpy as np
import matplotlib.pyplot as plt
import time
import math
from decimal import Decimal
import csv
from tqdm import *
########### GLOBALS #############
#Configuration Name (this is name of input file without .txt)
CONFIG_NAME = 'DDRS3_rand2_absorber1Source'
SOURCE_NAME = 'source3'
tMATRIXCONFIG_NAME = 'DDRS3_rand2_absorber'
tMatrixFilename = tMATRIXCONFIG_NAME + "tMatrix.csv"
#####################
# Source Info
R = 100
Phi = 45
Theta = 140
##############
#################################
###################################### creating all MCNP input files for simulation of the increasing relative distance between source and detector #######################
''' Parameters:
file_name: MCNP input template
y-pos: y position of center of circle of source path
r: radius of circle of source path
init: inital angle away from xy-plane
final angle away from xy-plane
angle step size
limit: longest distance between the two
'''
def smoothing(fluxArray, smoothingParameter):
    """Return a moving-average-style smoothed copy of fluxArray.

    For each index i the values within roughly +/- smoothingParameter of i
    are averaged.  NOTE(review): several quirks to confirm before reuse:
    - range(j, 0, -1) never includes index 0, so the first element is
      excluded from left-edge windows;
    - index i itself is summed by both the forward and backward loops, so
      the centre point is double-counted in every window;
    - near the left edge the forward loop still runs to i+smoothingParameter
      and can raise IndexError when len(fluxArray) < 2*smoothingParameter.
    """
    smoothingArray = []
    for i in range(len(fluxArray)):
        totSum = 0
        numPnts = 0
        if (i - smoothingParameter < 0):
            # Left edge: walk back toward (but not including) index 0.
            for j in range(i,0,-1):
                totSum += fluxArray[j]
                numPnts += 1
            for k in range(i,i+smoothingParameter,1):
                totSum += fluxArray[k]
                numPnts += 1
        elif (i + smoothingParameter > len(fluxArray)):
            # Right edge: forward walk clipped at the array end.
            for j in range(i,len(fluxArray),1):
                totSum += fluxArray[j]
                numPnts += 1
            for k in range(i,i-smoothingParameter,-1):
                totSum += fluxArray[k]
                numPnts += 1
        else:
            # Interior: symmetric window of 2*smoothingParameter samples.
            for j in range(i,i+smoothingParameter,1):
                totSum += fluxArray[j]
                numPnts += 1
            for k in range(i,i-smoothingParameter,-1):
                totSum += fluxArray[k]
                numPnts += 1
        average = totSum/numPnts
        smoothingArray.append(average)
    return smoothingArray
def createFiles(file_name, phi, rad, init, final, step_size):
    """Generate one MCNP input file per source angle theta.

    For each theta in range(init, final, step_size) the SDEF line of the
    template *file_name* is replaced with a source positioned on a sphere of
    radius *rad* at polar angle *phi* (degrees), aimed back at the origin.
    Returns the list of written file names.
    """
    fileList = []
    marker=0  # NOTE(review): never used
    rad_phi = math.radians(phi)
    for new_theta in range(init, final, step_size):
        # Locate the SDEF (source definition) card in the template.
        text_search = None
        f =open(file_name)
        for line in f:
            words = line
            sdef = words[0:4]
            if (sdef == "SDEF"):
                text_search = words
                break
        f.close()
        # Spherical -> Cartesian source position.
        rad_theta = math.radians(new_theta)
        x_pos = round(rad * np.cos(rad_theta)*np.sin(rad_phi),3)
        y_pos = round(rad * np.sin(rad_theta)*np.sin(rad_phi),3)
        z_pos = round(rad * np.cos(rad_phi),3)
        # Unit vector pointing from the source back toward the origin.
        r_mag = np.sqrt(x_pos**2+y_pos**2+z_pos**2)
        vecx_pos = round(-x_pos/r_mag,3)
        vecy_pos = round(-y_pos/r_mag,3)
        vecz_pos = round(-z_pos/r_mag,3)
        #theta_rad = np.arctan(z_pos/r)
        #vecz_pos = round(-1 * (theta_rad/(np.pi/2)),5)
        #replacement_text = sdef + " ERG = 1.42 POS " + str(x_pos) + " " + str(y_pos) + " " + str(z_pos) + " VEC= " + str(vecx_pos) + " " + str(vecy_pos) + " " + str(vecz_pos) + " DIR=d1 par=n" + "\n"
        replacement_text = sdef + " ERG = 2.0 POS " + str(x_pos) + " " + str(y_pos) + " " + str(z_pos) + " VEC= " + str(vecx_pos) + " " + str(vecy_pos) + " " + str(vecz_pos) + " DIR=d1 WGT 20 par=n" + "\n"
        #replacement_text = sdef + " ERG = 1.42 POS " + str(x_pos) + " " + str(y_pos) + " " + str(z_pos) + " par=n" + "\n"
        # Copy the template, swapping in the new SDEF card.
        read_name = file_name
        write_name = CONFIG_NAME + SOURCE_NAME + "_" + str(new_theta) + ".txt"
        f1 = open(read_name, 'r')
        f2 = open(write_name, 'w')
        for lines in f1:
            f2.write(lines.replace(text_search, replacement_text))
        f1.close()
        f2.close()
        fileList.append(write_name)
    return (fileList)
################################# delete runtpe files after every set of commands and delete all output files and input files after program run #######################
''' Parameters
directory: directory containing all files
file: KSEF_2 #####################
remove_all: test to determine whether to delete all files or only runtpe files
'''
def removeFiles(directory, file1, file2, file3, outfile, initfile, t_file, save_one, remove_all):
    """Clean generated MCNP files out of *directory*.

    The template (file1), init file and transmission-matrix file are always
    preserved.  binRun* runtpe files are always deleted.  With remove_all,
    every generated input/output file except the template is deleted; with
    save_one, one input/output pair (file2/file3) is additionally kept.
    """
    dir_name = directory
    for fname in os.listdir(dir_name):
        if (fname != initfile and fname != t_file):
            if fname.startswith("binRun"):
                os.remove(os.path.join(dir_name, fname))
            if (fname.startswith(file1[:-4]) or fname.startswith(outfile[:-4])) and remove_all:
                if (fname != file1):
                    os.remove(os.path.join(dir_name, fname))
            if (fname.startswith(file1[:-4]) or fname.startswith(outfile[:-4])) and save_one:
                if (fname != file1 and fname != file2 and fname != file3):
                    os.remove(os.path.join(dir_name, fname))
####################### read MCNP output file, find and return flux value #########################
#######################_file_: MCNP output file name ##################################
'''
def readFlux(_file_):
flux_ = 0
error_ = 0
with open(_file_, 'r') as outfile:
for line in outfile:
if ('+ *Gamma flux in detector*' in line):
lines = [outfile.readline() for i in range(9)] #this reads 9 lines after the fc4 comment
spectrum = [outfile.readline() for i in range(13)] #this reads 13 lines which contain spectrum
#each line has an index [0]-[12]
#print(type(spectrum[1]))
#print(spectrum[1])
#print(float(spectrum[1].split()[1])) #this splits spectrum[i] using spaces
#each spectrum[i].split() has three new indeces [0]-[2]
#float converts each string to float
#Neutron energy is in [0]
#Neutron counts are in [1]
#Error is in [2]
#tmp = 0.0
#print (spectrum)
for j in range(13):
flux_ += float(spectrum[j].split()[1])
error_ += float(spectrum[j].split()[2])
#Fluxin3[i] = tmp
return flux_, error_
'''
def readFlux(_file_, energyBin, binWrite):
    """Parse an MCNP output file and return (flux, error) for one tally bin.

    Scans *_file_* for the tally marker line '+ *Neutron Flux In Detector*',
    skips the 9 header lines that follow it, then reads energyBin+1 spectrum
    lines.  Each spectrum line holds three whitespace-separated columns:
    energy [0], counts [1], relative error [2].

    binWrite > 0 selects that single bin's counts and error; binWrite == 0
    selects the final (total) line at index energyBin.  Returns (0, 0) when
    the marker is never found.
    """
    flux_ = 0
    error_ = 0
    with open(_file_, 'r') as outfile:
        for line in outfile:
            if ('+ *Neutron Flux In Detector*' in line):
                # Skip the 9 fixed header lines after the tally comment.
                for _ in range(9):
                    outfile.readline()
                spectrum = [outfile.readline() for _ in range(energyBin + 1)]
                for j in range(energyBin + 1):
                    if (binWrite == j and binWrite != 0):
                        flux_ = float(spectrum[j].split()[1])
                        # Bug fix: the error lives in column [2]; the original
                        # read column [1] here and returned the flux twice.
                        error_ = float(spectrum[j].split()[2])
                if (binWrite == 0):
                    flux_ = float(spectrum[energyBin].split()[1])
                    error_ = float(spectrum[energyBin].split()[2])
    return flux_, error_
def initialize(_file_):
    """Load run parameters from the fixed-format init file into module globals.

    NOTE(review): each slice offset (e.g. [12:]) is hard-wired to the length
    of the label on that line of init.txt ("intensity = ", etc.) — the file
    layout and this function must change together.
    """
    global intensity, activity, nps, t
    global radius, init_theta, final_theta, step_theta
    global init_phi, final_phi, step_phi
    global packet
    with open(_file_,"r", newline='') as file:
        file.readline()  # section header
        intensity = float(file.readline()[12:])
        activity = float(file.readline()[11:])
        nps = float(file.readline()[6:])
        t = float(file.readline()[4:])
        file.readline()  # section header
        radius = float(file.readline()[9:])
        init_theta = int(file.readline()[13:])
        final_theta = int(file.readline()[14:])
        step_theta = int(file.readline()[13:])
        init_phi = float(file.readline()[11:])
        final_phi = float(file.readline()[12:])
        step_phi = float(file.readline()[11:])
        file.readline()  # blank / header
        file.readline()
        packet = int(file.readline()[9:])
#**********************MAIN**************************
# Driver: generate MCNP inputs for each source angle, run them in parallel
# batches, harvest the detector flux from each output, smooth and normalize,
# and write the counts and error arrays to CSV.
#dir_ = 'C:\\Users\\devon\\Documents\\DRRSMask\\Working_Version\\MLEM\\'
dir_ = os.path.dirname(os.path.abspath(CONFIG_NAME+SOURCE_NAME)) + "\\"
file_ = CONFIG_NAME + SOURCE_NAME + '.txt'
outFile_ = CONFIG_NAME + SOURCE_NAME + '_out.txt'
file_name_ = dir_ + file_
outFile_name_ = dir_ + outFile_
keepInFile = CONFIG_NAME + SOURCE_NAME + '_0.txt'
keepOutFile = CONFIG_NAME + SOURCE_NAME + '_out0.txt'
init_file= dir_ + 'init.txt'
t_file = CONFIG_NAME + "tMatrix.csv"
# Defaults; overwritten by initialize(init_file) below.
intensity, activity, nps, t = 0,0,0,0
radius,init_theta,final_theta,step_theta = 0,0,0,0
init_phi,final_phi,step_phi = 0,0,0
packet = 0
initialize(init_file)
originalThetaCountsArray = []
originalThetaErrorArray = []
# Sweep a full revolution starting at the module-level Theta.
init_theta = Theta
final_theta = init_theta+360
transmissionMatrix = []
start = time.time()
removeFiles(dir_, file_, keepInFile, keepOutFile, outFile_, init_file, t_file, False, True) # purge directory of any existing MCNP files from previous run
#files = createFiles(file_name_, z, radius, init_ang, final_ang, step)
files = createFiles(file_name_, Phi, R, init_theta, final_theta, step_theta) # create all MCNP input files
commands = []
outFileList = []
j = init_theta
#create set of commands for subprocess of all input files
for i in range(int((final_theta - init_theta) / step_theta)):
    binFile = "binRun" + str(j) + ".r"
    outFile = (CONFIG_NAME + SOURCE_NAME + "_out" + str(j) + ".txt")
    commands.append("mcnp6 i=" + files[i] + " o=" + outFile + " runtpe=" + binFile)
    outFileList.append(outFile)
    j += step_theta
print("Simulating...")
# give subprocess pak amount of parallel programs to execute until all commands are executed
for x in tqdm(range(0,int((final_theta - init_theta) / step_theta),(packet))):
    if (x < (len(commands) - packet)):
        commandsub = commands[x:(x+packet)]
    else:
        commandsub = commands[x:]  # final, possibly short, batch
    processes = [subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, cwd=dir_) for cmd in commandsub]
    removeFiles(dir_, file_, keepInFile, keepOutFile, outFile_, init_file, t_file, False, False) # remove runtpe files
    for p in processes:
        p.wait()  # block until the whole batch finishes before launching more
print ("Checkpoint")
theta = init_theta
fluxList = []
errorList = []
sourceThetaList = []
#use for neutrons
#energyBinOfInterest = 13
#use for gammas
energyBinOfInterest = 100
############################read and gather flux values and source distances for each output file and add them to lists###################################
for f in outFileList:
    flux, error = readFlux(f, energyBinOfInterest,40)
    fluxList.append(flux)
    errorList.append(error)
    rad_theta = math.radians(theta)
    sourceThetaList.append(rad_theta)
    theta += step_theta
removeFiles(dir_, file_, keepInFile, keepOutFile, outFile_, init_file, t_file, True, False)
end = time.time()
print("Runtime: ", round((end - start)/60, 2), " mins")
rawFluxArray = np.array(fluxList)
#print (rawFluxArray)
fluxArray = np.array(smoothing(fluxList, 8))  # smooth over +/- 8 angular steps
#fluxArray = np.array(fluxList)
errorArray = np.array(errorList)
#print (errorArray)
thetaArray = np.array(sourceThetaList)
# Convert flux to expected detector counts for the given source intensity/time.
countsArray = fluxArray * intensity * t
countsSum = np.sum(countsArray)
#normalizedCountsArray = countsArray / countsSum
normalizedCountsArray = np.copy(countsArray)
# Relative error combines per-bin and total counting statistics in quadrature.
normalizedCountsErr = np.sqrt((1/countsArray) + (1/countsSum))
normalizedCountsErrorArray = np.multiply(normalizedCountsArray, normalizedCountsErr)
with open(CONFIG_NAME + SOURCE_NAME + "data.csv","w+", newline='') as file:
    writer=csv.writer(file,delimiter=',')
    for a in normalizedCountsArray:
        writer.writerow([a])
with open(CONFIG_NAME + SOURCE_NAME + "background.csv","w+", newline='') as file:
    writer=csv.writer(file,delimiter=',')
    for b in normalizedCountsErrorArray:
        writer.writerow([b])
###########################END MAIN###############################
|
[
"devon.loomis@scientic.com"
] |
devon.loomis@scientic.com
|
9c17bdc0d3beabedf1313533658ffff019329cce
|
8f5d2fb45d6452fc6df00b12fa0bd45446d1029b
|
/lessons/models.py
|
1227de579706645ec9701f1a78da6f80fcc40145
|
[] |
no_license
|
luminyanko/renshuu
|
4bcbd0f10e0a4a175ab92783e5346ac4b1c44927
|
54a98f59a7d2971e3ba20d5406b3e7ad8c482408
|
refs/heads/master
| 2023-06-01T20:40:30.090022
| 2021-06-21T08:47:19
| 2021-06-21T08:47:19
| 377,913,945
| 0
| 0
| null | 2021-06-21T08:44:52
| 2021-06-17T17:39:11
|
Python
|
UTF-8
|
Python
| false
| false
| 734
|
py
|
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.contrib.auth.models import User
# Create your models here.
class Tag(models.Model):
    """A label used to categorize lessons."""
    tag_name = models.CharField(max_length=50)
    def __str__(self):
        # Human-readable name shown in the admin and shell.
        return self.tag_name
class Lesson(models.Model):
    """A lesson written by a user, filed under exactly one tag."""
    title = models.CharField(max_length=150)
    content = models.TextField()
    date_created = models.DateTimeField(default=timezone.now)
    # Deleting the author deletes their lessons; a tag in use cannot be deleted.
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    tag = models.ForeignKey(Tag, on_delete=models.PROTECT)
    def get_absolute_url(self):
        # Canonical URL of this lesson's detail page.
        return reverse('lesson-detail', kwargs={'pk': self.pk})
    def __str__(self):
        return self.title
|
[
"luminyanko@gmail.com"
] |
luminyanko@gmail.com
|
ee1a31f88eeb3c7e9f45e9d6e74e4f4ac8581dbf
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_381/ch15_2020_09_14_14_10_44_836878.py
|
03149e67069cc3803fab0866de1f386bfbe66feb
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
def chris(nome):
    """Greet *nome* — unless the name is Chris, whom everybody hates.

    Returns the fixed phrase for 'Chris' and a personalized greeting for
    everyone else.
    """
    # Bug fix: the original wrote `chris == nome`, comparing the function
    # object itself to the string — always False, so the Chris branch was
    # unreachable. Compare the name against the literal instead.
    if nome == 'Chris':
        return 'Todo mundo odeia o Chris'
    else:
        return 'Olá, {0}'.format(nome)
nome = input('Qual seu nome?')
|
[
"you@example.com"
] |
you@example.com
|
739c0ed4a80c4bad6b0788a2f025475c8e864f1c
|
3ec9ace491cd5d06b5b998e7e309a13bd86c7126
|
/tests/system/conftest.py
|
535e9d9a46ea535834835fff3376b8c958e75f58
|
[
"Apache-2.0"
] |
permissive
|
Jitsusama/lets-do-dns
|
64467664f42df053b535156fc773be7e874d0bf5
|
faff4bf45e9a4be438e15afbe5caa249fe1e5210
|
refs/heads/master
| 2021-01-20T02:38:10.953843
| 2017-07-21T03:08:30
| 2017-07-21T03:08:30
| 89,433,363
| 8
| 0
|
Apache-2.0
| 2019-10-02T15:07:28
| 2017-04-26T03:24:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,843
|
py
|
try:
import ConfigParser as configparser
except ImportError:
import configparser
import os
import pytest
from requests import post
@pytest.fixture(autouse=True)
def os_environ_reset():
    """Reset os.environ in between test runs."""
    original_env = os.environ.copy()  # snapshot before the test mutates it
    yield
    # Restore the snapshot so env-var changes cannot leak across tests.
    os.environ.clear()
    os.environ.update(original_env)
@pytest.fixture(scope='module')
def test_configuration():
    """Read test configuration from :file:`config.ini` file.

    The INI file must have a ``[DEFAULT]`` section containing the following
    parameters:

    * ``do_api_key``
    * ``do_domain``
    * ``do_hostname``
    """
    # Resolve config.ini relative to this conftest so the tests can run
    # from any working directory.
    file_path = os.path.realpath(__file__)
    directory_path = os.path.dirname(file_path)
    config_file = '%s/config.ini' % directory_path
    config = configparser.ConfigParser()
    config.read(config_file)
    return config
@pytest.fixture
def create_response(
        do_base_uri, do_auth_header, do_domain, do_hostname, request):
    """POST a TXT record to the DigitalOcean API and return the response.

    The record's data field is set to the requesting test function's name so
    each test leaves an identifiable record.
    """
    return post(
        '%s/%s/records' % (do_base_uri, do_domain),
        headers=do_auth_header,
        json={'type': 'TXT',
              'name': do_hostname,
              'data': request.function.__name__})
# Small accessor fixtures for the DigitalOcean test credentials/endpoints.
@pytest.fixture()
def do_api_key(test_configuration):
    # API token read from config.ini.
    return test_configuration.get('DEFAULT', 'do_api_key')
@pytest.fixture
def do_auth_header(do_api_key):
    # Bearer-token header expected by the DigitalOcean v2 API.
    return {'Authorization': 'Bearer %s' % do_api_key}
@pytest.fixture
def do_base_uri():
    return 'https://api.digitalocean.com/v2/domains'
@pytest.fixture
def do_domain(test_configuration):
    return test_configuration.get('DEFAULT', 'do_domain')
@pytest.fixture
def do_hostname(test_configuration):
    return test_configuration.get('DEFAULT', 'do_hostname')
@pytest.fixture
def do_record_id(create_response):
    # Record id extracted from the create_response POST above.
    return create_response.json()['domain_record']['id']
|
[
"joel@grrbrr.ca"
] |
joel@grrbrr.ca
|
41e2a093e82ce2eb956957ab8d320d97248524fb
|
1a1a10576c4fabe2879feb00393fe8d3f5211d9b
|
/todo/settings.py
|
dfb08b572fb8286e00d5acfcf40919ff7f4a9f4c
|
[] |
no_license
|
Machele-codez/todo-by-machele
|
8a50e7e6f9e560ba40dd7816f1c34324cf4c4f8d
|
f96903f24a5fdfb587cbf04977b0c2aaf5a92879
|
refs/heads/master
| 2022-09-13T02:42:36.410948
| 2020-06-03T19:59:33
| 2020-06-03T19:59:33
| 269,172,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,336
|
py
|
"""
Django settings for todo project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gk(c+=qm^h6-b-)g%=ej0%kzmgnbwl=k^35uupjpspq)ql^uw2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'apps.accounts',
'apps.tasks',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
LOGIN_REDIRECT_URL = 'tasks:all_tasks '
|
[
"smithbeblack@gmail.com"
] |
smithbeblack@gmail.com
|
975d016f867e515a5d0d42fecfb3e22ccc3a61ff
|
55ddcae82338890a7101b2ff6db0856463702314
|
/perfectcushion/shop/admin.py
|
1ac35dfa7e64e770e7c84c4021ff8ced72dc2336
|
[] |
no_license
|
rixinhaha/DjangoEcommerce
|
d31d6e8c7a4a40ba3f32d0e27ef203c59475c1dc
|
0e3a188e8276bbfb63901747f553dd2ab483c284
|
refs/heads/master
| 2020-08-03T21:18:14.750498
| 2019-09-30T15:30:09
| 2019-09-30T15:30:09
| 211,887,708
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 546
|
py
|
from django.contrib import admin
from .models import Category,Product
# Register your models here.
class CategoryAdmin(admin.ModelAdmin):
    """Admin list config for Category; slug is auto-filled from the name."""
    list_display = ['name', 'slug']
    prepopulated_fields = {'slug':['name',]}
admin.site.register(Category, CategoryAdmin)
class ProductAdmin(admin.ModelAdmin):
    """Admin list config for Product with inline-editable stock fields."""
    list_display= ['name', 'price', 'stock', 'available', 'created', 'updated']
    list_editable= ['price', 'stock', 'available']
    prepopulated_fields = {'slug':['name',]}
    # Paginate the change list at 20 products per page.
    list_per_page = 20
admin.site.register(Product,ProductAdmin)
|
[
"rixinhaha@gmail.com"
] |
rixinhaha@gmail.com
|
dfd3789007df10b47fed17eb4ecef1dbbe054537
|
2edbdd6763f86aca4f6ee67ced390fb477ed0e44
|
/udf/extract_type_modify_law.py
|
b2453cfaf4db58be6073271509e34dc2f95f64b6
|
[] |
no_license
|
nhatan172/deepdive
|
f498901c3faa474d3ab5166ef7a694e1626e2f01
|
492afee641436e4d5a068a7cec1ff3969d964518
|
refs/heads/master
| 2021-04-06T02:05:17.095127
| 2018-04-12T04:07:45
| 2018-04-12T04:07:45
| 124,988,988
| 0
| 0
| null | 2018-03-21T23:19:06
| 2018-03-13T03:49:20
|
Python
|
UTF-8
|
Python
| false
| false
| 12,317
|
py
|
#!/usr/bin/env python
# -*- coding:utf8 -*-
from deepdive import *
import re
import handle_string
import divlaw
def lenIterator(list):
    """Return the number of items an iterable yields (consumes iterators)."""
    # Same count as the original manual loop, expressed as a generator sum.
    return sum(1 for _ in list)
def getTitle(string):
    """Return the prefix of *string* before the first colon-then-opening-quote.

    The delimiter is a ':' followed by optional whitespace/markup characters
    and then a straight or curly opening quote.  If no delimiter exists the
    whole string is returned.
    """
    # A single re.search replaces the original double finditer pass, which
    # consumed one iterator just to test for a match and then rebuilt it.
    match = re.search(r"\:(\s|\n|\*|\_|\#)*(\“|\")", string, re.DOTALL)
    return string[:match.start()] if match else string
def get_numerical_symbol(title):
    """Extract a legal-document numerical symbol (e.g. '12/2017/QH14') from *title*.

    Quoted passages are stripped first so symbols cited inside quotations are
    ignored.  If the title mentions an amended document ('của ... đã được'),
    the symbol is taken from that clause; otherwise the first symbol found
    anywhere in the title is returned, or None when there is none.
    """
    # Drop text inside curly or straight quotes before searching.
    title = re.sub(r'(\“(.(?!\“|\”))+.{2})|(\"(.(?!\"))+.{2})',"",title,re.M|re.DOTALL)
    get_title1 = re.search(r'(của\s.*)\s(đã được|được)',title)
    get_title = re.search(r'[0-9]+(/[0-9]+)*((/|-)[A-ZĐƯ]+[0-9]*)+(\s|\_|\#|\*|\.|\\)',title,re.M|re.I)
    # get_id = re.search(r'[0-9]+(/[0-9]+)*((/|-)[A-ZĐ]+[0-9]*)+',get_content.group())
    # get_title1 = re.search(r'([0-9]+(/[0-9]+)*((/|-)[A-ZĐ]+[0-9]*)\s(đã được))|([0-9]+(/[0-9]+)*((/|-)[A-ZĐ]+[0-9]*)\s(được))',title)
    if(get_title1 is not None):
        # Prefer the symbol inside the "của ... đã được/được" clause.
        number = re.search(r'[0-9]+(/[0-9]+)*((/|-)[A-ZĐƯ]+[0-9]*)+(\s|\_|\#|\*|\.|\\)',get_title1.group())
        if(number is not None):
            return (re.search(r'[0-9]+(/[0-9]+)*((/|-)[A-ZĐƯ]+[0-9]*)+',number.group(),re.U|re.I)).group()
    elif ((get_title is not None) and (get_title1 is None)):
        return (re.search(r'[0-9]+(/[0-9]+)*((/|-)[A-ZĐƯ]+[0-9]*)+',get_title.group(),re.U|re.I)).group()
    else :
        return None
@tsv_extractor
@returns(lambda
        law_id ="text",
        type = "int",
        doc_content_update = "text",
        symbol = "text",
        position = "text",
        modified_law_date_release = "text"
        :[])
def extract(
        law_id = "text",
        totalLaw = "int",
        law_content = "text",
        law_len = "int",
        totalItem = "int",
        item_content = "text",
        item_len = "int",
        totalpoint = "int",
        point_content = "text",
        part_index ="int",
        chap_index ="int",
        sec_index ="int",
        law_index ="int",
        item_index ="int",
        point_index ="int",
        numerical_symbol = "text",
        date_released ="text"
    ):
    """DeepDive extractor: classify how a legal provision modifies another law.

    Inspects the most specific available text (point, then item, then
    article/law) and assigns a modification type:
      1 = amend/supplement, 2 = repeal, 3 = insert phrase, 4 = replace phrase,
      5 = rename-to, 6 = amend title, 7 = delete phrase.
    Yields one row (law_id, type, content, symbol, position, date) when a
    modification is detected.  NOTE(review): the three sections below are
    near-identical cascades; order of the regex checks is significant.
    """
    doc_content_update = None
    # Truncate contents to their recorded lengths.
    if law_content is not None:
        # law_content = handle_string.to_unicode(law_content)
        law_content = law_content[:law_len]
        # pass
        # law_content = law_content.encode('utf-8')
    if (item_content is not None) :
        # # item_content = handle_string.to_unicode(item_content)
        # # if item_len != len(item_content):
        item_content = item_content[:item_len]
        # pass
        # item_content = item_content.encode('utf-8')
    number = None
    type = 0
    point = 0
    # p matches "sửa đổi/bổ sung" (amend/supplement); p1 matches the same
    # phrase preceded by "đã/đã được" (already amended) — those are excluded.
    p = re.compile(r'((((S|s)ửa đổi)(\s|\,)*((b|B)ổ sung)*)|((b|B)ổ sung))')
    p1= re.compile(r'(đã\s|đã được\s)((((S|s)ửa đổi)(\s|\,)*((b|B)ổ sung)*)|((b|B)ổ sung))')
    position = "0_0_0_0_0_0"
    # --- Point level (most specific) ---
    if(totalpoint > 0):
        number = get_numerical_symbol(getTitle(point_content))
        if(number is not None):
            numerical_symbol = number
            date_released = None
        position = "{}_{}_{}_{}_{}_{}".format(part_index+1,chap_index+1,sec_index+1,law_index+1,item_index+1,point_index+1)
        type_modify = re.search(r'(((b|B)ổ sung cụm từ)|((b|B)ổ sung từ))',point_content)
        if(type_modify is not None):
            type = 3
            doc_content_update = point_content
            point = 1
        else :
            type_change_name = re.search(r'(S|s)ửa đổi tên',point_content)
            if(type_change_name is not None):
                type = 6
                doc_content_update = point_content
                point = 1
            else:
                type_delete = re.search(r'(b|B)ãi bỏ',point_content)
                inQuote = False
                if type_delete is not None :
                    inQuote = divlaw.itemInQuote(point_content,type_delete.start())
                if(type_delete is not None) and not inQuote:
                    type = 2
                    doc_content_update = point_content
                    point = 1
                else:
                    type_delete_text = re.search(r'(((b|B)ỏ cụm từ)|((b|B)ỏ từ))',point_content)
                    if(type_delete_text is not None):
                        type = 7
                        doc_content_update = point_content
                        point =1
                    else:
                        type_add_text = p.finditer(point_content)
                        type_add_text1 = p1.finditer(point_content)
                        len1 = lenIterator(type_add_text)
                        len2 = lenIterator(type_add_text1)
                        if( (len1 != len2) and (len1 > 0)):
                            type = 1
                            doc_content_update = point_content
                            point = 1
                        else :
                            # type_change_text = re.search(r'(t|T)hay\s.*cụm từ',point_content)
                            type_change_text = re.search(r'((t|T)hay\s)*(cụm\s)*từ\s.*(được\s)*(thay\s)*bằng\s(cụm\s)*từ',point_content)
                            if(type_change_text is not None):
                                type = 4
                                doc_content_update = point_content
                                point = 1
                            else :
                                type_name_to_name = re.search(r'((t|T)ên của\s).+(((S|s)ửa đổi\s)*(\,\s)*((b|B)ổ sung\s)*)(thành)',point_content)
                                if(type_name_to_name is not None):
                                    type = 5
                                    doc_content_update =point_content
                                    point = 1
                                else :
                                    point = 0
    # --- Item level (only when no point matched) ---
    if(totalItem > 0 and point == 0):
        number = get_numerical_symbol(getTitle(item_content))
        if(number is not None):
            numerical_symbol = number
            date_released = None
        position = "{}_{}_{}_{}_{}_{}".format(part_index+1,chap_index+1,sec_index+1,law_index+1,item_index+1,0)
        type_modify = re.search(r'(b|B)ổ sung cụm từ',item_content)
        if(type_modify is not None):
            type = 3
            doc_content_update = item_content
            point = 1
        else:
            type_change_name = re.search(r'(S|s)ửa đổi tên',item_content)
            if(type_change_name is not None):
                type = 6
                doc_content_update = item_content
                point = 1
            else:
                type_delete = re.search(r'(b|B)ãi bỏ',item_content)
                inQuote = False
                if type_delete is not None :
                    inQuote = divlaw.itemInQuote(item_content,type_delete.start())
                if(type_delete is not None) and not inQuote:
                    type = 2
                    doc_content_update = item_content
                    point = 1
                else:
                    type_delete_text = re.search(r'(((b|B)ỏ cụm từ)|((b|B)ỏ từ))',item_content)
                    if(type_delete_text is not None):
                        type = 7
                        doc_content_update = item_content
                        point = 1
                    else:
                        # type_add_text = re.search(r'((((S|s)ửa đổi)(\s|\,)*((b|B)ổ sung)*)|((b|B)ổ sung))',item_content)
                        # if(type_add_text is not None):
                        type_add_text = p.finditer(item_content)
                        type_add_text1 = p1.finditer(item_content)
                        len1 = lenIterator(type_add_text)
                        len2 = lenIterator(type_add_text1)
                        if( (len1 != len2) and (len1 > 0)):
                            type = 1
                            doc_content_update = item_content
                            point=1
                        else:
                            # type_change_text = re.search(r'(t|T)hay\s.*cụm từ',item_content)
                            type_change_text = re.search(r'((t|T)hay\s)*(cụm\s)*từ\s.*(được\s)*(thay\s)*bằng\s(cụm\s)*từ',item_content)
                            if(type_change_text is not None):
                                type = 4
                                doc_content_update = item_content
                                point = 1
                            else :
                                type_name_to_name = re.search(r'((t|T)ên của\s).+(((S|s)ửa đổi\s)*(\,\s)*((b|B)ổ sung\s)*)(thành)',item_content)
                                if(type_name_to_name is not None):
                                    type = 5
                                    doc_content_update = item_content
                                    point = 1
                                else :
                                    point = 0
    # if(totalpoint > 0 and point == 1 ):
    # doc_content_update = point_content
    # --- Article/law level (only when nothing more specific matched) ---
    if(totalLaw >0 and point == 0 ):
        number = get_numerical_symbol(getTitle(law_content))
        if(number is not None):
            numerical_symbol = number
            date_released = None
        position = "{}_{}_{}_{}_{}_{}".format(part_index+1,chap_index+1,sec_index+1,law_index+1,0,0)
        type_modify = re.search(r'(b|B)ổ sung cụm từ',law_content)
        if(type_modify is not None):
            type = 3
            doc_content_update = law_content
            point = 1
        else:
            type_change_name = re.search(r'(S|s)ửa đổi tên',law_content)
            if(type_change_name is not None):
                type = 6
                doc_content_update = law_content
                point = 1
            else:
                type_delete = re.search(r'(b|B)ãi bỏ',law_content)
                inQuote = False
                if type_delete is not None :
                    inQuote = divlaw.itemInQuote(law_content,type_delete.start())
                if(type_delete is not None) and not inQuote:
                    type = 2
                    doc_content_update = law_content
                    point = 1
                else:
                    type_delete_text = re.search(r'(((b|B)ỏ cụm từ)|((b|B)ỏ từ))',law_content)
                    if(type_delete_text is not None):
                        type = 7
                        doc_content_update = law_content
                        point = 1
                    else:
                        type_add_text = p.finditer(law_content)
                        type_add_text1 = p1.finditer(law_content)
                        len1 = lenIterator(type_add_text)
                        len2 = lenIterator(type_add_text1)
                        if( (len1 != len2) and (len1 > 0)):
                            type = 1
                            doc_content_update = law_content
                            point = 1
                        else:
                            type_change_text = re.search(r'((t|T)hay\s)*(cụm\s)*từ\s.*(được\s)*(thay\s)*bằng\s(cụm\s)*từ',law_content)
                            if(type_change_text is not None):
                                type = 4
                                doc_content_update = law_content
                                point = 1
                            else :
                                type_name_to_name = re.search(r'((t|T)ên của\s).+(((S|s)ửa đổi\s)*(\,\s)*((b|B)ổ sung\s)*)(thành)',law_content)
                                if(type_name_to_name is not None):
                                    type = 5
                                    doc_content_update = law_content
                                    point = 1
                                else :
                                    point = 0
    # if(totalItem > 0):
    # doc_content_update = item_content
    if(point == 1):
        yield[
            law_id,
            type,
            doc_content_update,
            numerical_symbol,
            position,
            date_released
        ]
|
[
"nhatan172@gmail.com"
] |
nhatan172@gmail.com
|
3cd6e2099d0754d1a415a93ab25595f0ada97a68
|
77909a8a93f60759e0fd32fb632d937c7c8d4d68
|
/curso_em_video_exercises/desafio10.py
|
c1c5bc683719ab4335f0251c26bf81cd73603c19
|
[] |
no_license
|
euricoteles/python
|
13e39bf0b5916b69794dac39dc55a213b5443718
|
dae10d87a9923646dd8257a2ce3da91dc355b603
|
refs/heads/master
| 2021-09-04T14:51:47.147309
| 2018-01-19T16:57:27
| 2018-01-19T16:57:27
| 116,609,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 286
|
py
|
# Script: read how much money a person has in their wallet and convert it
# to dollars at the hard-coded rate of 1.18.
# Bug fix: int() rejected decimal amounts such as "10.50"; use float().
valor = float(input('Qual o valor para converter em dolares:'))
dolares = 1.18 * valor  # redundant trailing "/ 1" removed
# print information
print('O valor em dolares fica : {}'.format(dolares))
|
[
"euriconaz@hotmail.com"
] |
euriconaz@hotmail.com
|
94f4897b31040b8abc8c30479a440a8e0af48906
|
e793abb16a44eff7b48df2c774883d7a469f2005
|
/local_code/adam/test.py
|
1b238455a5659ce30f9888884b4d2cf00e25d092
|
[] |
no_license
|
Junyinghuang/DS4S_group2
|
a25f2fe6d31f66452fc03ad02594c9c15a4f0017
|
2a3bcd0fad858e7ef00d69342dbdd47853525b83
|
refs/heads/master
| 2022-06-20T23:43:11.420048
| 2020-05-12T07:07:56
| 2020-05-12T07:07:56
| 257,669,453
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 797
|
py
|
from local_code.adam.new_parameters import get_new_parameters
def test_parameter_creation():
    """Sanity-check get_new_parameters: no draw should land absurdly far out."""
    widths = [1, 2, 3, 4]
    centers = [0, 0, 0, 0]
    proposals = get_new_parameters(widths, centers)
    # A value more than four sigma from its initial guess would be surprising.
    outliers = sum(
        int(abs(value) > (4 * width))
        for value, width in zip(proposals, widths)
    )
    assert outliers == 0, "get_new_parameters yields surprising results."
    print('Test of parameter creation complete.')
|
[
"apkunesh@ucdavis.edu"
] |
apkunesh@ucdavis.edu
|
de9ceaa3537c1f1edf2a30fedb2a4f538e0eec02
|
a2efa9d89a721aae5016280ca166caffab97e94f
|
/exercices_EDX_W1.py
|
44e668445b4b6b51f8543dd707285978c0d16491
|
[] |
no_license
|
samthib/python_edx
|
3dc8378c92092eedfb70b0890ee801465e3a4cd9
|
2970745f6671a0d314b2cd32261928c7a55da3f9
|
refs/heads/master
| 2022-07-04T19:00:19.037593
| 2020-05-05T20:54:57
| 2020-05-05T20:54:57
| 258,567,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,555
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 20 21:10:42 2020
@author: Sam
"""
##--------------------------------##
#Exercice 1
import string
alphabet = string.ascii_letters
# Count how many times each character (letters AND spaces) occurs in the sentence.
sentence = 'Jim quickly realized that the beautiful gowns are expensive'
count_letters = {}
for character in sentence:
    count_letters[character] = count_letters.get(character, 0) + 1
def counter(input_string):
    """Count occurrences of each ASCII letter (a-z, A-Z) in *input_string*.

    Returns a dict mapping letter -> count; non-letter characters are ignored.

    Fix: the original mutated the module-level ``count_letters`` dict, so
    every call accumulated counts from previous calls (and from the earlier
    sentence loop).  A fresh local dict makes the function pure.
    """
    letters = string.ascii_letters
    counts = {}
    for ch in input_string:
        if ch in letters:
            counts[ch] = counts.get(ch, 0) + 1
    return counts
#print(counter(sentence))
# The Gettysburg Address, used as a larger input for counter().
address = """Four score and seven years ago our fathers brought forth on this continent, a new nation,
conceived in Liberty, and dedicated to the proposition that all men are created equal. Now we are engaged in a
great civil war, testing whether that nation, or any nation so conceived and so dedicated, can long endure.
We are met on a great battle-field of that war. We have come to dedicate a portion of that field, as a final
resting place for those who here gave their lives that that nation might live. It is altogether fitting and proper
that we should do this. But, in a larger sense, we can not dedicate -- we can not consecrate -- we can not hallow --
this ground. The brave men, living and dead, who struggled here, have consecrated it, far above our poor power to add
or detract. The world will little note, nor long remember what we say here, but it can never forget what they did here.
It is for us the living, rather, to be dedicated here to the unfinished work which they who fought here have thus far so
nobly advanced. It is rather for us to be here dedicated to the great task remaining before us -- that from these honored
dead we take increased devotion to that cause for which they gave the last full measure of devotion -- that we here
highly resolve that these dead shall not have died in vain -- that this nation, under God, shall have a new birth of
freedom -- and that government of the people, by the people, for the people, shall not perish from the earth."""
# Letter frequencies of the address, computed by counter() above.
address_count = counter(address)
#print(address_count)
# Most frequent letter: max over dict keys, using each key's count as the key.
count_letters_max = max(address_count, key=address_count.get)
#print(count_letters_max)
##--------------------------------##
#Excercie 2
import math
#print(math.pi/4)
import random
random.seed(1) # Fixes the see of the random number generator.
def rand():
    """Draw a single uniform random float between -1 and 1."""
    low, high = -1, 1
    return random.uniform(low, high)
rand()
def distance(x, y):
    """Return the Euclidean distance between 2-D points *x* and *y* (pairs)."""
    dx = y[0] - x[0]
    dy = y[1] - x[1]
    return math.sqrt(dx ** 2 + dy ** 2)
x=(0,0)
y=(1,1)
distance(x, y)
def in_circle(x, origin = [0,0]):
    """Return True when point *x* lies strictly inside the unit circle at *origin*."""
    return distance(x, origin) < 1
in_circle((1, 1))
# Monte-Carlo estimate of pi/4: fraction of R random points inside the circle.
random.seed(1)
R = 10000
inside = []
count_true = 0
for _ in range(R):
    hit = in_circle((rand(), rand()))
    inside.append(hit)
    if hit:
        count_true += 1
# Gap between the analytic value pi/4 and the sampled fraction.
difference = (math.pi / 4) - (count_true / R)
##--------------------------------##
# Exercice 3
"""
Corection
def moving_window_average(x, n_neighbors=1):
n = len(x)
width = n_neighbors*2 + 1
x = [x[0]]*n_neighbors + x + [x[-1]]*n_neighbors
return [sum(x[i:(i+width)]) / width for i in range(n)]
x = [0,10,5,3,1,5]
#print(sum(moving_window_average(x, 1)))
"""
def moving_window_average(x, n_neighbors=1):
    """Mean over a sliding window of width 2*n_neighbors + 1, edge-padded.

    The first/last elements are repeated n_neighbors times so the output has
    the same length as *x*.
    """
    width = n_neighbors * 2 + 1
    padded = [x[0]] * n_neighbors + x + [x[-1]] * n_neighbors
    return [
        sum(padded[i - n_neighbors:i + n_neighbors + 1]) / width
        for i in range(n_neighbors, len(padded) - n_neighbors)
    ]
x = [0, 10, 5, 3, 1, 5]
# Smooth 1000 uniform samples with window half-widths 1..9 and record the
# spread (max - min) of each smoothed series.
R = 1000
Y = []
ranges = []
random.seed(1)
x = [random.uniform(0, 1) for _ in range(R)]
for half_width in range(1, 10):
    Y.append(moving_window_average(x, half_width))
for idx in range(9):
    ranges.append(max(Y[idx]) - min(Y[idx]))
|
[
"noreply@github.com"
] |
noreply@github.com
|
1c125d93dacd44efb23a7c8db2107c3a5838f0bb
|
25ac403b85c141bde644f92785d086b2049ccbff
|
/数据/utf8csv2excel.py
|
5f12f35818e9db1abf5c1b20b9a128038025af4c
|
[] |
no_license
|
void126rlz/SHUJRA
|
62ac47313a2c42dc439cb027a73376bfbe116ec4
|
b7451af92df4bfdd80e71a2e7cbab7eecfb28f23
|
refs/heads/main
| 2023-07-17T06:26:01.119303
| 2021-09-07T13:39:25
| 2021-09-07T13:39:25
| 379,532,808
| 1
| 1
| null | 2021-07-11T08:42:34
| 2021-06-23T08:28:06
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,797
|
py
|
import os # sys
import pandas as pd
# Batch-convert exported CSV files to per-file Excel workbooks with pandas.
# Paths are hard-coded to a local Windows project directory.
home = r'D:\tfp\project\新致软件\3岗位推荐算法\from新致\数据'
fileNamesCSV = os.listdir(home + r'\历史18天的数据') # \*.csv
print(fileNamesCSV)
# !dir D:\tfp\project\新致软件\3岗位推荐算法\from新致\数据\utf8
# Strip the ".csv" extension to get base names.
fileNames = [s[0:-4] for s in fileNamesCSV]
print(fileNames)
# InvalidWorksheetName: Excel worksheet name 'ttyc_personel_educational_experience' must be <= 31 chars.
# print(len('ttyc_personel_educational_experience'))
# [len(f) for f in fileNamesCSV] # <34-3=31
# [18, 14, 18, 41, 24, 37, 34, 17, 23]
[len(f) for f in fileNames]
# NOTE(review): the listdir-derived fileNames above is dead code — it is
# immediately replaced by this hand-pruned list (some entries commented out).
fileNames=[
    # 'ttyc_candidate',
    # 'ttyc_label',
    # 'ttyc_personnel',
    'ttyc_personnel_educational_experience',
    'ttyc_personnel_label',
    'ttyc_personnel_project_experience',
    'ttyc_personnel_work_experience',
    'ttyc_position',
    'ttyc_position_label'
]
# Sheet name uses f[5:36] to drop the "ttyc_" prefix and stay <= 31 chars.
# NOTE(review): error_bad_lines is deprecated in newer pandas (use on_bad_lines).
for f in fileNames:
    print(f)
    df= pd.read_csv(home + '\\历史18天的数据\\' + f + '.csv', error_bad_lines=False )
    df.to_excel(home + '\\excel18\\' + f + '.xlsx', sheet_name=f[5:36],index=False)
# f = 'ttyc_personnel'
# One-off conversion of the manually fixed personnel export.
df= pd.read_csv(home + '\\历史18天的数据\\ttyc_personnel-tfp.csv', error_bad_lines=False )
df.to_excel(home + '\\excel18\\ttyc_personnel-tfp.xlsx', sheet_name='ttyc_personnel',index=False)
# Semicolon-separated CSV files.
fileNames2 = ['ttyc_position', 'ttyc_position_label']
for f in fileNames2:
    print(f)
    df= pd.read_csv(home + '\\utf8\\' + f + '.csv', sep=';') # quotechar='"',
    df.to_excel(home + '\\excel\\' + f + '.xlsx', sheet_name=f,index=False)
f = 'ttyc_personnel_project_experienceT'
df= pd.read_csv(home + '\\utf8\\' + f + '.csv')
df.to_excel(home + '\\excel\\' + f + '.xlsx', sheet_name=f[5:36],index=False)
|
[
"noreply@github.com"
] |
noreply@github.com
|
0b8e26a5b14106ea4c7a0fcf55baed32e47f43de
|
06677c398ea51e3bf78dc45db8cdf97a2b2a296b
|
/table-cards/cardgen.py
|
773492e4f3acd63bb1b3f1079022250713136a9b
|
[] |
no_license
|
DoESLiverpool/Liverpool-Makefest-2017
|
ac0e9df9a8b7fdb68d70d81985690ce25192d856
|
5590cb5f58a3aa403d49c54a6e25ec5e85358b10
|
refs/heads/master
| 2021-01-21T19:51:51.046353
| 2019-06-28T14:48:22
| 2019-06-28T14:48:22
| 92,169,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,995
|
py
|
#!/usr/bin/env python3
import csv, re, os, shutil, argparse, urllib
#import lxml.etree.ElementTree as ET
from lxml import etree
SVG_FOLDER = 'svgs'
PDF_FOLDER = 'pdfs'
def main():
    """Build one card PDF per spreadsheet row and concatenate them.

    Pipeline: convert the ods/xlsx input to CSV via headless LibreOffice,
    fill an SVG template per row, render each SVG to PDF with Inkscape,
    then merge all pages with pdftk into the requested outfile.
    """
    # parse some flags here
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--infile", help="ods or xlsx input file", default='makers.ods')
    parser.add_argument("-t", "--template", help="SVG template filename", default='template.svg')
    parser.add_argument("-o", "--outfile", help="PDF outfile name", default='makers.pdf')
    parser.add_argument("-c", "--keepcsv", help="Keep the temporary CSV file", default=False, action='store_true')
    parser.add_argument("-s", "--keepsvgs", help="Keep temporary SVG files", default=False, action='store_true')
    parser.add_argument("-p", "--keeppdfs", help="Keep temporary PDF files", default=False, action='store_true')
    parser.add_argument("-x", "--debugflag", help="process one entry and stop", default=False, action='store_true')
    args = parser.parse_args()
    infile = args.infile
    template = args.template
    outfile = args.outfile
    keep_pdfs = args.keeppdfs
    keep_svgs = args.keepsvgs
    keep_csv = args.keepcsv
    debugflag = args.debugflag
    # generate the csv file (comma separated, double quotetd, utf-8)
    # TODO: check if libreoffice is running, otherwise this generation fails silently
    # because the lockfile exists
    # NOTE(review): os.system with string concatenation breaks on filenames
    # containing spaces/shell metacharacters; subprocess.run([...]) is safer.
    os.system('libreoffice --headless --convert-to csv:"Text - txt - csv (StarCalc)":44,34,76,1,1 --outdir . ' + infile)
    csvfilename = re.sub(r'\.[a-zA-Z0-9]+$', '', infile) + '.csv'
    # check the required dirs exist
    if not os.path.exists(SVG_FOLDER):
        os.makedirs(SVG_FOLDER)
    if not os.path.exists(PDF_FOLDER):
        os.makedirs(PDF_FOLDER)
    # create each file from line of csv file
    with open(csvfilename) as csvfile:
        reader = csv.DictReader(csvfile)
        i = 1
        for row in reader:
            #print(row.keys())
            # generate the required variables to substitute into the SVG
            # Sanitise the title into a short, numbered, filesystem-safe name.
            filesafe_name = re.sub(r"[^\w\s]", '', row['{title}'])
            filesafe_name = re.sub(r"\s+", '-', filesafe_name)
            filesafe_name = str(i).zfill(2) + '-' + filesafe_name.strip()
            filesafe_name = (filesafe_name[:14]) if len(filesafe_name) > 14 else filesafe_name
            title = row['{title}'].strip()
            name = row['{name}'].strip()
            description = row['{description}'].replace('_x000D_','')
            # standardise *some* of the possible twitter and web inputs
            twitter = '@' + row['{twitter}'].strip().replace('http://','').replace('https://','').replace('twitter.com/','').lstrip('@').strip()
            website = row['{website}'].strip().replace('http://','').replace('https://','').replace('www.','').strip()
            # parse vars to standardise text input
            # replace the placeholders in the new file
            svg_file = SVG_FOLDER + '/' + filesafe_name + '.svg'
            # read the svg template file in
            #tree = ET.parse(template)
            #root = tree.getroot()
            tree = etree.parse(template)
            root = tree.getroot()
            # Walk every flowPara and swap {placeholder} text for row data,
            # shrinking the font size when the text is too long to fit.
            for para in root.findall('.//{http://www.w3.org/2000/svg}flowPara'):
                if para.text == '{title}':
                    para.text = title
                    if len(title) >= 34:
                        # reduce the text size
                        # NOTE(review): para.find('..') relies on lxml's parent
                        # support; plain ElementTree would return None here.
                        parent = para.find('..')
                        style_tag = parent.attrib['style']
                        # find the current font size
                        font_size_tag = re.search('font-size:[0-9.]+px;', style_tag).group()
                        font_size = float(re.search(r'[0-9.]+', font_size_tag).group())
                        if len(title) >= 50:
                            font_size = font_size*0.75
                        else:
                            font_size = font_size*0.85
                        style_tag = re.sub(r'font-size:[0-9.]+px;', 'font-size:' + str(font_size) + 'px;', style_tag)
                        parent.attrib['style'] = style_tag
                        print('title font-size: ' + str(font_size) + ' px;')
                        #print(parent.attrib['style'])
                elif para.text == '{name}':
                    para.text = name
                elif para.text == '{description}':
                    para.text = description
                    if len(description) >= 512:
                        # reduce the text size
                        parent = para.find('..')
                        style_tag = parent.attrib['style']
                        # find the current font size
                        font_size_tag = re.search('font-size:[0-9.]+px;', style_tag).group()
                        font_size = float(re.search(r'[0-9.]+', font_size_tag).group())
                        if len(description) > 1200:
                            font_size = font_size*0.65
                        elif len(description) > 800:
                            font_size = font_size*0.75
                        else:
                            font_size = font_size*0.85
                        style_tag = re.sub(r'font-size:[0-9.]+px', 'font-size:' + str(font_size) + 'px', style_tag)
                        parent.attrib['style'] = style_tag
                        print('description font-size: ' + str(font_size) + ' px;')
                elif para.text == '{twitter}':
                    if twitter != '@': # is empty
                        para.text = twitter
                    else:
                        para.text = ''
                elif para.text == '{website}':
                    # Drop a single trailing slash from the displayed URL.
                    if website[-1:] == '/':
                        para.text = website[:-1]
                    else:
                        para.text = website
            # write the adjusted svg
            tree.write(svg_file)
            pdf_file = PDF_FOLDER + '/' + filesafe_name + '.pdf'
            os.system('inkscape --without-gui --file ' + svg_file + ' --export-text-to-path --export-area-page --export-pdf ' + pdf_file)
            print('Created: ' + title)
            i+=1
            if debugflag == True:
                # Debug mode: dump the first processed entry and exit.
                print ('Filename: ' + filesafe_name)
                print ('Name: ' + name)
                print ('Title: ' + title + ' [' + str(len(title)) + ']')
                print ('Description: ' + description + ' [' + str(len(description)) + ']')
                print ('Twitter: ' + twitter)
                print ('Website: ' + website)
                quit()
    # concatenate all the pdf pages
    os.chdir(PDF_FOLDER)
    os.system('pdftk *.pdf output ../' + outfile)
    os.chdir('..')
    # cleanup temporary files
    if keep_csv == False:
        os.remove(csvfilename)
    if keep_svgs == False:
        shutil.rmtree(SVG_FOLDER)
    if keep_pdfs == False:
        shutil.rmtree(PDF_FOLDER)
if __name__ == "__main__":
    main()
|
[
"patrick@defproc.co.uk"
] |
patrick@defproc.co.uk
|
0aca43944e8543cb2dde41c31bfa4b1db2c4dc93
|
969be4b7959617a4def52267595ed22a67caeaaa
|
/wsgi/closetBackend/Invetory/views.py
|
f0b1837c8705c2e6196fc3f5e170a4867ea44c0a
|
[] |
no_license
|
pjryan93/closet
|
0589d493958e5f9a63c760ebc4c52588622da913
|
04dcf33f0991c547cc27b7214a81ab0d3f149ff3
|
refs/heads/master
| 2021-01-10T06:15:36.359576
| 2016-03-20T22:02:52
| 2016-03-20T22:02:52
| 54,337,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,952
|
py
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic import TemplateView
from django.template.context_processors import csrf
from django.shortcuts import render_to_response
from django.template import Context
from django.template.loader import get_template
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.contrib.auth import logout
from django.views.decorators.csrf import csrf_protect
from django.contrib.auth.decorators import user_passes_test
from django.conf import settings
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import TokenAuthentication
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.authtoken.models import Token
from models import *
from serializers import *
from rest_framework.renderers import JSONRenderer
import base64, uuid
import cStringIO
import sys
from django.core.files.base import ContentFile
from base64 import b64decode
from django.core.files.images import ImageFile
class HomeView(APIView):
    """Token-authenticated ping endpoint: GET/POST return a fixed detail message."""
    authentication_classes = (TokenAuthentication,)
    def post(self,request,format=None):
        return Response({'detail': "I suppose you are authenticated"})
    def get(self,request,format=None):
        # Debug print of the authenticated user (Python 2 print statement).
        print request.user
        return Response({'detail': "I suppose you are authenticated"})
class CreateCloset(APIView):
    """List the user's closets (GET) or create/update a closet (POST)."""
    authentication_classes = (TokenAuthentication,)
    def get(self,request,format=None):
        # Serialize all of the user's closets plus the defaults of the first.
        # NOTE(review): closets[0] raises IndexError when the user has none.
        closets = Closet.objects.filter(owner=request.user)
        serializer = ClosetSerializer(closets, many=True)
        json = JSONRenderer().render(serializer.data)
        defaults = DefaultAmounts.objects.filter(current_closet=closets[0].id)
        serializer = ClosetDefaultsSerializer(defaults,many=False)
        json_defaults = JSONRenderer().render(serializer.data)
        print 'here'
        return Response({'closets': json,'defaults':json_defaults})
    def post(self,request,format=None):
        # Create a new closet, or update an existing one when closet_id given.
        user = request.user
        print request.data
        print request.data['name']
        print 'done'
        if 'name' in request.data and 'gender' in request.data and 'age' in request.data:
            closetName = request.data['name']
            age = request.data['age']
            gender = request.data['gender']
            # NOTE(review): fallback is the literal 'Male', not 'M'/'F' — any
            # gender string other than the two below is stored un-cleaned.
            cleaned_gender= 'Male'
            if gender == "Male":
                cleaned_gender = "M"
            elif gender == "Female":
                cleaned_gender = "F"
            if 'closet_id' in request.data:
                # Update path.  NOTE(review): stores the raw gender in sex,
                # while the create path stores cleaned_gender — inconsistent.
                current_closet = Closet.objects.get(id = request.data["closet_id"])
                current_closet.name = closetName
                current_closet.age = age
                current_closet.sex = gender
                current_closet.save()
                return Response({'success': "updated",'name':closetName,'id':current_closet.id})
            elif Closet.objects.filter(owner=request.user,name = closetName).count() == 0:
                # Create path: new closet plus its default amounts and sizes.
                new_closet = Closet(owner = request.user,name = closetName,age = age,sex = cleaned_gender)
                new_closet.save()
                defaults = DefaultAmounts(current_closet =new_closet )
                defaults.save()
                sizes = DefaultSizes(current_closet=new_closet)
                sizes.setAll(age)
                print 'created'
                return Response({'success': "created",'name':closetName,'id':new_closet.id})
            else:
                return Response({'failure':'You have a closet with this name'})
        return Response({'failure':'not created'})
class ClosetItem(APIView):
    """Return closet contents (GET) or add a clothing item with a photo (POST)."""
    authentication_classes = (TokenAuthentication,)
    def get(self,request,format=None):
        print request.body
        print request.user
        closets = Closet.objects.filter(owner=request.user)
        if len(closets) == 0:
            # No closets yet: respond with an empty defaults stub.
            defaults = DefaultAmounts()
            serializer = ClosetDefaultsSerializer(defaults,many=False)
            json = JSONRenderer().render(serializer.data)
            return Response({'closets': json,'message':'no closets'})
        else:
            return self.getResponseNoId(request)
        # NOTE(review): everything below is unreachable — both branches above
        # return.  It duplicates getResponseNoId() and could be deleted.
        if 'id' in request.GET:
            closets = Closet.objects.filter(owner=request.user)[0]
            serializer1 = ClosetSerializer(closets, many=False)
            json_defaults = JSONRenderer().render(serializer1.data)
            defaults = DefaultAmounts.objects.filter(current_closet=closets.id)
            serializer = ClosetDefaultsSerializer(defaults,many=True)
            json = JSONRenderer().render(serializer.data)
            clothing_items = ClothingItem.objects.filter(current_closet=closets.id)
            item_serializer = ItemSerializer(clothing_items,many=True)
            item_json = JSONRenderer().render(item_serializer.data)
            size_defaults= DefaultSizes.objects.filter(current_closet=closets.id)
            if len(size_defaults) == 0:
                default_size = DefaultSizes(current_closet = closets)
                default_size .save()
                size_defaults= DefaultSizes.objects.filter(current_closet=closets.id)
            size_serializer = ItemSizeSerializer(size_defaults,many=True)
            size_json = JSONRenderer().render(size_serializer.data)
            print 'size_json'
            print size_json
            return Response({'closets': json, 'defaults':json_defaults,'items':item_json,'sizes':size_json,'message':'success'})
    def getResponseNoId(self,request):
        # Serialize the user's first closet, its defaults, items and sizes,
        # creating a DefaultSizes row on first access when missing.
        closets = Closet.objects.filter(owner=request.user)[0]
        serializer1 = ClosetSerializer(closets, many=False)
        json_defaults = JSONRenderer().render(serializer1.data)
        defaults = DefaultAmounts.objects.filter(current_closet=closets.id)
        serializer = ClosetDefaultsSerializer(defaults,many=True)
        json = JSONRenderer().render(serializer.data)
        clothing_items = ClothingItem.objects.filter(current_closet=closets.id)
        item_serializer = ItemSerializer(clothing_items,many=True)
        item_json = JSONRenderer().render(item_serializer.data)
        size_defaults= DefaultSizes.objects.filter(current_closet=closets.id)
        if len(size_defaults) == 0:
            default_size = DefaultSizes(current_closet = closets)
            default_size .save()
            size_defaults= DefaultSizes.objects.filter(current_closet=closets.id)
        size_serializer = ItemSizeSerializer(size_defaults,many=True)
        size_json = JSONRenderer().render(size_serializer.data)
        print 'size_json'
        print size_json
        return Response({'closets': json, 'defaults':json_defaults,'items':item_json,'sizes':size_json,'message':'success'})
    def post(self,request,format=None):
        # Decode the base64 photo and create a ClothingItem in the closet.
        # NOTE(review): file_name is built from x.id BEFORE the first save(),
        # so it is always 'None.png'; and the response always reports
        # {'failure': 'no id'} even when the item was saved successfully.
        item_name = request.data['name']
        item_type = request.data['type']
        item_size = request.data['size']
        closet_id = request.data['closet_id']
        photoDataString = request.data['photoData']
        image_output = cStringIO.StringIO()
        image_output.write(photoDataString.decode('base64'))
        image_output.read()
        image_output.seek(0) # Write decoded image to buffer
        current_closet = Closet.objects.get(id=closet_id)
        x = ClothingItem(name=item_name,clothing_type = item_type,size=item_size,current_closet=current_closet)
        file_name = str(x.id) + '.png'
        image_data = b64decode(photoDataString)
        uploadedImage = ContentFile(image_data,file_name)
        print uploadedImage
        x.save()
        x.item_image = uploadedImage
        x.save()
        print x.id
        return Response({'failure': 'no id' })
|
[
"pjryan@my.okcu.edu"
] |
pjryan@my.okcu.edu
|
6ec1b92adbf29c397050d278e8c4ddd379f0e719
|
874c45e64e28ec63829b22738c3e7744dac1aeb7
|
/test/rtt/utils.py
|
8edbf72b300db2b646aba48fc6af9dc0080c5cc6
|
[] |
no_license
|
zenokoller/rtt-timestamp-vs-spinbit
|
75372d04a29dc93c6161d61516d0cdb72e684bfc
|
a2aedc47dd8c48cdf9771e8c747dff422f97fa31
|
refs/heads/master
| 2020-03-20T18:32:38.876614
| 2018-07-06T15:39:14
| 2018-07-06T15:39:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
import pandas as pd
def load_dataframe(path: str, key: str) -> pd.DataFrame:
    """Open the HDF5 store at *path* and return the DataFrame saved under *key*."""
    store = pd.HDFStore(path)
    try:
        return store[key]
    finally:
        # Mirrors the context-manager behaviour: the store is always closed.
        store.close()
|
[
"zeno.koller@gmail.com"
] |
zeno.koller@gmail.com
|
670f081b1ef8d14da851e983a53c42b304f46728
|
ace2dc6096eb0b7a540f28e57df8459adafad6ed
|
/Advanced Algorithms and Complexity/Programming-Assignment-4/circuit_design/circuit_design.py
|
e2ec8b4f2aabddcc3f0a3899603eacd626bc5794
|
[] |
no_license
|
tdslivensky/AlgorithmsAndDataStructures
|
6ad2c28204600b1f8f72228c13d29d2c3c9437c9
|
e8b1011ab5210bc52854f911e2a7e41a83b36740
|
refs/heads/master
| 2023-01-11T16:32:49.399654
| 2020-11-13T13:49:18
| 2020-11-13T13:49:18
| 289,050,279
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,016
|
py
|
# python3
# Read a 2-SAT instance from stdin: n variables and m clauses on the first
# line, then one clause (two signed integer literals) per line.
n, m = map(int, input().split())
clauses = [ list(map(int, input().split())) for i in range(m) ]
# This solution tries all possible 2^n variable assignments.
# It is too slow to pass the problem.
# Implement a more efficient algorithm here.
def isSatisfiable():
    """Brute-force SAT check over the module-level ``n`` and ``clauses``.

    Returns the first satisfying 0/1 assignment (as a list indexed by
    variable-1), or None.  A literal v is considered satisfied when
    assignment[|v|-1] == (v < 0), matching the original encoding.
    """
    for mask in range(2 ** n):
        assignment = [(mask >> bit) & 1 for bit in range(n)]
        # A clause holds when either of its first two literals is satisfied.
        if all(
            any(assignment[abs(lit) - 1] == (lit < 0) for lit in clause[:2])
            for clause in clauses
        ):
            return assignment
    return None
result = isSatisfiable()
if result is None:
    print("UNSATISFIABLE")
else:
    print("SATISFIABLE")
    # A truthy result[i] is printed as the negated literal -(i+1), falsy as i+1.
    print(" ".join(str(-i-1 if result[i] else i+1) for i in range(n)))
|
[
"tdslivensky@gmail.com"
] |
tdslivensky@gmail.com
|
80aa265104445e40404828f18f155d2af0bd25b2
|
c2285444392e8eb1904255b82f62b49f317aca07
|
/scripts/load_tags.py
|
59a372e1337682c26b6ce0ab3ed0e6c41f62fb28
|
[] |
no_license
|
tlskr/tagger
|
ced0ed36437bb29fe488eb2fae8b03314c5a9558
|
1230a1f36b91bd7ef2d57840dcfa013ca07e5a4a
|
refs/heads/master
| 2022-12-16T00:44:36.798298
| 2018-08-17T13:23:02
| 2018-08-17T13:26:37
| 145,027,493
| 0
| 0
| null | 2022-12-08T02:46:37
| 2018-08-16T18:55:02
|
Python
|
UTF-8
|
Python
| false
| false
| 545
|
py
|
#!/usr/bin/env python
# pylint: disable=wrong-import-position
"""
Script loading tags from JSON file to data
Invocation (from project root)
./scripts/load_tags.py
"""
import os
import sys
import gflags
sys.path.append(os.getcwd())
from scripts.main_gflag import main_gflagged
from tagger.load_json import insert_tags
FLAGS = gflags.FLAGS
# --datafile: path to the JSON file whose tags should be inserted.
gflags.DEFINE_string(
    "datafile", None, "file holding json data"
)
def main():
    """Insert the tags from --datafile into the data store."""
    insert_tags(FLAGS.datafile)
if __name__ == "__main__":
    # main_gflagged parses the gflags from argv before delegating to main().
    sys.exit(main_gflagged(sys.argv, main))
|
[
"gordon@practicalhorseshoeing.com"
] |
gordon@practicalhorseshoeing.com
|
da731a7f36e0c4abfe56e401d84517f945860e52
|
14d5bccb090070fc212651d017b71a1f2c6fadce
|
/mysite/myapi/models.py
|
d29e391516d08b76c141036c24658c027c6e2666
|
[] |
no_license
|
ryyvntong/SuperheroAPI
|
223c78abd0a46a645ce36a871a7df5008dac6d02
|
6dd2e63a44bf7fc251abacd90cf3782fafb24e9d
|
refs/heads/master
| 2020-12-31T21:32:41.788871
| 2020-02-07T22:02:36
| 2020-02-07T22:02:36
| 239,028,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
from django.db import models
# Create your models here.
class Hero(models.Model):
    """Django model for a superhero record served by the API."""
    name = models.CharField(max_length=60)
    alias = models.CharField(max_length=60)
    def __str__(self):
        # String form (admin/list displays) is just the hero's name.
        return self.name
|
[
"56896048+ryyvntong@users.noreply.github.com"
] |
56896048+ryyvntong@users.noreply.github.com
|
f4b58e180e2f6563fbc03f28f8137aab6bf58d1a
|
f9d9afad08272fc077f9809eedb8e920fc0fe883
|
/Observation/Task.py
|
a53536d6b3a8175d44f7420b06921eebde12790a
|
[] |
no_license
|
firekg/is-it-optimal
|
1c640a350d25057cf7b32602467d063e5ea65799
|
202b95ae370db7170a12ee942d6e1f0be29cca43
|
refs/heads/master
| 2020-05-01T13:31:35.441495
| 2019-10-27T15:34:49
| 2019-10-27T15:34:49
| 177,493,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,205
|
py
|
import numpy
import Observe
import Init
import copy
import Teach
import Learn
# Eq. 6a), 6b)
def Knowledgeability_Task(hypo, feature, label, p_teacher_xy_h, p_teacher_x_h, p_y_xh, delta_g_h, phx, num_iteration):
    """Run num_iteration alternating teacher/learner table updates (Eq. 6a/6b).

    Returns the learner table produced by Learn.Init_step and updated in place
    by each Teach/Learn round.
    """
    p_learner_h_xy = Learn.Init_step(hypo, feature, label, p_y_xh, phx)
    for _ in range(num_iteration):
        # Teacher update (Eq. 6a) followed by learner update (Eq. 6b).
        Teach.K_PTeacher_xh(hypo, feature, label, p_teacher_xy_h, p_teacher_x_h, p_learner_h_xy, delta_g_h)
        Learn.K_PLearner_h_xy(hypo, feature, label, p_y_xh, p_learner_h_xy, p_teacher_x_h, phx)
    return p_learner_h_xy
# hypo_map: The map of the hypothesis
# return: a map from hypothesis to observation * probability
def Probability_Task(hypo_table, number_hypo, number_feature, number_label, p_teacher_x_h, knowledgeability, iter=100):
    """For each candidate true hypothesis, simulate sequential observations
    and record the learner's belief over hypotheses after every observation.

    Returns a numpy array of shape (number_feature + 1, number_hypo,
    number_hypo): axis 0 = observation count, axis 1 = assumed-true
    hypothesis, axis 2 = belief assigned to each hypothesis.
    NOTE(review): the parameter name ``iter`` shadows the Python builtin.
    """
    feature_set = []
    # New knowledgeability table
    # Axis 1: index of observations
    # Axis 2~3: the delta knowledegeability table
    new_knowledgeability_delta_table = numpy.zeros((number_feature + 1, number_hypo, number_hypo), dtype=float)
    # Assume there is a true hypo = hypo
    # Get all posible hypothesis in the hypo map
    for hypo_idx in range(len(hypo_table)):
        # Get the observable feature set (refilled for every hypothesis; the
        # while-loop below drains it to empty each time).
        for f in range(number_feature):
            feature_set.append(f)
        obs = 0
        # Set the environment
        # NOTE(review): this unpacking re-binds p_teacher_x_h, discarding the
        # value passed in by the caller — confirm that is intended.
        num_hypo, num_feature, num_label, p_teacher_x_h, p_teacher_xy_h, p_learner_h_xy, p_y_xh, delta_g_h, phx = Init.Set(hypo_table, knowledgeability=knowledgeability)
        while True:
            # Record the learner's belief before this observation.
            for h in range(number_hypo):
                new_knowledgeability_delta_table[obs][hypo_idx][h] = phx[h]
            # Get the PT
            p_learner_h_xy = Knowledgeability_Task(num_hypo, num_feature, num_label, p_teacher_xy_h, p_teacher_x_h, p_y_xh, delta_g_h, phx, iter)
            # Choose a feature
            feature = Observe.Get_Feature(feature_set, hypo_idx, p_teacher_x_h)
            obs += 1
            prob_find, true_label = Observe.Observe(hypo_table, hypo_idx, feature, p_learner_h_xy)
            # Assign the p_learner_h_xy to phx
            for h in range(number_hypo):
                phx[h] = p_learner_h_xy[h][feature][true_label]
            # remove the feature in the feature set,
            # make the same feature only be observed once
            feature_set.remove(feature)
            if (len(feature_set) == 0):
                # All features observed: record the final belief and stop.
                for h in range(number_hypo):
                    new_knowledgeability_delta_table[obs][hypo_idx][h] = phx[h]
                break
    return new_knowledgeability_delta_table
def Average_Hypo(prob_map, number_hypos, number_observations):
    """Return, per observation index, the mean probability across hypotheses.

    prob_map maps hypothesis index -> sequence of per-observation values.
    """
    averages = []
    for obs_idx in range(number_observations):
        total = 0
        for hypo_index in prob_map:
            total += prob_map[hypo_index][obs_idx]
        averages.append(total / number_hypos)
    return averages
|
[
"noreply@github.com"
] |
noreply@github.com
|
7a1df63cd632b5b6f4ccaeaeee6eff6164e582d7
|
bffcfa6103ee72d7ac394c14aa861e60616c7ab8
|
/pytorch3d/datasets/__init__.py
|
1687213018a29e5d75a4c5490368d52e5f4d893a
|
[
"BSD-3-Clause"
] |
permissive
|
Amit2016-17/pytorch3d
|
ccac686bc1a3caeb4bd0f38519fbcb83f816501d
|
7944d24d4872bdb01b821450840049e28d0ce12b
|
refs/heads/master
| 2022-11-25T10:40:14.409087
| 2020-08-05T13:58:53
| 2020-08-05T14:00:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .r2n2 import R2N2, BlenderCamera
from .shapenet import ShapeNetCore
from .utils import collate_batched_meshes
__all__ = [k for k in globals().keys() if not k.startswith("_")]
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
d34828fbd987211cc81fd1989af985a0e9374a74
|
128104139a52489f21df67918fae50b647f27cbe
|
/printswapneg.py
|
e3d6c3796765ea2755a55619b285b3480622b3fb
|
[] |
no_license
|
AdamRichey/Python
|
aabb254742cf46c4d958b8aa55c26bdca100cdde
|
00f1be1e235f6f791c624e3496b78d8863355e81
|
refs/heads/master
| 2020-03-23T05:54:10.976815
| 2018-07-16T18:15:12
| 2018-07-16T18:15:12
| 141,174,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
arr = [-1]

def neg(values):
    """Replace every negative number in *values* with the string "dojo", in place,
    then print the mutated list.  Returns None (the original printed inside the
    function too, so the module-level print shows None).

    Fix: the original looped ``for i in arr`` and indexed ``arr[i]`` — it used
    the element VALUES as indices, rewriting the wrong slots and raising a
    TypeError in Python 3 once an inserted "dojo" string gets compared with 0.
    enumerate() pairs each index with its value correctly.
    """
    for index, value in enumerate(values):
        if value < 0:
            values[index] = "dojo"
    print(values)

print(neg(arr))
|
[
"adamrichey88@gmail.com"
] |
adamrichey88@gmail.com
|
81adc9b89c325fae8eb969a4530b965c9f2ee337
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-ice/aliyunsdkice/request/v20201109/DescribeQueryConfigsRequest.py
|
d4de87a4f55304fb57408fb816c6f72a3b5b2c81
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821
| 2023-08-04T06:00:29
| 2023-08-04T06:00:29
| 39,558,861
| 1,080
| 721
|
NOASSERTION
| 2023-09-14T08:51:06
| 2015-07-23T09:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,437
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkice.endpoint import endpoint_data
class DescribeQueryConfigsRequest(RpcRequest):
    """RPC request wrapper for the ICE DescribeQueryConfigs API (2020-11-09)."""
    def __init__(self):
        RpcRequest.__init__(self, 'ICE', '2020-11-09', 'DescribeQueryConfigs','ice')
        self.set_method('POST')
        # Use the endpoint data bundled with the SDK when the attributes exist.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
    def get_Type(self): # String
        return self.get_query_params().get('Type')
    def set_Type(self, Type): # String
        self.add_query_param('Type', Type)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
2a4a81a565fab19cc75a574eb4d85c9994bb0767
|
c67f2d0677f8870bc1d970891bbe31345ea55ce2
|
/zippy/lib-python/3/test/test_file.py
|
e9a1ceeeb8a48606fa1ad65140adac2fd3689d05
|
[
"BSD-3-Clause"
] |
permissive
|
securesystemslab/zippy
|
a5a1ecf5c688504d8d16128ce901406ffd6f32c2
|
ff0e84ac99442c2c55fe1d285332cfd4e185e089
|
refs/heads/master
| 2022-07-05T23:45:36.330407
| 2018-07-10T22:17:32
| 2018-07-10T22:17:32
| 67,824,983
| 324
| 27
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,928
|
py
|
import sys
import os
import unittest
from array import array
from weakref import proxy
import io
import _pyio as pyio
from test.support import TESTFN, run_unittest, gc_collect
from collections import UserList
class AutoFileTests(unittest.TestCase):
    """File-object tests run against a temp file created per test.

    NOTE(review): relies on a concrete subclass providing ``self.open``
    (e.g. io.open vs _pyio.open) — confirm with the subclasses below.
    """
    # file tests for which a test file is automatically set up
    def setUp(self):
        self.f = self.open(TESTFN, 'wb')
    def tearDown(self):
        if self.f:
            self.f.close()
        os.remove(TESTFN)
    def testWeakRefs(self):
        # verify weak references
        p = proxy(self.f)
        p.write(b'teststring')
        self.assertEqual(self.f.tell(), p.tell())
        self.f.close()
        self.f = None
        gc_collect()
        # A dead proxy must raise ReferenceError on any attribute access.
        self.assertRaises(ReferenceError, getattr, p, 'tell')
    def testAttributes(self):
        # verify expected attributes exist
        f = self.f
        f.name     # merely shouldn't blow up
        f.mode     # ditto
        f.closed   # ditto
    def testReadinto(self):
        # verify readinto
        self.f.write(b'12')
        self.f.close()
        a = array('b', b'x'*10)
        self.f = self.open(TESTFN, 'rb')
        n = self.f.readinto(a)
        self.assertEqual(b'12', a.tobytes()[:n])
    def testReadinto_text(self):
        # verify readinto refuses text files
        a = array('b', b'x'*10)
        self.f.close()
        self.f = self.open(TESTFN, 'r')
        if hasattr(self.f, "readinto"):
            self.assertRaises(TypeError, self.f.readinto, a)
    def testWritelinesUserList(self):
        # verify writelines with instance sequence
        l = UserList([b'1', b'2'])
        self.f.writelines(l)
        self.f.close()
        self.f = self.open(TESTFN, 'rb')
        buf = self.f.read()
        self.assertEqual(buf, b'12')
    def testWritelinesIntegers(self):
        # verify writelines with integers
        self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
    def testWritelinesIntegersUserList(self):
        # verify writelines with integers in UserList
        l = UserList([1,2,3])
        self.assertRaises(TypeError, self.f.writelines, l)
    def testWritelinesNonString(self):
        # verify writelines with non-string object
        class NonString:
            pass
        self.assertRaises(TypeError, self.f.writelines,
                          [NonString(), NonString()])
    def testErrors(self):
        f = self.f
        self.assertEqual(f.name, TESTFN)
        self.assertTrue(not f.isatty())
        self.assertTrue(not f.closed)
        if hasattr(f, "readinto"):
            # File is open for writing, so reads must fail.
            self.assertRaises((IOError, TypeError), f.readinto, "")
        f.close()
        self.assertTrue(f.closed)
    def testMethods(self):
        # Every listed method must raise ValueError once the file is closed.
        methods = [('fileno', ()),
                   ('flush', ()),
                   ('isatty', ()),
                   ('__next__', ()),
                   ('read', ()),
                   ('write', (b"",)),
                   ('readline', ()),
                   ('readlines', ()),
                   ('seek', (0,)),
                   ('tell', ()),
                   ('write', (b"",)),
                   ('writelines', ([],)),
                   ('__iter__', ()),
                   ]
        methods.append(('truncate', ()))
        # __exit__ should close the file
        self.f.__exit__(None, None, None)
        self.assertTrue(self.f.closed)
        for methodname, args in methods:
            method = getattr(self.f, methodname)
            # should raise on closed file
            self.assertRaises(ValueError, method, *args)
        # file is closed, __exit__ shouldn't do anything
        self.assertEqual(self.f.__exit__(None, None, None), None)
        # it must also return None if an exception was given
        try:
            1/0
        except:
            self.assertEqual(self.f.__exit__(*sys.exc_info()), None)
    def testReadWhenWriting(self):
        self.assertRaises(IOError, self.f.read)
class CAutoFileTests(AutoFileTests):
open = io.open
class PyAutoFileTests(AutoFileTests):
open = staticmethod(pyio.open)
class OtherFileTests(unittest.TestCase):
def testModeStrings(self):
# check invalid mode strings
for mode in ("", "aU", "wU+"):
try:
f = self.open(TESTFN, mode)
except ValueError:
pass
else:
f.close()
self.fail('%r is an invalid file mode' % mode)
def testStdin(self):
# This causes the interpreter to exit on OSF1 v5.1.
if sys.platform != 'osf1V5':
self.assertRaises((IOError, ValueError), sys.stdin.seek, -1)
else:
print((
' Skipping sys.stdin.seek(-1), it may crash the interpreter.'
' Test manually.'), file=sys.__stdout__)
self.assertRaises((IOError, ValueError), sys.stdin.truncate)
def testBadModeArgument(self):
# verify that we get a sensible error message for bad mode argument
bad_mode = "qwerty"
try:
f = self.open(TESTFN, bad_mode)
except ValueError as msg:
if msg.args[0] != 0:
s = str(msg)
if TESTFN in s or bad_mode not in s:
self.fail("bad error message for invalid mode: %s" % s)
# if msg.args[0] == 0, we're probably on Windows where there may be
# no obvious way to discover why open() failed.
else:
f.close()
self.fail("no error for invalid mode: %s" % bad_mode)
def testSetBufferSize(self):
# make sure that explicitly setting the buffer size doesn't cause
# misbehaviour especially with repeated close() calls
for s in (-1, 0, 1, 512):
try:
f = self.open(TESTFN, 'wb', s)
f.write(str(s).encode("ascii"))
f.close()
f.close()
f = self.open(TESTFN, 'rb', s)
d = int(f.read().decode("ascii"))
f.close()
f.close()
except IOError as msg:
self.fail('error setting buffer size %d: %s' % (s, str(msg)))
self.assertEqual(d, s)
def testTruncateOnWindows(self):
# SF bug <http://www.python.org/sf/801631>
# "file.truncate fault on windows"
os.unlink(TESTFN)
f = self.open(TESTFN, 'wb')
try:
f.write(b'12345678901') # 11 bytes
f.close()
f = self.open(TESTFN,'rb+')
data = f.read(5)
if data != b'12345':
self.fail("Read on file opened for update failed %r" % data)
if f.tell() != 5:
self.fail("File pos after read wrong %d" % f.tell())
f.truncate()
if f.tell() != 5:
self.fail("File pos after ftruncate wrong %d" % f.tell())
f.close()
size = os.path.getsize(TESTFN)
if size != 5:
self.fail("File size after ftruncate wrong %d" % size)
finally:
f.close()
os.unlink(TESTFN)
def testIteration(self):
# Test the complex interaction when mixing file-iteration and the
# various read* methods.
dataoffset = 16384
filler = b"ham\n"
assert not dataoffset % len(filler), \
"dataoffset must be multiple of len(filler)"
nchunks = dataoffset // len(filler)
testlines = [
b"spam, spam and eggs\n",
b"eggs, spam, ham and spam\n",
b"saussages, spam, spam and eggs\n",
b"spam, ham, spam and eggs\n",
b"spam, spam, spam, spam, spam, ham, spam\n",
b"wonderful spaaaaaam.\n"
]
methods = [("readline", ()), ("read", ()), ("readlines", ()),
("readinto", (array("b", b" "*100),))]
try:
# Prepare the testfile
bag = self.open(TESTFN, "wb")
bag.write(filler * nchunks)
bag.writelines(testlines)
bag.close()
# Test for appropriate errors mixing read* and iteration
for methodname, args in methods:
f = self.open(TESTFN, 'rb')
if next(f) != filler:
self.fail, "Broken testfile"
meth = getattr(f, methodname)
meth(*args) # This simply shouldn't fail
f.close()
# Test to see if harmless (by accident) mixing of read* and
# iteration still works. This depends on the size of the internal
# iteration buffer (currently 8192,) but we can test it in a
# flexible manner. Each line in the bag o' ham is 4 bytes
# ("h", "a", "m", "\n"), so 4096 lines of that should get us
# exactly on the buffer boundary for any power-of-2 buffersize
# between 4 and 16384 (inclusive).
f = self.open(TESTFN, 'rb')
for i in range(nchunks):
next(f)
testline = testlines.pop(0)
try:
line = f.readline()
except ValueError:
self.fail("readline() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("readline() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
buf = array("b", b"\x00" * len(testline))
try:
f.readinto(buf)
except ValueError:
self.fail("readinto() after next() with supposedly empty "
"iteration-buffer failed anyway")
line = buf.tobytes()
if line != testline:
self.fail("readinto() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
try:
line = f.read(len(testline))
except ValueError:
self.fail("read() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("read() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
try:
lines = f.readlines()
except ValueError:
self.fail("readlines() after next() with supposedly empty "
"iteration-buffer failed anyway")
if lines != testlines:
self.fail("readlines() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
f.close()
# Reading after iteration hit EOF shouldn't hurt either
f = self.open(TESTFN, 'rb')
try:
for line in f:
pass
try:
f.readline()
f.readinto(buf)
f.read()
f.readlines()
except ValueError:
self.fail("read* failed after next() consumed file")
finally:
f.close()
finally:
os.unlink(TESTFN)
class COtherFileTests(OtherFileTests):
open = io.open
class PyOtherFileTests(OtherFileTests):
open = staticmethod(pyio.open)
def test_main():
# Historically, these tests have been sloppy about removing TESTFN.
# So get rid of it no matter what.
try:
run_unittest(CAutoFileTests, PyAutoFileTests,
COtherFileTests, PyOtherFileTests)
finally:
if os.path.exists(TESTFN):
os.unlink(TESTFN)
if __name__ == '__main__':
test_main()
|
[
"thezhangwei@gmail.com"
] |
thezhangwei@gmail.com
|
ebbf514c6ec2b30cad1b8c147dfef1377c951533
|
82e5c56806e75a51097a51cb721df1ef07376e6f
|
/initial_freia/fun.py
|
6b89a7f268c642c02b65a655544d8257951ff747
|
[] |
no_license
|
hahahasan/MSc-pre-process
|
77a2fcdf795830d2b37b2d538e3638d29bd55e80
|
ba69d58bc1f9dd7becea431ce9a328b2843d8b66
|
refs/heads/master
| 2020-03-21T05:43:07.501035
| 2018-06-21T14:14:30
| 2018-06-21T14:14:30
| 138,175,816
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 697
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 13 11:06:10 2018
@author: hm1234
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
rng = np.linspace(0.01, np.sqrt(2), 500)
rng2 = np.arange(10,500)
max_rng = np.amax(rng2)
hi = []
for j in rng2:
n = j
tmp = []
for i in rng:
a = i
b = '**a'
c = str(a) + n*b
tmp.append(eval(c))
if j % 23 == 0:
print('{0:1.0f}'.format(j/max_rng *100), '%')
hi.append(tmp)
print('Done!')
X_rng, Y_rng = np.meshgrid(rng2, rng)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X_rng, Y_rng, np.array(hi).T)
|
[
"hm1234@york.ac.uk"
] |
hm1234@york.ac.uk
|
7aaee542c16692bd7e6bafbc73a9debf04054e3d
|
504dbb060f00d373278f2f210af39fb89a27916b
|
/karyawan.py
|
5dad1d91ebbe692dd9127b5753080694a65460bd
|
[] |
no_license
|
Aldidwi53/projek-PBO
|
e56f1b764c632f6d59a550feadac3583b35e1345
|
7232feb29e5e81e4e82241105bfd41b85ec6d21b
|
refs/heads/main
| 2023-02-06T02:01:53.925205
| 2021-01-02T09:09:28
| 2021-01-02T09:09:28
| 324,373,345
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
import user
import sqlite3
class Karyawan(user.User):
def __init__(self, email, password, nama, gender, alamat, telepon):
super().__init__(email, password, nama, gender, alamat, telepon)
|
[
"noreply@github.com"
] |
noreply@github.com
|
d95df558fb04770dfcce9d62774614c18e3bfa25
|
401e455157f74b28b0ad12df6bdae3c159888db6
|
/tiviapp/apis/twitter_rest.py
|
2711d64b25094018a1871401c9eeafc7e1407e66
|
[] |
no_license
|
AnilSener/tivi
|
0624e356524ae03fddd771a43a174211b8e1909d
|
701e1ab2948d898fe12999bc5be954f61a3f4e99
|
refs/heads/master
| 2021-01-01T06:04:31.707925
| 2015-07-03T15:34:36
| 2015-07-03T15:34:36
| 38,221,946
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,230
|
py
|
__author__ = 'anil'
from twython import TwythonStreamer,Twython,TwythonError
from tivi.celery import app
from tiviapp.models import *
import time
####################################################################
consumer_key="Vs7V2k4vPWMMyTFqLzqPkM6wE"
consumer_secret="aWNRzh74LUT1fuW35y6VzRDtvuimQ4LjFGMnMMkEXI0Y9LSpkf"
access_token="258113369-63Y2Cqr9q0Bo02WU4AS8Bjiv3JnHP2Us7HimK26G"
access_token_secret="Z4Sf9EyLbOJ4jPI5WlZPZUyv3OwluuZXiKXn0pamk8Dly"
###################################################################
twitter = Twython(consumer_key, consumer_secret,access_token,access_token_secret)
@app.task()
def exec_User_Follows():
twitter_users=TwitterUser.objects.all()
if len(twitter_users)==0:
print "No users available Wait 5 minutes for the next API call"
time.sleep(300)
else:
for i,user in enumerate(twitter_users):
print user.userName,"!!!"
try:
print "!!!TIME FOR FOLLOWERS!!!"
followers=twitter.get_followers_list(screen_name=user.userName,include_user_entities=True,count=200)
for f in followers:
print f
except TwythonError as e:
print e.message
|
[
"anil_sener@yahoo.com"
] |
anil_sener@yahoo.com
|
cc2cf6e6ef6fb653f7cb8de7cc061e8c3a300512
|
34192b4a3964c6dec6ff34bddc06c3690c02bf98
|
/Social/feed/models.py
|
23c6456108e74c85bdff44c8203513abb97e3ed5
|
[] |
no_license
|
Aditya-23/The-Social
|
6806882f8688f577047d79d0a02b3092cf3c4a4b
|
aea48c2ef816196839cad75f9dced50fa85af6f2
|
refs/heads/master
| 2022-10-22T11:33:22.849389
| 2019-08-11T19:31:07
| 2019-08-11T19:31:07
| 201,806,475
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,468
|
py
|
from django.db import models
from django.db.models import Q
# Create your models here.
class Question_query(models.query.QuerySet):
def search(self, query = None):
return self.filter(
Q(question__icontains = query)
)
class Search(models.Manager):
def get_queryset(self, query = None):
return Question_query(self.model, using = self._db)
def search(self, query = None):
return self.get_queryset().search(query)
class user(models.Model):
username = models.CharField(max_length = 20, blank = False)
password = models.CharField(max_length = 20, blank = False)
firstname = models.CharField(max_length = 40, blank = False)
lastname = models.CharField(max_length = 40, blank = False)
email = models.EmailField(max_length = 40, blank = False)
#search_user = Search()
def __str__(self):
return self.username
class Question(models.Model):
question = models.CharField(max_length = 100, blank = False)
asked_user = models.ForeignKey('user', on_delete = models.CASCADE)
datetime = models.DateTimeField(auto_now = True)
objects = models.Manager()
search_question = Search()
def __str__(self):
return self.question
class Answer(models.Model):
answer = models.CharField(max_length = 1000, blank = False)
datetime = models.DateTimeField(auto_now = True)
question = models.ForeignKey('Question', on_delete = models.CASCADE)
answered_user = models.ForeignKey('user', on_delete = models.CASCADE)
def __str__(self):
return self.answer
|
[
"adityamysore002@gmail.com"
] |
adityamysore002@gmail.com
|
1a7e98eac3530ccd63f562cdfb6de3ad851647f7
|
5223229cbdbe883c6c1c09980c60d845e8255dd3
|
/laliga_sb_analysis/Scripts/Barca_Manager_tenure_graph.py
|
e6efd1c05d1abfb8fc9f6185443ca62b15abc13d
|
[] |
no_license
|
derrik-hanson/Python_Analysis_xG_Barca_plus
|
39b112c47672b5f3133a1b439c942e4cee35791e
|
65af5acb02deadb610b182a4b74b444114eb3ec6
|
refs/heads/main
| 2023-08-26T18:18:17.546730
| 2021-11-13T06:26:36
| 2021-11-13T06:26:36
| 422,980,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,627
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 12 19:39:17 2021
@author: Derrik Hanson
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 12 17:03:57 2021
@author: Derrik Hanson
"""
import plotly.express as px
import pandas as pd
manager_tenures = [
{'manager_name': 'Frank Rijkaard', 'start':'2003-06' ,'end':'2008-06'},
{'manager_name': 'Pep Guardiola', 'start':'2008-06' ,'end':'2012-06'},
{'manager_name': 'Tito Vilanova', 'start':'2012-07' ,'end':'2013-01'},
{'manager_name': 'Jordi Roura', 'start':'2013-01' ,'end':'2013-03'},
{'manager_name': 'Tito Vilanova', 'start':'2013-03' ,'end':'2013-07'},
{'manager_name': 'Gerard Martino', 'start':'2013-07' ,'end':'2014-05'},
{'manager_name': 'Luis Enrique', 'start':'2014-05' ,'end':'2017-05'},
{'manager_name': 'Ernesto Valverde', 'start':'2017-05' ,'end':'2020-01'},
{'manager_name': 'Quique Setien', 'start':'2020-01' ,'end':'2020-08'},
{'manager_name': 'Ronald Koeman', 'start':'2020-08' ,'end':'2021-10'},
]
# load DataFrame
df = pd.DataFrame(manager_tenures)
# Create Gantt Plot
fig = px.timeline(df, x_start="start", x_end="end", y="manager_name",
labels = {
'manager_name': 'Manager Name'}
)
fig.update_layout(
title={
'text': "Barcelona Manager Tenures",
'y':0.95,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'})
fig.update_yaxes(autorange="reversed") # otherwise tasks are slisted from the bottom up
fig.show()
fig.write_image("figures/barca_manager_tenure.pdf")
|
[
"hanson.derrik@gmail.com"
] |
hanson.derrik@gmail.com
|
d01f9d1b57765a72c85ec040eab037e9d12c89bb
|
ca77e9e45d666771c7b0897e7e3093b3d3c12f65
|
/scripts/trigger/add_prices.py
|
ec79a8be575fe0f59c9b16754b18afc1910a7a29
|
[] |
no_license
|
2gDigitalPost/custom
|
46175d3a3fc4c3be21dc20203ff0a48fb93b5639
|
6a3a804ef4ef6178044b70ad1e4bc5c56ab42d8d
|
refs/heads/master
| 2020-04-04T07:40:17.962611
| 2016-12-28T18:35:28
| 2016-12-28T18:35:28
| 39,648,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,857
|
py
|
"""
This file was generated automatically from a custom script found in Project -> Script Editor.
The custom script was moved to a file so that it could be integrated with GitHub.
"""
__author__ = 'Topher.Hughes'
__date__ = '04/08/2015'
import traceback
def main(server=None, input=None):
"""
The main function of the custom script. The entire script was copied
and pasted into the body of the try statement in order to add some
error handling. It's all legacy code, so edit with caution.
:param server: the TacticServerStub object
:param input: a dict with data like like search_key, search_type, sobject, and update_data
:return: None
"""
if not input:
input = {}
try:
# CUSTOM_SCRIPT00035
# Matthew Tyler Misenhimer
# This is used to have the prices on projects trickle up to titles, then orders
# This is DEPRECATED
sobj = input.get('sobject')
sk = input.get('search_key')
price_str = sobj.get('price')
price = 0
if price_str not in [None,'']:
price = float(price_str)
proj = server.eval("@SOBJECT(twog/proj['code','%s'])" % sobj.get('proj_code'))[0]
current_proj_price_str = proj.get('price')
current_proj_price = 0
if current_proj_price_str not in [None,'']:
current_proj_price = float(current_proj_price_str)
new_proj_price = current_proj_price + price
server.update(proj.get('__search_key__'), {'price': new_proj_price})
title = server.eval("@SOBJECT(twog/title['code','%s'])" % proj.get('title_code'))[0]
current_title_price_str = title.get('price')
current_title_price = 0
if current_title_price_str not in [None,'']:
current_title_price = float(current_title_price_str)
new_title_price = current_title_price + price
server.update(title.get('__search_key__'), {'price': new_title_price})
order = server.eval("@SOBJECT(twog/order['code','%s'])" % title.get('order_code'))[0]
current_order_price_str = order.get('price')
current_order_price = 0
if current_order_price_str not in [None,'']:
current_order_price = float(current_order_price_str)
new_order_price = current_order_price + price
server.update(order.get('__search_key__'), {'price': new_order_price})
except AttributeError as e:
traceback.print_exc()
print str(e) + '\nMost likely the server object does not exist.'
raise e
except KeyError as e:
traceback.print_exc()
print str(e) + '\nMost likely the input dictionary does not exist.'
raise e
except Exception as e:
traceback.print_exc()
print str(e)
raise e
if __name__ == '__main__':
main()
|
[
"topher.hughes@2gdigital.com"
] |
topher.hughes@2gdigital.com
|
7dfcead14cfcc41518ec35eaa9c96ca9cfbc0be3
|
8fb846f4f4ac5fd417489d731eae8a8a1bdc77c3
|
/rllab/misc/console.py
|
b32d21a249a3d389e0aef97f641591cdb13bb35a
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
zhongwen/rllab
|
0a9f9ea2d8995037b83aaae5853a299d5cf9e432
|
d8239c05179fcc55d865db7ce933defa3baae24d
|
refs/heads/master
| 2021-01-14T08:36:37.272071
| 2016-08-17T12:29:00
| 2016-08-17T12:29:00
| 65,801,245
| 1
| 1
| null | 2016-08-16T08:18:47
| 2016-08-16T08:18:46
| null |
UTF-8
|
Python
| false
| false
| 5,514
|
py
|
import sys
import time
import os
import errno
import shlex
import pydoc
import inspect
color2num = dict(
gray=30,
red=31,
green=32,
yellow=33,
blue=34,
magenta=35,
cyan=36,
white=37,
crimson=38
)
def colorize(string, color, bold=False, highlight=False):
attr = []
num = color2num[color]
if highlight:
num += 10
attr.append(str(num))
if bold:
attr.append('1')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def log(s): # , send_telegram=False):
print s
sys.stdout.flush()
class SimpleMessage(object):
def __init__(self, msg, logger=log):
self.msg = msg
self.logger = logger
def __enter__(self):
print self.msg
self.tstart = time.time()
def __exit__(self, etype, *args):
maybe_exc = "" if etype is None else " (with exception)"
self.logger("done%s in %.3f seconds" %
(maybe_exc, time.time() - self.tstart))
MESSAGE_DEPTH = 0
class Message(object):
def __init__(self, msg):
self.msg = msg
def __enter__(self):
global MESSAGE_DEPTH # pylint: disable=W0603
print colorize('\t' * MESSAGE_DEPTH + '=: ' + self.msg, 'magenta')
self.tstart = time.time()
MESSAGE_DEPTH += 1
def __exit__(self, etype, *args):
global MESSAGE_DEPTH # pylint: disable=W0603
MESSAGE_DEPTH -= 1
maybe_exc = "" if etype is None else " (with exception)"
print colorize('\t' * MESSAGE_DEPTH + "done%s in %.3f seconds" % (maybe_exc, time.time() - self.tstart), 'magenta')
def prefix_log(prefix, logger=log):
return lambda s: logger(prefix + s)
def tee_log(file_name):
f = open(file_name, 'w+')
def logger(s):
log(s)
f.write(s)
f.write('\n')
f.flush()
return logger
def collect_args():
splitted = shlex.split(' '.join(sys.argv[1:]))
return {arg_name[2:]: arg_val
for arg_name, arg_val in zip(splitted[::2], splitted[1::2])}
def type_hint(arg_name, arg_type):
def wrap(f):
meta = getattr(f, '__tweak_type_hint_meta__', None)
if meta is None:
f.__tweak_type_hint_meta__ = meta = {}
meta[arg_name] = arg_type
return f
return wrap
def tweak(fun_or_val, identifier=None):
if callable(fun_or_val):
return tweakfun(fun_or_val, identifier)
return tweakval(fun_or_val, identifier)
def tweakval(val, identifier):
if not identifier:
raise ValueError('Must provide an identifier for tweakval to work')
args = collect_args()
for k, v in args.iteritems():
stripped = k.replace('-', '_')
if stripped == identifier:
log('replacing %s in %s with %s' % (stripped, str(val), str(v)))
return type(val)(v)
return val
def tweakfun(fun, alt=None):
"""Make the arguments (or the function itself) tweakable from command line.
See tests/test_misc_console.py for examples.
NOTE: this only works for the initial launched process, since other processes
will get different argv. What this means is that tweak() calls wrapped in a function
to be invoked in a child process might not behave properly.
"""
cls = getattr(fun, 'im_class', None)
method_name = fun.__name__
if alt:
cmd_prefix = alt
elif cls:
cmd_prefix = cls + '.' + method_name
else:
cmd_prefix = method_name
cmd_prefix = cmd_prefix.lower()
args = collect_args()
if cmd_prefix in args:
fun = pydoc.locate(args[cmd_prefix])
if type(fun) == type:
argspec = inspect.getargspec(fun.__init__)
else:
argspec = inspect.getargspec(fun)
# TODO handle list arguments
defaults = dict(
zip(argspec.args[-len(argspec.defaults or []):], argspec.defaults or []))
replaced_kwargs = {}
cmd_prefix += '-'
if type(fun) == type:
meta = getattr(fun.__init__, '__tweak_type_hint_meta__', {})
else:
meta = getattr(fun, '__tweak_type_hint_meta__', {})
for k, v in args.iteritems():
if k.startswith(cmd_prefix):
stripped = k[len(cmd_prefix):].replace('-', '_')
if stripped in meta:
log('replacing %s in %s with %s' % (stripped, str(fun), str(v)))
replaced_kwargs[stripped] = meta[stripped](v)
elif stripped not in argspec.args:
raise ValueError(
'%s is not an explicit parameter of %s' % (stripped, str(fun)))
elif stripped not in defaults:
raise ValueError(
'%s does not have a default value in method %s' % (stripped, str(fun)))
elif defaults[stripped] is None:
raise ValueError(
'Cannot infer type of %s in method %s from None value' % (stripped, str(fun)))
else:
log('replacing %s in %s with %s' % (stripped, str(fun), str(v)))
# TODO more proper conversions
replaced_kwargs[stripped] = type(defaults[stripped])(v)
def tweaked(*args, **kwargs):
all_kw = dict(zip(argspec[0], args) +
kwargs.items() + replaced_kwargs.items())
return fun(**all_kw)
return tweaked
|
[
"dementrock@gmail.com"
] |
dementrock@gmail.com
|
2566347edc4da7664cffa99c4c72f58678e8c26a
|
648384ac8ff1e1a414c41c1ed19c6d54d7f1aeb2
|
/pygame_basic/4_keyboard_event.py
|
98eb5facda5d2b3900695aa414f57d47e2adf207
|
[] |
no_license
|
Online-abayss/--
|
dadb65b372ed7c39c391bc0a85355a314028ce19
|
4ac61ad20167a784419365317d61722d5bbddf3b
|
refs/heads/main
| 2023-08-03T13:13:51.623181
| 2021-09-23T08:42:10
| 2021-09-23T08:42:10
| 397,529,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,212
|
py
|
import pygame
from pygame.constants import K_LEFT, K_RIGHT
pygame.init() # 초가화작업 (무조건 필수) (클래스 정의할떄도 self.init 하는것처럼 그런듯)
# 화면 크기 설정
screen_width = 480 ## 가로크기
screen_height = 640 ## 세로크기
screen = pygame.display.set_mode((screen_width,screen_height)) ## 게임 화면 크기 설정
# 화면 타이틀 설정 (2. background 시작)
pygame.display.set_caption("Test Game") # 게임 타이틀 제작
# 배경 이미지 불러오기
background = pygame.image.load("C:\\Users\\kang\\Desktop\\Pythonworkspace\\pygame_basic\\background.png")
# 캐릭터(스프라이트) 불러오기 (3.man sprite 시작)
character = pygame.image.load("C:\\Users\\kang\\Desktop\\Pythonworkspace\\pygame_basic\\character.png")
character_size = character.get_rect().size ## 캐릭터의 이미지의 가로 및 세로 크기값을 알수있음.
character_width = character_size[0] # 캐릭터의 가로 크기
character_heigth = character_size[1] # 캐릭터의 세로 크기
# 캐릭터 움직임의 관한 좌표를 설정
# 좌표는 11시 꼭짓점 기준으로 0,0을 잡고 우측 밑으로 증가한다.
# y좌표를 바로 밑에껏처럼 하면 캐릭터가 안보인다. 왜냐하면 캐릭터도 마찬가지로 좌표는 11시 꼭짓점을 기준으로 잡아주기에
# 캐릭터의 크기를 생각하고 그만큼 위로 올려서 보이게 해야한다. 또한 중앙으로 캐릭터를 옮기고 싶으면 그냥 화면 가로/2가 아닌 캐릭터 크기의 절반만큼 더 왼쪽으로 옮겨야한다.
character_x_pos = (screen_width/2) -(character_width/2)# x위치를 설정
character_y_pos = screen_height - character_heigth# Y위치를 설정
# 이동 할 좌표
to_x = 0
to_y = 0
# 이벤트 루프
# 키보드 입력에 따른 이동 여부 설정(4. keyboard_event 시작)
running = True # 게임이 계속 진행중인지? 파악
while running:
for event in pygame.event.get(): # 키보드 및 마우스 입력이 들어올경우 그 값에 대응으로 처리 (이벤트 발생 여부)
if event.type == pygame.QUIT: ## 1시 방향 X 표시의 창끄기 표시 명령어
running = False ## 내가 실수로 = 한개만 할걸 두개로 해서 확정이 아닌 조건으로 되서 무한루프로 빠져나오지 못했음.
if event.type == pygame.KEYDOWN: #키가 눌려졌는지 확인
if event.key == pygame.K_LEFT: # 캐릭터를 좌측으로 이동
to_x -= 2
elif event.key == pygame.K_RIGHT: # 캐릭터를 우측으로 이동
to_x += 2
elif event.key == pygame.K_UP: # 캐릭터를 위로 이동
to_y -= 2
elif event.key == pygame.K_DOWN: # 캐릭터를 밑으로 이동
to_y += 2
if event.type == pygame.KEYUP: # 키보드를 때면 멈추기.
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
to_x = 0
elif event.key == pygame.K_UP or event.key == pygame.K_DOWN:
to_y = 0
character_x_pos += to_x
character_y_pos += to_y
# 화면 밖으로 넘어가는걸 방지
if character_x_pos < 0:
character_x_pos = 0
elif character_x_pos > screen_width - character_width: # 우측 끝 - 캐릭터 넓이만큼
character_x_pos = screen_width - character_width
if character_y_pos < 0:
character_y_pos = 0
elif character_y_pos > screen_height - character_heigth: # 스크린 맨밑 - 캐릭터 높이만큼
character_y_pos = screen_height - character_heigth
screen.blit(background, (0,0)) #배경 그리기 ## 여기까지만 하면 반영을 하지 않는다.
#rgb값을 이용하여 배경을 넣을수 있다.
#screen.fill((0,0,255)
screen.blit(character, (character_x_pos,character_y_pos)) # 캐릭터 그리기 및 위치 설정한 값으로 지정
pygame.display.update() # 매 프레임마다 배경을 그려줘야 하기에 설정함
# 게임 종료
pygame.quit()
|
[
"noreply@github.com"
] |
noreply@github.com
|
14450e65ad686acdab9fb6fecaa1b50a8a7d5106
|
84fda250fd32b37d74f07f6d00106226881e70ee
|
/shop/cart/admin.py
|
85a9d7c503ea1cc245e73f3574a659d5c5459871
|
[] |
no_license
|
NovosadVictor/OnlineShop
|
8b80b1122f9d8019fb2d1641e08ea9eb64cc6ed6
|
afe6826bf0494be6af3a033cf8c2e3a205ffb571
|
refs/heads/master
| 2020-12-02T16:13:26.312803
| 2017-08-22T16:29:55
| 2017-08-22T16:29:55
| 96,091,899
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
from django.contrib import admin
from .models import ProductInCart
class ProductInCartAdmin(admin.ModelAdmin):
list_display = ['owner', 'product', 'quantity',]
list_editable = ['quantity',]
admin.site.register(ProductInCart, ProductInCartAdmin)
|
[
"novosad_msu@mail.ru"
] |
novosad_msu@mail.ru
|
9f800cece6947820a5e6c1324c4b395ea3efdf40
|
a1c397a8b4dccfbef8b91a67d6910a58ff6aa98e
|
/02-19-Cuma/tryExcept.py
|
13bb01e466c735f27fe0a613acc049813ea2ed45
|
[
"MIT"
] |
permissive
|
hhsalik/staj
|
f0c9a17064c444fa084a102e4c36050ff62fdb4e
|
d0ee95d5e77a7d7a1f16611d49c87be429a25b31
|
refs/heads/master
| 2023-04-29T15:17:17.928023
| 2021-05-10T19:25:35
| 2021-05-10T19:25:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
try:
answer = 10 / 0
number = int(input("Enter a number: "))
print(number)
except ZeroDivisionError as err:
print(err)
except ValueError:
print("invalid input")
|
[
"cihat02020202@gmail.com"
] |
cihat02020202@gmail.com
|
0192859b8b887bddad03294ac6c0618bf2e5e2a9
|
0ce9fbb56b1bb142eed50491c00adeb5eed52d3f
|
/chatbot.py
|
01cda0956f43c0b5dbe7a8189c09280cd267198e
|
[] |
no_license
|
lucasB97/ARTUR
|
0336a7c1bc0e6fc9504dbd46a1a57e2d7c4dd299
|
287625fa5f30923abb676b6ce3fb326ff8ceda81
|
refs/heads/main
| 2023-04-27T12:01:47.335612
| 2021-04-17T17:31:16
| 2021-04-17T17:31:16
| 316,075,171
| 0
| 0
| null | 2021-04-17T02:59:21
| 2020-11-25T23:21:38
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,095
|
py
|
from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
from chatterbot.trainers import ChatterBotCorpusTrainer
# Creating ChatBot Instance
chatbot = ChatBot(
'A.R.T.U.R.',
storage_adapter='chatterbot.storage.SQLStorageAdapter',
logic_adapters=[
{
'import_path': 'chatterbot.logic.BestMatch',
'default_response': 'Me desculpe, mas eu não entendi. Ainda estou aprendendo :(',
'maximum_similarity_threshold': 0.90
}
],
filters=[
'chatterbot.filters.RepetitiveResponseFilter'
],
database_uri='sqlite:///database.sqlite3'
)
# Training with Personal Ques & Ans
training_data = open('training/ques_ans.txt').read().splitlines()
trainer = ListTrainer(chatbot)
trainer.train(training_data)
# Training with Portugues Corpus Data
trainer_corpus = ChatterBotCorpusTrainer(chatbot)
trainer_corpus.train(
"chatterbot.corpus.portuguese",
"chatterbot.corpus.portuguese.greetings",
"chatterbot.corpus.portuguese.conversations",
"chatterbot.corpus.portuguese.linguistic_knowledge"
)
|
[
"lucasbessa708@gmail.com"
] |
lucasbessa708@gmail.com
|
102f8aafd37eaff350427f12ac5e476d7dda9c04
|
992816021cb79580e9c739ca2650fe9bc83829bd
|
/medapp/profile/admin.py
|
b838f95fde2b89db4d347aec60b042db9c24c2e0
|
[] |
no_license
|
etiennekruger/medapp-api
|
096de56dc523425981b48c6d80930bb3df799639
|
af9232f548db28e6716c0a14e38b44dcb9b57690
|
refs/heads/master
| 2020-04-18T22:58:35.859694
| 2012-07-16T06:17:17
| 2012-07-16T06:17:17
| 5,150,748
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
from django.contrib import admin
from profile.models import Profile
class ProfileAdmin(admin.ModelAdmin):
list_display = ['__unicode__', 'created', 'updated']
admin.site.register(Profile, ProfileAdmin)
|
[
"makhonin@steelkiwi.com"
] |
makhonin@steelkiwi.com
|
6128440a21b6bad0591564200e67f0c7ab7f0018
|
a219c9b0f3ccd1b35c3bb7bb3c7b50e1d9d8ef93
|
/d002_1_for_dongusu.py
|
9910b109c6ddd3f3d306143754c0ebd1ef137785
|
[] |
no_license
|
f0xmulder/python_ornekleri
|
3293541b5d4e594dc39e6df623e47ecd4e5e94c2
|
d1ebbcefdd7390a4e20a61864b150097f9919e29
|
refs/heads/master
| 2022-11-04T07:12:20.766931
| 2017-06-22T13:30:45
| 2017-06-22T13:30:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 495
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
For Döngüsü
cok satirli
aciklama
Uşak Universitesi
"""
# tek satır aciklama
import numpy
print "Program Baslangıcı"
print "0-10 arası 1.7 artarak"
elemanlar=numpy.arange(0,10,1.7)
print "Sayı\t(3)\t(5)\t(7)"
for e in elemanlar:
print e,"\t", e%3,"\t", e%5,"\t", e%7
print "Herhangi bir liste"
elemanlar=[3,8,7.2,85]
print "Sayı\t(3)\t(5)\t(7)"
for e in elemanlar:
print e,"\t", e%3,"\t", e%5,"\t", e%7
print "Program Sonu"
|
[
"noreply@github.com"
] |
noreply@github.com
|
a9c2ba7de5529d4ee631d9a51afa34de0f801869
|
78f3ffc90eec06e3ea638b0a87b73562dc311984
|
/damsht.py
|
9665480edef3cb1d8c6ffd418a561d9b1c8e1296
|
[] |
no_license
|
ashavt/dsh
|
a19647db44ce8112ce80184c6b5351cd16b649ec
|
deef4281ddd71d472b4b1d0b0b633634da3bf6a5
|
refs/heads/master
| 2022-12-22T07:05:45.672516
| 2018-02-18T09:13:54
| 2018-02-18T09:13:54
| 121,934,464
| 0
| 0
| null | 2022-12-08T00:54:53
| 2018-02-18T08:49:08
|
Python
|
UTF-8
|
Python
| false
| false
| 2,224
|
py
|
import requests
import datetime
class BotHandler:
def __init__(self, token):
self.token = token
self.api_url = "https://api.telegram.org/bot{}/".format(token)
def get_updates(self, offset=None, timeout=30):
method = 'getUpdates'
params = {'timeout': timeout, 'offset': offset}
resp = requests.get(self.api_url + method, params)
result_json = resp.json()['result']
return result_json
def send_message(self, chat_id, text):
params = {'chat_id': chat_id, 'text': text}
method = 'sendMessage'
resp = requests.post(self.api_url + method, params)
return resp
def get_last_update(self):
get_result = self.get_updates()
if len(get_result) > 0:
last_update = get_result[-1]
else:
last_update = get_result[len(get_result)]
return last_update
greet_bot = BotHandler(token)
greetings = ('hello', 'hi', 'greetings', 'sup')
now = datetime.datetime.now()
def main():
new_offset = None
today = now.day
hour = now.hour
while True:
greet_bot.get_updates(new_offset)
last_update = greet_bot.get_last_update()
last_update_id = last_update['update_id']
last_chat_text = last_update['message']['text']
last_chat_id = last_update['message']['chat']['id']
last_chat_name = last_update['message']['chat']['first_name']
if last_chat_text.lower() in greetings and today == now.day and 6 <= hour < 12:
greet_bot.send_message(last_chat_id, 'Доброе утро {}'.format(last_chat_name))
today += 1
elif last_chat_text.lower() in greetings and today == now.day and 12 <= hour < 17:
greet_bot.send_message(last_chat_id, 'Добрый день {}'.format(last_chat_name))
today += 1
elif last_chat_text.lower() in greetings and today == now.day and 17 <= hour < 23:
greet_bot.send_message(last_chat_id, 'Добрый вечер {}'.format(last_chat_name))
today += 1
new_offset = last_update_id + 1
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
exit()
|
[
"ashavt@yandex.ru"
] |
ashavt@yandex.ru
|
feee22f88d28bb23ee6a97c99f3208c769d58972
|
dc569d08a025447e5386abf0388c99d45bdbfcf1
|
/utils.py
|
6cc2d12ab1651734c79094fcbdcdd4f3dd2f23ad
|
[] |
no_license
|
samuelBB/Translating-OOV-Words-Via-Images
|
cf992d1f27f10352fee61bc89a60ac0bc2d91a9f
|
5ae8f3b89786a0813e93b619189e181de8a955dd
|
refs/heads/master
| 2020-05-30T10:08:45.056985
| 2019-05-31T22:37:25
| 2019-05-31T22:37:25
| 189,665,688
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,088
|
py
|
"""
misc utility functions
"""
import re
import os
import errno
import pickle
import random
import logging
import argparse
import datetime as dt
RANDOM_SEED = 2018
### logging
class UpToLevel(object):
def __init__(self, lvl=logging.FATAL):
self.lvl = lvl
def filter(self, record):
return record.levelno <= self.lvl
ROOT = '*'
def init_logging(file=None, stdout=False, stderr=False,
lo_lvl=logging.DEBUG, hi_lvl=logging.FATAL,
file_lo_lvl=None, stdout_lo_lvl=None, stderr_lo_lvl=None,
file_hi_lvl=None, stdout_hi_lvl=None, stderr_hi_lvl=None,
fmt='[%(asctime)s|%(levelname)s|%(module)s'
'.%(funcName)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d_%H:%M:%S',
mode='w'):
logger = logging.getLogger(ROOT)
if is_(lo_lvl):
logger.setLevel(lo_lvl)
if is_(hi_lvl):
logger.addFilter(UpToLevel(hi_lvl))
for name, obj, args, prefix in [
('stdout', stdout, [logging.sys.stdout], 'Stream'),
('stderr', stderr, (), 'Stream'),
( 'file', file, (file, mode), 'File')
]:
if obj:
handler = getattr(logging, prefix + 'Handler')(*args)
handler.setFormatter(logging.Formatter(fmt, datefmt))
lo, hi = locals()[name+'_lo_lvl'], locals()[name+'_hi_lvl']
if is_(lo):
handler.setLevel(lo)
if is_(hi):
handler.addFilter(UpToLevel(hi))
logger.addHandler(handler)
if name == 'file':
return handler
def main_module_name(name, ext=True):
if name == '__main__':
try:
main_file = __import__(name).__file__
name_and_ext = main_file[main_file.rfind('/')+1:]
if ext:
return name_and_ext[:name_and_ext.rfind('.')]
return name_and_ext
except:
pass
return name
def get_logger(name, main=False):
name = main_module_name(name) if main else name
return logging.getLogger(ROOT + '.' + name)
### timing
def time_stamp(fmt='%Y-%-m-%-d_%-H-%-M-%-S'):
return dt.datetime.now().strftime(fmt)
### io
def write_lines(iterable, path):
with open(path, 'w') as io:
for item in iterable:
print(item, file=io)
def read_lines(path):
with open(path) as io:
return [line.strip() for line in io]
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def get_path_elems_unix(path, i, j='', delim='_'):
elems = re.sub('//+', '/', path).strip('/').split('/')
return elems[i] if j == '' or i == j else delim.join(elems[i:j])
def load(path, method=pickle):
with open(path, 'rb') as f:
return method.load(f)
def dump(obj, path, method=pickle, **kw):
if not kw and method.__name__ in ('pickle', 'dill'):
kw = dict(protocol=-1)
with open(path, 'wb') as f:
method.dump(obj, f, **kw)
def scandir_r(path):
for entry in os.scandir(path):
if entry.is_dir(follow_symlinks=False):
yield from scandir_r(entry.path)
else: yield entry
def mapread(path, f):
with open(path) as io:
yield from map(f, io) if f else io
### convenience
def is_(x):
return x is not None
def dedupe(it):
s = set()
for el in it:
if el not in s:
s.add(el)
yield el
### parsing
def arg(*ar, **kw):
return ar, kw
def strip_attrs(opts, *attrs):
for attr in attrs:
yield getattr(opts, attr)
delattr(opts, attr)
def parse_args(*args, strip=None):
parser = argparse.ArgumentParser()
for ar, kw in args:
parser.add_argument(*ar, **kw)
opts = parser.parse_args()
if is_(strip):
return (opts, *strip_attrs(opts, *strip))
return opts
### sampling
_RNG = random.Random(RANDOM_SEED)
def sample(lst, n=None):
return _RNG.sample(lst, n or len(lst))
|
[
"baldsammy@gmail.com"
] |
baldsammy@gmail.com
|
7c3197dc1c975ece63e9829ab29ce34714bbbbbf
|
07d5dbbaecddb7ddb3341861fddd5490d54840af
|
/Computational_Physics/plot2.py
|
4da8f397c3aca7afa346f8b38707c1ec770873ec
|
[] |
no_license
|
julpotter/College-Projects-and-code
|
49382e5df82a80c3f2c8162c610c7f465860782e
|
b1a0508f88a4a6ef1b04de66c9653c68ea03a3cc
|
refs/heads/main
| 2023-03-01T15:25:51.618680
| 2021-02-05T00:54:02
| 2021-02-05T00:54:02
| 326,104,109
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 640
|
py
|
from __future__ import print
import matplotlib.pyplot as plt
g = 9.81
t0 = 0
y0 = 1.5 # initial height
v0 = float(input("Enter the initial velocity v0 :"))
#use lists to save position and time
y = []
t = []
ycalc = 0
tcalc = 0
#create a loop to populate our lists with values
while(ycalc >= 0):
ycalc = y0 + v0*tcalc - 1/2.*g*tcalc**
y.append(ycalc)
t.append(tcalc)
print "Height is {0:5.2f} and time is {1:5.2f}".format(ycalc,tcalc)
tcalc = tcalc + 0.05 #increment
print "The maximum height reach is ", max(y)
#graph our results
plt.plot(t,y, 'r^')
plt.xlabel("Time t (s)")
plt.ylabel('Height y (m)')
plt.show()
|
[
"jpotter@udallas.edu"
] |
jpotter@udallas.edu
|
d50868da0f47f63d568213860fafad9a262e1a9d
|
926651d078851f96e71e2cc419fe87348b333ba3
|
/mysite/mysite/urls.py
|
1e5ccbb2f5ede005dbd394a42c6f06242f90e827
|
[] |
no_license
|
Kitabo258/secretary2
|
edc9c55133aed40b2d66a89a3d32c90afd923297
|
d89e74309abe1577aa4248c77ecc73412e27eba3
|
refs/heads/master
| 2021-08-14T15:11:43.836979
| 2017-11-16T02:32:12
| 2017-11-16T02:32:12
| 110,913,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 907
|
py
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^questions/create/$','question.views.question_create'),
url(r'^admin/', admin.site.urls),
url(r'^$', 'show.views.index'),
url(r'^tama/$', 'show.views.tama'),
]
|
[
"kiki901028@gmail.com"
] |
kiki901028@gmail.com
|
3010b06f90027e9d8277db75879d2a23679e061c
|
934107eaba17b352bf7bf3a9c0a45af4f263fd54
|
/favorites/manager.py
|
5106bb9da8161cbc63e045832e3288fd4baca22e
|
[] |
no_license
|
quentin338/Purebeurre-p8
|
d0324f0fbc7e96a1418b367ea41ef3f4f51cc437
|
15bb4192df331d790ef28140e65213ac604cf96e
|
refs/heads/master
| 2023-08-07T22:25:10.975870
| 2023-07-26T10:21:04
| 2023-07-26T10:21:04
| 207,123,046
| 0
| 0
| null | 2023-07-26T10:21:06
| 2019-09-08T14:17:16
|
CSS
|
UTF-8
|
Python
| false
| false
| 310
|
py
|
from django.db import models
from django.db.utils import IntegrityError
class FavoriteManager(models.Manager):
def is_favorite(self, user, ancient_product, new_product):
return bool(self.filter(user=user, ancient_product=ancient_product,
new_product=new_product))
|
[
"quentin.bertrand@yahoo.fr"
] |
quentin.bertrand@yahoo.fr
|
7875cbdde6257275a92695eeecc06813145d79d1
|
1ad64831a79a6b163f6a31a9255994c1e4d8a322
|
/settings.py
|
a2fe5dbf18ed9edbb73469731b1b139a097fe630
|
[
"Apache-2.0"
] |
permissive
|
benhosmer/brzr-desktop
|
1f17b767a9d1b7d6f4bbb3787e5deb9df92f9165
|
1e5e88e0b33325d35002636192019a40d080224e
|
refs/heads/master
| 2020-03-30T07:49:24.701545
| 2013-07-11T16:29:12
| 2013-07-11T16:29:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('brzr.cfg')
database_name = config.get('main', 'database_name')
event_name = config.get('main', 'event_name')
|
[
"ben.hosmer@gmail.com"
] |
ben.hosmer@gmail.com
|
1da98ce1969f888ec8962c9239a84d4f7a580f78
|
b72dbc51279d3e59cb6410367b671f8a956314c1
|
/leet_code/leet_372.py
|
5c1d0057a5ac67543ab059922519a69fe52287d6
|
[] |
no_license
|
ddobokki/coding-test-practice
|
7b16d20403bb1714d97adfd1f47aa7d3ccd7ea4b
|
c88d981a1d43b986169f7884ff3ef1498e768fc8
|
refs/heads/main
| 2023-07-08T15:09:32.269059
| 2021-08-08T12:19:44
| 2021-08-08T12:19:44
| 344,116,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 205
|
py
|
from typing import List
class Solution:
def superPow(self, a: int, b: List[int]) -> int:
if a in [1,0]:
return a
return int(pow(a,int("".join(str(i) for i in b)),1337))
|
[
"44228269+ddobokki@users.noreply.github.com"
] |
44228269+ddobokki@users.noreply.github.com
|
a03c242a1a98c722662d25054877d5f1d75f61b1
|
8b1ef7f1a7e11b8c39b0546f521a4533f82bb6ba
|
/PyQt5- Login Form/main.py
|
57c90f7324b9bdcd010b805087c425faa78e58d7
|
[] |
no_license
|
muhammedzahit/QT5
|
4b6c8e1461f352d4143949486019071285e42d53
|
b82283d517023d5860d37e421e5b9babad503469
|
refs/heads/master
| 2023-02-03T08:19:31.775039
| 2020-12-17T10:25:08
| 2020-12-17T10:25:08
| 289,100,610
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,098
|
py
|
# -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtGui, QtWidgets
import sqlite3
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(800, 600))
MainWindow.setMaximumSize(QtCore.QSize(800, 600))
self.centralwidget = QtWidgets.QWidget(MainWindow)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.centralwidget.setObjectName("centralwidget")
self.bg_photo = QtWidgets.QLabel(self.centralwidget)
self.bg_photo.setGeometry(QtCore.QRect(0, 0, 800, 600))
self.bg_photo.setAutoFillBackground(False)
self.bg_photo.setText("")
self.bg_photo.setPixmap(QtGui.QPixmap("imgs/bg.jpg"))
self.bg_photo.setScaledContents(True)
self.bg_photo.setObjectName("bg_photo")
self.stackedWidget = QtWidgets.QStackedWidget(self.centralwidget)
self.stackedWidget.setGeometry(QtCore.QRect(110, 50, 511, 411))
self.stackedWidget.setObjectName("stackedWidget")
self.page_register = QtWidgets.QWidget()
self.page_register.setStyleSheet("*{\n"
"font: italic 16pt \"Brush Script MT\";\n"
"color : brown;\n"
"background: transparent\n"
"}\n"
"\n"
"QPushButton\n"
"\n"
"{\n"
"background-color : rgb(85, 0, 0, 0.7);\n"
"}\n"
"\n"
"QLabel\n"
"{\n"
"color: yellow\n"
"}\n"
"\n"
"QLineEdit{\n"
"background-color : rgb(85, 0, 0, 0.7);\n"
"color:blue;\n"
"}")
self.page_register.setObjectName("page_register")
self.formLayoutWidget = QtWidgets.QWidget(self.page_register)
self.formLayoutWidget.setGeometry(QtCore.QRect(10, 150, 481, 191))
self.formLayoutWidget.setObjectName("formLayoutWidget")
self.formLayout = QtWidgets.QFormLayout(self.formLayoutWidget)
self.formLayout.setContentsMargins(0, 0, 0, 0)
self.formLayout.setObjectName("formLayout")
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.register_name = QtWidgets.QLineEdit(self.formLayoutWidget)
self.register_name.setObjectName("register_name")
self.verticalLayout_4.addWidget(self.register_name)
self.register_password = QtWidgets.QLineEdit(self.formLayoutWidget)
self.register_password.setObjectName("register_password")
self.register_password.setEchoMode(QtWidgets.QLineEdit.Password)
self.verticalLayout_4.addWidget(self.register_password)
self.register_confirm_password = QtWidgets.QLineEdit(self.formLayoutWidget)
self.register_confirm_password.setObjectName("register_confirm_password")
self.register_confirm_password.setEchoMode(QtWidgets.QLineEdit.Password)
self.verticalLayout_4.addWidget(self.register_confirm_password)
self.formLayout.setLayout(0, QtWidgets.QFormLayout.FieldRole, self.verticalLayout_4)
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.label_3 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_3.setObjectName("label_3")
self.verticalLayout_3.addWidget(self.label_3)
self.label_4 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_4.setObjectName("label_4")
self.verticalLayout_3.addWidget(self.label_4)
self.label_5 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_5.setObjectName("label_5")
self.verticalLayout_3.addWidget(self.label_5)
self.formLayout.setLayout(0, QtWidgets.QFormLayout.LabelRole, self.verticalLayout_3)
self.registerButton = QtWidgets.QPushButton(self.formLayoutWidget)
self.registerButton.setStyleSheet("")
self.registerButton.setObjectName("registerButton")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.registerButton)
self.stackedWidget.addWidget(self.page_register)
self.page_login = QtWidgets.QWidget()
self.page_login.setStyleSheet("*{\n"
"font: italic 16pt \"Brush Script MT\";\n"
"color : brown;\n"
"background: transparent\n"
"}\n"
"\n"
"QPushButton\n"
"\n"
"{\n"
"background-color : rgb(85, 0, 0, 0.7);\n"
"}\n"
"\n"
"QLabel\n"
"{\n"
"color: yellow\n"
"}\n"
"\n"
"QLineEdit{\n"
"background-color : rgb(85, 0, 0, 0.7);\n"
"color:blue;\n"
"}")
self.page_login.setObjectName("page_login")
self.frame = QtWidgets.QFrame(self.page_login)
self.frame.setGeometry(QtCore.QRect(10, 10, 491, 391))
self.frame.setStyleSheet("")
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.login_button = QtWidgets.QPushButton(self.frame)
self.login_button.setGeometry(QtCore.QRect(70, 290, 161, 51))
self.login_button.setStyleSheet("")
self.login_button.setObjectName("login_button")
self.to_register_button = QtWidgets.QPushButton(self.frame)
self.to_register_button.setGeometry(QtCore.QRect(260, 290, 161, 51))
self.to_register_button.setStyleSheet("")
self.to_register_button.setObjectName("to_register_button")
self.verticalLayoutWidget = QtWidgets.QWidget(self.frame)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(30, 70, 181, 141))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.label_2 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_2.setStyleSheet("")
self.label_2.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.label_2.setObjectName("label_2")
self.verticalLayout.addWidget(self.label_2)
self.label = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label.setStyleSheet("")
self.label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.frame)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(220, 70, 211, 151))
self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.login_name = QtWidgets.QLineEdit(self.verticalLayoutWidget_2)
self.login_name.setStyleSheet("")
self.login_name.setObjectName("login_name")
self.verticalLayout_2.addWidget(self.login_name)
self.login_password = QtWidgets.QLineEdit(self.verticalLayoutWidget_2)
self.login_password.setStyleSheet("")
self.login_password.setObjectName("login_password")
self.login_password.setEchoMode(QtWidgets.QLineEdit.Password)
self.verticalLayout_2.addWidget(self.login_password)
self.stackedWidget.addWidget(self.page_login)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
self.stackedWidget.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
# sayfalar arası geçiş -->
self.to_register_button.clicked.connect(self.to_register_page)
self.registerButton.clicked.connect(self.to_home_page)
self.login_button.clicked.connect(self.check_info)
conn = sqlite3.connect("data.db")
cursor = conn.cursor()
cursor.execute(
"CREATE TABLE IF NOT EXISTS USERS(ID INTEGER NOT NULL PRIMARY KEY, USERNAME TEXT, PASSWORD TEXT)")
conn.close()
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label_3.setText(_translate("MainWindow", "Username: "))
self.label_4.setText(_translate("MainWindow", "Password:"))
self.label_5.setText(_translate("MainWindow", "Confirm Password:"))
self.registerButton.setText(_translate("MainWindow", "register"))
self.login_button.setText(_translate("MainWindow", "login"))
self.to_register_button.setText(_translate("MainWindow", "register"))
self.label_2.setText(_translate("MainWindow", "USERNAME :"))
self.label.setText(_translate("MainWindow", "PASSWORD :"))
def to_register_page(self):
self.stackedWidget.setCurrentIndex(0)
def check_username(self, username):
conn = sqlite3.connect("data.db")
curr = conn.cursor()
curr.execute("SELECT USERNAME FROM USERS")
usernames = curr.fetchall()
conn.close()
for name in usernames:
if username == name[0]:
return False
return True
def to_home_page(self):
_translate = QtCore.QCoreApplication.translate
if self.register_name.text() == "" or self.register_password.text() == "" or self.register_confirm_password.text() == "":
msg = QtWidgets.QMessageBox()
msg.setText(_translate("MainWindow", "You must fill all lines !!!"))
msg.setIcon(QtWidgets.QMessageBox.Warning)
x = msg.exec_()
elif self.register_password.text() != self.register_confirm_password.text():
msg = QtWidgets.QMessageBox()
msg.setText(_translate("MainWindow", "Passwords didn't match !!!"))
msg.setIcon(QtWidgets.QMessageBox.Warning)
x = msg.exec_()
elif not self.check_username(self.register_name.text()):
msg = QtWidgets.QMessageBox()
msg.setText(_translate("MainWindow", "Username already taken !!!"))
msg.setIcon(QtWidgets.QMessageBox.Warning)
x = msg.exec_()
else:
conn = sqlite3.connect("data.db")
conn.execute("INSERT INTO USERS(USERNAME,PASSWORD) values(?,?)",
(self.register_name.text(), self.register_password.text(),))
conn.commit()
conn.close()
self.stackedWidget.setCurrentIndex(1)
def check_info(self):
_translate = QtCore.QCoreApplication.translate
conn = sqlite3.connect("data.db")
curr = conn.cursor()
curr.execute("SELECT * FROM USERS")
user_list = curr.fetchall()
print(user_list)
conn.close()
flag = False
for item in user_list:
if item[1] == self.login_name.text() and item[2] == self.login_password.text():
msg = QtWidgets.QMessageBox()
msg.setWindowTitle(_translate("Login", "Logged in"))
msg.setText(_translate("Login", "Login succesfull :)"))
x = msg.exec_()
flag = True
break
if not flag:
msg = QtWidgets.QMessageBox()
msg.setText(_translate("Login", "Username and pasword didn't match !!!"))
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setWindowTitle(_translate("Login", "Login Error"))
x = msg.exec_()
else:
exit()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
list = ["Türkçe", "English"]
t = QtCore.QTranslator()
lang = QtWidgets.QInputDialog.getItem(MainWindow, "Select Language", "Language:", list)
#print(lang)
if lang[0] == "Türkçe":
t.load("turkish.qm")
app.installTranslator(t)
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
[
"muhammetzahitaydin@gmail.com"
] |
muhammetzahitaydin@gmail.com
|
d2db1ee6174ddceb75d614d139e0bf52ec6cc9c6
|
65c8e86e276fb9ff8159af867d3793063e480d34
|
/config.py
|
037c7406057066de211e539d476d582edc9ada7d
|
[] |
no_license
|
prosass83/ucbotplot
|
504d0d16f1625c839d624f95017ca20597b1bf59
|
25b4bb10b4c637d4349ecab56f29a8f9720f7012
|
refs/heads/master
| 2020-03-10T03:36:40.328166
| 2018-04-12T00:34:43
| 2018-04-12T00:34:43
| 129,169,570
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
# Twitter API Keys
consumer_key = "Uu04Kdr6N3SFmbEi334mhy9HQ"
consumer_secret = "d9WJsr5GCCdBI6wbyQopug6HeHpB3T2mKoez8DEHeJFy8D9ko0"
access_token = "2518417008-SC6yochSVIeAYERFWe3BClzJ2pit2iH6YWOUwcA"
access_token_secret = "FBkOuixi1SSXC9xP2k8PpxY07GWsoDZ3MvtC7lj1K59Xo"
|
[
"prosass@gmail.com"
] |
prosass@gmail.com
|
dcb1ace6bff0ebe626977fad8beb5902ed036ab4
|
8a742d701f6e3106f138fb5bb67a90de64c99abd
|
/lesson1/task2.py
|
6dfca1d43eda210d4e9c6cd878da0521bc1d1796
|
[] |
no_license
|
alexabolot1/python-homework
|
ac51f20ad31b866d56783aaffc44d26582f6b113
|
5d719b5f5e3e67ef7e85a8f347d858255cb31ac7
|
refs/heads/main
| 2023-03-10T16:11:43.824269
| 2021-01-22T06:23:16
| 2021-01-22T06:23:16
| 330,006,488
| 0
| 0
| null | 2021-02-16T12:52:04
| 2021-01-15T19:43:41
|
Python
|
UTF-8
|
Python
| false
| false
| 123
|
py
|
a = input('Введите число')
print(f"Сумма вашего числа {int(a) + int(a + a) + int(a + a + a)}")
|
[
"alexabolot@gmail.com"
] |
alexabolot@gmail.com
|
f987964ac120b8b838a54bd03b1a6cd553eba48d
|
e4b52bd79c3a2fdf64f1927ddd04c87fa401fed8
|
/myshop/migrations/0003_migrate_translatable_fields.py
|
cd9bbd763d5d51bc2a494ed80d982b7711caf5e0
|
[] |
no_license
|
ObsidianRock/shop
|
8e1051efb298c65d6803fe76be07f222c19d60dd
|
2a6613f28de3d577288653afc6b1a470fcde6a89
|
refs/heads/master
| 2021-01-19T10:01:26.797821
| 2017-04-13T16:02:34
| 2017-04-13T16:02:34
| 87,817,222
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,795
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
translatable_models = {
'Category': ['name', 'slug'],
'Product': ['name', 'slug', 'description'],
}
def forwards_func(apps, schema_editor):
for model, fields in translatable_models.items():
Model = apps.get_model('myshop', model)
ModelTranslation = apps.get_model('myshop', '{}Translation'.format(model))
for obj in Model.objects.all():
translation_fields = {field: getattr(obj, field) for field in fields}
translation = ModelTranslation.objects.create(
master_id=obj.pk,
language_code=settings.LANGUAGE_CODE,
**translation_fields)
def backwards_func(apps, schema_editor):
for model, fields in translatable_models.items():
Model = apps.get_model('myshop', model)
ModelTranslation = apps.get_model('myshop', '{}Translation'.format(model))
for obj in Model.objects.all():
translation = _get_translation(obj, ModelTranslation)
for field in fields:
setattr(obj, field, getattr(translation, field))
obj.save()
def _get_translation(obj, MyModelTranslation):
translations = MyModelTranslation.objects.filter(master_id=obj.pk)
try:
return translations.get(language_code=settings.LANGUAGE_CODE)
except ObjectDoesNotExist:
return translations.get()
class Migration(migrations.Migration):
dependencies = [
('myshop', '0002_add_translation_model'),
]
operations = [
migrations.RunPython(forwards_func, backwards_func),
]
|
[
"ObsidianRock@users.noreply.github.com"
] |
ObsidianRock@users.noreply.github.com
|
f9d8cfefda12f3f541879b5d40b545e5a08c6842
|
0751fa2615079decfe8c1446f6dcbd7d1048bc31
|
/HW3/Code.py
|
10d368b3246cb1e5023cca87882b4c37397c8493
|
[] |
no_license
|
fatihselimyakar/AlgorithmAndDesign
|
0fa4ca8454dc641a57161657ceca29952f45054e
|
519107789c77f60dfc03036f7fda962f49203720
|
refs/heads/master
| 2021-01-06T01:50:07.846213
| 2020-02-17T19:55:35
| 2020-02-17T19:55:35
| 241,194,176
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,280
|
py
|
import itertools
#############QUESTION1###############
def createBoxList( size ):
boxes = []
for i in range(0,(int)(size/2)):
boxes.append("black")
for i in range((int)(size/2),size):
boxes.append("white")
return boxes
def boxTail(boxList,low,middle):
if(middle>=len(boxList)):
return boxList
boxList[low],boxList[middle]=boxList[middle],boxList[low]
boxTail(boxList,low+2,middle+2)
def boxRec(boxList):
return boxTail(boxList,1,(int)(len(boxList)/2))
#############QUESTION2###############
def findFakeCoin( arr ):
if(len(arr)==1):
return arr[0]
if(len(arr)%2==0):
if(sum( arr[0:(int)(len(arr)/2)] ) < sum( arr[(int)(len(arr)/2):len(arr)] )):
return findFakeCoin(arr[0:(int)(len(arr)/2)])
else:
return findFakeCoin(arr[(int)(len(arr)/2):len(arr)])
elif(len(arr)%2==1):
if(sum( arr[0:(int)(len(arr)/2)] ) == sum( arr[(int)(len(arr)/2):len(arr)-1] )):
return arr[len(arr)-1]
elif(sum( arr[0:(int)(len(arr)/2)] ) < sum( arr[(int)(len(arr)/2):len(arr)-1] )):
return findFakeCoin(arr[0:(int)(len(arr)/2)])
else:
return findFakeCoin(arr[(int)(len(arr)/2):len(arr)-1])
#############QUESTION3###############
quicksortSwapNum = 0
insertionSortSwapNum = 0
def insertionSort(arr): #decrease and conquer
global insertionSortSwapNum
for i in range(1,len(arr)):
current=arr[i]
position=i-1
while position>=0 and current<arr[position]:
arr[position+1]=arr[position]
insertionSortSwapNum+=1
position-=1
arr[position+1]=current
return insertionSortSwapNum
def rearrange(arr,low,high):
global quicksortSwapNum
i = ( low-1 )
pivot = arr[high]
for j in range(low , high):
if arr[j] < pivot:
i = i+1
arr[i],arr[j] = arr[j],arr[i]
quicksortSwapNum+=1
arr[i+1],arr[high] = arr[high],arr[i+1]
quicksortSwapNum+=1
return ( i+1 )
def quickSort(arr,low,high):#divide and conquer
if high > low:
index = rearrange(arr,low,high)
quickSort(arr, low, index-1)
quickSort(arr, index+1, high)
return quicksortSwapNum
#############QUESTION4###############
def findMedian(arr):
insertionSort(arr)
if(len(arr)%2==0):
return (arr[(int)(len(arr)/2)]+arr[(int)(len(arr)/2-1)])/2
else:
return arr[(int)(len(arr)/2)]
#############QUESTION5###############
def multiply(numbers):
total = 1
for x in numbers:
total *= x
return total
def optimalSubArray(arr):
value=(max(arr)+min(arr))*(len(arr)/4)
minMult=None
minList=None
for i in range(1,len(arr)+1):
combs=itertools.combinations(arr,i)
minList,minMult = (recSub((list)(combs),value,minList,minMult))
return minList
def recSub(combs,value,minList,minMult):
if(len(combs)==0):
return minList,minMult
elif(sum(combs[0])>=value):
if( (minMult==None) or (multiply(combs[0])<minMult) ):
minMult=multiply(combs[0])
minList=combs[0]
return recSub(combs[1:len(combs)],value,minList,minMult)
def main():
print ("TEST FUNCTION")
print ("\n**Box Test**")
boxList=createBoxList(8)
print ("Unchanged list:",boxList)
boxRec(boxList)
print ("Changed list:",boxList)
print ("\n**Fake Coin Test**")
coins=[2,2,1,2,2,2,2]
print ("Coin list:",coins)
print ("Fake coin:",findFakeCoin(coins))
print ("\n**Insertion and quicksort test**")
arr=[10,9,8,7,6,5,4,3,2,1]
print ("Unsorted array:",arr)
print ("Quicksort number of swap:",quickSort(arr,0,len(arr)-1))
print ("Quicksorted array:",arr)
arr2=[10,9,8,7,6,5,4,3,2,1]
print ("Unsorted array:",arr2)
print ("Insertion sort number of swap:",insertionSort(arr2))
print ("Insertion sorted array:",arr2)
print ("\n**Find median test**")
arr3=[10,20,12,13,19,1]
print ("Median array:",arr3)
print ("Median is:",findMedian(arr3))
print ("\n**Find optimal sub array**")
arr4=[2,4,7,5,22,11]
print ("Array is:",arr4)
print ("Optimal sub array is:",optimalSubArray(arr4))
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
52ba6a2814b715ec92f01331d0712c169ef9ebfa
|
009eddcbb8e4917710ad3c8661ee5bb04b6fc512
|
/tf-transliteration/transliterate.py
|
2a032f79be925ab091ef9b896e4cc7c30e5a4c45
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
SRSTLCI/tf-transliteration-riggs2slo-toolkit
|
873ea0ccb4c7f270bf16b137ea59ad324ea176ce
|
e6224627408ea23f830e2b3cd4f6e3a723d70d9d
|
refs/heads/main
| 2023-04-20T18:47:30.731897
| 2021-05-17T16:08:55
| 2021-05-17T16:08:55
| 366,071,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,962
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import os, sys, io, re
import six
from data import create_vocab, load_vocab
from data import split_text_file, SPECIALS
from data import create_dataset, make_data_iter_fn
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer("train_steps", 0,
"The number of steps to run training for.")
flags.DEFINE_integer("eval_steps", 100, "Number of steps in evaluation.")
flags.DEFINE_integer("min_eval_frequency", 101, "Minimum steps between evals")
flags.DEFINE_string("hparams", "", "Comma separated list of hyperparameters")
flags.DEFINE_string("model_name", "ei", "Name of model")
flags.DEFINE_string("data_file", None, "TSV Data filename")
flags.DEFINE_float("eval_fraction", 0.05, "Fraction dataset used for evaluation")
flags.DEFINE_string("decode_input_file", None, "File to decode")
flags.DEFINE_string("vocab_file", "chars.vocab", "Character vocabulary file")
tf.logging.set_verbosity(tf.logging.INFO)
def decode_hparams(vocab_size, overrides=""):
hp = tf.contrib.training.HParams(
batch_size=32,
embedding_size=64,
char_vocab_size=vocab_size + 1, #Blank label for CTC loss
hidden_size=128,
learn_rate=0.0008
)
return hp.parse(overrides)
def get_model_dir(model_name):
model_dir = os.path.join(os.getcwd(), model_name)
if not os.path.exists(model_dir):
os.mkdir(model_dir)
return model_dir
def cer(labels, predictions):
dist = tf.edit_distance(predictions, labels)
return tf.metrics.mean(dist)
def create_model():
"""
Actual model function.
Refer https://arxiv.org/abs/1610.09565
"""
def model_fn(features, labels, mode, params):
hparams = params
inputs = features['input']
input_lengths = features['input_length']
targets = labels
target_lengths = features['target_length']
# Flatten input lengths
input_lengths = tf.reshape(input_lengths, [-1])
with tf.device('/cpu:0'):
embeddings = tf.Variable(
tf.truncated_normal(
[hparams.char_vocab_size, hparams.embedding_size],
stddev=(1/np.sqrt(hparams.embedding_size))),
name='embeddings')
input_emb = tf.nn.embedding_lookup(embeddings, inputs)
cell_fw = tf.nn.rnn_cell.BasicLSTMCell(hparams.hidden_size//2)
cell_bw = tf.nn.rnn_cell.BasicLSTMCell(hparams.hidden_size//2)
with tf.variable_scope('encoder'):
# BiLSTM
enc_outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, input_emb,
input_lengths, dtype=tf.float32)
enc_outputs = tf.concat(enc_outputs, axis=-1)
with tf.variable_scope('decoder'):
# Project to vocab size
logits = tf.layers.dense(enc_outputs, hparams.char_vocab_size)
# CTC loss and decoder requires Time major
logits = tf.transpose(logits, perm=[1, 0, 2])
loss = None
eval_metric_ops = None
train_op = None
predictions = None
if mode == tf.estimator.ModeKeys.TRAIN:
loss = tf.nn.ctc_loss(labels, logits, input_lengths, ignore_longer_outputs_than_inputs=True)
loss = tf.reduce_mean(loss)
optimizer = tf.contrib.opt.LazyAdamOptimizer(learning_rate=hparams.learn_rate)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
elif mode == tf.estimator.ModeKeys.EVAL:
loss = tf.nn.ctc_loss(labels, logits, input_lengths,
ignore_longer_outputs_than_inputs=True)
loss = tf.reduce_mean(loss)
eval_predictions, _ = tf.nn.ctc_greedy_decoder(logits, input_lengths)
eval_metric_ops = {
'CER': cer(labels, tf.cast(eval_predictions[0], tf.int32))
}
elif mode == tf.estimator.ModeKeys.PREDICT:
predictions, _ = tf.nn.ctc_greedy_decoder(logits, input_lengths)
predictions = tf.sparse_tensor_to_dense(tf.cast(predictions[0], tf.int32))
predictions = {'decoded': predictions}
return tf.estimator.EstimatorSpec(
mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops
)
return model_fn
def train():
"""
Train the model:
1. Create vocab file from dataset if not created
2. Split dataset into test/eval if not available
3. Create TFRecord files if not available
4. Load TFRecord files using tf.data pipeline
5. Train model using tf.Estimator
"""
model_dir = get_model_dir(FLAGS.model_name)
vocab_file = os.path.join(model_dir, FLAGS.vocab_file)
if not os.path.exists(vocab_file):
create_vocab([FLAGS.data_file], vocab_file)
vocab, characters = load_vocab(vocab_file)
train_file, eval_file = split_text_file(FLAGS.data_file, model_dir, FLAGS.eval_fraction)
train_tfr = create_dataset(train_file, vocab)
eval_tfr = create_dataset(eval_file, vocab)
hparams = decode_hparams(len(vocab), FLAGS.hparams)
tf.logging.info('params: %s', str(hparams))
train_input_fn = make_data_iter_fn(train_tfr, hparams.batch_size, True)
eval_input_fn = make_data_iter_fn(eval_tfr, hparams.batch_size, False)
estimator = tf.estimator.Estimator(
model_fn=create_model(),
model_dir=model_dir,
params=hparams,
config=tf.contrib.learn.RunConfig()
)
experiment = tf.contrib.learn.Experiment(
estimator=estimator,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
train_steps=FLAGS.train_steps,
eval_steps=FLAGS.eval_steps,
min_eval_frequency=FLAGS.min_eval_frequency
)
experiment.train_and_evaluate()
def predict():
"""
Perform transliteration using trained model. Input must be a text
file. Converts to a TFRecord first.
"""
model_dir = get_model_dir(FLAGS.model_name)
vocab_file = os.path.join(model_dir, FLAGS.vocab_file)
if not os.path.exists(vocab_file):
raise IOError("Could not find vocabulary file")
vocab, rev_vocab = load_vocab(vocab_file)
hparams = decode_hparams(len(vocab), FLAGS.hparams)
tf.logging.info('params: %s', str(hparams))
if FLAGS.decode_input_file is None:
raise ValueError("Must provide input field to decode")
tfr_file = create_dataset(FLAGS.decode_input_file, vocab)
infer_input_fn = make_data_iter_fn(tfr_file, hparams.batch_size, False)
estimator = tf.estimator.Estimator(
model_fn=create_model(),
model_dir=model_dir,
params=hparams,
config=tf.contrib.learn.RunConfig()
)
y = estimator.predict(input_fn=infer_input_fn, predict_keys=['decoded'])
ignore_ids = set([vocab[c] for c in SPECIALS] + [0])
decode_output_file = re.sub(r'\..+', '.out.txt', FLAGS.decode_input_file)
count = 0
with io.open(decode_output_file, 'w', encoding='utf-8') as fp:
for pred in y:
decoded = pred['decoded']
if len(decoded.shape) == 1:
decoded = decoded.reshape(1, -1)
for r in range(decoded.shape[0]):
fp.write(''.join([rev_vocab[i] for i in decoded[r, :] if i not in ignore_ids]) + '\n')
count += 1
if count % 10000 == 0:
tf.logging.info('Decoded %d lines', count)
def main(unused_argv):
if FLAGS.decode_input_file:
predict()
elif FLAGS.train_steps > 0:
train()
tf.app.run()
|
[
"noreply@github.com"
] |
noreply@github.com
|
795d213349a1ac367a0dcc6c7f13ed3a859b131d
|
c385ed950cd8512915f97a8bbca466349b647a56
|
/code/model.py
|
c9fc26614280c37bf6aa54a2050dc001f870e95d
|
[] |
no_license
|
livenb/Ultrasonic_Nerve
|
b5f2c9c8a0bd0e2e3e654ab247f1dca6fa65b847
|
39f5c036dbf3c63ebaae5c96d86ee6005ccfe3af
|
refs/heads/master
| 2021-01-10T23:08:18.455410
| 2017-02-09T01:42:58
| 2017-02-09T01:42:58
| 70,640,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,683
|
py
|
from __future__ import print_function
import cv2
import numpy as np
from keras.models import Model
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K
from data_prepare import load_train_data, load_test_data
data_path = '../data/'
# K.set_image_dim_ordering('th') # Theano dimension ordering in this code
img_rows = 64
img_cols = 80
smooth = 1.
def dice_coef(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coef_loss(y_true, y_pred):
return -dice_coef(y_true, y_pred)
def get_unet():
inputs = Input((img_rows, img_cols, 1))
conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)
pre = Convolution2D(1, 1, 1, init='he_normal', activation='sigmoid')(conv5)
pre = Flatten()(pre)
aux_out = Dense(1, activation='sigmoid', name='aux_output')(pre)
up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=3)
conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)
up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=3)
conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)
up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=3)
conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)
up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=3)
conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)
conv10 = Convolution2D(1, 1, 1, activation='sigmoid',
name='main_output')(conv9)
model = Model(input=inputs, output=[conv10, aux_out])
model.compile(optimizer=Adam(lr=1e-5),
loss={'main_output': dice_coef_loss, 'aux_output': 'binary_crossentropy'},
metrics={'main_output': dice_coef, 'aux_output': 'acc'},
loss_weights={'main_output': 1., 'aux_output': 0.5})
return model
def preprocess(imgs):
imgs_p = np.ndarray((imgs.shape[0], img_rows, img_cols, 1),
dtype=np.uint8)
for i in range(imgs.shape[0]):
img = cv2.resize(imgs[i], (img_cols, img_rows))
imgs_p[i] = img.reshape((img.shape[0],img.shape[1],1))
return imgs_p
def mask_exist(mask):
return np.array([int(np.sum(mask[i, 0]) > 0) for i in xrange(len(mask))])
def train_and_predict():
print('-'*30)
print('Loading and preprocessing train data...')
print('-'*30)
imgs_train, imgs_mask_train = load_train_data()
imgs_train = preprocess(imgs_train)
imgs_mask_train = preprocess(imgs_mask_train)
imgs_train = imgs_train.astype('float32')
mean = np.mean(imgs_train) # mean for data centering
std = np.std(imgs_train) # std for data normalization
imgs_train -= mean
imgs_train /= std
imgs_mask_train = imgs_mask_train.astype('float32')
imgs_mask_train /= 255. # scale masks to [0, 1]
print('-'*30)
print('Creating and compiling model...')
print('-'*30)
model = get_unet()
model_checkpoint = ModelCheckpoint(data_path+'unet.hdf5', monitor='loss',
save_best_only=True)
print('-'*30)
print('Fitting model...')
print('-'*30)
model.fit(imgs_train, [imgs_mask_train, mask_exist(imgs_mask_train)],
batch_size=32, nb_epoch=20,
verbose=1, shuffle=True, callbacks=[model_checkpoint])
print('-'*30)
print('Loading and preprocessing test data...')
print('-'*30)
imgs_test, imgs_id_test = load_test_data()
imgs_test = preprocess(imgs_test)
imgs_test = imgs_test.astype('float32')
imgs_test -= mean
imgs_test /= std
print('-'*30)
print('Loading saved weights...')
print('-'*30)
model.load_weights(data_path+'unet.hdf5')
print('-'*30)
print('Predicting masks on test data...')
print('-'*30)
imgs_mask_test = model.predict(imgs_test, verbose=1)
np.save(data_path+'imgs_mask_test.npy', imgs_mask_test)
if __name__ == '__main__':
train_and_predict()
|
[
"livenb666@gmail.com"
] |
livenb666@gmail.com
|
820e15a0e02606bf4165dcedc96fab3b641663ef
|
da8a2b9404e6bb9f3d6ca5a786fd01eddf440ec4
|
/lyric/apps.py
|
618476d0dd10601771f2dbee5c71aa66f38b89f0
|
[] |
no_license
|
BrightHao/Django_Music
|
c099e22d4606a8101f1be3be3a9f4e7b2babcc25
|
c31df1a0d3f34594c65bddcc88815e22bc2f0902
|
refs/heads/master
| 2023-03-17T20:01:33.915667
| 2021-03-13T05:37:49
| 2021-03-13T05:37:49
| 347,283,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 85
|
py
|
from django.apps import AppConfig
class LyricConfig(AppConfig):
name = 'lyric'
|
[
"861759757@qq.com"
] |
861759757@qq.com
|
916795017de2b6b98ecb83425e776e9a8975c090
|
8212c7b8c532681107f735643ab47bb20da177ac
|
/content_indexer/content_indexer.py
|
cb4003e22f16b032b2dee9eb2467f20f7d22044f
|
[] |
no_license
|
jjenner689/Python
|
31458c2ca0b612776387fce3bfd3ad56fd07f031
|
665c25077b86b7c802312d5ef4b1ec740b752f8b
|
refs/heads/master
| 2016-08-10T17:06:34.721622
| 2015-10-05T15:00:24
| 2015-10-05T15:00:24
| 43,438,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,050
|
py
|
import string
import os
"""
Content Indexing Engine Capstone Project
You have a bunch of files in the file system! How can we index these files to make them easily searchable by keyword?
Indexing is a way of moving work 'upfront' so that when the user searches, less work is needed to get them the right search results.
Tips:
Look into array .extend() method
Look into string module , and .punctuation
Look into the set() builtin data type
Example index:
index = {'cat':['filename1','filename2','filename3'],'dog':['filename2',filename3]}
"""
#Tip: upgrade your recursive find code from a previous exercise to return a list of files
def recursive_find(name, index = {}):
array = os.listdir(name)
for i in array:
path = os.path.join(name, i)
if os.path.isdir(path):
recursive_find(path, index)
else:
data_string = read_data(path)
data_string = strip_punctuation(data_string)
data = split_data_string(data_string)
index = add_to_index(data, i, index)
return index
stop_words = ['a','an','and','i']
def read_data(filename):
with open(filename,"r") as f:
return f.read()
def strip_punctuation(data_string):
punctuation = ["\n",",","'","/","\"","?","+","*","(",")","#","!", "-"]
for i in punctuation:
data_string = data_string.replace(i, ' ')
return data_string
def split_data_string(data_string):
data = data_string.split(" ")
data = list(set(data))
data = map(lambda x: x.lower(), data)
if '' in data:
data.remove('')
return data
def add_to_index(words,filename,index):
for i in words:
if i in index:
index[i].append(filename)
else:
index[i] = [filename]
return index
def handle_words(response, index):
words = response.split(' ')
both = ''
if set(words).issubset(set(index)):
for i in range(len(words)-1):
print index[words[i]], '/', index[words[i+1]]
both = list(set(index[words[i]]) & set(index[words[i+1]]))
if both == '':
both = index[words[0]]
print '\n%s found in files %s' % (words, both)
else:
print '\n%s not found....' % list(set(words) - set(index))
def run_interactive():
print '''\n***Welcome to Josh's content index!***\n'''
#index = recursive_find('/home/josh/Desktop/text_files')
response_1 = ''
response_2 = ''
while not os.path.isdir(response_1):
if response_1 == 'q':
return
response_1 = raw_input('Please enter a valid directory to index or press q to quit > ')
index = recursive_find(response_1)
while response_2 != 'q':
response_2 = raw_input('\nEnter the item/s (separated by spaces) you would like to search or press q to quit > ')
if response_2 == 'q':
break
handle_words(response_2, index)
print "\nThankyou for using Josh's content index.......\n"
run_interactive()
|
[
"jjenner689@gmail.com"
] |
jjenner689@gmail.com
|
49d7b8bebbad46079eac386c638ee23415db1cdd
|
0c8f747e59846ddd7d2479930d38db361ff59bd6
|
/secrets.py
|
b6eb1026c3cc29b7a0e1dd2f39b9bbafa41a8228
|
[] |
no_license
|
PaveTranquil/Texity
|
39289163e1c5b4ce4d3dea7d718916db8661890a
|
a4798bbf0cf89496ca12bdb1cd48a3ef9ce4566c
|
refs/heads/main
| 2023-04-13T06:28:32.349938
| 2021-04-27T20:00:52
| 2021-04-27T20:00:52
| 354,387,657
| 3
| 1
| null | 2021-04-06T05:04:37
| 2021-04-03T20:26:48
|
Python
|
UTF-8
|
Python
| false
| false
| 69
|
py
|
with open('apikey.txt') as file:
API_KEY = file.readline().strip()
|
[
"noreply@github.com"
] |
noreply@github.com
|
8c063266cbd431b2d5053fd81731c057bd7c9d32
|
ca1da3b2d522566132ef48319bd2328813a0a0cc
|
/tests/callbacks/test_progress_bar.py
|
f621e7022801269b16d12ae7f3d73f8318b9fa95
|
[
"Apache-2.0"
] |
permissive
|
anthonytec2/pytorch-lightning
|
cc4724c3493e93c6c5b89982abe9caf78a969ee2
|
9759491940c4108ac8ef01e0b53b31f03a69b4d6
|
refs/heads/master
| 2022-11-18T15:48:40.110614
| 2020-07-17T09:54:24
| 2020-07-17T09:54:24
| 273,110,455
| 1
| 0
|
Apache-2.0
| 2021-05-13T20:38:51
| 2020-06-18T01:06:34
|
Python
|
UTF-8
|
Python
| false
| false
| 6,732
|
py
|
import pytest
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ProgressBarBase, ProgressBar, ModelCheckpoint
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.base import EvalModelTemplate
@pytest.mark.parametrize('callbacks,refresh_rate', [
([], 1),
([], 2),
([ProgressBar(refresh_rate=1)], 0),
([ProgressBar(refresh_rate=2)], 0),
([ProgressBar(refresh_rate=2)], 1),
])
def test_progress_bar_on(tmpdir, callbacks, refresh_rate):
"""Test different ways the progress bar can be turned on."""
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=callbacks,
progress_bar_refresh_rate=refresh_rate,
max_epochs=1,
overfit_batches=5,
)
progress_bars = [c for c in trainer.callbacks if isinstance(c, ProgressBarBase)]
# Trainer supports only a single progress bar callback at the moment
assert len(progress_bars) == 1
assert progress_bars[0] is trainer.progress_bar_callback
@pytest.mark.parametrize('callbacks,refresh_rate', [
([], 0),
([], False),
([ModelCheckpoint('../trainer')], 0),
])
def test_progress_bar_off(tmpdir, callbacks, refresh_rate):
"""Test different ways the progress bar can be turned off."""
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=callbacks,
progress_bar_refresh_rate=refresh_rate,
)
progress_bars = [c for c in trainer.callbacks if isinstance(c, ProgressBar)]
assert 0 == len(progress_bars)
assert not trainer.progress_bar_callback
def test_progress_bar_misconfiguration():
"""Test that Trainer doesn't accept multiple progress bars."""
callbacks = [ProgressBar(), ProgressBar(), ModelCheckpoint('../trainer')]
with pytest.raises(MisconfigurationException, match=r'^You added multiple progress bar callbacks'):
Trainer(callbacks=callbacks)
def test_progress_bar_totals(tmpdir):
"""Test that the progress finishes with the correct total steps processed."""
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
progress_bar_refresh_rate=1,
limit_val_batches=1.0,
max_epochs=1,
)
bar = trainer.progress_bar_callback
assert 0 == bar.total_train_batches
assert 0 == bar.total_val_batches
assert 0 == bar.total_test_batches
trainer.fit(model)
# check main progress bar total
n = bar.total_train_batches
m = bar.total_val_batches
assert len(trainer.train_dataloader) == n
assert bar.main_progress_bar.total == n + m
# check val progress bar total
assert sum(len(loader) for loader in trainer.val_dataloaders) == m
assert bar.val_progress_bar.total == m
# main progress bar should have reached the end (train batches + val batches)
assert bar.main_progress_bar.n == n + m
assert bar.train_batch_idx == n
# val progress bar should have reached the end
assert bar.val_progress_bar.n == m
assert bar.val_batch_idx == m
# check that the test progress bar is off
assert 0 == bar.total_test_batches
assert bar.test_progress_bar is None
trainer.test(model)
# check test progress bar total
k = bar.total_test_batches
assert sum(len(loader) for loader in trainer.test_dataloaders) == k
assert bar.test_progress_bar.total == k
# test progress bar should have reached the end
assert bar.test_progress_bar.n == k
assert bar.test_batch_idx == k
def test_progress_bar_fast_dev_run(tmpdir):
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
fast_dev_run=True,
)
progress_bar = trainer.progress_bar_callback
assert 1 == progress_bar.total_train_batches
# total val batches are known only after val dataloaders have reloaded
trainer.fit(model)
assert 1 == progress_bar.total_val_batches
assert 1 == progress_bar.train_batch_idx
assert 1 == progress_bar.val_batch_idx
assert 0 == progress_bar.test_batch_idx
# the main progress bar should display 2 batches (1 train, 1 val)
assert 2 == progress_bar.main_progress_bar.total
assert 2 == progress_bar.main_progress_bar.n
trainer.test(model)
# the test progress bar should display 1 batch
assert 1 == progress_bar.test_batch_idx
assert 1 == progress_bar.test_progress_bar.total
assert 1 == progress_bar.test_progress_bar.n
@pytest.mark.parametrize('refresh_rate', [0, 1, 50])
def test_progress_bar_progress_refresh(tmpdir, refresh_rate):
"""Test that the three progress bars get correctly updated when using different refresh rates."""
model = EvalModelTemplate()
class CurrentProgressBar(ProgressBar):
train_batches_seen = 0
val_batches_seen = 0
test_batches_seen = 0
def on_batch_start(self, trainer, pl_module):
super().on_batch_start(trainer, pl_module)
assert self.train_batch_idx == trainer.batch_idx
def on_batch_end(self, trainer, pl_module):
super().on_batch_end(trainer, pl_module)
assert self.train_batch_idx == trainer.batch_idx + 1
if not self.is_disabled and self.train_batch_idx % self.refresh_rate == 0:
assert self.main_progress_bar.n == self.train_batch_idx
self.train_batches_seen += 1
def on_validation_batch_end(self, trainer, pl_module):
super().on_validation_batch_end(trainer, pl_module)
if not self.is_disabled and self.val_batch_idx % self.refresh_rate == 0:
assert self.val_progress_bar.n == self.val_batch_idx
self.val_batches_seen += 1
def on_test_batch_end(self, trainer, pl_module):
super().on_test_batch_end(trainer, pl_module)
if not self.is_disabled and self.test_batch_idx % self.refresh_rate == 0:
assert self.test_progress_bar.n == self.test_batch_idx
self.test_batches_seen += 1
progress_bar = CurrentProgressBar(refresh_rate=refresh_rate)
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[progress_bar],
progress_bar_refresh_rate=101, # should not matter if custom callback provided
limit_train_batches=1.0,
num_sanity_val_steps=2,
max_epochs=3,
)
assert trainer.progress_bar_callback.refresh_rate == refresh_rate
trainer.fit(model)
assert progress_bar.train_batches_seen == 3 * progress_bar.total_train_batches
assert progress_bar.val_batches_seen == 3 * progress_bar.total_val_batches + trainer.num_sanity_val_steps
trainer.test(model)
assert progress_bar.test_batches_seen == progress_bar.total_test_batches
|
[
"noreply@github.com"
] |
noreply@github.com
|
73140cdc70ade106181a0a7092b94bcbb63b6c41
|
4b0ac126af3d635be9d248ed5b2642dfe32b56d0
|
/philips_app_engine/main.py
|
040715b066a101a64a4565b120c2e2f6f29d8966
|
[] |
no_license
|
CosmaTrix/hackathon-git
|
b4d31a91531818fa943796a81def0a9626283e83
|
cfbdf81aa20ff74ce3af3424018f8b90201f5fe4
|
refs/heads/master
| 2020-12-24T17:08:42.627668
| 2015-01-31T23:08:04
| 2015-01-31T23:08:04
| 30,111,781
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,087
|
py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from urlparse import urljoin
import webapp2
import json
import requests
import settings
GREEN = 21845.
RED = 0
BRIGHTNESS_MAX = 255.
class MainHandler(webapp2.RequestHandler):
def __init__(self, *args, **kwargs):
super(MainHandler, self).__init__(*args, **kwargs)
self.lights = {
0: "http://{0}/api/newdeveloper/lights/1/".format(
settings.PHILIPS_HUE_IP),
1: "http://{0}/api/newdeveloper/lights/3/".format(
settings.PHILIPS_HUE_IP),
2: "http://{0}/api/newdeveloper/lights/2/".format(
settings.PHILIPS_HUE_IP),
}
self.last_light = 2
def __dict_for(self, hue_color, bright):
return {
"on": True,
"sat": 255,
"bri": bright,
"hue": hue_color
}
def __request_dict_from_resp(self, data):
return {
"on": data["state"]["on"],
"sat": data["state"]["sat"],
"br": data["state"]["bri"],
"hue": data["state"]["hue"],
}
def __sequence_lights(self, data):
resp = requests.get(self.lights[1])
data_1 = json.loads(resp.text)
requests.put(urljoin(self.lights[2], 'state'), json.dumps(
self.__request_dict_from_resp(data_1)))
resp = requests.get(self.lights[0])
data_0 = json.loads(resp.text)
requests.put(urljoin(self.lights[1], 'state'), json.dumps(
self.__request_dict_from_resp(data_0)))
requests.put(urljoin(self.lights[0], 'state'), json.dumps(data))
def __turn_lights_off(self):
off_data = json.dumps({"on": False})
requests.put(urljoin(self.lights[0], 'state'), off_data)
time.sleep(0.1)
requests.put(urljoin(self.lights[1], 'state'), off_data)
time.sleep(0.1)
requests.put(urljoin(self.lights[2], 'state'), off_data)
time.sleep(0.1)
def get(self):
fh = open("index.html", "r")
self.response.headers['Content-Type'] = 'text/html'
self.response.out.write(fh.read())
def post(self):
jsonstring = self.request.body
data = json.loads(jsonstring)
values = data.get("values", [])
max_impr = 1
max_vol = 1
tmp_impr = []
tmp_vol = []
for value in values:
impressions = value.get("impressions")
tmp_impr.append(impressions)
volume = value.get("volume")
tmp_vol.append(volume)
max_impr = max(max_impr, impressions)
max_vol = max(max_vol, volume)
rate_impr = GREEN / max_impr
list_impr = [int(impr * rate_impr) for impr in tmp_impr]
rate_vol = BRIGHTNESS_MAX / max_vol
list_vol = [int(vol * rate_vol) for vol in tmp_vol]
response = {}
for i in range(len(list_impr)):
json_dict = self.__dict_for(list_impr[i], list_vol[i])
self.__sequence_lights(json_dict)
json_dict["count"] = i
generated = response.get("generated", [])
generated.append(json_dict)
response["generated"] = generated
time.sleep(data.get("interval", 0.5))
self.__turn_lights_off()
self.response.headers['Content-Type'] = 'application/json'
response["status"] = "OK"
self.response.out.write(json.dumps(response))
app = webapp2.WSGIApplication([('/', MainHandler)], debug=True)
|
[
"marco@travelbird.nl"
] |
marco@travelbird.nl
|
3eeca3eaadcaf592565384f53f3160774272b0ca
|
86f5ed0463be0b32508865889f5a77fdda549c22
|
/raspberryPi/ultrasonic_client.py
|
3441acce60eca5f0c6ba2d110d8c8b66f0352044
|
[
"BSD-2-Clause"
] |
permissive
|
pseudoyim/galvaneye
|
e6d7d210bacab5841e98cf275744dcc4ea6454aa
|
c7c1f1bb893fb8fb6f83c6765473d506979ec4b0
|
refs/heads/master
| 2023-01-16T00:43:27.283533
| 2020-11-22T03:09:58
| 2020-11-22T03:09:58
| 70,966,508
| 68
| 25
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,129
|
py
|
from socket import *
import time
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
# create a socket and bind socket to the host
client_socket = socket(AF_INET, SOCK_STREAM)
client_socket.connect(('10.10.10.2', 8002))
def measure():
"""
measure distance
"""
GPIO.output(GPIO_TRIGGER, True)
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER, False)
start = time.time()
while GPIO.input(GPIO_ECHO)==0:
start = time.time()
while GPIO.input(GPIO_ECHO)==1:
stop = time.time()
elapsed = stop-start
distance = (elapsed * 34300)/2
return distance
# referring to the pins by GPIO numbers
GPIO.setmode(GPIO.BCM)
# define pi GPIO
GPIO_TRIGGER = 16
GPIO_ECHO = 18
# output pin: Trigger
GPIO.setup(GPIO_TRIGGER,GPIO.OUT)
# input pin: Echo
GPIO.setup(GPIO_ECHO,GPIO.IN)
# initialize trigger pin to low
GPIO.output(GPIO_TRIGGER, False)
try:
while True:
distance = measure()
print "Distance : %.1f cm" % distance
# send data to the host every 0.5 sec
client_socket.send(str(distance))
time.sleep(0.5)
finally:
client_socket.close()
GPIO.cleanup()
|
[
"paul.j.yim@gmail.com"
] |
paul.j.yim@gmail.com
|
4a460011144f616f403fdf0cd4870acf4bd66824
|
3bdc38b3ba7bcd87f10f24fdae3832cc8344ffba
|
/douban_spider/doubanspider/douban_image_pipelines.py
|
c34d5d2d875e4668c16b617c5a0411846c223f8e
|
[] |
no_license
|
sheng-jie/Learning.Python
|
b1eee7fe53b0b11b8ca4e2e8716dd6ebb06948d6
|
61f0336c76b9a769021238664142cbc0e4a39d02
|
refs/heads/master
| 2020-03-20T23:01:44.507048
| 2018-06-19T01:46:21
| 2018-06-19T01:46:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
import os
import scrapy
from scrapy.exceptions import DropItem
from scrapy.pipelines.images import ImagesPipeline
class DoubanImagesPipeline(ImagesPipeline):
def get_media_requests(self, item, info):
for image_url in item['image_urls']:
yield scrapy.Request(image_url, meta={'item': item})
def item_completed(self, results, item, info):
image_paths = [x['path'] for ok, x in results if ok]
if not image_paths:
raise DropItem("Item contains no images")
# os.rename('books/' + image_paths[0], 'books/full/' + item['name'] + '.jpg')
return item
def file_path(self, request, response=None, info=None):
item = request.meta['item']
file_format = request.url.split('.')[-1]
filename = u'full/{0[name]}.{1}'.format(item, file_format)
return filename
|
[
"ysjshengjie@live.cn"
] |
ysjshengjie@live.cn
|
c8df0b5f2035c2386b9918776f917c3effb9da50
|
02fa1542bc428b64da276afdb46f2f2f7199f7a6
|
/DataManager.py
|
ff0a2123a51d8a90d13e0f767a8e98c9f0b921d1
|
[] |
no_license
|
ulissesbcorrea/atae-lstm-theano-modified
|
bb4ba7b9786d00d5bf6da16cbbdd5067e8118e91
|
e3bfa6a74df878ee7eec115c1a6e8fd1c4fcfa46
|
refs/heads/master
| 2022-12-13T16:36:10.256232
| 2020-09-14T09:21:14
| 2020-09-14T09:21:14
| 287,218,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,457
|
py
|
# -*- encoding: utf-8 -*-
import numpy as np
import theano
class Sentence(object):
"""docstring for sentence"""
def __init__(self, content, target, rating, grained):
self.content, self.target = content.lower(), target
self.solution = np.zeros(grained, dtype=theano.config.floatX)
self.senlength = len(self.content.split(' '))
try:
self.solution[int(rating)+1] = 1
except Exception as e:
print 'erro no contrutor de Sentence:'+ str(e)
print 'rating:' + rating
exit()
def stat(self, target_dict, wordlist, grained=3):
data, data_target, i = [], [], 0
solution = np.zeros((self.senlength, grained), dtype=theano.config.floatX)
for word in self.content.split(' '):
data.append(wordlist[word])
# try:
# pol = Lexicons_dict[word]
# solution[i][pol+1] = 1
# except Exception as e:
# print 'error in stat:' + str(e)
# pass
i = i+1
for word in self.target.split(' '):
data_target.append(wordlist[word])
return {'seqs': data, 'target': data_target, 'solution': np.array([self.solution]), 'target_index': self.get_target(target_dict), 'original_text':self.content, 'aspect': self.target}
def get_target(self, dict_target):
return dict_target[self.target]
class DataManager(object):
    """Loads the train/dev/test corpora of an aspect-sentiment dataset.

    Python 2 code (uses xrange, dict.has_key, list.sort on dict.items()).
    Each corpus file stores records as 3 consecutive lines:
    content, target phrase, rating.
    """

    def __init__(self, dataset, seed, grained=3):
        # NOTE(review): `seed` is accepted but never used -- confirm intent.
        self.fileList = ['train', 'test', 'dev']
        self.origin = {}
        for fname in self.fileList:
            data = []
            with open('%s/%s.cor' % (dataset, fname)) as f:
                sentences = f.readlines()
                # Records come in triples: content / target / rating.
                for i in xrange(len(sentences)/3):
                    content, target, rating = sentences[i*3].strip(), sentences[i*3+1].strip(), sentences[i*3+2].strip()
                    sentence = Sentence(content, target, rating, grained)
                    data.append(sentence)
            self.origin[fname] = data
        self.gen_target()

    def gen_word(self):
        """Build the vocabulary: word -> 1-based rank by descending frequency."""
        wordcount = {}
        def sta(sentence):
            # Count words from both the sentence body and the target phrase.
            for word in sentence.content.split(' '):
                try:
                    wordcount[word] = wordcount.get(word, 0) + 1
                except:
                    wordcount[word] = 1
            for word in sentence.target.split(' '):
                try:
                    wordcount[word] = wordcount.get(word, 0) + 1
                except:
                    wordcount[word] = 1
        for fname in self.fileList:
            for sent in self.origin[fname]:
                sta(sent)
        words = wordcount.items()
        words.sort(key=lambda x:x[1], reverse=True)
        # Index 0 is implicitly reserved (ranks start at 1).
        self.wordlist = {item[0]:index+1 for index, item in enumerate(words)}
        return self.wordlist

    def gen_target(self, threshold=5):
        """Map each target phrase to an index; rare targets (< threshold) map to 0."""
        self.dict_target = {}
        for fname in self.fileList:
            for sent in self.origin[fname]:
                if self.dict_target.has_key(sent.target):
                    self.dict_target[sent.target] = self.dict_target[sent.target] + 1
                else:
                    self.dict_target[sent.target] = 1
        i = 0
        for (key,val) in self.dict_target.items():
            if val < threshold:
                self.dict_target[key] = 0
            else:
                self.dict_target[key] = i
                i = i + 1
        return self.dict_target

    def gen_data(self, grained=3):
        """Vectorize every sentence; returns (train, dev, test) example lists."""
        self.data = {}
        for fname in self.fileList:
            self.data[fname] = []
            for sent in self.origin[fname]:
                self.data[fname].append(sent.stat(self.dict_target, self.wordlist))
        return self.data['train'], self.data['dev'], self.data['test']

    def word2vec_pre_select(self, mdict, word2vec_file_path, save_vec_file_path):
        """Filter a word2vec text file down to the words present in mdict.

        The first output line is "<word count> <vector dimension>".
        """
        list_seledted = ['']
        line = ''
        with open(word2vec_file_path) as f:
            for line in f:
                tmp = line.strip().split(' ', 1)
                if mdict.has_key(tmp[0]):
                    list_seledted.append(line.strip())
        # Header built from the last line read -- assumes a non-empty input file.
        list_seledted[0] = str(len(list_seledted)-1) + ' ' + str(len(line.strip().split())-1)
        open(save_vec_file_path, 'w').write('\n'.join(list_seledted))
|
[
"ulissesbcorrea@gmail.com"
] |
ulissesbcorrea@gmail.com
|
4f02cd88aa3d26c3be1bbb4b45c2049a6e8a6317
|
9ab9d9a3883471763edbceea59a0e83170581b5f
|
/eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-i686-ucs4.egg/EGG-INFO/scripts/bed_extend_to.py
|
2985cc3497acf222c69151a76b253624baa01752
|
[
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
asmmhossain/phyG
|
24dc211dad5b3e89c87ff384e841f2e98bbd52db
|
023f505b705ab953f502cbc55e90612047867583
|
refs/heads/master
| 2022-11-21T12:43:46.172725
| 2014-02-14T12:33:08
| 2014-02-14T12:33:08
| 13,800,552
| 0
| 1
|
NOASSERTION
| 2020-07-25T21:05:41
| 2013-10-23T11:04:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,132
|
py
|
#!/afs/bx.psu.edu/project/pythons/linux-i686-ucs4/bin/python2.7
"""
Read BED file and extend each record to the specified minimum length. If chromosome
size information is provided trim extended intervals.
usage: %prog amount [ chrom_file ] < bed_file
"""
import sys
from bx.intervals.io import GenomicIntervalReader
# Minimum interval length requested on the command line.
length = int( sys.argv[1] )
chrom_len = None
if len( sys.argv ) > 2:
    # Optional chromosome-size file: "<chrom> <length>" per line.
    chrom_len = dict( ( fields[0], int( fields[1] ) ) for fields in map( str.split, open( sys.argv[2] ) ) )
for interval in GenomicIntervalReader( sys.stdin ):
    if interval.end - interval.start < length:
        start = interval.start
        end = interval.end
        # Extend in positive direction on strand
        if interval.strand == "+":
            end = start + length
        else:
            start = end - length
        # Trim to [0, chromosome length] when sizes are known.
        if start < 0:
            start = 0
        if chrom_len and end > chrom_len[interval.chrom]:
            end = chrom_len[interval.chrom]
        # Set new start and end
        interval.start = start
        interval.end = end
    # Output possibly adjusted interval (unchanged ones pass through too).
    print interval
|
[
"mukarram819@gmail.com"
] |
mukarram819@gmail.com
|
811cf7cfd7af8000864d8ca0048863f2d418d819
|
a9756247d833412e64b18e741c293e7cab9c0b5b
|
/Command Line/enrollment.py
|
3e6491fc442bd5f6d3ef46d8be2a7ca14fa00ac7
|
[] |
no_license
|
guennithegun/Command-Line
|
e44b2b5b7ceac9adc7894f37bf29396b5557dfff
|
7c01985129415a56250fa679c9290c3395bf5158
|
refs/heads/master
| 2020-04-08T04:05:26.323362
| 2018-11-25T05:59:43
| 2018-11-25T05:59:43
| 159,002,688
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
import pandas as pd

if __name__ == "__main__":
    # Civil Rights Data Collection: per-school enrollment counts.
    data = pd.read_csv("data/CRDC2013_14.csv", encoding="Latin-1")
    # Enrollment columns broken down by race (HI, AM, AS, HP, BL, WH, TR) and sex (M/F).
    cols = ["SCH_ENR_HI_M", "SCH_ENR_HI_F", "SCH_ENR_AM_M", "SCH_ENR_AM_F", "SCH_ENR_AS_M", "SCH_ENR_AS_F", "SCH_ENR_HP_M", "SCH_ENR_HP_F", "SCH_ENR_BL_M", "SCH_ENR_BL_F", "SCH_ENR_WH_M", "SCH_ENR_WH_F", "SCH_ENR_TR_M", "SCH_ENR_TR_F"]
    data["total_enrollment"] = data["TOT_ENR_M"] + data["TOT_ENR_F"]
    all_enrollment = data["total_enrollment"].sum()
    # Percentage of total enrollment contributed by each race/sex group.
    # (Removed the unused `counter` local and the redundant intermediate
    # `sums` dict; the arithmetic is unchanged.)
    gender_race_perc = {col: (data[col].sum() * 100) / all_enrollment for col in cols}
    for key, value in gender_race_perc.items():
        print(key)
        print(value)
|
[
"noreply@github.com"
] |
noreply@github.com
|
7ac445a7981cc09e31bfafce07f08ab38310efce
|
2a1146bd74be4ae270bd2dc105e1917aa13a3bfe
|
/gotti/modules/modules.py
|
346c86de735720927666368efd616105af0b2a46
|
[
"MIT"
] |
permissive
|
HellBringerReal/Telegram-Bot
|
0a2721ed04667c6bb1347f7ccdf0e657a360a71a
|
c204de5e8212fd32aaae6afd92c2bc7999457d4f
|
refs/heads/master
| 2023-06-28T20:34:50.909101
| 2021-08-11T20:22:27
| 2021-08-11T20:22:27
| 290,599,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,947
|
py
|
import importlib
from telegram import Bot, Update, ParseMode
from telegram.ext import CommandHandler, run_async
from gotti import dispatcher
from gotti.__main__ import (IMPORTED, HELPABLE, MIGRATEABLE, STATS, USER_INFO, DATA_IMPORT, DATA_EXPORT, CHAT_SETTINGS,
USER_SETTINGS)
from gotti.modules.helper_funcs.chat_status import sudo_plus, dev_plus
@run_async
@dev_plus
def load(bot: Bot, update: Update):
    """/load <name>: import gotti.modules.<name> at runtime and register it.

    Registers the module's handlers with the dispatcher and adds it to every
    capability registry it declares (__help__, __migrate__, __stats__, ...).
    Dev-only command.
    """
    message = update.effective_message
    text = message.text.split(" ", 1)[1]
    load_messasge = message.reply_text(f"Attempting to load module : <b>{text}</b>", parse_mode=ParseMode.HTML)
    try:
        imported_module = importlib.import_module("gotti.modules." + text)
    except:
        # NOTE(review): bare except also hides syntax errors in the module itself.
        load_messasge.edit_text("Does that module even exist?")
        return
    if not hasattr(imported_module, "__mod_name__"):
        imported_module.__mod_name__ = imported_module.__name__
    if not imported_module.__mod_name__.lower() in IMPORTED:
        IMPORTED[imported_module.__mod_name__.lower()] = imported_module
    else:
        load_messasge.edit_text("Module already loaded.")
        return
    if "__handlers__" in dir(imported_module):
        handlers = imported_module.__handlers__
        for handler in handlers:
            # A handler is either a bare handler or a (handler, priority) pair.
            if type(handler) != tuple:
                dispatcher.add_handler(handler)
            else:
                handler_name, priority = handler
                dispatcher.add_handler(handler_name, priority)
    else:
        # No handlers declared: roll back the registration made above.
        IMPORTED.pop(imported_module.__mod_name__.lower())
        load_messasge.edit_text("The module cannot be loaded.")
        return
    if hasattr(imported_module, "__help__") and imported_module.__help__:
        HELPABLE[imported_module.__mod_name__.lower()] = imported_module
    # Chats to migrate on chat_migrated events
    if hasattr(imported_module, "__migrate__"):
        MIGRATEABLE.append(imported_module)
    if hasattr(imported_module, "__stats__"):
        STATS.append(imported_module)
    if hasattr(imported_module, "__user_info__"):
        USER_INFO.append(imported_module)
    if hasattr(imported_module, "__import_data__"):
        DATA_IMPORT.append(imported_module)
    if hasattr(imported_module, "__export_data__"):
        DATA_EXPORT.append(imported_module)
    if hasattr(imported_module, "__chat_settings__"):
        CHAT_SETTINGS[imported_module.__mod_name__.lower()] = imported_module
    if hasattr(imported_module, "__user_settings__"):
        USER_SETTINGS[imported_module.__mod_name__.lower()] = imported_module
    load_messasge.edit_text("Successfully loaded module : <b>{}</b>".format(text), parse_mode=ParseMode.HTML)
@run_async
@dev_plus
def unload(bot: Bot, update: Update):
    """/unload <name>: detach a runtime-loaded module and deregister it everywhere.

    Mirror of load(): removes the module's handlers from the dispatcher and
    pops it from every capability registry. Dev-only command.
    """
    message = update.effective_message
    text = message.text.split(" ", 1)[1]
    unload_messasge = message.reply_text(f"Attempting to unload module : <b>{text}</b>", parse_mode=ParseMode.HTML)
    try:
        imported_module = importlib.import_module("gotti.modules." + text)
    except:
        unload_messasge.edit_text("Does that module even exist?")
        return
    if not hasattr(imported_module, "__mod_name__"):
        imported_module.__mod_name__ = imported_module.__name__
    if imported_module.__mod_name__.lower() in IMPORTED:
        IMPORTED.pop(imported_module.__mod_name__.lower())
    else:
        unload_messasge.edit_text("Can't unload something that isn't loaded.")
        return
    if "__handlers__" in dir(imported_module):
        handlers = imported_module.__handlers__
        for handler in handlers:
            # A boolean entry marks the module as not unloadable.
            if type(handler) == bool:
                unload_messasge.edit_text("This module can't be unloaded!")
                return
            elif type(handler) != tuple:
                dispatcher.remove_handler(handler)
            else:
                handler_name, priority = handler
                dispatcher.remove_handler(handler_name, priority)
    else:
        unload_messasge.edit_text("The module cannot be unloaded.")
        return
    if hasattr(imported_module, "__help__") and imported_module.__help__:
        HELPABLE.pop(imported_module.__mod_name__.lower())
    # Chats to migrate on chat_migrated events
    if hasattr(imported_module, "__migrate__"):
        MIGRATEABLE.remove(imported_module)
    if hasattr(imported_module, "__stats__"):
        STATS.remove(imported_module)
    if hasattr(imported_module, "__user_info__"):
        USER_INFO.remove(imported_module)
    if hasattr(imported_module, "__import_data__"):
        DATA_IMPORT.remove(imported_module)
    if hasattr(imported_module, "__export_data__"):
        DATA_EXPORT.remove(imported_module)
    if hasattr(imported_module, "__chat_settings__"):
        CHAT_SETTINGS.pop(imported_module.__mod_name__.lower())
    if hasattr(imported_module, "__user_settings__"):
        USER_SETTINGS.pop(imported_module.__mod_name__.lower())
    unload_messasge.edit_text(f"Successfully unloaded module : <b>{text}</b>", parse_mode=ParseMode.HTML)
@run_async
@sudo_plus
def listmodules(bot: Bot, update: Update):
    """/listmodules: reply with every helpable module and its source file."""
    lines = []
    for code in HELPABLE:
        info = IMPORTED[code]
        source = IMPORTED[info.__mod_name__.lower()]
        short_path = source.__name__.rsplit("gotti.modules.", 1)[1]
        mod_name = source.__mod_name__
        lines.append(f'- <code>{mod_name} ({short_path})</code>\n')
    reply = "Following modules are loaded : \n\n" + ''.join(lines)
    update.effective_message.reply_text(reply, parse_mode=ParseMode.HTML)
# Wire the module-management commands into the dispatcher at import time.
LOAD_HANDLER = CommandHandler("load", load)
UNLOAD_HANDLER = CommandHandler("unload", unload)
LISTMODULES_HANDLER = CommandHandler("listmodules", listmodules)
dispatcher.add_handler(LOAD_HANDLER)
dispatcher.add_handler(UNLOAD_HANDLER)
dispatcher.add_handler(LISTMODULES_HANDLER)
__mod_name__ = "MODULES"
|
[
"noreply@github.com"
] |
noreply@github.com
|
cccee8c95ce17bb44043b1a20a899ac4161055be
|
ee22ec2076a79e8de3011377fe205bc87163ab9f
|
/src/basic-c3/func-let.py
|
8c9c6ff3fea14adfbe60b86692ad4981a5710241
|
[] |
no_license
|
n18018/programming-term2
|
039a95c67372a38a34e2aa8c5975045a9fc731be
|
86c455269eed312def529604e1ac3b00f476226c
|
refs/heads/master
| 2020-03-22T08:59:29.545280
| 2018-08-29T07:57:37
| 2018-08-29T07:57:37
| 139,806,131
| 0
| 0
| null | 2018-07-05T06:42:11
| 2018-07-05T06:42:11
| null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
# Define the demo functions.
def mul_func(a, b):
    """Return the product of a and b."""
    product = a * b
    return product
def div_func(a, b):
    """Return a divided by b (true division)."""
    quotient = a / b
    return quotient
# Assign the mul_func function object itself (not a call result) to a variable.
func = mul_func
# Call the function through the variable.
result = func(2, 3)
print(result)
# Same pattern with div_func.
func2 = div_func
result = func2(10, 5)
print(result)
|
[
"n18018@std.it-college.ac.jp"
] |
n18018@std.it-college.ac.jp
|
614d20e490badf198728acb806f1b442ff8a43b7
|
9ab642dbc8b5409673e5b2f90e009aa4b5634c32
|
/st_network_server.py
|
ea0366083d46d6693412626cfecd9193cb995a67
|
[] |
no_license
|
sunpu/stwhiteboard
|
65c20aab6049acc0ca1b5b1924ae048671e119ee
|
26cb02b593076e65496beca162be24404bee690a
|
refs/heads/master
| 2021-09-07T12:40:49.138494
| 2018-02-23T01:44:37
| 2018-02-23T01:44:37
| 115,606,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,122
|
py
|
#!/usr/bin/python2.7
# -*- coding:utf-8 -*-
import SocketServer
from time import ctime
import json
# courseID -> Course; registry shared by every handler thread.
courseDict = {}
class Course:
    """One whiteboard room: its id, joined clients, and committed messages."""
    def __init__(self, courseID):
        self.courseID = courseID
        # client_address -> request socket of every joined client
        self.clientDict = {}
        # committed ("finish") messages, replayed to late joiners
        self.finishList = []
# Listen on all interfaces.
HOST = ''
PORT = 10001
ADDR = (HOST, PORT)
# Root of the shared file area manipulated by 'file' commands.
ROOT_PATH = './file/'
class Client(SocketServer.BaseRequestHandler):
    """Per-connection handler for the whiteboard protocol (Python 2).

    Wire format: each JSON message is framed as '#*#' + payload + '@%@'.
    NOTE(review): readDirectory uses `os` and `datetime`, which are not
    imported in this file -- confirm the missing imports.
    """
    role = ''       # 'teacher'/'student' etc., set by createClient
    courseID = 0    # room this connection joined/created
    bigData = ''    # accumulator for messages split across recv() calls

    def readDirectory(self, path):
        """Return a JSON listing (name/time/type/size) of `path`'s entries."""
        result = []
        paths = os.listdir(path)
        for i, item in enumerate(paths):
            sub_path = os.path.join(path, item)
            data = {}
            data['name'] = item
            timestamp = os.path.getmtime(sub_path)
            date = datetime.datetime.fromtimestamp(timestamp)
            data['time'] = date.strftime('%Y-%m-%d %H:%M:%S')
            if os.path.isdir(sub_path):
                data['type'] = 'folder'
                data['size'] = '-'
            else:
                data['type'] = 'file'
                fsize = os.path.getsize(sub_path)
                fsize = fsize / float(1024)
                data['size'] = str(round(fsize,2)) + 'KB'
            result.append(data)
        json_res = json.dumps(result)
        return json_res

    def sendMessage(self, data):
        """Send one framed message to this client only."""
        self.request.sendall('#*#' + data + '@%@')

    def sendHistoryMessage(self):
        """Replay every committed message of the course to this client."""
        finishList = courseDict[self.courseID].finishList
        for index in range(0, len(finishList)):
            self.request.sendall('#*#' + finishList[index] + '@%@')

    def boardcastMessage(self, data):
        """Send a framed message to every other client in the course."""
        clientDict = courseDict[self.courseID].clientDict
        for k in clientDict.keys():
            if k == self.client_address:
                continue
            clientDict[k].sendall('#*#' + data + '@%@')

    def processData(self, data):
        """Dispatch one decoded protocol message by its 'type' field."""
        datas = json.loads(data)
        if datas['type'] == 'createClient':
            self.role = datas['data']['role']
        elif datas['type'] == 'createCourse':
            self.courseID = datas['data']['courseID']
            if courseDict.has_key(self.courseID):
                return
            course = Course(self.courseID)
            courseDict[self.courseID] = course
        elif datas['type'] == 'joinCourse':
            self.courseID = datas['data']['courseID']
            course = courseDict[self.courseID]
            course.clientDict[self.client_address] = self.request
            self.sendHistoryMessage()
        elif datas['type'] == 'setClientAuthority' or datas['type'] == 'finish':
            self.boardcastMessage(data)
            course = courseDict[self.courseID]
            # Replace any previous committed message for the same item/subtype.
            for index in range(0, len(course.finishList)):
                historyDatas = json.loads(course.finishList[index])
                if historyDatas.has_key('itemID') and datas.has_key('itemID') and historyDatas['itemID'] == datas['itemID'] and historyDatas['subtype'] == datas['subtype']:
                    course.finishList.remove(course.finishList[index])
                    break
            course.finishList.append(data)
        elif datas['type'] == 'realtime':
            self.boardcastMessage(data)
        elif datas['type'] == 'file':
            path = ROOT_PATH
            if datas['action'] == 'list':
                path += datas['data']['path']
            elif datas['action'] == 'new':
                path += datas['data']['path']
                name = datas['data']['name']
                # NOTE(review): shell command built from client input -- injection risk.
                cmd = 'cd %s;mkdir %s;' % (path, name)
                os.system(cmd)
            elif datas['action'] == 'copy':
                path += datas['data']['path']
                name = datas['data']['name']
                # NOTE(review): destPath is never initialized before '+=' -- NameError; confirm.
                destPath += datas['data']['destPath']
                cmd = 'cd %s;cp -rf %s %s;' % (path, name, destPath)
                os.system(cmd)
            elif datas['action'] == 'move':
                path += datas['data']['path']
                name = datas['data']['name']
                # NOTE(review): same undefined destPath issue as 'copy'.
                destPath += datas['data']['destPath']
                cmd = 'cd %s;mv -rf %s %s;' % (path, name, destPath)
                os.system(cmd)
            elif datas['action'] == 'del':
                path += datas['data']['path']
                name = datas['data']['name']
                cmd = 'cd %s;rm -rf %s;' % (path, name)
                os.system(cmd)
            # `list` shadows the builtin -- kept as-is (doc-only edit).
            list = self.readDirectory(path)
            self.sendMessage(list)

    # Example messages accepted by the 'file' branch:
    #{"type":"file","action":"list","data":{"path":"/1/2"}}
    #{"type":"file","action":"new","data":{"path":"/1/2","name":"xxx"}}
    #{"type":"file","action":"copy","data":{"path":"/2","name":"xxx","destPath":"/3"}}
    #{"type":"file","action":"move","data":{"path":"/2","name":"xxx","destPath":"/3"}}
    #{"type":"file","action":"del","data":{"path":"/2","name":"xxx"}}

    def handle(self):
        # Remember the client once it connects.
        print '...connected from:', self.client_address
        while True:
            data = self.request.recv(1024 * 1024 * 10)
            if len(data) == 0:
                # Peer closed the socket: drop it from the course registry.
                course = courseDict[self.courseID]
                course.clientDict.pop(self.client_address)
                break
            if data.endswith('@%@'):
                # Complete frame: prepend any buffered partial data first.
                if len(self.bigData) > 0:
                    data = self.bigData + data
                    self.bigData = ''
                data = data.replace('@%@', '')
                dataList = data.split('#*#')
                for index in range(0, len(dataList)):
                    if dataList[index]:
                        self.processData(dataList[index])
            else:
                # Partial frame: buffer until the terminator arrives.
                self.bigData = self.bigData + data
# Threaded TCP server: one Client handler instance per connection.
tcpServ = SocketServer.ThreadingTCPServer(ADDR, Client)
print 'waiting for connection...'
tcpServ.serve_forever()
|
[
"sunpumsn@hotmail.com"
] |
sunpumsn@hotmail.com
|
ae8bf909464124ce2e2e1c318f37dd319d3ef4ac
|
5a99d1f7e0363878a5a94732598410a06008d2ed
|
/multimeter/_tasks.py
|
204079b61c271657c9be403e1e4f4d875d9109f1
|
[] |
no_license
|
av-pavlov/multimeter
|
d8b93e4a6acec420e8593ea8864db9d67ab4a177
|
d8132db1e5e0c3b153ab142599c93b302c18fbf5
|
refs/heads/master
| 2021-04-29T18:44:15.300083
| 2018-03-23T04:38:45
| 2018-03-23T04:38:45
| 121,699,767
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,491
|
py
|
# -*- coding: utf-8 -*=
import sys
import subprocess
from collections import OrderedDict
from os import listdir, stat
from os.path import isdir, join, isfile
from collections import OrderedDict
from .helpers import load_json, save_json, validate_code, check_or_create_dir, load_tests
class Tasks:
    """Ordered collection of contest tasks discovered in the work directory.

    Behaves like a dict keyed by task code; delegates to self.tasks.
    """

    def __init__(self, settings, languages):
        self._settings = settings
        self._languages = languages
        self.tasks = OrderedDict()
        self.load()

    # --- dict-like protocol, delegated to self.tasks ------------------
    def __len__(self):
        return len(self.tasks)

    def __setitem__(self, key, value):
        self.tasks[key] = value

    def __getitem__(self, item):
        return self.tasks[item]

    def __delitem__(self, key):
        del self.tasks[key]

    def __contains__(self, item):
        return item in self.tasks

    def __iter__(self):
        return self.tasks.__iter__()

    def items(self):
        return self.tasks.items()

    def keys(self):
        # Codes are always returned sorted, regardless of insertion order.
        return sorted(self.tasks.keys())

    def load(self):
        """Scan subdirectories of the work dir and load a Task from each."""
        dirs = sorted(listdir(self._settings.work_dir))
        for name in dirs:
            path = join(self._settings.work_dir, name)
            if isdir(path) and '.' not in path:
                try:
                    self.tasks[name] = Task(name, path)
                    self.tasks[name].load()
                except (TypeError, FileNotFoundError, UnicodeDecodeError):
                    # Not a readable task directory -- skip it silently.
                    pass

    def save(self):
        """Persist every task's description into its own subdirectory.

        BUG FIX: the original iterated `self.tasks` directly, which yields
        the key strings (str has no .save()); iterate the Task values.
        """
        for task in self.tasks.values():
            task.save()

    def get_results(self, task_code, username, attempt=None):
        """Collect checked-solution results for a task/user from .results/*.json.

        :param attempt: optional attempt filter; file names are
            '<task>-<user>-<attempt>.json'.
            NOTE(review): `_attempt` is compared as a string -- pass `attempt`
            as a string, an int never matches. Confirm with callers.
        :return: result dicts sorted by attempt number.
        """
        answer = []
        results_dir = join(self._settings.work_dir, '.results')
        for filename in listdir(results_dir):
            if filename[-5:] != '.json':
                continue
            name = filename[:-5]
            (_task_code, _username, _attempt) = name.split('-')
            if task_code != _task_code:
                continue
            if username != _username:
                continue
            if attempt is not None and attempt != _attempt:
                continue
            res = load_json(filename, {}, results_dir)
            res['attempt'] = int(_attempt)
            answer.append(res)
        return sorted(answer, key=lambda x: x['attempt'])

    def validate_task(self, code, data, check_uniqueness):
        """Validate a task's code and name; return a list of error messages.

        :param check_uniqueness: when True, the code must not collide with
            an existing task code.
        """
        codes_list = self.tasks if check_uniqueness else []
        errors = validate_code(code, codes_list, 'Код задания')
        if not data.get('name'):
            errors.append('Наименование не задано')
        return errors
class TestSuite:
    """One subtask of a Task: a directory of tests plus scoring/display rules."""
    # Result-display strategies
    BRIEF = 'brief'
    FULL = 'full'
    ERROR = 'error'
    RESULTS = (
        (BRIEF, 'Отображаются только баллы за подзадачу целом'),
        (FULL, 'Отображаются баллы за каждый тест'),
        (ERROR, 'Отображаются баллы за подзадачу в целом либо результат первой ошибки'),
    )
    # Scoring strategies
    PARTIAL = 'partial'
    ENTIRE = 'entire'
    SCORING = (
        (PARTIAL, 'Баллы начисляются пропорционально'),
        (ENTIRE, 'Подзадача оценивается как единое целое'),
    )
    # Class-level defaults, overridden per instance in __init__.
    task = None
    code = ''
    ts_dir = ''
    name = ''
    results = FULL
    scoring = PARTIAL
    test_score = 0
    total_score = 0
    # NOTE(review): mutable class attribute -- instances falling back to this
    # default share one list; mutating it would leak across suites. Confirm.
    depends = []

    def __init__(self, task, code, data):
        # `data` is the suite's dict from the task's JSON config;
        # 'name', 'scoring' and 'results' are required keys.
        self.task = task
        self.code = code
        self.ts_dir = join(task.test_suites_dir, code)
        self.name = data['name']
        self.scoring = data['scoring']
        self.results = data['results']
        self.test_score = data.get('test_score', 0)
        self.total_score = data.get('total_score', 0)
        self.tests = load_tests(self.ts_dir)
        self.depends = data.get('depends', self.depends)
class Task:
    """A single contest task, loaded from and saved to its own directory."""
    # Key attributes
    code = ''        # task code
    task_dir = ''    # task directory
    # Attributes read from the configuration file
    name = ''                  # display name
    timeout = 2.0              # hard limit in seconds; the program is killed past it
    time_limit = 1.0           # time limit in seconds; exceeding it yields a TL verdict
    memory_limit = 256         # memory limit in MB; exceeding it yields an ML verdict
    input_file = 'input.txt'   # input file name
    output_file = 'output.txt' # output file name
    # Attributes filled from files
    statement = ''                  # problem statement (HTML)
    preliminary = []                # sample tests used for preliminary checks
    test_suites = OrderedDict()     # subtask code -> TestSuite

    def __init__(self, code, task_dir):
        """
        Create a task bound to a directory.
        :param code: task code
        :param task_dir: task directory
        """
        self.code = code
        self.task_dir = task_dir

    @property
    def brief_name(self):
        return '%s. %s' % (self.code, self.name)

    @property
    def full_name(self):
        return 'Задача %s. %s' % (self.code, self.name)

    @property
    def config_file(self):
        return join(self.task_dir, 'task.json')

    @property
    def statements_file(self):
        return join(self.task_dir, 'task.html')

    @property
    def checker(self):
        # External answer-checker executable shipped with the task.
        return join(self.task_dir, 'check.exe')

    @property
    def solutions_dir(self):
        return join(self.task_dir, 'solutions')

    @property
    def preliminary_dir(self):
        return join(self.task_dir, 'tests', 'samples')

    @property
    def test_suites_dir(self):
        return join(self.task_dir, 'tests')

    def load(self):
        """Read the task description from its configuration files."""
        # Load attributes from task.json; missing keys keep class defaults.
        config = load_json(self.config_file, {})
        if 'name' in config:
            self.name = str(config['name'])
        if 'timeout' in config:
            self.timeout = float(config['timeout'])
        if 'time_limit' in config:
            self.time_limit = float(config['time_limit'])
        if 'memory_limit' in config:
            self.memory_limit = float(config['memory_limit'])
        if 'input_file' in config:
            self.input_file = str(config['input_file'])
        if 'output_file' in config:
            self.output_file = str(config['output_file'])
        if 'test_suites' in config:
            tss_from_file = config['test_suites']
            # NOTE(review): requires load_json to produce an OrderedDict,
            # otherwise suites are silently skipped -- confirm.
            if isinstance(tss_from_file, OrderedDict):
                self.test_suites = OrderedDict()
                for code, ts in tss_from_file.items():
                    self.test_suites[code] = TestSuite(self, code, ts)
        # Load the problem statement, if present.
        try:
            statement = open(self.statements_file, encoding='utf-8')
            self.statement = statement.read()
        except FileNotFoundError:
            # Missing statement file is fine -- keep the empty default.
            pass
        # Load the sample tests.
        self.preliminary = load_tests(self.preliminary_dir)

    def save(self):
        """Save the task into task.json / task.html in its directory.

        NOTE(review): 'brief_name' is a property and attributes left at their
        class defaults are not in __dict__, so self.__dict__[k] can raise
        KeyError; 'test_suites' holds TestSuite objects, which plain JSON
        cannot serialize. Confirm and fix (e.g. getattr) before relying on this.
        """
        keys = ['name', 'brief_name', 'timeout', 'input_file', 'output_file', 'test_suites']
        config = dict(zip(keys, [self.__dict__[k] for k in keys]))
        save_json(config, self.config_file)
        with open(self.statements_file, mode='w', encoding='utf-8') as f:
            f.write(self.statement)
            f.close()

    def verify(self):
        """Sanity-check the task: directories, every test, and a 100-point total."""
        if not isdir(self.task_dir):
            raise Exception('Task {} folder not found: {}'.format(self.code, self.task_dir))
        check_or_create_dir(self.solutions_dir)
        check_or_create_dir(self.test_suites_dir)
        for test in self.preliminary:
            self.verify_test(test)
        total_score = 0
        for suite_code, suite in self.test_suites.items():
            if suite.scoring == TestSuite.ENTIRE:
                total_score += suite.total_score
            elif suite.scoring == TestSuite.PARTIAL:
                total_score += suite.test_score * len(suite.tests)
            for test in suite.tests:
                self.verify_test(test, suite_code)
        if total_score != 100:
            raise Exception('Sum of tests score of task {} not equal 100 !!!'.format(self.code))

    def verify_test(self, test, suite_code=None):
        """Check one test: input and answer files exist and the checker accepts them.

        :param test: test file name
        :param suite_code: suite directory; None means a preliminary (sample) test
        """
        if suite_code is None:
            test_name = "Preliminary test {}".format(test)
            input_file = join(self.preliminary_dir, test)
        else:
            test_name = "Test {} in {}".format(test, suite_code)
            input_file = join(self.test_suites_dir, suite_code, test)
        answer_file = input_file + '.a'
        if not isfile(input_file):
            raise Exception('{} for task {} not found !!!'.format(test_name, self.code))
        if not isfile(answer_file):
            raise Exception('{} for task {} don\'t have answer !!!'.format(test_name, self.code))
        # The reference answer must be accepted against itself.
        try:
            subprocess.check_call(
                [self.checker, input_file, answer_file, answer_file],
                stderr=subprocess.DEVNULL,
                stdout=subprocess.DEVNULL)
        except FileNotFoundError:
            raise Exception('Checker for task {} is not found !!!'.format(self.code))
        except subprocess.CalledProcessError:
            raise Exception('Checker for task {} is not working !!!'.format(self.code))

    def check(self):
        """Run the checker on a participant's output; return 'OK'/'WA'/'PE'/'??'.

        NOTE(review): `finally: return answer` swallows every other exception
        (including a missing checker), silently yielding '??' -- confirm intent.
        """
        answer = '??'
        try:
            output_file = 'stdout'
            if isfile(self.output_file) and stat(self.output_file).st_size > 0:
                output_file = self.output_file
            subprocess.check_call([
                self.checker,
                self.input_file,
                output_file,
                'answer.txt',
            ])
            answer = 'OK'
        except subprocess.CalledProcessError as error:
            if error.returncode == 1:
                answer = 'WA'  # Wrong answer
            elif error.returncode == 2:
                answer = 'PE'  # Presentation error
        finally:
            return answer
|
[
"7911562+av-pavlov@users.noreply.github.com"
] |
7911562+av-pavlov@users.noreply.github.com
|
b6d4b00e9ba7fd2e1ffb15551e74584d5f265b5d
|
ed61c386fbe2ab18a73e6c4ac4c581540638dba6
|
/src/old verion of code/propagation.py
|
d0295ff5fd4ade51e1ce136c9a8ddc81f37f1650
|
[] |
no_license
|
allenqaq/Online-Social-Network
|
5f244cbf3b9f53fe68a926847e485c3a5ab9782e
|
c6f863827521ed787cf0120c3e02ae0485004a6a
|
refs/heads/master
| 2020-03-08T17:34:54.384051
| 2018-12-21T08:42:34
| 2018-12-21T08:42:34
| 128,272,678
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,024
|
py
|
'''
Created on Mar 5, 2017
@author: allen
'''
from math import sqrt
import numpy as np
# def mean(lst):
# """calculates mean"""
# sum = 0
# for i in range(len(lst)):
# sum += lst[i]
# return (sum / len(lst))
# def stddev(lst):
# """calculates standard deviation"""
# sum = 0
# mn = mean(lst)
# for i in range(len(lst)) :
# sum += pow((lst[i]-mn),2)
# return sqrt(sum/len(lst)-1)
def eccenricity(items):
    """Eccentricity of a score collection: (max - second_max) / std.

    Returns 0 for an empty collection or when the spread (np.std) is zero.
    If only one value remains, the runner-up is taken as 0, as before.

    FIXES vs. the original: the input is copied instead of mutated
    (`items.remove(max1)` destroyed the caller's list and crashes on
    dict views, e.g. `scores[node].values()` under Python 3).

    :param items: iterable of numeric scores
    """
    values = list(items)  # defensive copy; also accepts dict views/generators
    if not values:
        return 0
    sd = np.std(values)
    if sd == 0:
        return 0
    ordered = sorted(values, reverse=True)
    top = ordered[0]
    runner_up = ordered[1] if len(ordered) > 1 else 0
    return (top - runner_up) / sd
def matchScores(lgraph, rgraph, mapping, lnode) :
    """Score candidate matches in rgraph for the unmapped node `lnode` of lgraph.

    For every already-mapped in/out-neighbour of lnode, each corresponding
    out/in-neighbour in rgraph earns 1/sqrt(degree) -- low-degree candidates
    get more credit. Candidates already mapped, or whose degree differs from
    lnode's by more than 3, are skipped.

    :param mapping: one-element container; mapping[0] is the dict lnode->rnode
    :return: dict candidate-rnode -> score
    """
    scores = {}
    # Pass 1: walk lnode's in-neighbours that are already mapped,
    # and credit the out-neighbours of their images in rgraph.
    listIn1 = lgraph.in_edges(lnode)
    for lnbr in listIn1 :
        # lnbr is an edge tuple like (u, lnode)
        if lnbr[0] not in mapping[0].keys() :
            continue
        rnbr = mapping[0][lnbr[0]]
        listOut1 = rgraph.out_edges(rnbr)
        for rnode in listOut1 :
            # rnode is an edge tuple like (rnbr, v)
            if rnode[1] in mapping[0].keys() :
                continue
            else :
                skip = rgraph.in_degree(rnode[1]) - rgraph.in_degree(lnode)
                # Degree check: assume matched nodes differ by at most 3 in degree.
                if skip > 3 or skip < -3 :
                    continue
                elif rnode[1] in scores.keys() :
                    scores[rnode[1]] += 1 / sqrt(rgraph.in_degree(rnode[1]))
                else :
                    scores[rnode[1]] = 1 / sqrt(rgraph.in_degree(rnode[1]))
    # Pass 2: symmetric walk over lnode's mapped out-neighbours.
    listOut2 = lgraph.out_edges(lnode)
    for lnbr in listOut2 :
        # lnbr is an edge tuple like (lnode, v)
        if lnbr[1] not in mapping[0].keys() :
            continue
        rnbr = mapping[0][lnbr[1]]
        listIn2 = rgraph.in_edges(rnbr)
        for rnode in listIn2 :
            # rnode is an edge tuple like (u, rnbr)
            if rnode[0] in mapping[0].keys() :
                continue
            else :
                skip = rgraph.out_degree(rnode[0]) - rgraph.out_degree(lnode)
                # Degree check: assume matched nodes differ by at most 3 in degree.
                if skip > 3 or skip < -3 :
                    continue
                if rnode[0] in scores.keys() :
                    scores[rnode[0]] += 1 / sqrt(rgraph.out_degree(rnode[0]))
                else :
                    scores[rnode[0]] = 1 / sqrt(rgraph.out_degree(rnode[0]))
    return scores
def propagationStep(lgraph, rgraph, mapping) :
    """One propagation sweep of de-anonymization matching (Python 2 code).

    For each unmapped lgraph node, pick its best-scoring rgraph candidate;
    accept the pair only when the score distribution is confident enough
    (eccentricity >= 0.01) in BOTH directions and the reverse match agrees.
    Accepted pairs are written into mapping[0].
    """
    scores = {}
    node_acount = 0
    for lnode in lgraph.nodes() :
        # Progress indicator.
        node_acount = node_acount + 1
        data_len = len(lgraph.nodes())
        rate = node_acount * 100.0 / data_len
        print('-------'),
        print(rate),
        print(' %')
        if lnode in mapping[0].keys() :
            continue
        scores[lnode] = matchScores(lgraph, rgraph, mapping, lnode)
        if eccenricity(scores[lnode].values()) < 0.01 :
            # 0.01 is theta, a parameter that controls the tradeoff between the yield and the accuracy.
            continue
        rnode = max(scores[lnode].items(), key=lambda x: x[1])[0]
        # Score in the reverse direction to confirm the candidate.
        scores[rnode] = matchScores(rgraph, lgraph, mapping, rnode)
        # no need to invert mapping
        if eccenricity(scores[rnode].values()) < 0.01 :
            # 0.01 is theta, a parameter that controls the tradeoff between the yield and the accuracy.
            continue
        reverse_match = max(scores[rnode].items(), key=lambda x: x[1])[0]
        print("reverse_match :"),
        print(reverse_match)
        print("lnode :"),
        print(lnode)
        print("======================================")
        # Accept only mutually-best pairs.
        if reverse_match != lnode :
            continue
        else :
            mapping[0][lnode] = rnode
|
[
"allenqaq555@gmail.com"
] |
allenqaq555@gmail.com
|
9d58d840d5920137fe056298c28671343069f204
|
ee8ee84343e5efd184e20cb474abdd04425aaf7b
|
/messenger/models.py
|
2992db2413316e882741ef7602bad696b1bf617c
|
[] |
no_license
|
Amrsaeed/codetouch
|
978924b59bb68da05f2307fbc2deed31d10aa927
|
a70c6d6241e57692501034a5bf6f5d5634738ef1
|
refs/heads/master
| 2021-01-22T08:18:00.354453
| 2019-01-14T17:05:07
| 2019-01-14T17:05:07
| 92,609,720
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Message(models.Model):
    """A direct message between two users, stored by username."""
    message_text = models.CharField(max_length=2000)
    sentOn = models.DateTimeField('Sent On')
    # NOTE(review): sender/reciever (sic) are plain username strings rather
    # than ForeignKeys to User -- confirm this denormalization is intentional.
    sender = models.CharField(max_length=150, default='None')
    reciever = models.CharField(max_length=150, default='None')

    def __str__(self):
        return self.message_text
|
[
"amrsaeed@aucegypt.edu"
] |
amrsaeed@aucegypt.edu
|
aea9bbdf52c15b1029619192896682d0da4103c2
|
a4b46342bc37d2d08b19934c5230928575d9cc39
|
/getEmailByRegex.py
|
7ad1f201c2b92c1916a084f220d32e11906a7fd5
|
[] |
no_license
|
yenchenhuang/regex_practice
|
5daec1e13903ad233b2d67dba145593e4a0e7966
|
59f138639f783198d813572f2487566955355d0b
|
refs/heads/master
| 2020-03-26T05:34:41.882199
| 2016-06-22T09:13:52
| 2016-06-22T09:13:52
| 144,563,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
import re
def is_email(input):
    """Return True only when the whole string is one e-mail-shaped token.

    FIX: the original used re.match() with an unanchored pattern whose
    domain part was a lazy `+?`, so any string merely *starting* with
    "x@y" validated (e.g. "a@b junk" -> True). re.fullmatch anchors
    both ends of the string.
    """
    pattern = r"[\w._]+@[\w_\-.]+"
    return re.fullmatch(pattern, input) is not None
def get_emails(paragraph):
    """Return every e-mail-like substring found in *paragraph*, in order."""
    email_pattern = re.compile(r"[\w._]+@[\w_\-.]+")
    return email_pattern.findall(paragraph)
def get_accounts(paragraph):
    """Return the local part (before the @) of every e-mail in *paragraph*."""
    account_pattern = re.compile(r"([\w._]+)@[\w_\-.]+")
    return account_pattern.findall(paragraph)
|
[
"yenchenhuang@kkbox.com"
] |
yenchenhuang@kkbox.com
|
f8bc29d25b3b055f3a4acbaac21dc04354dc7797
|
e9434af6e0f542769e84a37f15860d3394367068
|
/interpolator.py
|
77a96c0d74c29e27f4c58e234107593baa669a9c
|
[] |
no_license
|
RobinAmsters/benchmarking
|
12ab77451222dfe113c0d4c66375ad5fb03d3122
|
f4d7f6af804645438506a4ee5791eba596ecb52b
|
refs/heads/master
| 2021-05-09T22:27:40.632141
| 2019-03-28T12:45:15
| 2019-03-28T12:45:15
| 118,751,992
| 0
| 2
| null | 2018-01-24T11:18:43
| 2018-01-24T10:48:03
|
Python
|
UTF-8
|
Python
| false
| false
| 7,436
|
py
|
import numpy as np
import sys
def transform_to_common(dataset1, dataset2):
    """Align two point clouds in a common (zero-mean) coordinate system.

    Assumes both datasets have the same number of rows (points), one point
    per row. Rows containing NaN in either dataset are dropped from both.

    :param dataset1: (n, 3) array of points
    :param dataset2: (n, 3) array of corresponding points
    :return: (rotated zero-mean dataset1, zero-mean dataset2, rotation R,
              translation such that R @ p1 + translation ~= p2 on average)
    """
    # Keep only rows where BOTH datasets have a finite (non-NaN) point.
    keep = np.invert(np.bitwise_or(np.isnan(np.sum(dataset1, axis=1)), np.isnan(np.sum(dataset2, axis=1))))
    dataset1 = dataset1[keep]
    dataset2 = dataset2[keep]
    mean1 = np.mean(dataset1, axis=0)
    mean2 = np.mean(dataset2, axis=0)
    zeroAvg1 = dataset1 - mean1
    zeroAvg2 = dataset2 - mean2
    # Rotation aligning the zero-mean first cloud onto the second.
    R = extract_rotation_matrix(zeroAvg1, zeroAvg2)
    tranformed1 = np.dot(R, zeroAvg1.transpose()).transpose()
    # Residual translation between dataset2 and the rotated dataset1.
    translation = np.mean(dataset2 - np.dot(R, dataset1.transpose()).transpose(), axis=0)
    return tranformed1, zeroAvg2, R, translation
def extract_rotation_matrix(zeroAveragedPositions1, zeroAveragedPositions2):
"""
Find the rotation matrix between two point clouds with a mean position of 0. The rotatation matrix is defined as follows:
R . zeroAveragedPositions1 == zeroAveragedPositions2 + epsilon
:param zeroAveragedPositions1: The first point cloud
:param zeroAveragedPositions2: The second point cloud
:return: The rotation matrix
"""
u, s, v_trans = np.linalg.svd(np.dot(zeroAveragedPositions1.transpose(), zeroAveragedPositions2))
d = np.linalg.det(np.dot(v_trans.transpose(), u.transpose()))
R = np.dot(np.dot(v_trans.transpose(), np.array([[1, 0, 0], [0, 1, 0], [0, 0, d]])), u.transpose())
return R
def find_pose(originalPositions, movedPositions):
"""
Find the pose matrix for a given set of marker positions on the robot and the coordinates of these points for which to calculate the pose matrix.
:param originalPositions: The positions of the markers on the robot (rows are different markers).
:param movedPositions: The moved positions (rows are different markers).
:return: The pose matrix.
"""
if len(originalPositions) != len(movedPositions):
raise Exception(
"To find the pose of the robot, the same amount of markers have to be defined as the amount of measured markers")
indices = np.invert(np.isnan(np.sum(movedPositions, axis=1)))
if sum(indices) < 3:
return None
originalPositions = np.array(originalPositions)[indices, :]
movedPositions = movedPositions[indices, :]
zeroAvg1 = originalPositions - np.mean(originalPositions, axis=0)
zeroAvg2 = movedPositions - np.mean(movedPositions, axis=0)
R = extract_rotation_matrix(zeroAvg1, zeroAvg2)
transformed_input = np.dot(R, originalPositions.transpose()).transpose()
meanVec = np.mean(transformed_input, axis=0)
meanPos = np.mean(movedPositions, axis=0) - meanVec
meanPos.shape = (3, 1)
pose = np.block([[R, meanPos], [0, 0, 0, 1]])
error = 0
for i in range(originalPositions.shape[0]):
error += np.sum(np.abs(movedPositions[i, :] - np.dot(pose, np.append(originalPositions[i, :], 1))[:3]))
if error > 30:
return None
return np.block([[R, meanPos], [0, 0, 0, 1]]) # TODO: Not sure about block
def overlap_datasets(markerTimePoints, transformedMarkerPositions, evaluationTrack, referenceFramerate=1/50):
"""
:param initialMarkerPositions:
:param markerTracks: The measured positions of the different markers, the measurements are assumed to be at a constant rate.
The timestamps are assumed to be equal among the different markers.
:param initialEvaluationPosition:
:param evaluationTrack:
:return:
"""
evaluationTrack[:, 0] -= evaluationTrack[0, 0]
kryptonIndices = get_virtual_indices(referenceFramerate, evaluationTrack[:, 0])
return timeShift(evaluationTrack, kryptonIndices, transformedMarkerPositions, markerTimePoints)
def get_transformed_marker_position(initialEvaluationPosition, initialMarkerPositions, markerTracks):
transformedMarkerPositions = list()
for i in range(markerTracks.shape[1]):
pose = find_pose(initialMarkerPositions, markerTracks[:, i, 1:])
if pose is not None:
transformedMarkerPositions.append(np.dot(pose, np.append(initialEvaluationPosition, 1))[:3])
else:
transformedMarkerPositions.append(np.array([np.nan, np.nan, np.nan]))
transformedMarkerPositions = np.array(transformedMarkerPositions)
return transformedMarkerPositions
def get_transformed_vive_position(initialEvaluationPosition, initialVivePosition, viveMatrix):
transformedVivePositions = list()
for i in range(len(viveMatrix)):
R = viveMatrix[i][0][:, :3]
p = viveMatrix[i][0][:, 3] * 1000
originP = p - np.dot(R, initialVivePosition) + initialVivePosition
transformedVivePositions.append(originP + np.dot(R, initialEvaluationPosition))
return transformedVivePositions
def timeShift(evaluationTrack, kryptonIndices, transformedMarkerPositions, markerTimePoints):
"""
Find how much the evaluation track is shifted in time versus the reference (transformedMarkerPOsitions)
:param evaluationTrack: numpy ndarray containing the positions of the system to evaluate: [[time, X, Y, Z]]
:param kryptonIndices: a list of numbers containing the matching index in transformedMarkerPositions for each point in the evaluationtrack.
:param transformedMarkerPositions: the positions of the measurement system to evaluate according to the Krypton system.
:param markerTimePoints: the timestamps of transformedMarkerPositions
:return:
"""
minOverlap = 5 # seconds of overlap TODO: figure out realistic overlap
minErr = sys.float_info.max
finalOffset = 0
finalR = None
finalTranslation = None
minOffset = - kryptonIndices[np.searchsorted(evaluationTrack[:, 0], evaluationTrack[-1, 0] - minOverlap)]
maxOffset = np.searchsorted(markerTimePoints, markerTimePoints[-1] - minOverlap)
offsets = range(minOffset, maxOffset)
for offset in offsets:
start = np.searchsorted(kryptonIndices + offset, 0, side='right')
end = np.searchsorted(kryptonIndices + offset, len(transformedMarkerPositions))
markerPoints = transformedMarkerPositions[kryptonIndices[start:end] + offset]
evaluationPoints = evaluationTrack[start:end]
evaluationTransformed, markerTransformed, R, translation = transform_to_common(evaluationPoints[:, 1:],
markerPoints)
error = error_func(markerTransformed, evaluationTransformed)
if error < minErr:
minErr = error
finalOffset = offset
finalR = R
finalTranslation = translation
return finalOffset, finalR, finalTranslation
def get_virtual_indices(kryptonRate, timestamps):
return np.array(map(lambda a: int(round(a * kryptonRate)), timestamps))
def error_func(referenceSet, evaluationSet):
refMean = np.mean(referenceSet, axis=0)
variance = np.sum((referenceSet - refMean) ** 2)
if variance ** 0.5 < 2000: # guarantee at least 5m of movement within interval.
return sys.float_info.max
return float(np.sum(np.abs(referenceSet - evaluationSet))) / len(referenceSet)
if __name__ == '__main__':
c = np.array([[1, 2, 5], [3, 5, 4], [10, 8, 9]])
m = np.array([[2, -1, 5], [5, -3, 4], [8, -10, 9]])
pose = find_pose(c, m)
print(pose)
|
[
"quinten.lauwers1@student.kuleuven.be"
] |
quinten.lauwers1@student.kuleuven.be
|
1908dbef5523b4c7baf3492790e727cfa3424bd7
|
13a1fae2f825f6c16fa15e47556e4e0b33da15b2
|
/stepik course/stepik_3_1_1.py
|
f1d88368217a5e569d6bd9ca3bbf9a90f41a9d1a
|
[] |
no_license
|
kubenet/pyLearn
|
98a4a2c3243f2c03d00af82451863d65a8a38350
|
3d850de58ba525040c1f72730dcd31368766af53
|
refs/heads/master
| 2020-07-20T21:13:14.266289
| 2020-03-10T15:53:55
| 2020-03-10T15:53:55
| 206,710,216
| 1
| 0
| null | 2019-09-06T04:05:23
| 2019-09-06T04:05:22
| null |
UTF-8
|
Python
| false
| false
| 786
|
py
|
# Напишите функцию f(x), которая возвращает значение следующей функции, определённой на всей числовой прямой:
# 1−(x+2)^2, при x≤−2
# -x/2, при −2<x≤2
# ((x−2)^2)+1, при 2<x
#
# Требуется реализовать только функцию, решение не должно осуществлять операций ввода-вывода.
# Sample Input 1:
# 4.5
# Sample Output 1:
# 7.25
# Sample Input 2:
# -4.5
# Sample Output 2:
# -5.25
# Sample Input 3:
# 1
# Sample Output 3:
# -0.5
def f(x):
if x > 2:
return ((x-2)**2)+1
elif x <= -2:
return 1-(x+2)**2
elif (x <= 2) or (-2 < x):
return -x/2
print(f(4.5))
print(f(-4.5))
print(f(1))
|
[
"kubenet@gmail.com"
] |
kubenet@gmail.com
|
b04bcc13154c9db03c343251a42fa7662c29b486
|
52c737c2f062f7b07e35ab88e9ea042337463253
|
/Weibo_v3/weibo_auto_handle.py
|
b1c5aade7198eb39ae4667657b948245d1f5a156
|
[] |
no_license
|
LichMscy/Weibo
|
b704779cdfe8c52063bb3c8e2d4e611ffd47bb37
|
6d3ed1e9709ef48c4627dfc69fd9ee9cbe7e628b
|
refs/heads/master
| 2021-01-18T17:26:20.351718
| 2017-04-01T02:47:49
| 2017-04-01T02:47:49
| 86,798,194
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,101
|
py
|
# -*-coding: utf-8 -*-
import re
import time
import datetime
import logging
import json
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
import requests
from bs4 import BeautifulSoup
from util import parsedate
import persist_iics
logging.basicConfig(level=logging.INFO, format="[%(asctime)s]%(name)s:%(levelname)s:%(message)s")
logger = logging.getLogger(__name__)
logging.getLogger("selenium").setLevel(logging.WARNING)
def init_phantomjs_driver():
headers = {
'Cookie': 'YF-Ugrow-G0=b02489d329584fca03ad6347fc915997; SUB=_2AkMvgPj2dcPxrAFYnPgWyGvkZYpH-jycVZEAAn7uJhMyOhgv7nBSqSVOKynW2PbhU4768kfRGZgNPwXeRA..; SUBP=0033WrSXqPxfM72wWs9jqgMF55529P9D9WWEFXHsNpvgJdQjr1GM.e765JpVF020SKM7e0571hMc',
}
for key, value in headers.items():
webdriver.DesiredCapabilities.PHANTOMJS['phantomjs.page.customHeaders.{}'.format(key)] = value
useragent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36'
webdriver.DesiredCapabilities.PHANTOMJS['phantomjs.page.settings.userAgent'] = useragent
# local path refer phantomjs
driver = webdriver.PhantomJS(executable_path='xxxxx')
driver.set_window_size(1366, 768)
return driver
def update_cookies():
p1 = persist_iics.Persist()
accounts = p1.query_account()
cookie = json.loads(accounts[0][3])
req = requests.Session().get('http://weibo.cn/', cookies=cookie)
if re.findall('登录|注册', req.text, re.S):
logging.error('Account cookies out of date! (Account_id: %s)' % accounts[0][0])
browser = init_phantomjs_driver()
try:
browser.get("http://weibo.com")
time.sleep(3)
failure = 0
while "微博-随时随地发现新鲜事" == browser.title and failure < 5:
failure += 1
username = browser.find_element_by_name("username")
pwd = browser.find_element_by_name("password")
login_submit = browser.find_element_by_class_name('W_btn_a')
username.clear()
username.send_keys(accounts[0][1])
pwd.clear()
pwd.send_keys(accounts[0][2])
login_submit.click()
time.sleep(5)
# if browser.find_element_by_class_name('verify').is_displayed():
# logger.error("Verify code is needed! (Account: %s)" % account)
if "我的首页 微博-随时随地发现新鲜事" in browser.title:
browser.get('http://weibo.cn/')
cookies = dict()
if "我的首页" in browser.title:
for elem in browser.get_cookies():
cookies[elem["name"]] = elem["value"]
p2 = persist_iics.Persist()
p2.save_account_cookies(accounts[0][0], cookies, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
logging.error('Account cookies updated! (Account_id: %s)' % accounts[0][0])
return cookies
except:
logger.error("Weibo Login Unknown exception!")
raise
else:
return cookie
def snatch_news_info(cookies):
p1 = persist_iics.Persist()
p1_result = p1.query_task()
ids = re.findall('weibo.com/(.*?)/(.*?)[?]', p1_result[0][1], re.S)[0]
if ids and ids[0] and ids[1]:
url = 'http://weibo.cn/comment/{}?uid={}'.format(ids[1], ids[0])
req = requests.get(url, cookies=cookies)
while req.status_code != 200:
logging.error('Snatch (Task_id: %s) failed!' % p1_result[0][0])
exit()
soup = BeautifulSoup(req.text, 'lxml')
item = soup.select('span.ctt')[0]
dic = dict()
dic['platform_id'] = 2
dic['media_name'] = '新浪微博'
dic['title'] = item.get_text()[1:23] + '...'
dic['summary'] = item.get_text()[1:290]
dic['src_url'] = result[0][1]
dic['task_id'] = result[0][0]
dic['comment_num'] = ''.join(re.findall('\">\s评论\[(.*?)\]\s<', soup.extract().decode(), re.S))
like = re.findall('\">赞\[(.*?)\]<', soup.extract().decode(), re.S)
if like:
dic['like_num'] = like[0]
dic['forward_num'] = ''.join(re.findall('\">转发\[(.*?)\]<', soup.extract().decode(), re.S))
create_time = soup.select('span.ct')[0].get_text().split('\xa0来自')[0]
dic['create_time'] = parsedate.parse_date(create_time)
p2 = persist_iics.Persist()
p2.insert_news(dic)
logging.info('Snatch wb news success! (Task_id: %s)' % p1_result[0][0])
p3 = persist_iics.Persist()
p3.update_task_status(p1_result[0][0])
logging.error('Snatch (Task_id: %s) failed! Updated status!' % p1_result[0][0])
def comment_prepare():
# TODO: query comment list from db.
comment_list = tuple()
p1 = persist_iics.Persist()
result = p1.query_task()
ids = re.findall('weibo.com/(.*?)/(.*?)[?]', result[0][1], re.S)[0]
url = 'http://weibo.cn/comment/{}?uid={}'.format(ids[1], ids[0])
result = dict()
result['comment'] = comment_list
result['url'] = url
return result
def comment(weibo, wb_content, wb_comment_url):
code = 1
account = weibo['usn']
password = weibo['pwd']
# service_args = [
# '--proxy=127.0.0.1:9999',
# '--proxy-type=http',
# '--ignore-ssl-errors=true'
# ]
browser = init_phantomjs_driver()
try:
browser.get("http://weibo.com")
time.sleep(3)
# browser.save_screenshot("weibocom.png")
failure = 0
while "微博-随时随地发现新鲜事" == browser.title and failure < 5:
failure += 1
username = browser.find_element_by_name("username")
pwd = browser.find_element_by_name("password")
login_submit = browser.find_element_by_class_name('W_btn_a')
username.clear()
username.send_keys(account)
pwd.clear()
pwd.send_keys(password)
login_submit.click()
time.sleep(5)
# if browser.find_element_by_class_name('verify').is_displayed():
# logger.error("Verify code is needed! (Account: %s)" % account)
if "我的首页 微博-随时随地发现新鲜事" in browser.title:
browser.get(wb_comment_url)
comment_avatar = browser.find_element_by_xpath("//div/a[@href='http://weibo.com/']")
comment_avatar.send_keys(Keys.TAB, wb_content)
time.sleep(5)
comment_submit = browser.find_element_by_xpath("//a[@class='W_btn_a']")
comment_submit.click()
time.sleep(5)
code = 0
except:
logger.error("weibo comment Unknown exception!")
raise
return code
# if __name__ == '__main__':
# print(comment({'usn': 'xxxxx', 'pwd': 'xxxxx'}, '死...死...死狗一', 'http://weibo.com/xxxxx/xxxxx'))
|
[
"jraqi1994@gmail.com"
] |
jraqi1994@gmail.com
|
59aed57ba62f13ab930efe93f2f9d078ea11c2d3
|
f144f0f34227acf229a991d09db18281cd6b5ac6
|
/.c9/metadata/environment/abscences/models.py
|
269d869f41bb85e539bc1b9b841e06464feed82d
|
[] |
no_license
|
KikiDow/TUD_HDP_PSE3S
|
42dde541a6d65d0c764def842a1e2732fec740ac
|
cd21c27651e442e1ed84518c1d567f1d52f215c0
|
refs/heads/main
| 2023-04-22T23:25:25.966531
| 2021-05-09T14:10:01
| 2021-05-09T14:10:01
| 313,276,503
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
{"filter":false,"title":"models.py","tooltip":"/abscences/models.py","undoManager":{"mark":2,"position":2,"stack":[[{"start":{"row":8,"column":55},"end":{"row":8,"column":56},"action":"insert","lines":["/"],"id":2},{"start":{"row":8,"column":56},"end":{"row":8,"column":57},"action":"insert","lines":["/"]}],[{"start":{"row":8,"column":56},"end":{"row":8,"column":57},"action":"remove","lines":["/"],"id":3}],[{"start":{"row":8,"column":55},"end":{"row":8,"column":56},"action":"remove","lines":["/"],"id":4}]]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":8,"column":55},"end":{"row":8,"column":55},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1620153966673,"hash":"c83b2801de5e5f3fcb36b6f66f5a7f588f9bceb1"}
|
[
"ec2-user@ip-172-31-0-153.ec2.internal"
] |
ec2-user@ip-172-31-0-153.ec2.internal
|
b477a9cee5d1d50b4effb6e86d254fa10629c6f1
|
d63811f9944dead8a745a46e1382f64800c72c5e
|
/linuxYazKampı/sonuç/petimebak/adverts/views.py
|
ea08662d7d76a0d3d26ba3224ffcc6795ba666ad
|
[] |
no_license
|
Arciles/Notes
|
9dd77425209b9a10a6503dcd27a5c48c9666c35b
|
095e361bdb11ca72c3bff801ed4a9b938827c84a
|
refs/heads/master
| 2020-12-11T05:46:11.416975
| 2014-11-22T12:08:37
| 2014-11-22T12:08:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,924
|
py
|
from datetime import datetime
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
from adverts.forms import AdvertCreationForm, PhotoCreationForm
from adverts.models import Advert
from messages.forms import NewMessageForm
def home(request):
adverts = (Advert.objects
.filter(is_published=True, end_date__gte=datetime.now())
.order_by("-date_created"))
return render_to_response("index.html", {
"adverts": adverts
}, RequestContext(request))
@login_required
def new_advert(request):
form = AdvertCreationForm()
success = False
if request.method == "POST":
form = AdvertCreationForm(request.POST)
if form.is_valid():
form.instance.user = request.user
form.save()
success = True
return render_to_response("new_advert.html", {
"form": form,
"success": success,
}, RequestContext(request))
def detail_advert(request, pk):
advert = get_object_or_404(Advert, id=pk)
message_sent = request.GET.get("message_sent")
form = NewMessageForm()
return render_to_response("detail.html", {
"advert": advert,
"form": form,
"message_sent": message_sent
}, RequestContext(request))
def photo_add(request, pk):
advert = get_object_or_404(Advert, id=pk)
form = PhotoCreationForm()
if request.method == "POST":
form = PhotoCreationForm(request.POST, request.FILES)
if form.is_valid():
form.instance.advert = advert
form.save()
return redirect(reverse('detail_advert', args=[pk]))
return render_to_response("photo_add.html", {
"advert": advert,
"form": form,
}, RequestContext(request))
|
[
"salihklc91@gmail.com"
] |
salihklc91@gmail.com
|
a852d4447c5f5e6261198b26f9281cce50269c1c
|
d393e865b83edc1b83fe80b716775b8036c51af4
|
/Preprocessing.py
|
40094cabfa374078ce04b111e05fd9cab6011fa5
|
[] |
no_license
|
dwaydwaydway/KKStream-Deep-Learning-Workshop
|
1137724577c46b9d0a039a8b64f84abf0a0ea91f
|
7c9da5114b6901052d479228fa40a5628e646e25
|
refs/heads/master
| 2020-05-25T04:44:29.691746
| 2020-03-11T09:25:25
| 2020-03-11T09:25:25
| 187,633,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,497
|
py
|
import numpy as np
import pandas as pd
import multiprocessing as mp
from datetime import datetime
import tqdm
import pickle
import csv
import math
import warnings
import json
warnings.filterwarnings("ignore")
def main(config):
pool = mp.Pool(processes=config["n_workers"])
pool.map(job, range(1, 76))
# Return one hot encoding of length = base and the nth element = 1
def make_onehot(n, base):
if n == -1 :
return list(np.zeros(base))
onehot = np.zeros(base)
onehot[n-1] = 1
return list(onehot)
# Return the time slot number of the given datatime boject
def is_time_slot(datetime, time_slot_comp):
slot = 0
for i in range(4):
if datetime.time() >= time_slot_comp[i][0] and datetime.time() < time_slot_comp[i][1]:
slot = i
return slot
# Return [week, time slot, time slot of the week]
def time_slot(datetime, begin_datetime, end_datetime, time_slot_comp):
if datetime < begin_datetime or datetime > end_datetime:
return [-1, -1, -1]
diff = datetime - begin_datetime
time_slot = is_time_slot(datetime, time_slot_comp)
return [int(diff.days / 7), diff.days*4 + time_slot, datetime.weekday()*4 + time_slot]
# Return the scaleed played duration value
def scale_played_duration(n):
return math.log(1 + n / 9550675.0)
# Return the Processed data
def prepare_data(row, platform):
temp = list()
# platform
temp += make_onehot(platform[row[4]], 3)
# connection type
if 'wifi' in row[7]:
connect = make_onehot(0, 3)
elif 'cellular' in row[7]:
connect = make_onehot(1, 3)
elif 'online' in row[7]:
connect = make_onehot(2, 3)
else:
connect = make_onehot(-1, 3)
temp += connect
# watch ratio
temp.append(float(row[5]) / (float(row[6]) + 1e-10))
# total number of episode
temp.append(math.log(1 + float(row[5]) / 210.0))
# limit playzone countdown
temp.append(1 if 'limit playzone countdown' in row[3] else 0)
# error
temp.append(1 if 'error' in row[3] else 0)
# (video ended, program stopped or enlarged-reduced, program stopped)
temp.append(1 if 'ed' in row[3] else 0)
# played duration of popular title
temp.append(math.log(1 + float(row[2]) / 5224.0) if int(row[0]) in [74, 79, 77] else 0)
return temp
# Extact selected features from data files(This part could be a bit messy)
def job(k):
with open("preprocessing_config.json") as f:
config = json.load(f)
begin_datetime = datetime.strptime('2017-01-02 01:00:00.00', '%Y-%m-%d %H:%M:%S.%f')
end_datetime = datetime.strptime('2017-08-14 01:00:00.00', '%Y-%m-%d %H:%M:%S.%f')
time_slot_comp = [[datetime.strptime('01:00:00', '%H:%M:%S').time(), datetime.strptime('09:00:00', '%H:%M:%S').time()],
[datetime.strptime('09:00:0', '%H:%M:%S').time(), datetime.strptime('17:00:00', '%H:%M:%S').time()],
[datetime.strptime('17:00:0', '%H:%M:%S').time(), datetime.strptime('21:00:00', '%H:%M:%S').time()],
[datetime.strptime('21:00:00', '%H:%M:%S').time(), datetime.strptime('01:00:00', '%H:%M:%S').time()]]
platform = { 'Web': 0, 'iOS': 1, 'Android': 2 }
file = pd.read_csv((config["data_folder"] + "/data-0{:0>2d}.csv").format(k))
n = file.groupby(['user_id']).size().values
file = file.drop(columns=['user_id', 'device_id', 'session_id', 'is_trailer'])
file = np.split(np.asarray(file.values), np.add.accumulate(n), axis=0)[:len(n)]
pad = config["pad_token"]
collect = []
for idx in file:
person_data = np.ones((32, 28, 13)) * pad
week_data = np.ones((28, 13)) * pad
time_slot_data = []
watch_time_sum = 0
prev = time_slot(datetime.strptime(idx[0][1], '%Y-%m-%d %H:%M:%S.%f'), begin_datetime, end_datetime, time_slot_comp)
for row in idx:
now = time_slot(datetime.strptime(row[1], '%Y-%m-%d %H:%M:%S.%f'), begin_datetime, end_datetime, time_slot_comp)
if prev[0] == -1:
prev = now
if now[0] == -1:
continue
if now[1] == prev[1]:
time_slot_data.append(prepare_data(row, platform))
watch_time_sum += float(row[2])
else:
time_slot_data = np.mean(time_slot_data, axis=0).tolist()
time_slot_data.append(scale_played_duration(watch_time_sum))
week_data[prev[2]] = time_slot_data
time_slot_data = []
if prev[0] < now[0]:
person_data[prev[0]] = week_data
week_data = week_data * 0 + pad
time_slot_data.append(prepare_data(row, platform))
watch_time_sum += float(row[2])
prev = now
if len(time_slot_data) != 0:
time_slot_data = np.mean(time_slot_data, axis=0).tolist()
time_slot_data.append(scale_played_duration(watch_time_sum))
week_data[prev[2]] = time_slot_data
person_data[prev[0]] = week_data
collect.append(person_data)
with open((config["preprocessed_data_folder"] +"/data-00{:0>2d}_preprocessed.pkl").format(k), 'wb') as handle:
pickle.dump(collect, handle)
if __name__ == '__main__':
with open("preprocessing_config.json") as f:
config = json.load(f)
main(config)
|
[
"tim8733123@gmail.com"
] |
tim8733123@gmail.com
|
80e5198c86ed27d6c8417c69bd3be133fb25d9a9
|
87738f6f80f4fe03ac15d6de02b9b51f4e340c4b
|
/Part 3 - Classification/Section 10 - Support Vector Machine (SVM)/svm.py
|
d94e5cd0a3aee556681f4db83424c2c51038813a
|
[
"MIT"
] |
permissive
|
Nikronic/Machine-Learning-Models
|
b4209dcc5f5a428da9447bc40fc93b9f0e171880
|
9fb48463ee2211eec800b2436699508f55d5ee28
|
refs/heads/master
| 2021-11-10T14:50:56.425443
| 2021-11-07T15:42:48
| 2021-11-07T15:42:48
| 142,788,604
| 34
| 13
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,824
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 17 23:26:31 2018
@author: Mohammad Doosti Lakhani
"""
# Imporing libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
x = dataset.iloc[:,2:4].values
y = dataset.iloc[:,4].values
# Feature scaling
from sklearn.preprocessing import StandardScaler
standardscaler_x = StandardScaler()
standardscaler_x = standardscaler_x.fit(x)
x = standardscaler_x.transform(x)
# Splitting dataset into Train set and Test set
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(x,y, train_size = 0.75 , random_state=0)
# Fitting the K-Nearest Neighbors model to the train set
from sklearn.svm import SVC
classifier = SVC(kernel='rbf', random_state=0)
classifier = classifier.fit(x_train,y_train)
""" Try to uncomment below code and see the visualization output (Try different kernels)"""
"""
classifier = SVC(kernel='poly', degree = 3, random_state=0)
classifier = classifier.fit(x_train,y_train)
classifier = SVC(kernel='linear', random_state=0) ## (Equals to SVR)
classifier = classifier.fit(x_train,y_train)
classifier = SVC(kernel='sigmoid', random_state=0)
classifier = classifier.fit(x_train,y_train)
"""
# Make the prediction on train set
y_train_pred = classifier.predict(x_train)
# Make the prediction on train set
y_test_pred = classifier.predict(x_test)
# Acurracy on test and train set
from sklearn.metrics import confusion_matrix
cm_train = confusion_matrix(y_train,y_train_pred)
cm_test = confusion_matrix(y_test,y_test_pred)
import os
import sys
scriptpath = "../../Tools" # functions of acc and CAP
# Add the directory containing your module to the Python path
sys.path.append(os.path.abspath(scriptpath))
import accuracy as ac
t_train,f_train,acc_train = ac.accuracy_on_cm(cm_train)
print('Train status = #{} True, #{} False, %{} Accuracy'.format(t_train,f_train,acc_train*100))
t_test,f_test,acc_test = ac.accuracy_on_cm(cm_test)
print('Test status = #{} True, #{} False, %{} Accuracy'.format(t_test,f_test,acc_test*100))
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = x_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('SVM - rbf kernel (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = x_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('SVM - rbf kernel (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
|
[
"nikan.doosti@outlook.com"
] |
nikan.doosti@outlook.com
|
69163c15593175ec5108d8614e170be4b086b0cc
|
d5a84ba1417d59d6b8eff26124a37ba7186d7e33
|
/test_calculator.py
|
a303fcf1954dd8cadf456028cef326d495242bd5
|
[] |
no_license
|
jonathanzerox/tdd-python
|
7c4ce49acb6eec562c9a382e9aab83cd1963aa23
|
cd718ecdd646046dd0e2d437d050d75f16378280
|
refs/heads/master
| 2021-08-23T17:39:47.264755
| 2017-12-05T22:54:25
| 2017-12-05T22:54:25
| 113,240,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
import unittest
from calculator import Calculator
class TestCalculator(unittest.TestCase):
def SetUp():
print("Setting things up")
def TearDown():
print("Releasing allocated resources back")
def test_addition(self):
calc = Calculator()
self.assertEqual(4, calc.add(2, 2))
def test_multiplication(self):
calc = Calculator()
self.assertEqual(8, calc.mul(4, 2))
if __name__ == '__main__':
unittest.main()
|
[
"jonathanzerox@hotmail.com"
] |
jonathanzerox@hotmail.com
|
2a1f5e4881e26548e1ce7fdb9043a6c590f91749
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/devtestlab/latest/get_lab.py
|
f41e214f30cdd3cfa752fbedcad88a1a4eccd182
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 16,776
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetLabResult',
'AwaitableGetLabResult',
'get_lab',
]
@pulumi.output_type
class GetLabResult:
"""
A lab.
"""
def __init__(__self__, announcement=None, artifacts_storage_account=None, created_date=None, default_premium_storage_account=None, default_storage_account=None, environment_permission=None, extended_properties=None, lab_storage_type=None, load_balancer_id=None, location=None, mandatory_artifacts_resource_ids_linux=None, mandatory_artifacts_resource_ids_windows=None, name=None, network_security_group_id=None, premium_data_disk_storage_account=None, premium_data_disks=None, provisioning_state=None, public_ip_id=None, support=None, tags=None, type=None, unique_identifier=None, vault_name=None, vm_creation_resource_group=None):
if announcement and not isinstance(announcement, dict):
raise TypeError("Expected argument 'announcement' to be a dict")
pulumi.set(__self__, "announcement", announcement)
if artifacts_storage_account and not isinstance(artifacts_storage_account, str):
raise TypeError("Expected argument 'artifacts_storage_account' to be a str")
pulumi.set(__self__, "artifacts_storage_account", artifacts_storage_account)
if created_date and not isinstance(created_date, str):
raise TypeError("Expected argument 'created_date' to be a str")
pulumi.set(__self__, "created_date", created_date)
if default_premium_storage_account and not isinstance(default_premium_storage_account, str):
raise TypeError("Expected argument 'default_premium_storage_account' to be a str")
pulumi.set(__self__, "default_premium_storage_account", default_premium_storage_account)
if default_storage_account and not isinstance(default_storage_account, str):
raise TypeError("Expected argument 'default_storage_account' to be a str")
pulumi.set(__self__, "default_storage_account", default_storage_account)
if environment_permission and not isinstance(environment_permission, str):
raise TypeError("Expected argument 'environment_permission' to be a str")
pulumi.set(__self__, "environment_permission", environment_permission)
if extended_properties and not isinstance(extended_properties, dict):
raise TypeError("Expected argument 'extended_properties' to be a dict")
pulumi.set(__self__, "extended_properties", extended_properties)
if lab_storage_type and not isinstance(lab_storage_type, str):
raise TypeError("Expected argument 'lab_storage_type' to be a str")
pulumi.set(__self__, "lab_storage_type", lab_storage_type)
if load_balancer_id and not isinstance(load_balancer_id, str):
raise TypeError("Expected argument 'load_balancer_id' to be a str")
pulumi.set(__self__, "load_balancer_id", load_balancer_id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if mandatory_artifacts_resource_ids_linux and not isinstance(mandatory_artifacts_resource_ids_linux, list):
raise TypeError("Expected argument 'mandatory_artifacts_resource_ids_linux' to be a list")
pulumi.set(__self__, "mandatory_artifacts_resource_ids_linux", mandatory_artifacts_resource_ids_linux)
if mandatory_artifacts_resource_ids_windows and not isinstance(mandatory_artifacts_resource_ids_windows, list):
raise TypeError("Expected argument 'mandatory_artifacts_resource_ids_windows' to be a list")
pulumi.set(__self__, "mandatory_artifacts_resource_ids_windows", mandatory_artifacts_resource_ids_windows)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_security_group_id and not isinstance(network_security_group_id, str):
raise TypeError("Expected argument 'network_security_group_id' to be a str")
pulumi.set(__self__, "network_security_group_id", network_security_group_id)
if premium_data_disk_storage_account and not isinstance(premium_data_disk_storage_account, str):
raise TypeError("Expected argument 'premium_data_disk_storage_account' to be a str")
pulumi.set(__self__, "premium_data_disk_storage_account", premium_data_disk_storage_account)
if premium_data_disks and not isinstance(premium_data_disks, str):
raise TypeError("Expected argument 'premium_data_disks' to be a str")
pulumi.set(__self__, "premium_data_disks", premium_data_disks)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_ip_id and not isinstance(public_ip_id, str):
raise TypeError("Expected argument 'public_ip_id' to be a str")
pulumi.set(__self__, "public_ip_id", public_ip_id)
if support and not isinstance(support, dict):
raise TypeError("Expected argument 'support' to be a dict")
pulumi.set(__self__, "support", support)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if unique_identifier and not isinstance(unique_identifier, str):
raise TypeError("Expected argument 'unique_identifier' to be a str")
pulumi.set(__self__, "unique_identifier", unique_identifier)
if vault_name and not isinstance(vault_name, str):
raise TypeError("Expected argument 'vault_name' to be a str")
pulumi.set(__self__, "vault_name", vault_name)
if vm_creation_resource_group and not isinstance(vm_creation_resource_group, str):
raise TypeError("Expected argument 'vm_creation_resource_group' to be a str")
pulumi.set(__self__, "vm_creation_resource_group", vm_creation_resource_group)
# --- Generated read-only accessors for GetLabResult -------------------------
# Each property returns one field captured in __init__ via pulumi.set().
# The pulumi.getter(name=...) decorator maps the Python snake_case attribute
# to the API's camelCase wire name.
@property
@pulumi.getter
def announcement(self) -> Optional['outputs.LabAnnouncementPropertiesResponse']:
    """
    The properties of any lab announcement associated with this lab
    """
    return pulumi.get(self, "announcement")

@property
@pulumi.getter(name="artifactsStorageAccount")
def artifacts_storage_account(self) -> str:
    """
    The lab's artifact storage account.
    """
    return pulumi.get(self, "artifacts_storage_account")

@property
@pulumi.getter(name="createdDate")
def created_date(self) -> str:
    """
    The creation date of the lab.
    """
    return pulumi.get(self, "created_date")

@property
@pulumi.getter(name="defaultPremiumStorageAccount")
def default_premium_storage_account(self) -> str:
    """
    The lab's default premium storage account.
    """
    return pulumi.get(self, "default_premium_storage_account")

@property
@pulumi.getter(name="defaultStorageAccount")
def default_storage_account(self) -> str:
    """
    The lab's default storage account.
    """
    return pulumi.get(self, "default_storage_account")

@property
@pulumi.getter(name="environmentPermission")
def environment_permission(self) -> Optional[str]:
    """
    The access rights to be granted to the user when provisioning an environment
    """
    return pulumi.get(self, "environment_permission")

@property
@pulumi.getter(name="extendedProperties")
def extended_properties(self) -> Optional[Mapping[str, str]]:
    """
    Extended properties of the lab used for experimental features
    """
    return pulumi.get(self, "extended_properties")

@property
@pulumi.getter(name="labStorageType")
def lab_storage_type(self) -> Optional[str]:
    """
    Type of storage used by the lab. It can be either Premium or Standard. Default is Premium.
    """
    return pulumi.get(self, "lab_storage_type")

@property
@pulumi.getter(name="loadBalancerId")
def load_balancer_id(self) -> str:
    """
    The load balancer used to for lab VMs that use shared IP address.
    """
    return pulumi.get(self, "load_balancer_id")

@property
@pulumi.getter
def location(self) -> Optional[str]:
    """
    The location of the resource.
    """
    return pulumi.get(self, "location")

@property
@pulumi.getter(name="mandatoryArtifactsResourceIdsLinux")
def mandatory_artifacts_resource_ids_linux(self) -> Optional[Sequence[str]]:
    """
    The ordered list of artifact resource IDs that should be applied on all Linux VM creations by default, prior to the artifacts specified by the user.
    """
    return pulumi.get(self, "mandatory_artifacts_resource_ids_linux")

@property
@pulumi.getter(name="mandatoryArtifactsResourceIdsWindows")
def mandatory_artifacts_resource_ids_windows(self) -> Optional[Sequence[str]]:
    """
    The ordered list of artifact resource IDs that should be applied on all Windows VM creations by default, prior to the artifacts specified by the user.
    """
    return pulumi.get(self, "mandatory_artifacts_resource_ids_windows")

@property
@pulumi.getter
def name(self) -> str:
    """
    The name of the resource.
    """
    return pulumi.get(self, "name")

@property
@pulumi.getter(name="networkSecurityGroupId")
def network_security_group_id(self) -> str:
    """
    The Network Security Group attached to the lab VMs Network interfaces to restrict open ports.
    """
    return pulumi.get(self, "network_security_group_id")

@property
@pulumi.getter(name="premiumDataDiskStorageAccount")
def premium_data_disk_storage_account(self) -> str:
    """
    The lab's premium data disk storage account.
    """
    return pulumi.get(self, "premium_data_disk_storage_account")

@property
@pulumi.getter(name="premiumDataDisks")
def premium_data_disks(self) -> Optional[str]:
    """
    The setting to enable usage of premium data disks.
    When its value is 'Enabled', creation of standard or premium data disks is allowed.
    When its value is 'Disabled', only creation of standard data disks is allowed.
    """
    return pulumi.get(self, "premium_data_disks")

@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
    """
    The provisioning status of the resource.
    """
    return pulumi.get(self, "provisioning_state")

@property
@pulumi.getter(name="publicIpId")
def public_ip_id(self) -> str:
    """
    The public IP address for the lab's load balancer.
    """
    return pulumi.get(self, "public_ip_id")

@property
@pulumi.getter
def support(self) -> Optional['outputs.LabSupportPropertiesResponse']:
    """
    The properties of any lab support message associated with this lab
    """
    return pulumi.get(self, "support")

@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
    """
    The tags of the resource.
    """
    return pulumi.get(self, "tags")

@property
@pulumi.getter
def type(self) -> str:
    """
    The type of the resource.
    """
    return pulumi.get(self, "type")

@property
@pulumi.getter(name="uniqueIdentifier")
def unique_identifier(self) -> str:
    """
    The unique immutable identifier of a resource (Guid).
    """
    return pulumi.get(self, "unique_identifier")

@property
@pulumi.getter(name="vaultName")
def vault_name(self) -> str:
    """
    The lab's Key vault.
    """
    return pulumi.get(self, "vault_name")

@property
@pulumi.getter(name="vmCreationResourceGroup")
def vm_creation_resource_group(self) -> str:
    """
    The resource group in which all new lab virtual machines will be created. To let DevTest Labs manage resource group creation, set this value to null.
    """
    return pulumi.get(self, "vm_creation_resource_group")
class AwaitableGetLabResult(GetLabResult):
    """Awaitable wrapper so `get_lab(...)` can be used with `await` inside
    Pulumi's async runtime while still behaving like a plain GetLabResult
    when used synchronously."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The `if False: yield` makes this function a generator (required by
        # the await protocol) without ever actually yielding; awaiting it
        # immediately returns a plain GetLabResult copy of this object.
        if False:
            yield self
        return GetLabResult(
            announcement=self.announcement,
            artifacts_storage_account=self.artifacts_storage_account,
            created_date=self.created_date,
            default_premium_storage_account=self.default_premium_storage_account,
            default_storage_account=self.default_storage_account,
            environment_permission=self.environment_permission,
            extended_properties=self.extended_properties,
            lab_storage_type=self.lab_storage_type,
            load_balancer_id=self.load_balancer_id,
            location=self.location,
            mandatory_artifacts_resource_ids_linux=self.mandatory_artifacts_resource_ids_linux,
            mandatory_artifacts_resource_ids_windows=self.mandatory_artifacts_resource_ids_windows,
            name=self.name,
            network_security_group_id=self.network_security_group_id,
            premium_data_disk_storage_account=self.premium_data_disk_storage_account,
            premium_data_disks=self.premium_data_disks,
            provisioning_state=self.provisioning_state,
            public_ip_id=self.public_ip_id,
            support=self.support,
            tags=self.tags,
            type=self.type,
            unique_identifier=self.unique_identifier,
            vault_name=self.vault_name,
            vm_creation_resource_group=self.vm_creation_resource_group)
def get_lab(expand: Optional[str] = None,
            name: Optional[str] = None,
            resource_group_name: Optional[str] = None,
            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLabResult:
    """
    Fetch an existing DevTest Labs lab by name and resource group.

    :param str expand: Specify the $expand query. Example: 'properties($select=defaultStorageAccount)'
    :param str name: The name of the lab.
    :param str resource_group_name: The name of the resource group.
    """
    # Arguments are sent to the provider with their camelCase wire names.
    invoke_args = {
        'expand': expand,
        'name': name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Blocking invoke against the provider; the typed result is re-wrapped in
    # the awaitable subclass so callers may `await` it.
    result = pulumi.runtime.invoke('azure-nextgen:devtestlab/latest:getLab',
                                   invoke_args, opts=opts, typ=GetLabResult).value

    return AwaitableGetLabResult(
        announcement=result.announcement,
        artifacts_storage_account=result.artifacts_storage_account,
        created_date=result.created_date,
        default_premium_storage_account=result.default_premium_storage_account,
        default_storage_account=result.default_storage_account,
        environment_permission=result.environment_permission,
        extended_properties=result.extended_properties,
        lab_storage_type=result.lab_storage_type,
        load_balancer_id=result.load_balancer_id,
        location=result.location,
        mandatory_artifacts_resource_ids_linux=result.mandatory_artifacts_resource_ids_linux,
        mandatory_artifacts_resource_ids_windows=result.mandatory_artifacts_resource_ids_windows,
        name=result.name,
        network_security_group_id=result.network_security_group_id,
        premium_data_disk_storage_account=result.premium_data_disk_storage_account,
        premium_data_disks=result.premium_data_disks,
        provisioning_state=result.provisioning_state,
        public_ip_id=result.public_ip_id,
        support=result.support,
        tags=result.tags,
        type=result.type,
        unique_identifier=result.unique_identifier,
        vault_name=result.vault_name,
        vm_creation_resource_group=result.vm_creation_resource_group)
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
4f8cdbd5a7b33f05ef45e53e15cec978c09c4e1f
|
a86acf28472700008261fb3709134483b4d4c7e2
|
/myutils/seq2seq_peeky.py
|
39147587ca4e866f28e07cc94f092969ca0bb185
|
[] |
no_license
|
s1len7/ai-rnn
|
5a0ab40e7aa0371f41131e99371991c3aeecfa98
|
e3f0a07f1a5e4681b2209276b5f3d6a5d4901f1d
|
refs/heads/master
| 2022-11-30T13:55:41.267065
| 2020-07-29T11:19:18
| 2020-07-29T11:19:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,012
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 1 11:24:24 2020
@author: shkim
"""
"""
## seq2seq 성능 개선
* 1) 입력 데이터 반전(Reverse)
* 2) 엿보기(Peeky)
"""
"""
## seq2seq 성능 개선 : 엿보기(Peeky)
### base seq2seq 모델에서의 동작
* Encoder는 입력문장(문제문장)을 고정길이 벡터 h로 변환함
* 이때 h안에는 Decoder에 필요한 정보가 모두 담겨 있음
* 즉 h가 Decoder에 있어서 유일한 정보인 셈임
* 최초 시각의 LSTM 계층만이 벡터 h를 이용함 --> 이 중요한 h 정보를 더 활용할 수 없을까?
### 개선된 seq2seq 모델 : 엿보기(Peeky) 모델
* 중요한 정보가 담긴 Encoder의 출력 h를 Decoder의 다른 계층에도 전달해 주는 것
* Encoder의 출력 h를 모든 시각의 LSTM 계층과 Affine 계층에 전해줌 --> 집단 지성
* LSTM 계층과 Affine 계층에 입력되는 벡터가 2개씩 됨 --> concatenate 됨
"""
#%%
"""
## 개선된 seq2seq 모델 : 엿보기(Peeky) 모델 구현
"""
import numpy as np
import sys
sys.path.append('..')
from myutils.time_layers import TimeEmbedding, TimeLSTM, TimeAffine, TimeSoftmaxWithLoss
from myutils.seq2seq import Seq2seq, Encoder
#%%
class DecoderPeeky:
    """Seq2seq decoder with 'peeky' connections.

    The encoder's output vector h is not only used to seed the LSTM state:
    a copy of h is concatenated onto the input of the LSTM at *every* time
    step and onto the input of the Affine (output) layer as well, so the
    whole decoder can "peek" at the encoded context.
    """
    def __init__(self, vocab_size, wordvec_size, hideen_size):
        # NOTE(review): 'hideen_size' is a typo for 'hidden_size'; kept as-is
        # because renaming would break keyword-argument callers.
        V, D, H = vocab_size, wordvec_size, hideen_size
        rn = np.random.randn

        # Peeky layers receive h concatenated with their normal input, hence
        # LSTM input width H+D and Affine input width H+H; weights use
        # Xavier-style 1/sqrt(fan_in) scaling, embeddings a fixed 1/100.
        embed_W = (rn(V, D) / 100).astype('f')
        lstm_Wx = (rn(H+D, 4*H) / np.sqrt(H+D)).astype('f')
        lstm_Wh = (rn(H, 4*H) / np.sqrt(H)).astype('f')
        lstm_b = np.zeros(4*H).astype('f')
        affine_W = (rn(H+H, V) / np.sqrt(H+H)).astype('f')
        affine_b = np.zeros(V).astype('f')

        self.embed = TimeEmbedding(embed_W)
        self.lstm = TimeLSTM(lstm_Wx, lstm_Wh, lstm_b, stateful=True)
        self.affine = TimeAffine(affine_W, affine_b)

        # Aggregate parameters and gradients of all sub-layers.
        self.params, self.grads = [], []
        for layer in (self.embed, self.lstm, self.affine):
            self.params += layer.params
            self.grads += layer.grads
        self.cache = None

    def forward(self, xs, h):
        """Score the target sequence xs ((N, T) word ids) given encoder
        output h ((N, H)); returns per-step vocabulary scores."""
        N, T = xs.shape
        N, H = h.shape

        self.lstm.set_state(h)

        out = self.embed.forward(xs)
        # Tile h across the T time steps: (N, H) -> (N, T, H).  repeat on
        # axis 0 duplicates each row T times consecutively, so the reshape
        # keeps rows grouped per sample.
        hs = np.repeat(h, T, axis=0).reshape(N, T, H)
        out = np.concatenate((hs, out), axis=2)

        out = self.lstm.forward(out)
        # Peek again before the output projection.
        out = np.concatenate((hs, out), axis=2)

        score = self.affine.forward(out)
        self.cache = H  # needed by backward() to split gradients
        return score

    def backward(self, dscore):
        """Backprop through affine/LSTM/embedding; returns dh, the gradient
        w.r.t. the encoder output (sum over all the places h was fed)."""
        H = self.cache

        dout = self.affine.backward(dscore)
        # Split the concatenated gradient back into the h part (first H
        # channels) and the layer-input part.
        dout, dhs0 = dout[:, :, H:], dout[:, :, :H]
        dout = self.lstm.backward(dout)
        dembed, dhs1 = dout[:, :, H:], dout[:, :, :H]
        self.embed.backward(dembed)

        # h was used at every time step and as the initial LSTM state, so
        # its gradient is the sum of all those contributions.
        dhs = dhs0 + dhs1
        dh = self.lstm.dh + np.sum(dhs, axis=1)
        return dh

    def generate(self, h, start_id, sample_size):
        """Greedily sample `sample_size` ids, starting from `start_id`,
        feeding each argmax prediction back in as the next input."""
        sampled = []
        char_id = start_id
        self.lstm.set_state(h)

        H = h.shape[1]
        # h reshaped for concatenation with (1, 1, D) step inputs.
        peeky_h = h.reshape(1, 1, H)
        for _ in range(sample_size):
            x = np.array([char_id]).reshape((1, 1))
            out = self.embed.forward(x)

            out = np.concatenate((peeky_h, out), axis=2)
            out = self.lstm.forward(out)
            out = np.concatenate((peeky_h, out), axis=2)
            score = self.affine.forward(out)

            char_id = np.argmax(score.flatten())
            sampled.append(char_id)

        return sampled
#%%
class Seq2seqPeeky(Seq2seq):
    """Seq2seq variant whose decoder 'peeks' at the encoded vector at every
    time step (DecoderPeeky) instead of only at the first LSTM step."""
    def __init__(self, vocab_size, wordvec_size, hidden_size):
        vocab, wordvec, hidden = vocab_size, wordvec_size, hidden_size
        self.encoder = Encoder(vocab, wordvec, hidden)
        self.decoder = DecoderPeeky(vocab, wordvec, hidden)
        self.softmax = TimeSoftmaxWithLoss()
        # Expose the combined parameter/gradient lists of both halves.
        self.params = [*self.encoder.params, *self.decoder.params]
        self.grads = [*self.encoder.grads, *self.decoder.grads]
#%%
|
[
"shkim.hi@gmail.com"
] |
shkim.hi@gmail.com
|
2b010340767a1cea96729c9cee25bcdfd1e4b981
|
c6b53079177e99790651980910358f9643eff009
|
/blog/migrations/0009_blogpost_user.py
|
c466dc61885fc1ccf07ec3e976e2e2352c4eb528
|
[] |
no_license
|
ravi19999/Try-Django-2.2
|
09bffcf39806fce4fdbb4ff59f9fc5934ba9991a
|
f0379445bdb7fc07c14bcbf7514438ede3700223
|
refs/heads/master
| 2023-02-07T02:15:43.244332
| 2020-12-19T14:26:13
| 2020-12-19T14:26:13
| 303,755,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
# Generated by Django 2.2 on 2019-07-28 17:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: add a nullable `user` FK (to the swappable
    AUTH_USER_MODEL) on BlogPost. SET_NULL keeps a post when its author is
    deleted; `default=1` only back-fills existing rows at migration time."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('blog', '0008_auto_20190727_1119'),
    ]

    operations = [
        migrations.AddField(
            model_name='blogpost',
            name='user',
            field=models.ForeignKey(default=1, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"dahalravindra@gmail.com"
] |
dahalravindra@gmail.com
|
9889f501e5014b0b7e9eb35d9828310da06d2dac
|
879b62155c70622d4100bf44c219f11288d90d76
|
/peak_detection_matlab.py
|
3302b3604bd5c0d46c5ca0f9fc18171ac27db9b3
|
[] |
no_license
|
praneelrathore/Ballistocardiography
|
c9e53d714659c91814f40ca59f24d2e127de3a93
|
c8f8c2841f53bbacfb2179fd3aef44f3a58d3afa
|
refs/heads/master
| 2021-11-11T17:12:11.816019
| 2021-11-02T16:41:41
| 2021-11-02T16:41:41
| 74,914,249
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,114
|
py
|
def beat_location(frames_after_spline, window_size, cs_new, Fs):
    """Locate heartbeat peaks in a spline-interpolated BCG signal.

    Bug fixes vs. the original (a direct MATLAB port that could not run):
      * `cs_new(i)`, `x_index(k)`, `beat_point(k)` used MATLAB call syntax on
        Python lists (TypeError) — replaced with `[...]` indexing.
      * `x_index[k]` / `beat_point[k]` indexed with a stale counter `k` that
        was never incremented (the MATLAB increments were commented out) and
        would raise IndexError on empty lists — replaced with `[-1]`
        (the most recently detected beat), guarded by emptiness checks.
      * The per-window scan could start at index -1 (Python wrap-around) and
        the ±15-sample comparisons could run past the end of the signal —
        both are now bounds-guarded.

    Parameters:
        frames_after_spline: number of samples of cs_new to scan.
        window_size: length of each search window, in samples.
        Fs: sampling frequency (Hz); Fs/5 and Fs/2 act as refractory limits.
        cs_new: 1-D sequence of signal amplitudes (len > frames_after_spline).

    Returns:
        (beat_point, x_index): peak amplitudes and their sample indices.
    """
    x_index = []     # sample index of each detected beat
    beat_point = []  # amplitude at each detected beat
    index = 0        # index of the most recent candidate peak
    window = 0       # end of the most recent search window

    # Skip ahead to the first zero sample or zero-crossing of the signal.
    find_o = 0
    while 1:
        if cs_new[find_o] == 0:
            break
        elif ((cs_new[find_o] > 0 and cs_new[find_o + 1] < 0) or
              (cs_new[find_o] < 0 and cs_new[find_o + 1] > 0)):
            break
        else:
            find_o = find_o + 1

    n = len(cs_new)
    for i in range(0, frames_after_spline + 1, window_size):
        if i == 0:
            maxi = 0
            lower = find_o
        else:
            maxi = 0
            if not x_index:
                # No beat found yet: scan from the first full window.
                lower = window_size
            elif (window - index) < Fs / 5:
                # Last peak was too close to the window edge: enforce a
                # refractory period of Fs/2 samples after it, but allow a
                # larger peak inside that span to replace the last beat.
                lower = x_index[-1] + Fs / 2
                if lower >= frames_after_spline:
                    lower = frames_after_spline - 1
                check = i
                while check < lower:
                    if (check - 15 >= 0 and check + 15 < n
                            and cs_new[check] > cs_new[check - 1]
                            and cs_new[check] > cs_new[check + 1]
                            and cs_new[check] > beat_point[-1]
                            and cs_new[check] > cs_new[check - 15]
                            and cs_new[check] > cs_new[check + 15]):
                        # Sharper local maximum found: replace the last beat.
                        # (Next iteration re-tests and falls through to the
                        # else branch, so the loop always terminates.)
                        beat_point[-1] = cs_new[check]
                        x_index[-1] = check
                    else:
                        check = check + 1
            else:
                lower = i

        flag = 0
        window = i + window_size - 1
        if window - frames_after_spline > Fs / 5:
            break
        elif window > frames_after_spline:
            window = frames_after_spline

        # Scan the window for the highest strict local maximum above zero.
        for j in range(max(int(lower) - 1, 0), window + 1):
            if (j != frames_after_spline and j < frames_after_spline) and cs_new[j] > cs_new[j + 1]:
                if j != 0 and cs_new[j] > cs_new[j - 1] and cs_new[j] > 0:
                    if maxi < cs_new[j]:
                        flag = 1
                        maxi = cs_new[j]
                        index = j
        if flag != 0:
            x_index.append(index)
            beat_point.append(cs_new[index])

    return beat_point, x_index
|
[
"a.garg95@gmail.com"
] |
a.garg95@gmail.com
|
b02bca5c918def6a70efac656fdcaa65b903a14d
|
35a2a3f5fa6573c32e411d399a60e6f67ae51556
|
/tests/python/mkl/test_mkldnn.py
|
2caf7af7eb4c4c9e8eb2cdcff9d02cfcbd19da5f
|
[
"Apache-2.0",
"BSD-2-Clause-Views",
"Zlib",
"BSD-2-Clause",
"BSD-3-Clause",
"Intel"
] |
permissive
|
TuSimple/mxnet
|
21c1b8fedd1a626cb57189f33ee5c4b2b382fd79
|
4cb69b85b4db8e1492e378c6d1a0a0a07bd737fb
|
refs/heads/master
| 2021-01-09T07:59:24.301512
| 2019-07-27T00:56:52
| 2019-07-27T00:56:52
| 53,660,918
| 33
| 47
|
Apache-2.0
| 2019-07-27T01:09:17
| 2016-03-11T10:56:36
|
Python
|
UTF-8
|
Python
| false
| false
| 7,303
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
MKL-DNN related test cases
"""
import sys
import os
import numpy as np
import mxnet as mx
from mxnet.test_utils import assert_almost_equal
from mxnet import gluon
from mxnet.gluon import nn
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.append(os.path.join(curr_path, '../unittest/'))
from common import with_seed
def test_mkldnn_model():
    """Smoke-test: bind and run (forward + backward, twice) a model JSON that
    previously triggered MKL-DNN issues; any exception fails the test."""
    model = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data",
                         "test_mkldnn_test_mkldnn_model_model1.json")
    shape = (32, 3, 300, 300)
    ctx = mx.cpu()

    sym = mx.sym.load(model)
    args = sym.list_arguments()
    shapes = sym.infer_shape(data=shape)

    def get_tensors(args, shapes, ctx):
        # One all-ones NDArray per argument, matching its inferred shape.
        return {x: mx.nd.ones(y, ctx) for x, y in zip(args, shapes)}

    inputs = get_tensors(args, shapes[0], ctx)
    grads = get_tensors(args, shapes[0], ctx)

    try:
        exe = sym.bind(ctx, inputs, args_grad=grads)
        for _ in range(2):
            exe.forward(is_train=True)
            # wait_to_read() forces the async engine to finish so failures
            # surface inside this try block.
            for y in exe.outputs:
                y.wait_to_read()
            exe.backward()
            for y in exe.grad_arrays:
                y.wait_to_read()
    except:  # pylint: disable=bare-except
        assert 0, "test_mkldnn_model exception in bind and execution"
def test_mkldnn_ndarray_slice():
    """Indexing into an NDArray produced by an MKL-DNN conv must read the
    correct values (pinned regression constant for an all-ones input)."""
    ctx = mx.cpu()
    net = gluon.nn.HybridSequential()
    with net.name_scope():
        net.add(gluon.nn.Conv2D(channels=32, kernel_size=3, activation=None))
    net.collect_params().initialize(ctx=ctx)
    x = mx.nd.array(np.ones([32, 3, 224, 224]), ctx)
    y = net(x)

    # trigger computation on ndarray slice
    assert_almost_equal(y[0].asnumpy()[0, 0, 0], 0.3376348)
def test_mkldnn_engine_threading():
    """Run the MKL-DNN engine from two different threads (main thread plus a
    DataLoader worker) and check the output stays numerically correct."""
    net = gluon.nn.HybridSequential()
    with net.name_scope():
        net.add(gluon.nn.Conv2D(channels=32, kernel_size=3, activation=None))
    net.collect_params().initialize(ctx=mx.cpu())

    class Dummy(gluon.data.Dataset):
        # Minimal two-sample dataset, only used to spin up a worker thread.
        def __len__(self):
            return 2

        def __getitem__(self, key):
            return key, np.ones((3, 224, 224)), np.ones((10, ))

    loader = gluon.data.DataLoader(Dummy(), batch_size=2, num_workers=1)

    X = (32, 3, 32, 32)
    # trigger mkldnn execution thread
    y = net(mx.nd.array(np.ones(X))).asnumpy()

    # Use Gluon dataloader to trigger different thread.
    # below line triggers different execution thread
    for _ in loader:
        y = net(mx.nd.array(np.ones(X))).asnumpy()
        # output should be 0.016711406 (non-mkldnn mode output)
        assert_almost_equal(y[0, 0, 0, 0], 0.016711406)
        break
@with_seed()
def test_reshape_before_conv():
    """A reshape feeding into Conv2D must give identical outputs and input
    gradients in imperative vs. hybridized (MKL-DNN graph) execution."""
    class Net(gluon.HybridBlock):
        """
        test Net
        """
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(10, (3, 3))
                self.conv1 = nn.Conv2D(5, (3, 3))

        def hybrid_forward(self, F, x, *args, **kwargs):
            # 0 in reshape keeps the corresponding input dimension.
            x_reshape = x.reshape((0, 0, 20, 5))
            y = self.conv0(x_reshape)
            y_reshape = y.reshape((0, 0, 9, 6))
            out = self.conv1(y_reshape)
            return out

    x = mx.nd.random.uniform(shape=(2, 4, 10, 10))
    x.attach_grad()
    net = Net()
    net.collect_params().initialize()
    # Imperative pass.
    with mx.autograd.record():
        out1 = net(x)
    out1.backward()
    dx1 = x.grad
    # Hybridized pass must produce the same results.
    net.hybridize()
    with mx.autograd.record():
        out2 = net(x)
    out2.backward()
    mx.test_utils.assert_almost_equal(dx1.asnumpy(), x.grad.asnumpy(), rtol=1e-5, atol=1e-6)
    mx.test_utils.assert_almost_equal(out1.asnumpy(), out2.asnumpy(), rtol=1e-5, atol=1e-6)
@with_seed()
def test_slice_before_conv():
    """A slice feeding into Conv2D must give identical outputs and input
    gradients in imperative vs. hybridized (MKL-DNN graph) execution."""
    class Net(gluon.HybridBlock):
        """
        test Net
        """
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(4, (3, 3))
                self.conv1 = nn.Conv2D(4, (3, 3))

        def hybrid_forward(self, F, x, *args, **kwargs):
            x_slice = x.slice(begin=(0, 0, 0, 0), end=(2, 4, 10, 10))
            y = self.conv0(x_slice)
            y_slice = y.slice(begin=(1, 0, 2, 2), end=(2, 1, 7, 7))
            out = self.conv1(y_slice)
            return out

    x = mx.nd.random.uniform(shape=(2, 10, 10, 10))
    x.attach_grad()
    net = Net()
    net.collect_params().initialize()
    # Imperative pass.
    with mx.autograd.record():
        out1 = net(x)
    out1.backward()
    dx1 = x.grad
    # Hybridized pass must produce the same results.
    net.hybridize()
    with mx.autograd.record():
        out2 = net(x)
    out2.backward()
    mx.test_utils.assert_almost_equal(dx1.asnumpy(), x.grad.asnumpy(), rtol=1e-5, atol=1e-6)
    mx.test_utils.assert_almost_equal(out1.asnumpy(), out2.asnumpy(), rtol=1e-5, atol=1e-6)
@with_seed()
def test_slice_reshape_before_conv():
    """A slice followed by a reshape feeding into Conv2D must give identical
    outputs/gradients in imperative vs. hybridized (MKL-DNN) execution."""
    class Net(gluon.HybridBlock):
        """
        test Net
        """
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(4, (3, 3))
                self.conv1 = nn.Conv2D(4, (3, 3))

        def hybrid_forward(self, F, x, *args, **kwargs):
            x_slice = x.slice(begin=(0, 0, 0, 0), end=(2, 4, 8, 9))
            y = self.conv0(x_slice)
            # 0 in reshape keeps the corresponding input dimension.
            y_reshape = y.reshape((0, 0, 14, 3))
            out = self.conv1(y_reshape)
            return out

    x = mx.nd.random.uniform(shape=(2, 10, 10, 10))
    x.attach_grad()
    net = Net()
    net.collect_params().initialize()
    # Imperative pass.
    with mx.autograd.record():
        out1 = net(x)
    out1.backward()
    dx1 = x.grad
    # Hybridized pass must produce the same results.
    net.hybridize()
    with mx.autograd.record():
        out2 = net(x)
    out2.backward()
    mx.test_utils.assert_almost_equal(dx1.asnumpy(), x.grad.asnumpy(), rtol=1e-5, atol=1e-6)
    mx.test_utils.assert_almost_equal(out1.asnumpy(), out2.asnumpy(), rtol=1e-5, atol=1e-6)
def test_mkldnn_sum_inplace_with_cpu_layout():
    """add_n combining an MKL-DNN conv output with a default (CPU-layout)
    input must handle the layout mismatch in the in-place sum correctly."""
    x_shape = (32, 3, 224, 224)
    x_npy = np.ones(x_shape)
    y_shape = (32, 32, 222, 222)
    y_npy = np.ones(y_shape)
    x = mx.sym.Variable("x")
    y = mx.sym.Variable("y")
    z = mx.symbol.Convolution(data=x, num_filter=32, kernel=(3, 3))
    z = mx.sym.add_n(z, y)
    exe = z.simple_bind(ctx=mx.cpu(), x=x_shape, y=y_shape)
    out = exe.forward(is_train=False, x=x_npy, y=y_npy)[0]
    # Weights default-init to zero, so conv output is 0 and the sum is y = 1.
    assert_almost_equal(out[0].asnumpy()[0, 0, 0], 1.0)
if __name__ == '__main__':
    # Bug fix: the original called test_mkldnn_install(), which is not
    # defined anywhere in this file and raised NameError. Run the tests
    # that actually exist in this module instead.
    test_mkldnn_model()
    test_mkldnn_ndarray_slice()
    test_mkldnn_engine_threading()
    test_reshape_before_conv()
    test_slice_before_conv()
    test_slice_reshape_before_conv()
    test_mkldnn_sum_inplace_with_cpu_layout()
|
[
"anirudh2290@apache.org"
] |
anirudh2290@apache.org
|
81264f2bcadaa766a81e3a63ef481439ed76116f
|
e20ed90b9be7a0bcdc1603929d65b2375a224bf6
|
/generated-libraries/python/netapp/fpolicy/event_name.py
|
476ecd9c31bd1a9cacb1652502a4f667427125da
|
[
"MIT"
] |
permissive
|
radekg/netapp-ontap-lib-gen
|
530ec3248cff5ead37dc2aa47ced300b7585361b
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
refs/heads/master
| 2016-09-06T17:41:23.263133
| 2015-01-14T17:40:46
| 2015-01-14T17:40:46
| 29,256,898
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
class EventName(basestring):
    """
    Event name

    Generated typed-string marker for the NetApp ONTAP API bindings.
    NOTE(review): subclasses Python 2's ``basestring`` — this module is
    Python 2 only (``basestring`` does not exist in Python 3).
    """

    @staticmethod
    def get_api_name():
        # Wire name of this field in the ONTAP API request/response.
        return "event-name"
|
[
"radek@gruchalski.com"
] |
radek@gruchalski.com
|
0aced39f9e720a7bd36e2e19e7abdce7a403c575
|
6b2d62130252c0921715b22a626a2ce94ba91ee1
|
/ITP125/Homework/Homework 3/hw03a.py
|
1691b1ab48b6e8db263cbb850bf92b492a0d76e4
|
[] |
no_license
|
sthsher/USC_CECS
|
fba48c7b331f562a802958a9412ab2d5ee1f2ec3
|
776ca95cab2b1c132ee87e1fd8ce95c77315e452
|
refs/heads/master
| 2018-12-11T10:42:58.576051
| 2016-11-01T06:20:11
| 2016-11-01T06:20:11
| 24,713,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
# Grocery exercise data: items requested, units on hand, and unit prices.
shopping_list = ["banana", "orange", "apple"]

stock = {
    "banana": 6,
    "apple": 0,
    "orange": 32,
    "pear": 15
}

prices = {
    "banana": 4,
    "apple": 2,
    "orange": 1.5,
    "pear": 3
}

# Write your code below!
def compute_bill(food):
    """Total the price of every in-stock item in `food`, decrementing the
    shared `stock` count as each item is sold; out-of-stock items are free
    (skipped)."""
    total = 0
    for item in food:
        if stock[item] <= 0:
            # Sold out: nothing charged, stock unchanged.
            continue
        total += prices[item]
        stock[item] -= 1
    return total
|
[
"stephen.sher.94@gmail.com"
] |
stephen.sher.94@gmail.com
|
c6c061e8c03a18c82ab3cd76a3c826f444b900f1
|
14c831041866edeed1fb3a5643405542fe736939
|
/datasetGen.py
|
f1e20ec710d378afc9b9e20cff19a3c148fd8dba
|
[] |
no_license
|
Rumee040/Retinal-Vessel-Segmentation-using-Convolutional-Network
|
d4a37d72d8fde637b53c5d2e279c9848d1dc1313
|
41333730fd2c27a99c0bbb149dfaa739c04ece50
|
refs/heads/master
| 2021-05-06T06:01:16.863817
| 2017-12-20T11:58:19
| 2017-12-20T11:58:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,468
|
py
|
#import cv2
import numpy as np
from sys import exit
from PIL import Image
import matplotlib.pyplot as plt
import random
## defining parameters
# Extracts random patchSize x patchSize RGB patches from the DRIVE retinal
# test set, balanced between vessel (positive) and background (negative)
# centre pixels, and saves them as .npy arrays for classifier training.
patchSize = 27  # rectangular patch with size patchSize*patchSize*channel
patchPerImg = 1000  # patches per image
numImage = 20  # number of images
totalPatch = patchPerImg * numImage
data = np.ones((totalPatch, patchSize, patchSize, 3))  # all of the patches will be stored here
dataLoc = np.ones((totalPatch, 2))  # location of the patches stores as (row, column)
dataLabel = np.ones((totalPatch))  # label of the patches 0 - neg, 1 - pos
balance = 0.5  # balance between positive and negative patches
# NOTE(review): `balance` is unused below; `positive` hard-codes 0.5 instead.
positive = int(patchPerImg * 0.5)  # number of positive image in an image
negative = patchPerImg - positive  # number of negative image in an image

## reading the image and mask
for i in range(1, numImage + 1):
    # DRIVE filenames are zero-padded to two digits, e.g. '01_test.tif'.
    imgNum = str(i)
    if i < 10:
        imgNum = '0' + imgNum
    imgName = imgNum + '_test.tif'
    img = Image.open('E:\\library of EEE\\4-2\\eee 426\\data\\DRIVE\\DRIVE\\test\\images\\' + imgName)
    maskName = imgNum + '_test_mask.gif'
    mask = Image.open('E:\\library of EEE\\4-2\\eee 426\\data\\DRIVE\\DRIVE\\test\\mask\\' + maskName)
    gndTruthName = imgNum + '_manual1.gif'
    gndTruth = Image.open('E:\\library of EEE\\4-2\\eee 426\\data\\DRIVE\\DRIVE\\test\\1st_manual\\' + gndTruthName)

    ## converting them to numpy array
    img = np.array(img)
    #img = np.array(img.getdata()).reshape(img.size[1], img.size[0], 3) # Image class store image as (width, height) but we want it as (row, column)
    #img = img.astype('float32') / 255 # to see the image in plt
    mask = mask.convert('RGB')
    #mask = np.array(mask.getdata()).reshape(mask.size[1], mask.size[0], 3)
    mask = np.array(mask)
    #mask = mask.astype('float32') / 255
    gndTruth = gndTruth.convert('RGB')
    # Ground truth is binary, so only one channel is kept.
    gndTruth = np.array(gndTruth)[:,:,0]
    #gndTruth = gndTruth.astype('float32') / 255

    ## cutting out patches from the image
    imgRow = img.shape[0]
    imgCol = img.shape[1]
    count = 0
    ind = (i - 1) * patchPerImg  # offset of this image's patches in `data`
    posCount = 0
    negCount = 0
    while count < patchPerImg:
        # Draw a random candidate patch centre (r, c); rejected candidates
        # simply retry, so the loop runs until patchPerImg are accepted.
        r = int(round(random.uniform(0, img.shape[0])))
        c = int(round(random.uniform(0, img.shape[1])))
        rStart = r - patchSize // 2
        rEnd = r + patchSize // 2 + 1
        cStart = c - patchSize // 2
        cEnd = c + patchSize // 2 + 1
        # Accept only patches fully inside the FOV mask and at least
        # patchSize//2 pixels away from the image border (13/14 margins).
        if np.all(mask[rStart:rEnd, cStart:cEnd]) and r > 13 and r < imgRow - 14 and c > 13 and c < imgCol - 14:
            label = gndTruth[r, c]
            # Enforce the positive/negative quota for this image.
            if label == 0:
                if negCount == negative:
                    continue
                else:
                    negCount += 1
            else:
                if posCount == positive:
                    continue
                else:
                    posCount += 1
            data[ind + count, :, :, :] = img[rStart:rEnd, cStart:cEnd, :]
            dataLoc[ind + count] = np.array([r, c])
            # NOTE(review): gndTruth is 0/255, so positive labels are stored
            # as 255 (not 1) — confirm downstream code expects this.
            dataLabel[ind + count] = label
            count += 1
    print(negCount, posCount)

print(np.count_nonzero(dataLabel))
## storing the images and data
np.save('E:\\library of EEE\\4-2\\eee 426\\data\\MSCprojectDataBase\\simpleClassifierDataBase\\DRIVEtestData', data)
np.save('E:\\library of EEE\\4-2\\eee 426\\data\\MSCprojectDataBase\\simpleClassifierDataBase\\DRIVEtestDataLcation', dataLoc)
np.save('E:\\library of EEE\\4-2\\eee 426\\data\\MSCprojectDataBase\\simpleClassifierDataBase\\DRIVEtestDataLabel', dataLabel)
|
[
"noreply@github.com"
] |
noreply@github.com
|
ae4cb13734a0740053a6c4093337ac9c7f2ab6d8
|
de707c94c91f554d549e604737b72e6c86eb0755
|
/supervised_learning/0x02-tensorflow/7-evaluate.py
|
16e4666e5785a1670cb87f5a081e939092818dc2
|
[] |
no_license
|
ejonakodra/holbertonschool-machine_learning-1
|
885cf89c1737573228071e4dc8e26304f393bc30
|
8834b201ca84937365e4dcc0fac978656cdf5293
|
refs/heads/main
| 2023-07-10T09:11:01.298863
| 2021-08-11T03:43:59
| 2021-08-11T03:43:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
#!/usr/bin/env python3
"""
Defines a function that evaluates output of
neural network classifier
"""
import tensorflow as tf
def evaluate(X, Y, save_path):
    """
    Evaluates output of neural network

    parameters:
        X [numpy.ndarray]: contains the input data to evaluate
        Y [numpy.ndarray]: contains the one-hot labels for X
        save_path [string]: location to load the model from

    returns:
        the network's prediction, accuracy, and loss, respectively
    """
    with tf.Session() as sess:
        # Rebuild the graph from the checkpoint's MetaGraph, then restore
        # the trained variable values into this session.
        saver = tf.train.import_meta_graph(save_path + '.meta')
        saver.restore(sess, save_path)
        x = tf.get_collection('x')[0]
        y = tf.get_collection('y')[0]
        y_pred = tf.get_collection('y_pred')[0]
        accuracy = tf.get_collection('accuracy')[0]
        loss = tf.get_collection('loss')[0]
        # Improvement: fetch all three tensors in a single sess.run call
        # instead of three separate graph executions — identical results,
        # one forward pass instead of three.
        prediction, accuracy_value, loss_value = sess.run(
            [y_pred, accuracy, loss], feed_dict={x: X, y: Y})
    return (prediction, accuracy_value, loss_value)
|
[
"eislek02@gmail.com"
] |
eislek02@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.