Dataset schema (one record per source file; each record below lists these fields, then the file content):

| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2-616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-69 |
| license_type | string | 2 classes |
| repo_name | string | length 5-118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4-63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k-686M, nullable |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2-10.3M |
| extension | string | 246 classes |
| content | string | length 2-10.3M |
| authors | list | length 1-1 |
| author_id | string | length 0-212 |

path: /info_Instagram.py | extension: py | length_bytes: 1,661
repo_name: jev0m/information-div0m | branch_name: refs/heads/main
blob_id: c3084d05deb4895e75b4f3656c9386de01067a15 | directory_id: ee6c5924bca5ad4df64c6922129d600ae3e1ba71
content_id: 725ff71e88a0df89e7bada35a956061c37bf3195
snapshot_id: f30520f912225ab10f95fd95efe6faaaee314df2 | revision_id: 447c33588199f610992d7d3421afbf3ccf39e871
detected_licenses: [] | license_type: no_license
visit_date: 2023-03-21T13:52:24.434171 | revision_date: 2021-03-19T12:59:45 | committer_date: 2021-03-19T12:59:45
github_id: 349,423,827 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
authors: ["noreply@github.com"] | author_id: jev0m.noreply@github.com
content:

```python
import requests
import pyfiglet
## By Xcode & @xcodeon1
## By Twitter : @Matrix0700
R = '\033[31m'
G = '\033[32m'
B = '\033[34m'
print(R+" @xcodeon1")
br = pyfiglet.figlet_format("Info.py")
print(R+br)
user = input(B+"username :")
print(R+"-"*40)
url = "https://i.instagram.com:443/api/v1/users/lookup/"
cookies = {"mid": "XOSINgABAAG1IDmaral3noOozrK0rrNSbPuSbzHq"}
headers = {"Connection": "close", "X-IG-Connection-Type": "WIFI", "X-IG-Capabilities": "3R4=",
"Accept-Language": "ar-AE",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"User-Agent": "Instagram 99.4.0 S3od_al3nzi (Dmaral3noOoz)",
"Accept-Encoding": "gzip, deflate"}
data = {"signed_body": "35a2d547d3b6ff400f713948cdffe0b789a903f86117eb6e2f3e573079b2f038.{\"q\":\"%s\"}" % user }
resp = requests.post(url, headers=headers, cookies=cookies, data=data)
info = resp.json()
# print(info)
print(G+"Username :"+user)
if info['email_sent'] == False:
    print(G+"Email_Sent : False")
else:
    print("Email_Sent : True")
if info['sms_sent'] == False:
    print(G+"Sms_Sent : False")
else:
    print("Sms_Sent : True")
def emailPhoneIssue(info):
    try:
        if info['obfuscated_email']:
            print(G+"His Email Is : "+info['obfuscated_email'])
    except KeyError:
        # no 'obfuscated_email' key in the response
        pass
    try:
        if info['obfuscated_phone']:
            print(G+"His Phone Number Is: "+info['obfuscated_phone'])
    except KeyError:
        # no 'obfuscated_phone' key in the response
        pass
emailPhoneIssue(info)
print(R+"-"*40)
print("\n")
```
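
The lookup script above sends one POST and assumes the answer is always JSON; a small hardening sketch (the function name and timeout are ours, not the repo's):

```python
# a hedged sketch, not from the repo: add a timeout, surface HTTP errors,
# and guard the JSON decode instead of letting resp.json() raise opaquely
import requests

def lookup(url, headers, cookies, data, timeout=10):
    resp = requests.post(url, headers=headers, cookies=cookies,
                         data=data, timeout=timeout)
    resp.raise_for_status()   # turn 4xx/5xx into an explicit exception
    try:
        return resp.json()
    except ValueError:        # body was not JSON (e.g. an HTML block page)
        return None
```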

path: /D_D/getPDF.py | extension: py | length_bytes: 538
repo_name: bivasmaiti26/census2001auto | branch_name: refs/heads/master
blob_id: d7590593aba400293172eda8319a4f422cb65f7f | directory_id: 6655650f052a3b140f02de41b7393a24cd23747a
content_id: fb3b0e8242ff5eb0b62cc71c0b0243156728f832
snapshot_id: 5238286934c4bd82405aaf3b2830bfebe6ce2162 | revision_id: 6c1a2d7e96d80d2b7e75d7a1096611bc4ef9b844
detected_licenses: [] | license_type: no_license
visit_date: 2020-04-22T18:48:40.621253 | revision_date: 2018-01-08T15:10:47 | committer_date: 2018-01-08T15:10:47
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
authors: ["preetskhalsa97@gmail.com"] | author_id: preetskhalsa97@gmail.com
content:

```python
import urllib
def downloadPDF(state_code):
    # getting the string for state code
if (state_code<10):
string_state='0'+str(state_code)
else:
string_state=str(state_code)
i=1
while True:
#getting the string for district code
if (i<10):
string_district='0'+str(i)
else:
string_district=str(i)
try:
urllib.urlretrieve ("http://censusindia.gov.in/Dist_File/datasheet-"+string_state+string_district+".pdf","district"+string_district+".pdf")
        except Exception:
            # the next district's datasheet is missing; assume we reached the end
            break
i=i+1
#Daman & Diu
code=25
downloadPDF(code)
```
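
`urllib.urlretrieve` is Python 2; on Python 3 the same downloader could look like the sketch below (function name ours; it assumes the server answers missing districts with an HTTP error):

```python
# Python 3 sketch of the downloader above; urllib.request replaces urllib
import urllib.error
import urllib.request

def download_pdfs(state_code):
    state = str(state_code).zfill(2)            # zero-pad, like the if/else above
    district_num = 1
    while True:
        district = str(district_num).zfill(2)
        url = ("http://censusindia.gov.in/Dist_File/datasheet-"
               + state + district + ".pdf")
        try:
            urllib.request.urlretrieve(url, "district" + district + ".pdf")
        except urllib.error.URLError:
            break                               # no more districts for this state
        district_num += 1

download_pdfs(25)  # Daman & Diu, as in the original
```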

path: /manage.py | extension: py | length_bytes: 687
repo_name: Smorta/Planning-Prom | branch_name: refs/heads/main
blob_id: 107f871602e5d1be87f6b15c3d28bfc62bf8fb3b | directory_id: 696bfb83e741b0ada656c633038098c5a4dcc78a
content_id: 234ce79ac54748a952e4022175d094416f017afd
snapshot_id: dee5cb4186f27de596ee375bce78afd6243fb695 | revision_id: 169c62f911ef3d3a6f32c949dad4592ca1072e38
detected_licenses: [] | license_type: no_license
visit_date: 2023-06-10T01:48:07.473233 | revision_date: 2021-07-02T08:57:01 | committer_date: 2021-07-02T08:57:01
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
authors: ["noreply@github.com"] | author_id: Smorta.noreply@github.com
content:

```python
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'GestioPro.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
```

path: /Functions/Python/MIND/__init__.py | extension: py | length_bytes: 98
repo_name: hsokooti/RegUn | branch_name: refs/heads/master
blob_id: 4728245652c5aa72351604934b2ca40e9612e261 | directory_id: 1e44f7826fc872a480400b9f6f4658fc48d688ab
content_id: 2eb6e9d1916786ecefe974d8ea5c95504295f414
snapshot_id: 220e01f5957a81efb39b1351c5d1ebbc44622f82 | revision_id: f029d61e1146af2992ae71d0f59c6e881db95aad
detected_licenses: [] | license_type: no_license
visit_date: 2022-05-04T05:10:38.485534 | revision_date: 2022-04-21T21:42:49 | committer_date: 2022-04-21T21:42:49
github_id: 151,310,972 | star_events_count: 25 | fork_events_count: 5
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
authors: ["h.sokooti_oskooyi@lumc.nl"] | author_id: h.sokooti_oskooyi@lumc.nl
content:

```python
from .pyMIND import *
from .search_region import *
__version__ = "0.1.0"
__author__ = "hsokooti"
```

path: /flea/translate.py | extension: py | length_bytes: 1,045
repo_name: chudym2/flea-pipeline | branch_name: refs/heads/master
blob_id: 7ca78d0c38317f6f641d8132aba60941648200ba | directory_id: de0cabc94e287cec4ff07c186cc6c708eab168e2
content_id: 14576b0fd88fb755aa69c497aa33c9551ea9cca9
snapshot_id: 453536af2838c6f25c30765f4f4465da5a9d6591 | revision_id: 2bb29d793a1c35a2f344cca70d1f3b768a5760cc
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2022-03-18T13:51:30.068639 | revision_date: 2018-08-31T20:24:53 | committer_date: 2018-08-31T20:24:53
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
authors: ["kemal@kemaleren.com"] | author_id: kemal@kemaleren.com
content:

```python
#!/usr/bin/env python
"""
Translate DNA reads from a fasta file.
"""
import sys
import click
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC, Gapped
from flea.util import insert_gaps
def _translate(record, gapped=False):
result = record[:]
if gapped:
translated = record.seq.ungap('-').translate()
result.seq = Seq(insert_gaps(str(record.seq), str(translated), '---', '-'),
alphabet=Gapped(IUPAC.IUPACProtein))
else:
result.seq = record.seq.translate()
return result
def translate(infile, outfile, gapped=False):
alphabet=IUPAC.ambiguous_dna
if gapped:
alphabet = Gapped(alphabet)
records = SeqIO.parse(infile, "fasta", alphabet=alphabet)
result = (_translate(r, gapped) for r in records)
SeqIO.write(result, outfile, "fasta")
@click.command()
@click.option('-g', '--gapped', is_flag=True, help='allow gaps')
def main(gapped):
translate(sys.stdin, sys.stdout, gapped)
if __name__ == "__main__":
main()
```
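
`Bio.Alphabet` (and the `alphabet=` argument to `SeqIO.parse`) was removed in Biopython 1.78, so the module above only runs against older Biopython; a minimal sketch of the ungapped path on current Biopython:

```python
# sketch for Biopython >= 1.78: no alphabets; gap handling omitted
from Bio import SeqIO

def translate_ungapped(infile, outfile):
    out = []
    for rec in SeqIO.parse(infile, "fasta"):
        copy = rec[:]                  # shallow copy keeps id/description, like record[:] above
        copy.seq = rec.seq.translate()
        out.append(copy)
    SeqIO.write(out, outfile, "fasta")
```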

path: /gans/cgan.py | extension: py | length_bytes: 7,594
repo_name: er-Bot/gans | branch_name: refs/heads/main
blob_id: ff8d820c965642aa2c3f657c8f50044852250de7 | directory_id: 95dd3f021a03d408e93b40f498e58c7b07abb796
content_id: 4a16c4d40226b0c455b3cbcfd51a9789fdd5e258
snapshot_id: 18e1c46352c7b2d7591eb3ca0e7335bb97ea5e69 | revision_id: fc19446750e10896dd3b1746b0ccb3c4d3b5ed8d
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2023-03-15T09:04:20.926340 | revision_date: 2021-03-09T00:57:57 | committer_date: 2021-03-09T00:57:57
github_id: 345,826,581 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
authors: ["jammalenneiym@gmail.com"] | author_id: jammalenneiym@gmail.com
content:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm.auto import tqdm
from torchvision.utils import make_grid
import matplotlib.pyplot as plt
__all__ = ["Discriminator", "Generator", "CGAN"]
criterion = nn.BCEWithLogitsLoss()
hidden_dim = 128
class Discriminator(nn.Module):
def __init__(self, in_dim):
super(Discriminator, self).__init__()
self.model = nn.Sequential(
nn.Linear(in_dim, 4 * hidden_dim),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(4 * hidden_dim, 2 * hidden_dim),
nn.Dropout(0.4),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(2 * hidden_dim, hidden_dim),
nn.Dropout(0.4),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(hidden_dim, 1)
)
def forward(self, x, y):
d_in = torch.cat((x, y), -1)
return self.model(d_in)
class Generator(nn.Module):
def __init__(self, in_dim, out_dim):
super(Generator, self).__init__()
self.model = nn.Sequential(
nn.Linear(in_dim, hidden_dim),
nn.BatchNorm1d(hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(hidden_dim, 2 * hidden_dim),
nn.BatchNorm1d(2 * hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(2 * hidden_dim, 4 * hidden_dim),
nn.BatchNorm1d(4 * hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(4 * hidden_dim, 8 * hidden_dim),
nn.BatchNorm1d(8 * hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(8 * hidden_dim, out_dim),
nn.Sigmoid()
)
def forward(self, z, y):
g_in = torch.cat((z, y), -1)
return self.model(g_in)
class CGAN:
def __init__(self):
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# img_size of the form (1, w, h) e.g. for MNIST it's (1, 28, 28)
def setup(self, z_dim, n_classes, img_size, lr, betas):
self.z_dim = z_dim
self.n_classes = n_classes
self.img_size = img_size
        assert len(img_size) == 3, 'img_size should be of the form (channels, width, height)'
x_dim = img_size[1] * img_size[2]
self.generator = Generator(z_dim + n_classes, x_dim).to(self.device)
self.discriminator = Discriminator(x_dim + n_classes).to(self.device)
self.g_opt = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=betas)
self.d_opt = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=betas)
self.d_loss_history = []
self.g_loss_history = []
self.z = self.noise(100)
self.start_epoch = 0
def load_state(self, path):
state = torch.load(path, map_location=self.device)
self.z_dim = state['z_dim']
self.n_classes = state['n_classes']
self.img_size = state['img_size']
self.generator = state['gen']
self.discriminator = state['disc']
self.g_opt = state['g_opt']
self.d_opt = state['d_opt']
self.d_loss_history = state['d_loss_history'].tolist()
self.g_loss_history = state['g_loss_history'].tolist()
self.z = state['z']
self.start_epoch = state['start_epoch']
def noise(self, n):
return torch.randn(n, self.z_dim, device=self.device)
def show_images(self, images, figsize=(10, 10), nrow=10, show=False, path='.'):
img_unflat = images.detach().cpu().view(-1, *self.img_size)
img_grid = make_grid(img_unflat, nrow=nrow)
plt.figure(figsize=figsize)
plt.imshow(img_grid.permute(1, 2, 0).squeeze())
if not show:
plt.savefig(path)
else:
plt.show()
plt.close(None)
def get_discriminator_loss(self, real, labels, batch_size):
noise = self.noise(batch_size)
fake_image_gen = self.generator(noise, labels)
fake_image_pred = self.discriminator(fake_image_gen.detach(), labels)
fake_image_loss = criterion(fake_image_pred, torch.zeros_like(fake_image_pred))
real_image_pred = self.discriminator(real, labels)
real_image_loss = criterion(real_image_pred, torch.ones_like(real_image_pred))
disc_loss = (fake_image_loss + real_image_loss) / 2
return disc_loss
def get_generator_loss(self, labels, batch_size):
noise = self.noise(batch_size)
fake_image_gen = self.generator(noise, labels)
fake_image_pred = self.discriminator(fake_image_gen, labels)
gen_loss = criterion(fake_image_pred, torch.ones_like(fake_image_pred))
return gen_loss
def one_hot(self, labels):
return F.one_hot(labels, self.n_classes).to(self.device)
def train(self, dataloader, n_epochs, display_step=1, save_step=50, path='.'):
for epoch in range(self.start_epoch, n_epochs + 1):
for real, labels in tqdm(dataloader):
batch_size = len(real)
real = real.view(batch_size, -1).to(self.device) # flatten
y = self.one_hot(labels)
""" Update discriminator """
self.d_opt.zero_grad()
disc_loss = self.get_discriminator_loss(real, y, batch_size)
disc_loss.backward()
self.d_opt.step()
self.d_loss_history += [disc_loss.item()]
""" Update generator """
self.g_opt.zero_grad()
gen_loss = self.get_generator_loss(y, batch_size)
gen_loss.backward()
self.g_opt.step()
self.g_loss_history += [gen_loss.item()]
### Some visuals ###
if epoch % display_step == 0:
print(f"Epoch {epoch}: G_loss = {self.g_loss_history[-1]}, D_loss = {self.d_loss_history[-1]}")
yy = self.one_hot(torch.arange(0, 100, 1)//10)
generated = self.generator(self.z, yy)
self.show_images(generated, path=path+'/sample-%04d.png'%epoch)
# loss functions
step_bins = 20
n_example = (len(self.d_loss_history) // step_bins) * step_bins
plt.clf()
plt.figure(figsize=(10, 5))
plt.plot(
range(n_example // step_bins),
torch.Tensor(self.g_loss_history[:n_example]).view(-1, step_bins).mean(1),
label="Generator loss"
)
plt.plot(
range(n_example // step_bins),
torch.Tensor(self.d_loss_history[:n_example]).view(-1, step_bins).mean(1),
label="Discriminator loss"
)
plt.legend()
plt.savefig(path+'/loss-%04d.png'%epoch)
plt.close(None)
### Model saving ###
if epoch % save_step == 0:
state = {
'z_dim': self.z_dim,
'n_classes': self.n_classes,
'img_size': self.img_size,
'gen': self.generator,
'disc': self.discriminator,
'd_opt': self.d_opt,
'g_opt': self.g_opt,
'd_loss_history': torch.Tensor(self.d_loss_history),
'g_loss_history': torch.Tensor(self.g_loss_history),
'z': self.z,
'start_epoch': epoch + 1,
}
torch.save(state, path+'/cgan-%04d.h5'%epoch)
```
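
One plausible way to drive this class (a sketch, not from the repo): the `(1, 28, 28)` comment in `setup` suggests MNIST, and the display step's `arange(0, 100) // 10` assumes 10 classes; `z_dim` and the optimizer settings below are illustrative.

```python
# illustrative usage sketch; assumes the file above is importable as gans.cgan
import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

from gans.cgan import CGAN  # assumed import path for the module above

mnist = datasets.MNIST(root="data", download=True,
                       transform=transforms.ToTensor())  # values in [0, 1], matching the Sigmoid output
loader = DataLoader(mnist, batch_size=128, shuffle=True)

gan = CGAN()
gan.setup(z_dim=64, n_classes=10, img_size=(1, 28, 28),
          lr=2e-4, betas=(0.5, 0.999))
gan.train(loader, n_epochs=200, display_step=10, save_step=50, path=".")
```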

path: /lecture5/dynamic_member_variable.py | extension: py | length_bytes: 164
repo_name: wduan1025/python-intro | branch_name: refs/heads/master
blob_id: 1e56c59f16fa254f98929f77a0edee61254cf15d | directory_id: 611c90d319a127c6f42135dd2762ffe79fe2a492
content_id: bb181439860048545828d7077545a07816000df4
snapshot_id: 8d84f405653a5a599fcca1b223bbe82a2cb55769 | revision_id: 8628d8fa41182a2f69c7d75c3feeffa8d0e48090
detected_licenses: [] | license_type: no_license
visit_date: 2022-04-15T12:26:31.432536 | revision_date: 2020-03-29T02:02:33 | committer_date: 2020-03-29T02:02:33
github_id: 247,553,491 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
authors: ["weiduan1025@gmail.com"] | author_id: weiduan1025@gmail.com
content:

```python
class Animal:
    pass

a = Animal()
b = Animal()
a.name = "Charles"       # instance attribute: set on a only
print(a.name)
# print(b.name)          # would raise AttributeError: b has no instance attribute 'name'
Animal.name = "Charles"  # class attribute: shared by all instances
print(b.name)            # attribute lookup falls back to the class attribute
c = Animal()
print(c.name)            # new instances see the class attribute too
```

path: /RegressionVisualizer/manage.py | extension: py | length_bytes: 838
repo_name: scotteskridge/RegressionApp | branch_name: refs/heads/master
blob_id: f3b344d9bd81f498554471e88f34378fee094fa7 | directory_id: 5a5e0a01efa6ef0961992e53bb4f64840f93150b
content_id: b5db558ef481979ffecd909114ebd0e5bdf372b6
snapshot_id: ed059e3205ab54061129779404345b55c0dee75c | revision_id: 68932a9c94235a1e8bd6cd71a765b545f2266189
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-19T20:48:13.495541 | revision_date: 2017-04-25T02:39:49 | committer_date: 2017-04-25T02:39:56
github_id: 88,555,025 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
authors: ["scott.eskridge@gmail.com"] | author_id: scott.eskridge@gmail.com
content:

```python
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "RegressionVisualizer.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
print(sys.argv)
execute_from_command_line(sys.argv)
```

path: /createDF.py | extension: py | length_bytes: 1,510
repo_name: tkShir/TOPIX-Price-Signal-Analysis | branch_name: refs/heads/master
blob_id: ea09b50bd4b3672b1c0b6ad53faf51b40e8941a7 | directory_id: eaeacaef534ee2bd8f33928b615134037868b660
content_id: 9f696492a8566c368456e813c6d6dd33ad9dc2a7
snapshot_id: 42d533a0afe44872c43fa4ee02e8b1abe2628c20 | revision_id: ad80354573590b7ce4236a7bf28dbb3aa498c9d0
detected_licenses: [] | license_type: no_license
visit_date: 2021-08-16T23:34:15.138854 | revision_date: 2020-05-06T03:52:26 | committer_date: 2020-05-06T03:52:26
github_id: 176,823,430 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
authors: ["takashirono@gmail.com"] | author_id: takashirono@gmail.com
content:

```python
import pandas as pd
def create_df():
pd.set_option('display.max_columns', 500)
index_list = ["GSPC", "DJI", "IXIC", "STOXX50E", "RUT", "N225"]
fx_df = pd.read_csv("FX.csv", encoding ="shift-jis", header = 2)
fx_df["Date"] = pd.to_datetime(fx_df["Date"])
#fx_df = fx_df.add_prefix("")
#print(fx_df.head())
df_lst = []
for index in index_list:
target = index + ".csv"
target_df =pd.read_csv(str(target), header = 0)
target_df["Date"] = pd.to_datetime(target_df["Date"])
target_df.columns = target_df.columns[:1].union((index + " " +target_df.columns[1:]), sort = False)
df_lst.append(target_df)
res_df = fx_df
for i in range(len(df_lst)):
res_df = pd.merge(df_lst[i], res_df, on="Date")
res_df.to_csv("result.csv",index=False)
def modifyDF():
res_df = pd.read_csv("result.csv", header = 0)
price_increase = [[0]]
prev_close = 0
cur_close = 0
for row in range(len(res_df)):
if row == 0:
prev_close = res_df["N225 Close"][row]
else:
cur_close = res_df["N225 Close"][row]
if prev_close < cur_close:
price_increase.append([1])
else:
price_increase.append([0])
prev_close = res_df["N225 Close"][row]
    answer = pd.DataFrame(price_increase, columns=["Answer"])
    res_df = res_df.join(answer)
res_df.to_csv("result.csv",index=False)
create_df()
modifyDF()
```

path: /efs-server/dropbox/dropbox.py | extension: py | length_bytes: 15,338
repo_name: hicksmatt/EFS | branch_name: refs/heads/master
blob_id: 0bdb9d4f4e95b0c5eefe477b1bd396427419e433 | directory_id: 28686b6c3670539ae00f80ced4533a41dc59d165
content_id: acd0644a74f13bd76da9970855e862babf72d842
snapshot_id: 61eb326246ff114227d42a2ccf5606e844bc7e3c | revision_id: c101f1bdb07091b2ae82f22dea0f9d0182be2919
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-10T10:02:31.266934 | revision_date: 2015-12-04T17:51:55 | committer_date: 2015-12-04T17:51:55
github_id: 47,127,610 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
authors: ["mhicks34@gatech.edu"] | author_id: mhicks34@gatech.edu
content:

```python
__all__ = [
'Dropbox',
]
# TODO(kelkabany): We need to auto populate this as done in the v1 SDK.
__version__ = '3.42'
import contextlib
import json
import logging
import os
import random
import six
import time
import requests
#from . import babel_serializers
import babel_serializers
from .base import DropboxBase
from .exceptions import (
ApiError,
AuthError,
BadInputError,
HttpError,
InternalServerError,
RateLimitError,
)
from .session import pinned_session
class RouteResult(object):
"""The successful result of a call to a route."""
def __init__(self, obj_result, http_resp=None):
"""
:param str obj_result: The result of a route not including the binary
payload portion, if one exists. Must be serialized JSON.
:param requests.models.Response http_resp: A raw HTTP response. It will
be used to stream the binary-body payload of the response.
"""
assert isinstance(obj_result, six.string_types), \
'obj_result: expected string, got %r' % type(obj_result)
if http_resp is not None:
assert isinstance(http_resp, requests.models.Response), \
'http_resp: expected requests.models.Response, got %r' % \
type(http_resp)
self.obj_result = obj_result
self.http_resp = http_resp
class RouteErrorResult(object):
"""The error result of a call to a route."""
def __init__(self, obj_result):
"""
:param str obj_result: The result of a route not including the binary
payload portion, if one exists.
"""
self.obj_result = obj_result
class Dropbox(DropboxBase):
"""
Use this to make requests to the Dropbox API.
"""
API_VERSION = '2'
DEFAULT_DOMAIN = '.dropboxapi.com'
# Host for RPC-style routes.
HOST_API = 'api'
# Host for upload and download-style routes.
HOST_CONTENT = 'content'
# Host for longpoll routes.
HOST_NOTIFY = 'notify'
# Download style means that the route argument goes in a Dropbox-API-Arg
# header, and the result comes back in a Dropbox-API-Result header. The
# HTTP response body contains a binary payload.
ROUTE_STYLE_DOWNLOAD = 'download'
# Upload style means that the route argument goes in a Dropbox-API-Arg
# header. The HTTP request body contains a binary payload. The result
# comes back in a Dropbox-API-Result header.
ROUTE_STYLE_UPLOAD = 'upload'
# RPC style means that the argument and result of a route are contained in
# the HTTP body.
ROUTE_STYLE_RPC = 'rpc'
def __init__(self,
oauth2_access_token,
max_connections=8,
max_retries_on_error=4,
user_agent=None,
proxies=None):
"""
:param str oauth2_access_token: OAuth2 access token for making client
requests.
:param int max_connections: Maximum connection pool size.
:param int max_retries_on_error: On 5xx errors, the number of times to
retry.
:param str user_agent: The user agent to use when making requests. This
helps us identify requests coming from your application. We
recommend you use the format "AppName/Version". If set, we append
"/OfficialDropboxPythonV2SDK/__version__" to the user_agent,
:param dict proxies: See the `requests module
<http://docs.python-requests.org/en/latest/user/advanced/#proxies>`_
for more details.
"""
assert len(oauth2_access_token) > 0, \
'OAuth2 access token cannot be empty.'
self._oauth2_access_token = oauth2_access_token
# We only need as many pool_connections as we have unique hostnames.
self._session = pinned_session(pool_maxsize=max_connections)
if proxies:
self._session.proxies = proxies
self._max_retries_on_error = max_retries_on_error
base_user_agent = 'OfficialDropboxPythonV2SDK/' + __version__
if user_agent:
self._user_agent = '{}/{}'.format(user_agent, base_user_agent)
else:
self._user_agent = base_user_agent
self._logger = logging.getLogger('dropbox')
self._domain = os.environ.get('DROPBOX_DOMAIN', Dropbox.DEFAULT_DOMAIN)
self._api_hostname = os.environ.get(
'DROPBOX_API_HOST', 'api' + self._domain)
self._api_content_hostname = os.environ.get(
'DROPBOX_API_CONTENT_HOST', 'content' + self._domain)
self._api_notify_hostname = os.environ.get(
'DROPBOX_API_NOTIFY_HOST', 'notify' + self._domain)
self._host_map = {self.HOST_API: self._api_hostname,
self.HOST_CONTENT: self._api_content_hostname,
self.HOST_NOTIFY: self._api_notify_hostname}
def request(self,
host,
route_name,
route_style,
arg_data_type,
result_data_type,
error_data_type,
request_arg,
request_binary):
"""
Makes a request to the Dropbox API and in the process validates that
the route argument and result are the expected data types. The
request_arg is converted to JSON based on the arg_data_type. Likewise,
the response is deserialized from JSON and converted to an object based
on the {result,error}_data_type.
:param host: The Dropbox API host to connect to.
:param route_name: The name of the route to invoke.
:param route_style: The style of the route.
:type arg_data_type: :class:`.datatypes.babel_validators.DataType`
:type result_data_type: :class:`.datatypes.babel_validators.DataType`
:type error_data_type: :class:`.datatypes.babel_validators.DataType`
:param request_arg: Argument for the route that conforms to the
validator specified by arg_data_type.
:param request_binary: String or file pointer representing the binary
payload. Use None if there is no binary payload.
:return: The route's result.
"""
serialized_arg = babel_serializers.json_encode(arg_data_type,
request_arg)
res = self.request_json_string_with_retry(host,
route_name,
route_style,
serialized_arg,
request_binary)
decoded_obj_result = json.loads(res.obj_result)
if isinstance(res, RouteResult):
returned_data_type = result_data_type
obj = decoded_obj_result
elif isinstance(res, RouteErrorResult):
returned_data_type = error_data_type
obj = decoded_obj_result['error']
user_message = decoded_obj_result.get('user_message')
user_message_text = user_message and user_message.get('text')
user_message_locale = user_message and user_message.get('locale')
else:
raise AssertionError('Expected RouteResult or RouteErrorResult, '
'but res is %s' % type(res))
deserialized_result = babel_serializers.json_compat_obj_decode(
returned_data_type, obj, strict=False)
if isinstance(res, RouteErrorResult):
raise ApiError(deserialized_result,
user_message_text,
user_message_locale)
elif route_style == self.ROUTE_STYLE_DOWNLOAD:
return (deserialized_result, res.http_resp)
else:
return deserialized_result
def request_json_object(self,
host,
route_name,
route_style,
request_arg,
request_binary):
"""
Makes a request to the Dropbox API, taking a JSON-serializable Python
object as an argument, and returning one as a response.
:param host: The Dropbox API host to connect to.
:param route_name: The name of the route to invoke.
:param route_style: The style of the route.
:param str request_arg: A JSON-serializable Python object representing
the argument for the route.
:param request_binary: String or file pointer representing the binary
payload. Use None if there is no binary payload.
:return: The route's result as a JSON-serializable Python object.
"""
serialized_arg = json.dumps(request_arg)
res = self.request_json_string_with_retry(host,
route_name,
route_style,
serialized_arg,
request_binary)
# This can throw a ValueError if the result is not deserializable,
# but that would be completely unexpected.
deserialized_result = json.loads(res.obj_result)
if isinstance(res, RouteResult) and res.http_resp is not None:
return (deserialized_result, res.http_resp)
else:
return deserialized_result
def request_json_string_with_retry(self,
host,
route_name,
route_style,
request_json_arg,
request_binary):
"""
See :meth:`request_json_object` for description of parameters.
:param request_json_arg: A string representing the serialized JSON
argument to the route.
"""
attempt = 0
while True:
self._logger.info('Request to %s', route_name)
try:
return self.request_json_string(host,
route_name,
route_style,
request_json_arg,
request_binary)
except (InternalServerError, RateLimitError) as e:
if isinstance(e, InternalServerError):
# Do not count a rate limiting error as an attempt
attempt += 1
if attempt <= self._max_retries_on_error:
# Use exponential backoff
backoff = 2**attempt * random.random()
self._logger.info('HttpError status_code=%s: '
'Retrying in %.1f seconds',
e.status_code, backoff)
time.sleep(backoff)
else:
raise
def request_json_string(self,
host,
func_name,
route_style,
request_json_arg,
request_binary):
"""
See :meth:`request_json_string_with_retry` for description of
parameters.
"""
if host not in self._host_map:
raise ValueError('Unknown value for host: %r' % host)
# Fully qualified hostname
fq_hostname = self._host_map[host]
url = self._get_route_url(fq_hostname, func_name)
headers = {'User-Agent': self._user_agent}
if host != self.HOST_NOTIFY:
headers['Authorization'] = 'Bearer %s' % self._oauth2_access_token
# The contents of the body of the HTTP request
body = None
# Whether the response should be streamed incrementally, or buffered
# entirely. If stream is True, the caller is responsible for closing
# the HTTP response.
stream = False
if route_style == self.ROUTE_STYLE_RPC:
headers['Content-Type'] = 'application/json'
body = request_json_arg
elif route_style == self.ROUTE_STYLE_DOWNLOAD:
headers['Dropbox-API-Arg'] = request_json_arg
stream = True
elif route_style == self.ROUTE_STYLE_UPLOAD:
headers['Content-Type'] = 'application/octet-stream'
headers['Dropbox-API-Arg'] = request_json_arg
body = request_binary
else:
raise ValueError('Unknown operation style: %r' % route_style)
r = self._session.post(url,
headers=headers,
data=body,
stream=stream,
verify=True,
)
if r.status_code >= 500:
raise InternalServerError(r.status_code, r.text)
elif r.status_code == 400:
raise BadInputError(r.text)
elif r.status_code == 401:
assert r.headers.get('content-type') == 'application/json', (
'Expected content-type to be application/json, got %r' %
r.headers.get('content-type'))
raise AuthError(r.json())
elif r.status_code == 429:
# TODO(kelkabany): Use backoff if provided in response.
raise RateLimitError()
elif 200 <= r.status_code <= 299:
if route_style == self.ROUTE_STYLE_DOWNLOAD:
raw_resp = r.headers['dropbox-api-result']
else:
assert r.headers.get('content-type') == 'application/json', (
'Expected content-type to be application/json, got %r' %
r.headers.get('content-type'))
raw_resp = r.content.decode('utf-8')
if route_style == self.ROUTE_STYLE_DOWNLOAD:
return RouteResult(raw_resp, r)
else:
return RouteResult(raw_resp)
elif r.status_code in (403, 404, 409):
raw_resp = r.content.decode('utf-8')
return RouteErrorResult(raw_resp)
else:
raise HttpError(r.status_code, r.text)
def _get_route_url(self, hostname, route_name):
"""Returns the URL of the route.
:param str hostname: Hostname to make the request to.
:param str route_name: Name of the route.
:rtype: str
"""
return 'https://{hostname}/{version}/{route_name}'.format(
hostname=hostname,
version=Dropbox.API_VERSION,
route_name=route_name,
)
def _save_body_to_file(self, download_path, http_resp, chunksize=2**16):
"""
Saves the body of an HTTP response to a file.
:param str download_path: Local path to save data to.
:param http_resp: The HTTP response whose body will be saved.
:type http_resp: :class:`requests.models.Response`
:rtype: None
"""
with open(download_path, 'wb') as f:
with contextlib.closing(http_resp):
for c in http_resp.iter_content(chunksize):
f.write(c)
```
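
The loop in `request_json_string_with_retry` is a general pattern: exponential backoff with full jitter, re-raising once the retry budget is spent. A standalone sketch of just that pattern:

```python
# standalone sketch of the SDK's retry policy (names ours, not Dropbox's)
import random
import time

def retry_with_backoff(fn, max_retries=4, retriable=(Exception,)):
    attempt = 0
    while True:
        try:
            return fn()
        except retriable:
            attempt += 1
            if attempt > max_retries:
                raise
            # full jitter: sleep anywhere in [0, 2**attempt) seconds
            time.sleep(2 ** attempt * random.random())
```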

path: /lib/python/ZPublisher/Client.py | extension: py | length_bytes: 18,145
repo_name: OS2World/APP-SERVER-Zope | branch_name: refs/heads/master
blob_id: 8469347fb48f63964d990558280d45cfb929ffc9 | directory_id: c237dfae82e07e606ba9385b336af8173d01b251
content_id: dba20da517921245ef45d6e504060b5b852fa055
snapshot_id: 242e0eec294bfb1ac4e6fa715ed423dd2b3ea6ff | revision_id: dedc799bd7eda913ffc45da43507abe2fa5113be
detected_licenses: ["ZPL-2.0"] | license_type: permissive
visit_date: 2020-05-09T18:29:47.818789 | revision_date: 2014-11-07T01:48:29 | committer_date: 2014-11-07T01:48:42
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
authors: ["martin@os2world.com"] | author_id: martin@os2world.com
content:

```python
#!/bin/sh
""":"
exec python $0 ${1+"$@"}
"""
#"
##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Bobo call interface
This module provides tools for accessing web objects as if they were
functions or objects with methods. It also provides a simple call function
that allows one to simply make a single web request.
Function -- Function-like objects that return both header and body
data when called.
Object -- Treat a URL as a web object with methods
call -- Simple interface to call a remote function.
The module also provides a command-line interface for calling objects.
"""
__version__='$Revision: 1.45 $'[11:-2]
import sys, re, socket, mimetools
from httplib import HTTP
from os import getpid
from time import time
from random import random
from base64 import encodestring
from urllib import urlopen, quote
from types import FileType, ListType, DictType, TupleType
from string import translate, maketrans
from urlparse import urlparse
class Function:
username=None
password=None
method=None
timeout=60
def __init__(self,url,
arguments=(),method=None,username=None,password=None,
timeout=None,
**headers):
while url[-1:]=='/': url=url[:-1]
self.url=url
self.headers=headers
if not headers.has_key('Host') and not headers.has_key('host'):
headers['Host']=urlparse(url)[1]
self.func_name=url[url.rfind('/')+1:]
self.__dict__['__name__']=self.func_name
self.func_defaults=()
self.args=arguments
if method is not None: self.method=method
if username is not None: self.username=username
if password is not None: self.password=password
if timeout is not None: self.timeout=timeout
mo = urlregex.match(url)
if mo is not None:
host,port,rurl=mo.group(1,2,3)
if port: port=int(port[1:])
else: port=80
self.host=host
self.port=port
rurl=rurl or '/'
self.rurl=rurl
else: raise ValueError, url
def __call__(self,*args,**kw):
method=self.method
if method=='PUT' and len(args)==1 and not kw:
query=[args[0]]
args=()
else:
query=[]
for i in range(len(args)):
try:
k=self.args[i]
if kw.has_key(k): raise TypeError, 'Keyword arg redefined'
kw[k]=args[i]
except IndexError: raise TypeError, 'Too many arguments'
headers={}
for k, v in self.headers.items(): headers[translate(k,dashtrans)]=v
method=self.method
if headers.has_key('Content-Type'):
content_type=headers['Content-Type']
if content_type=='multipart/form-data':
return self._mp_call(kw)
else:
content_type=None
if not method or method=='POST':
for v in kw.values():
if hasattr(v,'read'): return self._mp_call(kw)
can_marshal=type2marshal.has_key
for k,v in kw.items():
t=type(v)
if can_marshal(t): q=type2marshal[t](k,v)
else: q='%s=%s' % (k,quote(v))
query.append(q)
url=self.rurl
if query:
query='&'.join(query)
method=method or 'POST'
if method == 'PUT':
headers['Content-Length']=str(len(query))
if method != 'POST':
url="%s?%s" % (url,query)
query=''
elif not content_type:
headers['Content-Type']='application/x-www-form-urlencoded'
headers['Content-Length']=str(len(query))
else: method=method or 'GET'
if (self.username and self.password and
not headers.has_key('Authorization')):
headers['Authorization']=(
"Basic %s" %
encodestring('%s:%s' % (self.username,self.password)).replace(
'\012','')
)
try:
h=HTTP()
h.connect(self.host, self.port)
h.putrequest(method, self.rurl)
for hn,hv in headers.items():
h.putheader(translate(hn,dashtrans),hv)
h.endheaders()
if query: h.send(query)
ec,em,headers=h.getreply()
response =h.getfile().read()
except:
raise NotAvailable, RemoteException(
NotAvailable,sys.exc_info()[1],self.url,query)
if (ec - (ec % 100)) == 200:
return (headers,response)
self.handleError(query, ec, em, headers, response)
def handleError(self, query, ec, em, headers, response):
try: v=headers.dict['bobo-exception-value']
except: v=ec
try: f=headers.dict['bobo-exception-file']
except: f='Unknown'
try: l=headers.dict['bobo-exception-line']
except: l='Unknown'
try: t=exceptmap[headers.dict['bobo-exception-type']]
except:
if ec >= 400 and ec < 500: t=NotFound
elif ec == 503: t=NotAvailable
else: t=ServerError
raise t, RemoteException(t,v,f,l,self.url,query,ec,em,response)
def _mp_call(self,kw,
type2suffix={
type(1.0): ':float',
type(1): ':int',
type(1L): ':long',
type([]): ':list',
type(()): ':tuple',
}
):
        # Call a function using the file-upload protocol
# Add type markers to special values:
d={}
special_type=type2suffix.has_key
for k,v in kw.items():
if ':' not in k:
t=type(v)
if special_type(t): d['%s%s' % (k,type2suffix[t])]=v
else: d[k]=v
else: d[k]=v
rq=[('POST %s HTTP/1.0' % self.rurl),]
for n,v in self.headers.items():
rq.append('%s: %s' % (n,v))
if self.username and self.password:
c=encodestring('%s:%s' % (self.username,self.password)).replace('\012','')
rq.append('Authorization: Basic %s' % c)
rq.append(MultiPart(d).render())
rq='\r\n'.join(rq)
try:
sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock.connect((self.host,self.port))
sock.send(rq)
reply=sock.makefile('rb')
sock=None
line=reply.readline()
try:
[ver, ec, em] = line.split(None, 2)
except ValueError:
raise 'BadReply','Bad reply from server: '+line
if ver[:5] != 'HTTP/':
raise 'BadReply','Bad reply from server: '+line
ec=int(ec)
em=em.strip()
headers=mimetools.Message(reply,0)
response=reply.read()
finally:
if 0:
raise NotAvailable, (
RemoteException(NotAvailable,sys.exc_info()[1],
self.url,'<MultiPart Form>'))
if ec==200: return (headers,response)
self.handleError('', ec, em, headers, response)
class Object:
"""Surrogate object for an object on the web"""
username=None
password=None
method=None
timeout=None
special_methods= 'GET','POST','PUT'
def __init__(self, url,
method=None,username=None,password=None,
timeout=None,
**headers):
self.url=url
self.headers=headers
if not headers.has_key('Host') and not headers.has_key('host'):
headers['Host']=urlparse(url)[1]
if method is not None: self.method=method
if username is not None: self.username=username
if password is not None: self.password=password
if timeout is not None: self.timeout=timeout
def __getattr__(self, name):
if name in self.special_methods:
method=name
url=self.url
else:
method=self.method
url="%s/%s" % (self.url, name)
f=Function(url,
method=method,
username=self.username,
password=self.password,
timeout=self.timeout)
f.headers=self.headers
return f
def call(url,username=None, password=None, **kw):
return apply(Function(url,username=username, password=password), (), kw)
##############################################################################
# Implementation details below here
urlregex=re.compile(r'http://([^:/]+)(:[0-9]+)?(/.+)?', re.I)
dashtrans=maketrans('_','-')
def marshal_float(n,f): return '%s:float=%s' % (n,f)
def marshal_int(n,f): return '%s:int=%s' % (n,f)
def marshal_long(n,f):
value = '%s:long=%s' % (n, f)
if value[-1] == 'L':
value = value[:-1]
return value
def marshal_list(n,l,tname='list', lt=type([]), tt=type(())):
r=[]
for v in l:
t=type(v)
if t is lt or t is tt:
raise TypeError, 'Invalid recursion in data to be marshaled.'
r.append(marshal_whatever("%s:%s" % (n,tname) ,v))
return '&'.join(r)
def marshal_tuple(n,l):
return marshal_list(n,l,'tuple')
type2marshal={
type(1.0): marshal_float,
type(1): marshal_int,
type(1L): marshal_long,
type([]): marshal_list,
type(()): marshal_tuple,
}
def marshal_whatever(k,v):
try: q=type2marshal[type(v)](k,v)
except KeyError: q='%s=%s' % (k,quote(str(v)))
return q
def querify(items):
query=[]
for k,v in items: query.append(marshal_whatever(k,v))
return query and '&'.join(query) or ''
NotFound ='bci.NotFound'
InternalError='bci.InternalError'
BadRequest ='bci.BadRequest'
Unauthorized ='bci.Unauthorized'
ServerError ='bci.ServerError'
NotAvailable ='bci.NotAvailable'
exceptmap ={'AttributeError' :AttributeError,
'BadRequest' :BadRequest,
'EOFError' :EOFError,
'IOError' :IOError,
'ImportError' :ImportError,
'IndexError' :IndexError,
'InternalError' :InternalError,
'KeyError' :KeyError,
'MemoryError' :MemoryError,
'NameError' :NameError,
'NotAvailable' :NotAvailable,
'NotFound' :NotFound,
'OverflowError' :OverflowError,
'RuntimeError' :RuntimeError,
'ServerError' :ServerError,
'SyntaxError' :SyntaxError,
'SystemError' :SystemError,
'SystemExit' :SystemExit,
'TypeError' :TypeError,
'Unauthorized' :Unauthorized,
'ValueError' :ValueError,
'ZeroDivisionError':ZeroDivisionError}
class RemoteException:
def __init__(self,etype=None,evalue=None,efile=None,eline=None,url=None,
query=None,http_code=None,http_msg=None, http_resp=None):
"""Contains information about an exception which
occurs in a remote method call"""
self.exc_type =etype
self.exc_value =evalue
self.exc_file =efile
self.exc_line =eline
self.url =url
self.query =query
self.http_code =http_code
self.http_message=http_msg
self.response =http_resp
def __repr__(self):
return '%s (File: %s Line: %s)\n%s %s for %s' % (
self.exc_value,self.exc_file,self.exc_line,
self.http_code,self.http_message,self.url)
class MultiPart:
def __init__(self,*args):
c=len(args)
if c==1: name,val=None,args[0]
elif c==2: name,val=args[0],args[1]
else: raise ValueError, 'Invalid arguments'
h={'Content-Type': {'_v':''},
'Content-Transfer-Encoding': {'_v':''},
'Content-Disposition': {'_v':''},}
dt=type(val)
b=t=None
if dt==DictType:
t=1
b=self.boundary()
d=[]
h['Content-Type']['_v']='multipart/form-data; boundary=%s' % b
for n,v in val.items():
d.append(MultiPart(n,v))
elif (dt==ListType) or (dt==TupleType):
raise ValueError, 'Sorry, nested multipart is not done yet!'
elif dt==FileType or hasattr(val,'read'):
if hasattr(val,'name'):
fn=val.name.replace( '\\', '/')
fn=fn[(fn.rfind('/')+1):]
ex=(fn[(fn.rfind('.')+1):]).lower()
if self._extmap.has_key(ex):
ct=self._extmap[ex]
else:
ct=self._extmap['']
else:
fn=''
ct=self._extmap[None]
if self._encmap.has_key(ct): ce=self._encmap[ct]
else: ce=''
h['Content-Disposition']['_v'] ='form-data'
h['Content-Disposition']['name'] ='"%s"' % name
h['Content-Disposition']['filename']='"%s"' % fn
h['Content-Transfer-Encoding']['_v']=ce
h['Content-Type']['_v'] =ct
d=[]
l=val.read(8192)
while l:
d.append(l)
l=val.read(8192)
else:
h['Content-Disposition']['_v']='form-data'
h['Content-Disposition']['name']='"%s"' % name
d=[str(val)]
self._headers =h
self._data =d
self._boundary=b
self._top =t
def boundary(self):
return '%s_%s_%s' % (int(time()), getpid(), int(random()*1000000000))
def render(self):
h=self._headers
s=[]
if self._top:
for n,v in h.items():
if v['_v']:
s.append('%s: %s' % (n,v['_v']))
for k in v.keys():
if k != '_v': s.append('; %s=%s' % (k, v[k]))
s.append('\r\n')
p=[]
t=[]
b=self._boundary
for d in self._data: p.append(d.render())
t.append('--%s\n' % b)
t.append(('\n--%s\n' % b).join(p))
t.append('\n--%s--\n' % b)
t=''.join(t)
s.append('Content-Length: %s\r\n\r\n' % len(t))
s.append(t)
return ''.join(s)
else:
for n,v in h.items():
if v['_v']:
s.append('%s: %s' % (n,v['_v']))
for k in v.keys():
if k != '_v': s.append('; %s=%s' % (k, v[k]))
s.append('\r\n')
s.append('\r\n')
if self._boundary:
p=[]
b=self._boundary
for d in self._data: p.append(d.render())
s.append('--%s\n' % b)
s.append(('\n--%s\n' % b).join(p))
s.append('\n--%s--\n' % b)
return ''.join(s)
else:
return ''.join(s+self._data)
_extmap={'': 'text/plain',
'rdb': 'text/plain',
'html': 'text/html',
'dtml': 'text/html',
'htm': 'text/html',
'dtm': 'text/html',
'gif': 'image/gif',
'jpg': 'image/jpeg',
'exe': 'application/octet-stream',
None : 'application/octet-stream',
}
_encmap={'image/gif': 'binary',
'image/jpg': 'binary',
'application/octet-stream': 'binary',
}
def ErrorTypes(code):
if code >= 400 and code < 500: return NotFound
if code >= 500 and code < 600: return ServerError
return 'HTTP_Error_%s' % code
usage="""
Usage: %s [-u username:password] url [name=value ...]
where url is the web resource to call.
The -u option may be used to provide a user name and password.
Optional arguments may be provided as name=value pairs.
In a name value pair, if a name ends in ":file", then the value is
treated as a file name and the file is sent using the file-upload
protocol. If the file name is "-", then data are taken from standard
input.
The body of the response is written to standard output.
The headers of the response are written to standard error.
""" % sys.argv[0]
def main():
import getopt
user=None
try:
optlist, args = getopt.getopt(sys.argv[1:],'u:')
url=args[0]
u =filter(lambda o: o[0]=='-u', optlist)
if u:
[user, pw] = u[0][1].split(':')
kw={}
for arg in args[1:]:
[name,v]=arg.split('=')
if name[-5:]==':file':
name=name[:-5]
if v=='-': v=sys.stdin
else: v=open(v, 'rb')
kw[name]=v
except:
print usage
sys.exit(1)
# The "main" program for this module
f=Function(url)
if user: f.username, f.password = user, pw
headers, body = apply(f,(),kw)
sys.stderr.write(''.join(map(lambda h: "%s: %s\n" % h, headers.items()))
+"\n\n")
print body
if __name__ == "__main__":
main()
```
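
Client.py is Python 2 throughout (string exceptions, `1L` literals, `urllib.quote`). For reference, a compact Python 3 sketch of its `name:type=value` query marshalling, without the original's nested-list guard:

```python
# Python 3 sketch of marshal_whatever/marshal_list above (hedged, simplified)
from urllib.parse import quote

def marshal_whatever(k, v):
    if isinstance(v, (list, tuple)):
        tname = "list" if isinstance(v, list) else "tuple"
        return "&".join(marshal_whatever("%s:%s" % (k, tname), item)
                        for item in v)
    suffix = {float: ":float", int: ":int"}.get(type(v), "")
    return "%s%s=%s" % (k, suffix, quote(str(v)))

# e.g. marshal_whatever("ids", [1, 2]) -> "ids:list:int=1&ids:list:int=2"
```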

path: /dataclean.py | extension: py | length_bytes: 521
repo_name: IsaDash/Lake-Acidity-Modeling | branch_name: refs/heads/master
blob_id: 8f8762deb83a2f972b91a97496916672ba78463c | directory_id: 672b1583ba4d741a156aa907d1c70c6624d863ea
content_id: 5fa9205f88d0a82fbaef3ec614b9f8971c831512
snapshot_id: 1c4b2e9601650eaec4f5bb7e30a2545926836218 | revision_id: 82768d109981a2a9fcb827f776ddc4e5c4aadd18
detected_licenses: [] | license_type: no_license
visit_date: 2023-01-31T22:14:53.330143 | revision_date: 2020-12-14T20:31:40 | committer_date: 2020-12-14T20:31:40
github_id: 320,958,451 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
authors: ["anchald2@illinois.edu"] | author_id: anchald2@illinois.edu
content:

```python
import csv
import pandas as pd
# infile = open("acid.csv", "r")
# outfile = open("lake_data_transformed.csv", "w", newline="")
# in_csv = csv.reader(infile)
# out_csv = csv.writer(outfile)
# for row in in_csv:
# to_add = True
# if row["SIO2_MG_L (silicon dioxicde)"] == None:
# continue
# else:
# out_csv.writerow(row)
# infile.close()
# outfile.close()
acid_data = pd.read_csv('acid.csv')
modified_acid = acid_data.dropna()
modified_acid.to_csv('lake_data_cleaned.csv', index=False)
```
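
`dropna()` with no arguments drops a row if any column is missing, while the commented-out csv loop above filtered on a single column. If that was the intent, pandas' `subset=` matches it (header string copied verbatim from the comment, typo included):

```python
# single-column variant of the cleanup above (a sketch, not the repo's code)
import pandas as pd

acid_data = pd.read_csv("acid.csv")
cleaned = acid_data.dropna(subset=["SIO2_MG_L (silicon dioxicde)"])
cleaned.to_csv("lake_data_cleaned.csv", index=False)
```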

path: /testScripts/noiseAccuracy.py | extension: py | length_bytes: 2,435
repo_name: CharupriyaSharma/eBNSLNoisyOR | branch_name: refs/heads/master
blob_id: b387790e62a38f3fc59bf502196005745fe5cb11 | directory_id: 74a71d176b25c4bc8b17bf4a4a0a8e7f2c92feb8
content_id: 21c872ebf001e7efe29964666589d85b8ce3d219
snapshot_id: 14421c666af5220059d38f48ff1b843769575e2e | revision_id: 3718710a32c8b301823c30c83e9c0cd0218c6c83
detected_licenses: [] | license_type: no_license
visit_date: 2022-12-13T06:02:04.613443 | revision_date: 2022-12-07T05:05:21 | committer_date: 2022-12-07T05:05:21
github_id: 264,725,058 | star_events_count: 4 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
authors: ["charupriyasharma@Charupriyas-MacBook-Pro.local"] | author_id: charupriyasharma@Charupriyas-MacBook-Pro.local
content:

```python
import sys
import os
noisefile = sys.argv[2]
freg = open(noisefile, "r")
fmerge = open(sys.argv[1] + "_noise_compare", "w+")
#get var count
n= int(sys.argv[3])
realNoise = [0]*n
computedNoise = [0]*n
error = [0]*n
for i in range(n):
#get scores for ith variable
item = freg.readline()
items = [float(it) for it in item.split()]
scorecount = 0
if len(items)>1:
scorecount = int(len(items)/2)
print(items)
print(scorecount)
if scorecount > 0 :
for j in range(scorecount):
print("scoring node : ")
print(items[0])
realNoise[int(items[2*j+1])] = items[2*j+2]
# print(str(int(items[2*j+1])) + ":" +str(items[2*j+2]))
    # read noise from the noisy-or file for the i-th variable
nifile = sys.argv[1] + "_" + str(int(items[0])) + "_20_noise"
print("opening " + nifile)
fno = open(nifile)
f2=""
if os.stat(nifile).st_size > 0 :
f2 = fno.readlines()
for line in f2 :
print ("computed " + line)
items1 = [float(it) for it in line.split()]
if int(items1[0]) > 0 :
paracount = int(items1[0])
for k in range(paracount):
print(str(int(items1[k+1])) + ":" + str(items1[paracount+1+ k]))
print(int(items1[k+1]))
                    currenterr = abs(computedNoise[int(items1[k+1])] - realNoise[int(items1[k+1])])
                    newerr = abs(items1[paracount+1+k] - realNoise[int(items1[k+1])])
                    print(newerr < currenterr)
                    # keep whichever estimate is closer to the real noise
                    if computedNoise[int(items1[k+1])] == 0 or newerr < currenterr:
                        computedNoise[int(items1[k+1])] = items1[paracount+1+k]
fno.close()
#write merged scores to file
err = 0
errctr = 0
for i in range(n):
if realNoise[i] >0 :
err1 = abs(realNoise[i]-computedNoise[i])
errctr+=1
err+=err1
error[i] = err1
fmerge.write(str(i) + " " + str(realNoise[i]) + " " + str(computedNoise[i])+ " " + str(error[i]) + "\n")
if errctr > 0 :
fmerge.write("error : " + str(err/errctr)+ "\n")
freg.close()
fmerge.close()
if errctr > 0:
    # only record an average error when at least one variable was scored
    er = open("errors", "a+")
    er.write(sys.argv[1] + " :" + str(err/errctr) + "\n")
    er.close()
```

path: /test_ping.py | extension: py | length_bytes: 2,974
repo_name: Nikisn/Check_config | branch_name: refs/heads/master
blob_id: 59e5cf0f81cff22c4432f07f3fb01378f5ceed91 | directory_id: 18e4fffb093c8bb1fdbf59801bfa24345ce0f7cc
content_id: 8c01d793c34486e4ec363388c66deb5ad6bd58f6
snapshot_id: fccd0d33f87a004853a5128cbbfd179c610ca8f7 | revision_id: 8a7b0dc793570f0bfb84cc0029ef616f8751bf20
detected_licenses: [] | license_type: no_license
visit_date: 2023-01-22T01:13:00.463038 | revision_date: 2020-12-03T12:58:46 | committer_date: 2020-12-03T12:58:46
github_id: 271,091,480 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
authors: ["noreply@github.com"] | author_id: Nikisn.noreply@github.com
content:

```python
from netmiko import ConnectHandler
from textfsm import clitable
import yaml
with open("hosts.yaml") as f:
    ping_src = yaml.safe_load(f)  # safe_load: yaml.load without a Loader is unsafe and deprecated
ping_dst = {
'r1':'ping 1.1.1.1 -c 4',
'r2':'ping 2.2.2.2 -c 4',
'r3':'ping 3.3.3.3 -c 4',
'r4':'ping 4.4.4.4 -c 4',
'r5':'ping 5.5.5.5 -c 4',
'r6':'ping 6.6.6.6 -c 4',
'r7':'ping 7.7.7.7 -c 4'
}
from_ping = input('Enter the host to send ICMP packets FROM (r1, r2, ...)\n')
to_ping = input('Enter the host to send ICMP packets TO (r1, r2, ...)\n')
cli_table = clitable.CliTable('index', 'TextFSM_templates')
attributes = {'Command':ping_dst[to_ping], 'Vendor': 'Cisco'}
device_params = {
'device_type': 'linux',
'ip': '127.0.0.1',
'port': ping_src[from_ping],
'username': 'sysadmin',
'password': '123'}
with ConnectHandler(**device_params) as ssh:
ping = ssh.send_command(ping_dst[to_ping])
cli_table.ParseCmd(ping,attributes)
data_rows = [list(row) for row in cli_table]
    if data_rows[0][0] == '4' and data_rows[0][1] == '0':
        print(f'PING FROM {from_ping} TO {to_ping} SUCCEEDED!')
        print('RESULT:\n', cli_table.FormattedTable())
    else:
        print(f'NO REPLY FROM {from_ping} TO {to_ping}!')
        print('RESULT:\n', cli_table.FormattedTable())
#cfg_file='r1.txt'
#command = ['hostname R1','interface enp0s8','ip address 22.1.3.1/24', 'ip ospf message-digest-key 1 md5 123']
#user = 'sysadmin'
#password = '123'
#enable_pass = '123'
#ip = '127.0.0.1'
#
#devices_port = {'r1':2222,'r2':2200,'r3':2201,'r4':2202,'r5':2203,'r6':2204,
# 'r7':2205,
# }
#
#
#print('connection to device')
#device_params = {
# 'device_type': 'linux',
# 'ip': ip,
# 'port': 2201,
# 'username': user,
# 'password': password}
#
#"подключение и снятие команды"
#
#with ConnectHandler(**device_params) as ssh:
# ping = ssh.send_command('ping 2.2.2.2 -c 4')
#
# with open ('test.txt', 'w') as dest:
# for line in ping:
# t = dest.write(line)
# print(type(t))
#
#"работа с файлом пинга"
#
#a = ''
#with open ('test.txt', 'r') as src:
# for line in src:
# if line.startswith('4'):
#
# send, rec, loss, _ = line.split(',')
#
#
#print(f'''
# Sent: {send}
# Received: {rec}
# Lost: {loss}''')
#
#
#
#"""
#ssh.enable() f"{name}.txt
#'session_log': "output.txt"
#
#"""
```

path: /pysnmp-with-texts/ALCATEL-IND1-E-SERVICE-MIB.py | extension: py | length_bytes: 47,663
repo_name: agustinhenze/mibs.snmplabs.com | branch_name: refs/heads/master
blob_id: 6cc1542064216d2c36184802c5ba5aaf719fec2f | directory_id: 85a9ffeccb64f6159adbd164ff98edf4ac315e33
content_id: 6ac2637b7fc0d24af9f67b9e0d9c926639877700
snapshot_id: 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | revision_id: 1fc5c07860542b89212f4c8ab807057d9a9206c7
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-proprietary-license", "LicenseRef-scancode-unknown-license-reference"] | license_type: permissive
visit_date: 2020-12-26T12:41:41.132395 | revision_date: 2019-08-16T15:51:41 | committer_date: 2019-08-16T15:53:57
github_id: 237,512,469 | star_events_count: 0 | fork_events_count: 0
gha_license_id: Apache-2.0 | gha_event_created_at: 2020-01-31T20:41:36 | gha_created_at: 2020-01-31T20:41:35 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:

```python
#
# PySNMP MIB module ALCATEL-IND1-E-SERVICE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ALCATEL-IND1-E-SERVICE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:17:28 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
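# Note: this generated module is not standalone; pysnmp's MibBuilder executes
# it with `mibBuilder` already bound in the module globals, which is why the
# importSymbols() calls below work without an import statement.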
softentIND1eService, = mibBuilder.importSymbols("ALCATEL-IND1-BASE", "softentIND1eService")
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
Gauge32, Counter32, iso, NotificationType, ModuleIdentity, ObjectIdentity, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Counter64, Unsigned32, Integer32, TimeTicks, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Counter32", "iso", "NotificationType", "ModuleIdentity", "ObjectIdentity", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Counter64", "Unsigned32", "Integer32", "TimeTicks", "Bits")
RowStatus, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "DisplayString", "TextualConvention")
alcatelIND1EServiceMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1))
if mibBuilder.loadTexts: alcatelIND1EServiceMIB.setLastUpdated('200705230000Z')
if mibBuilder.loadTexts: alcatelIND1EServiceMIB.setOrganization('Alcatel-Lucent')
if mibBuilder.loadTexts: alcatelIND1EServiceMIB.setContactInfo('Please consult with Customer Service to ensure the most appropriate version of this document is used with the products in question: Alcatel-Lucent, Enterprise Solutions Division (Formerly Alcatel Internetworking, Incorporated) 26801 West Agoura Road Agoura Hills, CA 91301-5122 United States Of America Telephone: North America +1 800 995 2696 Latin America +1 877 919 9526 Europe +31 23 556 0100 Asia +65 394 7933 All Other +1 818 878 4507 Electronic Mail: support@ind.alcatel.com World Wide Web: http://alcatel-lucent.com/wps/portal/enterprise File Transfer Protocol: ftp://ftp.ind.alcatel.com/pub/products/mibs')
if mibBuilder.loadTexts: alcatelIND1EServiceMIB.setDescription('The parameters for configuration of the E-Service feature. The right to make changes in specification and other information contained in this document without prior notice is reserved. No liability shall be assumed for any incidental, indirect, special, or consequential damages whatsoever arising from or related to this document or the information contained herein. Vendors, end-users, and other interested parties are granted non-exclusive license to use this specification in connection with management of the products for which it is intended to be used. Copyright (C) 1995-2006 Alcatel-Lucent ALL RIGHTS RESERVED WORLDWIDE')
class AlaEServiceUNIProfileProtocolTreatment(TextualConvention, Integer32):
description = 'The behavior of the bridge in regards to the given protocols packets received on the UNI. Tunnel (1) enables the packets to be tunneled across the provider network. Discard (2) causes the packets to be discarded and not enter the provider network. Peer (3) means that on this port the bridge is to participate in the protocol.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("tunnel", 1), ("drop", 2), ("peer", 3))
alcatelIND1eServiceMIBObjects = ObjectIdentity((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1))
if mibBuilder.loadTexts: alcatelIND1eServiceMIBObjects.setStatus('current')
if mibBuilder.loadTexts: alcatelIND1eServiceMIBObjects.setDescription('Branch For E-Service Managed Objects.')
alcatelIND1EServiceMIBConformance = ObjectIdentity((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2))
if mibBuilder.loadTexts: alcatelIND1EServiceMIBConformance.setStatus('current')
if mibBuilder.loadTexts: alcatelIND1EServiceMIBConformance.setDescription('Branch For E-Service Conformance Information.')
alcatelIND1EServiceMIBGroups = ObjectIdentity((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 1))
if mibBuilder.loadTexts: alcatelIND1EServiceMIBGroups.setStatus('current')
if mibBuilder.loadTexts: alcatelIND1EServiceMIBGroups.setDescription('Branch For E-Service Units Of Conformance.')
alcatelIND1EServiceMIBCompliances = ObjectIdentity((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 2))
if mibBuilder.loadTexts: alcatelIND1EServiceMIBCompliances.setStatus('current')
if mibBuilder.loadTexts: alcatelIND1EServiceMIBCompliances.setDescription('Branch For E-Service Compliance Statements.')
alaEService = MibIdentifier((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1))
alaEServiceInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 1))
alaEServiceMode = MibScalar((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("legacyMode", 1), ("eServiceMode", 2))).clone('legacyMode')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: alaEServiceMode.setStatus('current')
if mibBuilder.loadTexts: alaEServiceMode.setDescription('The current mode configured for Vlan Stacking and Layer 2 tunnel configuration. legacyMode (1) indicates that the commands from AlcatelIND1VLANStacking.mib are to be used. eServiceMode (2) indicates the commands from this MIB are to be used.')
alaEServiceSapProfileTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2), )
if mibBuilder.loadTexts: alaEServiceSapProfileTable.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileTable.setDescription('A table that contains service profiles containing performance and control attributes. An entry in this table is created when a new service profile is defined.')
alaEServiceSapProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2, 1), ).setIndexNames((1, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfileID"))
if mibBuilder.loadTexts: alaEServiceSapProfileEntry.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileEntry.setDescription('A E-Service Service Profile entry.')
alaEServiceSapProfileID = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 31)))
if mibBuilder.loadTexts: alaEServiceSapProfileID.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileID.setDescription('A label given to uniquely identify this profile. Must be at least one character long.')
alaEServiceSapProfileCVLANTreatment = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("stackSVLAN", 1), ("translate", 2), ("changeCVLAN", 3))).clone('stackSVLAN')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapProfileCVLANTreatment.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileCVLANTreatment.setDescription('The type of VLAN stacking operation to be performed on a customer frame entering this service. Stack Svlan (1) indicates that the SVLAN is to be pre-pended on the frame before any existing 802.1Q tag. Translate (2) means to replace the existing 802.1Q tag with the SVLAN. Change CVLAN (3) indicates that the customer tag is to remain on the frame but its value is to be changed to the supplied value.')
alaEServiceSapProfileReplacementCVLAN = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapProfileReplacementCVLAN.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileReplacementCVLAN.setDescription('The CVLAN ID to use when using the Change CVLAN treatment mode.')
alaEServiceSapProfilePriorityMapMode = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("notAssigned", 0), ("mapInnerPtoOuterP", 1), ("mapInnerDscpToOuterP", 2), ("fixedP", 3))).clone('fixedP')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapProfilePriorityMapMode.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfilePriorityMapMode.setDescription('This object describes the source of the value for the priority field of the SVLAN 802.1Q tag when pre-pended to the customer data frame. notAssigned (0) means no mapping is assigned. mapInnerPtoOuterP (1) uses the priority field of the incoming frame, when tagged, to fill in the priority field of the SVLAN tag. mapInnerDscpToOuterP (2) uses the priority bits in the frame IP DSCP field to fill in the priority field of the SVLAN tag. fixedP (3) uses the supplied fixed priority value to fill in the SVLAN tag priority bits.')
alaEServiceSapProfileFixedPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapProfileFixedPriority.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileFixedPriority.setDescription('This object describes the value of the priority field of the 802.1Q SVLAN tag pre-pended to customer data frames when the fixed priority mapping mode is selected.')
alaEServiceSapProfileIngressBW = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2, 1, 6), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapProfileIngressBW.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileIngressBW.setDescription('This object describes the limit of ingress bandwidth for the traffic to which this profile is applied. If 0, no bandwidth limit is applied. This number represents traffic in units of 1,000,000 bits per second. Note that all CVLANs that belong to this SAP will share this aggregated limit.')
alaEServiceSapProfileBandwidthShare = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("notApplicable", 0), ("shared", 1), ("notShared", 2))).clone('shared')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapProfileBandwidthShare.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileBandwidthShare.setDescription('This object describes the use of the bandwidth limit in how it is applied across multiple ports of the SAP. If set to notApplicable(0), the SAP is not used. If set to Shared (1), all the ports that are part of the SAP will use aggregated bandwidth, sharing some part of the bandwidth limit. If set to notShared (2), each port will use its own bandwidth meter for this SAP. This value is not used if ingressBandwidth is 0.')
alaEServiceSapProfileRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapProfileRowStatus.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileRowStatus.setDescription('The status of this table entry.')
alaEServiceSapProfileEgressBW = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2, 1, 9), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapProfileEgressBW.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileEgressBW.setDescription('This object describes the limit of egress bandwidth for each UNI of the SAP to which this profile is applied. If 0, no bandwidth limit is applied. This number represents traffic in units of Megabits per second. Note that all CVLANs that belong to this SAP will share this aggregated limit.')
alaEServiceUNIProfileTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3), )
if mibBuilder.loadTexts: alaEServiceUNIProfileTable.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfileTable.setDescription('A table that contains service profiles containing performance and control attributes. An entry in this table is created when a new service profile is defined.')
alaEServiceUNIProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3, 1), ).setIndexNames((1, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceUNIProfileID"))
if mibBuilder.loadTexts: alaEServiceUNIProfileEntry.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfileEntry.setDescription('A E-Service Service Profile entry.')
alaEServiceUNIProfileID = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 31)))
if mibBuilder.loadTexts: alaEServiceUNIProfileID.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfileID.setDescription('A label given to uniquely identify this profile. Must be at least one character long.')
alaEServiceUNIProfileStpBpduTreatment = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3, 1, 2), AlaEServiceUNIProfileProtocolTreatment().clone('tunnel')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceUNIProfileStpBpduTreatment.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfileStpBpduTreatment.setDescription('This object describes the behavior of the bridge in regards to the spanning tree protocol BPDU received on the UNI. Tunnel (1) enables the PDU to be tunneled across the provider network. Discard (2) causes the PDU of the protocol to be discarded and not enter the provider network. Peer (3) means that on this port the bridge is to participate in the protocol. Currently Peer is not supported for Spanning Tree')
alaEServiceUNIProfile8021xTreatment = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3, 1, 3), AlaEServiceUNIProfileProtocolTreatment().clone('drop')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceUNIProfile8021xTreatment.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfile8021xTreatment.setDescription('This object describes the behavior of the bridge in regards to the IEEE 802.1x PDU frames received on the UNI. Tunnel (1) enables the PDU to be tunneled across the provider network. Discard (2) causes the PDU of the protocol to be discarded and not enter the provider network. Peer (3) means that on this port the bridge is to participate in the protocol. Currently only drop is supported')
alaEServiceUNIProfile8021ABTreatment = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3, 1, 4), AlaEServiceUNIProfileProtocolTreatment().clone('drop')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceUNIProfile8021ABTreatment.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfile8021ABTreatment.setDescription('This object describes the behavior of the bridge in regards to the IEEE 802.1AB PDU frames received on the UNI. Tunnel (1) enables the PDU to be tunneled across the provider network. Discard (2) causes the PDU of the protocol to be discarded and not enter the provider network. Peer (3) means that on this port the bridge is to participate in the protocol. Currently only drop is supported')
alaEServiceUNIProfile8023adTreatment = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3, 1, 5), AlaEServiceUNIProfileProtocolTreatment().clone('peer')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceUNIProfile8023adTreatment.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfile8023adTreatment.setDescription('This object describes the behavior of the bridge in regards to the IEEE 802.3ad PDU frames received on the UNI. Tunnel (1) enables the PDU to be tunneled across the provider network. Discard (2) causes the PDU of the protocol to be discarded and not enter the provider network. Peer (3) means that on this port the bridge is to participate in the protocol. Currently only peer is supported')
alaEServiceUNIProfileGvrpTreatment = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3, 1, 6), AlaEServiceUNIProfileProtocolTreatment().clone('tunnel')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceUNIProfileGvrpTreatment.setStatus('deprecated')
if mibBuilder.loadTexts: alaEServiceUNIProfileGvrpTreatment.setDescription('This object describes the behavior of the bridge in regards to the GVRP PDU frames received on the UNI. Tunnel (1) enables the PDU to be tunneled across the provider network. Discard (2) causes the PDU of the protocol to be discarded and not enter the provider network. Peer (3) means that on this port the bridge is to participate in the protocol. Currently peer is not supported for GVRP')
alaEServiceUNIProfileAmapTreatment = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3, 1, 7), AlaEServiceUNIProfileProtocolTreatment().clone('drop')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceUNIProfileAmapTreatment.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfileAmapTreatment.setDescription('This object describes the behavior of the bridge in regards to the Alcatel proprietary AMAP PDU frames received on the UNI. Tunnel (1) enables the PDU to be tunneled across the provider network. Discard (2) causes the PDU of the protocol to be discarded and not enter the provider network. Peer (3) means that on this port the bridge is to participate in the protocol. Currently only drop is supported')
alaEServiceUNIProfileMvrpTreatment = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3, 1, 8), AlaEServiceUNIProfileProtocolTreatment().clone('tunnel')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceUNIProfileMvrpTreatment.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfileMvrpTreatment.setDescription('This object describes the behavior of the bridge in regards to the MVRP PDU frames received on the UNI. Tunnel (1) enables the PDU to be tunneled across the provider network. Discard (2) causes the PDU of the protocol to be discarded and not enter the provider network. Peer (3) means that on this port the bridge is to participate in the protocol. Currently peer is not supported for MVRP')
alaEServiceUNIProfileRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceUNIProfileRowStatus.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfileRowStatus.setDescription('The status of this table entry.')
alaEServiceTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 4), )
if mibBuilder.loadTexts: alaEServiceTable.setStatus('current')
if mibBuilder.loadTexts: alaEServiceTable.setDescription('A table that contains the services and their assigned SVLAN for the E-Service feature.')
alaEServiceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 4, 1), ).setIndexNames((1, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceID"))
if mibBuilder.loadTexts: alaEServiceEntry.setStatus('current')
if mibBuilder.loadTexts: alaEServiceEntry.setDescription('The svlan/ipmvlan-port association.')
alaEServiceID = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 4, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 32)))
if mibBuilder.loadTexts: alaEServiceID.setStatus('current')
if mibBuilder.loadTexts: alaEServiceID.setDescription('A label given to uniquely identify this Service. Must be at least one character long.')
alaEServiceSVLAN = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSVLAN.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSVLAN.setDescription('The SVLAN number of the SVLAN chosen to the be transport for this service.')
alaEServiceVlanType = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("unknown", 0), ("svlan", 1), ("ipmvlan", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceVlanType.setStatus('current')
if mibBuilder.loadTexts: alaEServiceVlanType.setDescription('The type of the vlan this service is going to attach to. When creating the service, the type should match the vlanId specified in the request.')
alaEServiceRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 4, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceRowStatus.setStatus('current')
if mibBuilder.loadTexts: alaEServiceRowStatus.setDescription('The status of this table entry. The supported values for set are createAndGo (4) and destroy (6), to add or remove a service. When creating or deleting the service, the user needs to provide both the svlan and the vlanType objects.')
alaEServiceSapTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 5), )
if mibBuilder.loadTexts: alaEServiceSapTable.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapTable.setDescription("A table that contains the Service Access Points (Sap) listed by ID. This table is used to create, delete, and modify the SAP's profile")
alaEServiceSapEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 5, 1), ).setIndexNames((0, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapID"))
if mibBuilder.loadTexts: alaEServiceSapEntry.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapEntry.setDescription('The list of SAP.')
alaEServiceSapID = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: alaEServiceSapID.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapID.setDescription('A Number given to uniquely identify the SAP.')
alaEServiceSapServiceID = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 5, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapServiceID.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapServiceID.setDescription('A label given to uniquely identify the Service this SAP is for. Must be at least one character long.')
alaEServiceSapProfile = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 5, 1, 3), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapProfile.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfile.setDescription('The string identifying the SAP Profile this sap is to use. If specified, must match an existing SAP Profile.')
alaEServiceSapRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 5, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapRowStatus.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapRowStatus.setDescription('The status of this table entry. The supported values for set are createAndGo (4) and destroy (6), to add or remove a SAP. When creating the SAP, the user needs to provide the service name in the same set request.')
alaEServiceSapCvlanTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 6), )
if mibBuilder.loadTexts: alaEServiceSapCvlanTable.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapCvlanTable.setDescription('A table that contains the Service Access Points (Sap) where the CVLANs are bound to their service.')
alaEServiceSapCvlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 6, 1), ).setIndexNames((0, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapCvlanSapID"), (0, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapCvlanCvlan"))
if mibBuilder.loadTexts: alaEServiceSapCvlanEntry.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapCvlanEntry.setDescription('The CVLAN to Sap binding.')
alaEServiceSapCvlanSapID = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: alaEServiceSapCvlanSapID.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapCvlanSapID.setDescription('A Number given to uniquely identify this SAP.')
alaEServiceSapCvlanCvlan = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 6, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4094)))
if mibBuilder.loadTexts: alaEServiceSapCvlanCvlan.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapCvlanCvlan.setDescription('This object is the CVLAN ID that this binding is targeted at. The CVLAN ID may be 0, which indicates an all or untagged only mapping type.')
alaEServiceSapCvlanMapType = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 6, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("single", 1), ("all", 2), ("untaggedOnly", 3))).clone('single')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapCvlanMapType.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapCvlanMapType.setDescription('This object is the mapping type that defines what CVLANs are mapped into this service. Multiple CVLAN-to-service mappings can be defined; however, only one all (2) or untaggedOnly (3) mapping entry can be created per UNI. A mapping type of single (1) denotes a specific CVLAN value to bind to the service. A mapping type of all (2) denotes that all customer frames that do not map to any other SAP will be mapped into this service. A mapping type of untaggedOnly (3) denotes that only untagged frames will be mapped into this service.')
alaEServiceSapCvlanRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 6, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapCvlanRowStatus.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapCvlanRowStatus.setDescription('The status of this table entry. The supported values for set are createAndGo (4) and destroy (6), to add or remove a SAP.')
alaEServicePortTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7), )
if mibBuilder.loadTexts: alaEServicePortTable.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortTable.setDescription('A table that contains the ports used by the EService feature. Both UNI and NNI are listed here.')
alaEServicePortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7, 1), ).setIndexNames((0, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServicePortID"))
if mibBuilder.loadTexts: alaEServicePortEntry.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortEntry.setDescription('The list of ports being used by EService.')
alaEServicePortID = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: alaEServicePortID.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortID.setDescription('The IfIndex of this UNI or NNI Port.')
alaEServicePortType = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3))).clone(namedValues=NamedValues(("uni", 1), ("nni", 3))).clone('uni')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServicePortType.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortType.setDescription('The type of port for Vlan Stacking operation. uni (1) represents a customer facing port on which traffic may enter the E-Service. nni (3) represents a provider network port over which the E-Service may be connected.')
alaEServicePortVendorTpid = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7, 1, 3), Integer32().clone(33024)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServicePortVendorTpid.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortVendorTpid.setDescription('The TPID for this port if type is NNI. It is used when parsing incoming data traffic and is substituted for the standard 802.1Q TPID on outgoing data traffic. This is used for compatibility with other vendor equipment. The default value is the standard value 0x8100.')
alaEServicePortLegacyStpBpdu = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("notApplicable", 0), ("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServicePortLegacyStpBpdu.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortLegacyStpBpdu.setDescription('The legacy STP BPDU treatment for this port if NNI. It defines the type of processing applied to legacy STP BPDUs on network ports. Legacy BPDUs are conventional/customer BPDUs with MAC address 01:80:c2:00:00:00; their processing on network ports can be enabled/disabled by this object. By default the value is disabled, i.e. provider MAC BPDUs with MAC address 01:80:c2:00:00:08 are processed at network ports.')
alaEServicePortLegacyGvrpPdu = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("notApplicable", 0), ("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServicePortLegacyGvrpPdu.setStatus('deprecated')
if mibBuilder.loadTexts: alaEServicePortLegacyGvrpPdu.setDescription('The legacy GVRP PDU treatment for this port if NNI. It defines the type of processing applied to GVRP PDUs on network ports. ')
alaEServicePortUniProfile = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7, 1, 6), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServicePortUniProfile.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortUniProfile.setDescription('The label of an existing UNI profile that contains various properties to be applied to this port if UNI.')
alaEServicePortTransBridging = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServicePortTransBridging.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortTransBridging.setDescription('The Transparent Bridging status for the nni Port.')
alaEServicePortLegacyMvrpPdu = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("notApplicable", 0), ("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServicePortLegacyMvrpPdu.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortLegacyMvrpPdu.setDescription('The legacy MVRP PDU treatment for this port if NNI. It defines the type of processing applied to MVRP PDUs on network ports. ')
alaEServicePortRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServicePortRowStatus.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortRowStatus.setDescription('The status of this table entry. The supported values for set are createAndGo (4) and destroy (6), to add or remove a binding.')
alaEServiceSapUniTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 8), )
if mibBuilder.loadTexts: alaEServiceSapUniTable.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapUniTable.setDescription('A table that contains the UNIs that are bound to each SAP for classifying traffic into each EService. Note that writing to this table may create a new UNI.')
alaEServiceSapUniEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 8, 1), ).setIndexNames((0, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapUniSap"), (0, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapUniUni"))
if mibBuilder.loadTexts: alaEServiceSapUniEntry.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapUniEntry.setDescription('The list of SAP-UNI bindings being used by EService.')
alaEServiceSapUniSap = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 8, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: alaEServiceSapUniSap.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapUniSap.setDescription('The SAP ID that is configured onto this port.')
alaEServiceSapUniUni = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 8, 1, 2), InterfaceIndex())
if mibBuilder.loadTexts: alaEServiceSapUniUni.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapUniUni.setDescription('The IfIndex of this UNI Port.')
alaEServiceSapUniRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 8, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapUniRowStatus.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapUniRowStatus.setDescription('The status of this table entry. The supported values for set are createAndGo (4) and destroy (6), to add or remove a binding.')
alaEServiceNniSvlanTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 9), )
if mibBuilder.loadTexts: alaEServiceNniSvlanTable.setStatus('current')
if mibBuilder.loadTexts: alaEServiceNniSvlanTable.setDescription('A table that contains the SVLANs bound to each NNI for use by the EService feature.')
alaEServiceNniSvlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 9, 1), ).setIndexNames((0, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceNniSvlanNni"), (0, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceNniSvlanSvlan"))
if mibBuilder.loadTexts: alaEServiceNniSvlanEntry.setStatus('current')
if mibBuilder.loadTexts: alaEServiceNniSvlanEntry.setDescription('The list of NNI-SVLAN bindings being used by EService.')
alaEServiceNniSvlanNni = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 9, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: alaEServiceNniSvlanNni.setStatus('current')
if mibBuilder.loadTexts: alaEServiceNniSvlanNni.setDescription('The IfIndex of this NNI Port.')
alaEServiceNniSvlanSvlan = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 9, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2, 4094)))
if mibBuilder.loadTexts: alaEServiceNniSvlanSvlan.setStatus('current')
if mibBuilder.loadTexts: alaEServiceNniSvlanSvlan.setDescription('The SVLAN bound to this port. SVLAN cannot be 1.')
alaEServiceNniSvlanRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 9, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceNniSvlanRowStatus.setStatus('current')
if mibBuilder.loadTexts: alaEServiceNniSvlanRowStatus.setDescription('The status of this table entry. The supported values for set are createAndGo (4) and destroy (6), to add or remove a binding.')
alaEServiceNniSvlanVpaType = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 9, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("stp", 1), ("erp", 2))).clone('stp')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceNniSvlanVpaType.setStatus('current')
if mibBuilder.loadTexts: alaEServiceNniSvlanVpaType.setDescription('This object is used to specify whether the VPA state is to be controlled by ERP or by STP. By default the VPA state is controlled by STP.')
alcatelIND1EServiceMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 2, 1)).setObjects(("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfileGroup"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceUNIProfileGroup"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceGroup"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapGroup"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapUniGroup"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapCvlanGroup"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServicePortGroup"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceNniSvlanGroup"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceInfoGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
alcatelIND1EServiceMIBCompliance = alcatelIND1EServiceMIBCompliance.setStatus('current')
if mibBuilder.loadTexts: alcatelIND1EServiceMIBCompliance.setDescription('Compliance statement for E-Service.')
alaEServiceSapProfileGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 1, 1)).setObjects(("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfileCVLANTreatment"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfileReplacementCVLAN"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfilePriorityMapMode"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfileFixedPriority"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfileIngressBW"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfileBandwidthShare"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfileRowStatus"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfileEgressBW"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
alaEServiceSapProfileGroup = alaEServiceSapProfileGroup.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileGroup.setDescription('Collection of objects for management of E-Service Sap Profiles.')
alaEServiceUNIProfileGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 1, 2)).setObjects(("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceUNIProfileStpBpduTreatment"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceUNIProfile8021xTreatment"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceUNIProfile8021ABTreatment"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceUNIProfile8023adTreatment"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceUNIProfileGvrpTreatment"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceUNIProfileAmapTreatment"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceUNIProfileMvrpTreatment"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceUNIProfileRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
alaEServiceUNIProfileGroup = alaEServiceUNIProfileGroup.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfileGroup.setDescription('Collection of objects for management of EService UNI Profiles.')
alaEServiceGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 1, 3)).setObjects(("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSVLAN"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceVlanType"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
alaEServiceGroup = alaEServiceGroup.setStatus('current')
if mibBuilder.loadTexts: alaEServiceGroup.setDescription('Collection of objects for management of E-Services.')
alaEServiceSapGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 1, 4)).setObjects(("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapServiceID"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfile"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
alaEServiceSapGroup = alaEServiceSapGroup.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapGroup.setDescription('Collection of objects for management of E-Service SAPs.')
alaEServiceSapCvlanGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 1, 5)).setObjects(("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapCvlanMapType"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapCvlanRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
alaEServiceSapCvlanGroup = alaEServiceSapCvlanGroup.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapCvlanGroup.setDescription('Collection of objects for management of E-Service SAP CVLAN bindings.')
alaEServicePortGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 1, 6)).setObjects(("ALCATEL-IND1-E-SERVICE-MIB", "alaEServicePortType"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServicePortVendorTpid"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServicePortLegacyStpBpdu"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServicePortLegacyGvrpPdu"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServicePortUniProfile"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServicePortTransBridging"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServicePortLegacyMvrpPdu"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServicePortRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
alaEServicePortGroup = alaEServicePortGroup.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortGroup.setDescription('Collection of objects for management of E-Service Ports.')
alaEServiceSapUniGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 1, 7)).setObjects(("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapUniRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
alaEServiceSapUniGroup = alaEServiceSapUniGroup.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapUniGroup.setDescription('Collection of objects for management of E-Service SAP to UNI Binding.')
alaEServiceNniSvlanGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 1, 8)).setObjects(("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceNniSvlanRowStatus"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceNniSvlanVpaType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
alaEServiceNniSvlanGroup = alaEServiceNniSvlanGroup.setStatus('current')
if mibBuilder.loadTexts: alaEServiceNniSvlanGroup.setDescription('Collection of objects for management of E-Service SVLAN to NNI Binding.')
alaEServiceInfoGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 1, 9)).setObjects(("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceMode"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
alaEServiceInfoGroup = alaEServiceInfoGroup.setStatus('current')
if mibBuilder.loadTexts: alaEServiceInfoGroup.setDescription('Collection of objects for management of E-Service Info Binding.')
mibBuilder.exportSymbols("ALCATEL-IND1-E-SERVICE-MIB", alaEServiceUNIProfile8021xTreatment=alaEServiceUNIProfile8021xTreatment, alaEServiceNniSvlanEntry=alaEServiceNniSvlanEntry, alaEServiceUNIProfileGroup=alaEServiceUNIProfileGroup, alaEServicePortTable=alaEServicePortTable, alaEServiceUNIProfileAmapTreatment=alaEServiceUNIProfileAmapTreatment, alaEServiceTable=alaEServiceTable, alcatelIND1EServiceMIB=alcatelIND1EServiceMIB, alaEServiceSapCvlanCvlan=alaEServiceSapCvlanCvlan, alcatelIND1EServiceMIBCompliance=alcatelIND1EServiceMIBCompliance, alaEServiceUNIProfileTable=alaEServiceUNIProfileTable, alaEServicePortType=alaEServicePortType, alaEServiceUNIProfileMvrpTreatment=alaEServiceUNIProfileMvrpTreatment, alaEServiceNniSvlanVpaType=alaEServiceNniSvlanVpaType, alaEServiceInfo=alaEServiceInfo, alaEServicePortUniProfile=alaEServicePortUniProfile, alaEServiceSapProfileReplacementCVLAN=alaEServiceSapProfileReplacementCVLAN, alaEServiceSapProfileTable=alaEServiceSapProfileTable, alaEServiceSapProfileID=alaEServiceSapProfileID, alaEServiceSapEntry=alaEServiceSapEntry, alaEServiceUNIProfileStpBpduTreatment=alaEServiceUNIProfileStpBpduTreatment, alaEServicePortTransBridging=alaEServicePortTransBridging, alaEServicePortEntry=alaEServicePortEntry, alaEServiceSapUniSap=alaEServiceSapUniSap, alaEServiceSapProfileGroup=alaEServiceSapProfileGroup, alaEServiceSapProfileCVLANTreatment=alaEServiceSapProfileCVLANTreatment, alaEServiceSapProfileRowStatus=alaEServiceSapProfileRowStatus, alaEServiceRowStatus=alaEServiceRowStatus, alaEServiceSapProfileEgressBW=alaEServiceSapProfileEgressBW, alaEServicePortLegacyStpBpdu=alaEServicePortLegacyStpBpdu, alaEServiceSapRowStatus=alaEServiceSapRowStatus, alaEServiceUNIProfile8021ABTreatment=alaEServiceUNIProfile8021ABTreatment, alaEServiceSapCvlanGroup=alaEServiceSapCvlanGroup, alaEServiceSapUniEntry=alaEServiceSapUniEntry, alaEServicePortRowStatus=alaEServicePortRowStatus, alaEServiceNniSvlanRowStatus=alaEServiceNniSvlanRowStatus, alaEServiceSapTable=alaEServiceSapTable, alaEServiceNniSvlanSvlan=alaEServiceNniSvlanSvlan, alcatelIND1EServiceMIBGroups=alcatelIND1EServiceMIBGroups, alcatelIND1EServiceMIBConformance=alcatelIND1EServiceMIBConformance, alaEServiceUNIProfileID=alaEServiceUNIProfileID, alaEServiceSapProfilePriorityMapMode=alaEServiceSapProfilePriorityMapMode, alaEServiceSapServiceID=alaEServiceSapServiceID, alaEServiceID=alaEServiceID, alcatelIND1eServiceMIBObjects=alcatelIND1eServiceMIBObjects, alaEServiceSapUniTable=alaEServiceSapUniTable, alaEServiceNniSvlanGroup=alaEServiceNniSvlanGroup, AlaEServiceUNIProfileProtocolTreatment=AlaEServiceUNIProfileProtocolTreatment, alaEServiceSapProfileIngressBW=alaEServiceSapProfileIngressBW, alaEServiceVlanType=alaEServiceVlanType, alaEServiceUNIProfileEntry=alaEServiceUNIProfileEntry, alaEServiceSapID=alaEServiceSapID, alaEServiceSapProfileEntry=alaEServiceSapProfileEntry, alaEServiceSapProfileFixedPriority=alaEServiceSapProfileFixedPriority, alaEService=alaEService, alaEServiceSapCvlanRowStatus=alaEServiceSapCvlanRowStatus, alaEServicePortGroup=alaEServicePortGroup, alaEServiceInfoGroup=alaEServiceInfoGroup, alaEServiceEntry=alaEServiceEntry, alaEServiceSVLAN=alaEServiceSVLAN, alaEServiceMode=alaEServiceMode, alaEServiceSapUniGroup=alaEServiceSapUniGroup, alaEServiceSapUniUni=alaEServiceSapUniUni, alaEServiceNniSvlanTable=alaEServiceNniSvlanTable, alaEServiceSapProfile=alaEServiceSapProfile, alaEServiceUNIProfileRowStatus=alaEServiceUNIProfileRowStatus, alaEServicePortVendorTpid=alaEServicePortVendorTpid, 
alaEServicePortLegacyGvrpPdu=alaEServicePortLegacyGvrpPdu, alaEServiceSapCvlanEntry=alaEServiceSapCvlanEntry, alaEServicePortID=alaEServicePortID, alaEServiceSapGroup=alaEServiceSapGroup, alaEServicePortLegacyMvrpPdu=alaEServicePortLegacyMvrpPdu, alaEServiceUNIProfile8023adTreatment=alaEServiceUNIProfile8023adTreatment, alaEServiceSapProfileBandwidthShare=alaEServiceSapProfileBandwidthShare, PYSNMP_MODULE_ID=alcatelIND1EServiceMIB, alaEServiceNniSvlanNni=alaEServiceNniSvlanNni, alaEServiceSapCvlanSapID=alaEServiceSapCvlanSapID, alaEServiceGroup=alaEServiceGroup, alaEServiceSapUniRowStatus=alaEServiceSapUniRowStatus, alaEServiceSapCvlanMapType=alaEServiceSapCvlanMapType, alaEServiceSapCvlanTable=alaEServiceSapCvlanTable, alcatelIND1EServiceMIBCompliances=alcatelIND1EServiceMIBCompliances, alaEServiceUNIProfileGvrpTreatment=alaEServiceUNIProfileGvrpTreatment)
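# --- Editor's sketch (not part of the generated MIB): reading alaEServiceMode.
# A minimal pysnmp (v4 hlapi) GET against the scalar defined above. The agent
# address 192.0.2.1 and community string 'public' are placeholder assumptions;
# the OID is alaEServiceMode with the .0 instance suffix that scalars require.
def _example_read_eservice_mode():
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, getCmd)
    error_indication, error_status, _, var_binds = next(getCmd(
        SnmpEngine(), CommunityData('public'),
        UdpTransportTarget(('192.0.2.1', 161)), ContextData(),
        ObjectType(ObjectIdentity('1.3.6.1.4.1.6486.801.1.2.1.50.1.1.1.1.1.0'))))
    if error_indication or error_status:
        print('SNMP error:', error_indication or error_status.prettyPrint())
    else:
        for var_bind in var_binds:  # e.g. "...1.1.1.0 = legacyMode"
            print(' = '.join(x.prettyPrint() for x in var_bind))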
|
[
"dcwangmit01@gmail.com"
] |
dcwangmit01@gmail.com
|
4be926645d799b5ba94de45a6cc2f96033dd6cea
|
b8b4e487a084c707bbc994214edf36c9a2710e00
|
/project/analyze.py
|
6f5679bf3bcfc6bc0a8d68f7ee936a98c1ecf767
|
[] |
no_license
|
kp-john/project
|
de6a26abea3076cefab50a0371661c7a5db6a756
|
00a9fc19d09b8a1f580c6394f8a82ea4b5179506
|
refs/heads/master
| 2023-08-15T08:51:32.189677
| 2021-10-21T08:46:13
| 2021-10-21T08:46:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,255
|
py
|
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import stockdata, corona
import sys
#"Malgun Gothic" 폰트 설정
sns.set(font="Malgun Gothic",
rc={"axes.unicode_minus":False},
style='darkgrid')
if __name__ == "__main__":
app = stockdata.QApplication(sys.argv)
trade = stockdata.system_trading()
    # # stock1 (biotechnology - Alteogen)
    # trade.rq_chart_data("196170", "20211016", 1)
    # stock1_day_data = pd.DataFrame(trade.day_data, columns=['date','open','high','low','close','volume','trade_volume'])
    # stock2 (shipping company - HMM)
    trade.rq_chart_data("011200", "20211016", 1)
    stock2_day_data = pd.DataFrame(trade.day_data, columns=['date','open','high','low','close','volume','trade_volume'])
    # # stock3 (life science tools &amp; services - Seegene)
    # trade.rq_chart_data("096530", "20211016", 1)
    # stock3_day_data = pd.DataFrame(trade.day_data, columns=['date','open','high','low','close','volume','trade_volume'])
    # Convert dtype from object to numeric
# stock1_day_data = stock1_day_data.apply(pd.to_numeric)
# df1 = stock1_day_data.sort_values(by='date', ascending=True)
stock2_day_data = stock2_day_data.apply(pd.to_numeric)
df2 = stock2_day_data.sort_values(by='date', ascending=True)
# stock3_day_data = stock3_day_data.apply(pd.to_numeric)
# df3 = stock3_day_data.sort_values(by='date', ascending=True)
    # Merge the stock data (df1, df2, df3) with the corona data (corona.df)
    # df1_inner = pd.merge(df1, corona.df, on='date', how='inner')
    df2_inner = pd.merge(df2, corona.df, on='date', how='inner')
    # df3_inner = pd.merge(df3, corona.df, on='date', how='inner')
    # Extract columns: close price, total confirmed cases, daily new cases, first-dose completed, fully vaccinated
# train_df1 = df1_inner[['close', '확진자수', '일일확진자수', '1차접종완료', '접종완료']]
train_df2 = df2_inner[['close', '확진자수', '일일확진자수', '1차접종완료', '접종완료']]
# train_df3 = df3_inner[['close', '확진자수', '일일확진자수', '1차접종완료', '접종완료']]
    # Pearson correlation coefficients
# df1_corr = train_df1.corr(method='pearson')
df2_corr = train_df2.corr(method='pearson')
# df3_corr = train_df3.corr(method='pearson')
# print(df1_corr)
print(df2_corr)
# print(df3_corr)
    # Correlation heatmaps
# plt.rcParams['figure.figsize'] = [10,10]
#
# sns.heatmap(train_df1.corr(), annot=True, cmap='Blues', vmin=-1, vmax=1)
    # plt.title('Biotechnology - Alteogen', fontsize=15)
# plt.show()
plt.rcParams['figure.figsize'] = [10, 10]
sns.heatmap(train_df2.corr(), annot=True, cmap='Pastel1', vmin=-1, vmax=1)
    plt.title('Shipping company - HMM', fontsize=15)
plt.show()
# plt.rcParams['figure.figsize'] = [10, 10]
# sns.heatmap(train_df3.corr(), annot=True, cmap='Greens', vmin=-1, vmax=1)
# plt.title('생명과학도구및서비스 - 씨젠', fontsize=15)
# plt.show()
    # K-means, decision tree - stock price prediction
    # News crawling (Naver Finance? eBest? - for now, rate articles only as positive (1) or negative (0))
    # Additional consideration: extend beyond the three chosen stocks to other tickers?
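    # --- Editor's sketch: the same merge -> corr -> heatmap pipeline, standalone.
    # The analysis above needs the Kiwoom-backed stockdata module and corona data;
    # this hypothetical demo reproduces the pattern with synthetic columns
    # (the column names are assumptions) so it can run without those dependencies.
    def _example_corr_heatmap():
        import numpy as np
        rng = np.random.default_rng(0)
        n = 120
        demo = pd.DataFrame({
            'close': 30000 + rng.normal(0, 300, n).cumsum(),  # mock close prices
            'daily_cases': rng.integers(100, 3000, n),        # mock daily case counts
            'vaccinated': np.linspace(0.0, 0.8, n),           # mock vaccination rate
        })
        corr = demo.corr(method='pearson')  # same call as df2_corr above
        sns.heatmap(corr, annot=True, cmap='Pastel1', vmin=-1, vmax=1)
        plt.show()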
|
[
"qkfkadlemf@naver.com"
] |
qkfkadlemf@naver.com
|
e8b477d1afa64079ec213f67159d6fe7a9737249
|
782bb76a8f26874883baa0162a7e23f1b8bd8ae1
|
/img_conversion_using_cv2.py
|
46814bbae57413105d6888a3d7bae3b1197b9ac1
|
[] |
no_license
|
DivyaReddyNaredla/Python
|
a34c2259b1deaa74fbcc2c09eb7ab75a59b4b76c
|
a8f6f9315501979630e4d5be2a46b10a479736be
|
refs/heads/master
| 2022-10-20T07:56:37.873467
| 2020-06-05T05:42:12
| 2020-06-05T05:42:12
| 269,540,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 85
|
py
|
import cv2
img = "ibm.png"
img1 = cv2.imread(img)  # returns None if the file cannot be read
# cv2.imwrite infers the JPEG output format from the ".jpg" extension
cv2.imwrite("ibm2.jpg", img1)
|
[
"noreply@github.com"
] |
DivyaReddyNaredla.noreply@github.com
|
3475609803c5fec24d9602e8f2f214ff2e1146fa
|
0c66e605e6e4129b09ea14dbb6aa353d18aaa027
|
/diventi/products/migrations/0028_auto_20200119_1557.py
|
e26102391438dd63340bedc439d85503f7d4b02e
|
[
"Apache-2.0"
] |
permissive
|
flavoi/diventi
|
58fbc8c947f387cbcc1ce607878a59a6f2b72313
|
c0b1efe2baa3ff816d6ee9a8e86623f297973ded
|
refs/heads/master
| 2023-07-20T09:32:35.897661
| 2023-07-11T19:44:26
| 2023-07-11T19:44:26
| 102,959,477
| 2
| 1
|
Apache-2.0
| 2023-02-08T01:03:17
| 2017-09-09T14:10:51
|
Python
|
UTF-8
|
Python
| false
| false
| 521
|
py
|
# Generated by Django 2.2.8 on 2020-01-19 14:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0027_auto_20191217_0738'),
]
operations = [
migrations.AlterField(
model_name='product',
name='price',
field=models.PositiveIntegerField(default=0, help_text='This price must be valued in euro cents. For example: 500 for 5.00€, 120 for 1.20€ etc.', verbose_name='price'),
),
]
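# --- Editor's sketch (hypothetical helper, not part of the migration): it
# illustrates the euro-cents price convention described in the help_text above.
def _format_price(cents):
    return '%.2f EUR' % (cents / 100)  # e.g. 500 -> '5.00 EUR', 120 -> '1.20 EUR'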
|
[
"flavius476@gmail.com"
] |
flavius476@gmail.com
|
5ae8895b70d3c766d80a1f22a634ad71a70d012e
|
ab1d0fcd4900e0a88d49999cbbde4b06cc441e5d
|
/Labs/Lab 5/lab05_soln/raytracer_main.py
|
9cd89a7bb62e8dba71c76dd33c177a47aecd373e
|
[] |
no_license
|
ThomasMGilman/ETGG1803_ConceptsOf3DGraphicsAndMath
|
bf261b7ce16bb686e42b1a2600aa97b4f8984b65
|
fdf4e216b117769246154cd360b2c321f4581354
|
refs/heads/master
| 2020-03-29T23:14:05.715926
| 2018-09-26T17:18:25
| 2018-09-26T17:18:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,643
|
py
|
import raytracer
import objects3d
import time
import pygame
import math3d
caseNum = 2
# Pygame setup
if caseNum == 1:
win_width = 700; win_height = 150;
elif caseNum == 2:
win_width = 800; win_height = 600;
else:
win_width = 300; win_height = 200;
pygame.display.init()
screen = pygame.display.set_mode((win_width, win_height))
clock = pygame.time.Clock()
done = False
# Raytracer setup
if caseNum == 1:
cameraPos = math3d.VectorN(0, 0, -20)
cameraUp = math3d.VectorN(0, 1, 0)
cameraCoi = math3d.VectorN(0, 0, 0)
cameraNear = 3.2
cameraFov = 45.0
elif caseNum == 2:
cameraPos = math3d.VectorN(5, 7, -20)
cameraUp = math3d.VectorN(1, 10, 0).normalized()
cameraCoi = math3d.VectorN(2, 5, 3)
cameraNear = 1.5
cameraFov = 60.0
elif caseNum == 3:
cameraPos = math3d.VectorN(-5, 7, -30)
cameraUp = math3d.VectorN(0, 1, 0)
cameraCoi = math3d.VectorN(2, 5, 3)
cameraNear = 1.5
cameraFov = 60.0
camera = objects3d.Camera(cameraPos, cameraCoi, cameraUp, screen, cameraFov, cameraNear, True)
sphere1 = objects3d.Sphere(math3d.VectorN(2,5,3), 7.0, math3d.VectorN(1,0,0))
plane1 = objects3d.Plane(math3d.VectorN(0,1,0), 5.0, math3d.VectorN(0,1,0))
plane2 = objects3d.Plane(math3d.VectorN(0.1,1,0), 4.0, math3d.VectorN(0,0,1))
box1 = objects3d.AABB(math3d.VectorN(2, 9, -6), math3d.VectorN(8, 15, 0), math3d.VectorN(1,1,0))
#mesh1 = objects3d.Polymesh("sword.obj", math3d.VectorN(-10,8,3), 1.0, math3d.VectorN(1.0,0.3,0.8))
rt = raytracer.Raytracer(camera)
rt.addObject(sphere1)
rt.addObject(plane1)
rt.addObject(plane2)
rt.addObject(box1)
#rt.addObject(mesh1)
totalTime = 0.0
currentLine = 0
print("\n+==============================================+")
print("| PHASE II tests |")
print("+==============================================+")
if caseNum == 1:
testPts = [(0, 0), (win_width - 1, win_height - 1), (win_width // 2, win_height // 2), (113, 23), (623,83)]
else:
testPts = [(0, 0), (win_width - 1, win_height - 1), (win_width // 2, win_height // 2), (113, 542), (723,11)]
for pygamePos in testPts:
camera.getViewplanePosition(pygamePos[0], pygamePos[1], True)
# Game Loop
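# (One scanline is traced per loop iteration so the pygame window stays
# responsive while the image fills in progressively; clock.tick() below
# measures the per-line rendering time.)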
while not done:
# Update
if currentLine < win_height:
rt.renderOneLine(currentLine)
currentLine += 1
dt = clock.tick()
totalTime += dt
# Input
event = pygame.event.poll()
if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
done = True
# Draw (nothing to do!)
pygame.display.flip()
# Pygame shutdown
pygame.display.quit()
|
[
"Thomas.Gilman@ymail.com"
] |
Thomas.Gilman@ymail.com
|
98dedda4af53cd513aba1f2a0550e46f1b00586d
|
0afc304e4b808e729d62cdd9e7d9643a411d1677
|
/config/urls.py
|
6bc9ecf2264a784c50b80eec74046851ef8e28c5
|
[] |
no_license
|
lisa4930007/One
|
f9a85bd4e1b7c931f65f3bd64deaf4e79435465f
|
a225096fc7e2a3004a0f26049276613f03df7802
|
refs/heads/master
| 2023-05-14T04:48:47.321243
| 2021-04-22T13:36:17
| 2021-04-22T13:36:17
| 357,780,705
| 0
| 0
| null | 2021-06-05T02:29:32
| 2021-04-14T05:06:36
|
Python
|
UTF-8
|
Python
| false
| false
| 980
|
py
|
"""config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('django.contrib.auth.urls')),
path('accounts/', include('accounts.urls')),
path('social-auth/', include('social_django.urls', namespace='social')),
path('', include('pages.urls')),
]
|
[
"lisalai@gapp.nthu.edu.tw"
] |
lisalai@gapp.nthu.edu.tw
|
945eaa648ef3e5cb13a1bccac23e9c28d10512c0
|
5c8eaf49e53146601fefbfe3b8d40fa78c4bcd69
|
/pysot/toolkit/datasets/v4r.py
|
ddba28b8abb2b8d0dc411543182814f8da1c1782
|
[] |
no_license
|
Louis-Leee/Ad2Attack
|
c6116333d87fecd147a154425fa1c86e909dc508
|
900c0bdfb01a3c72d544b135cff8057d0680dcbb
|
refs/heads/master
| 2023-08-14T11:43:57.223463
| 2021-09-24T12:39:50
| 2021-09-24T12:39:50
| 409,960,220
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,653
|
py
|
import json
import os
import numpy as np
from PIL import Image
from tqdm import tqdm
from glob import glob
from .dataset import Dataset
from .video import Video
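# Build the V4RFlight112 meta-data dictionary: maps each sequence name to its
# image list and per-frame ground-truth boxes parsed from anno/&lt;name&gt;.txt
# (fields may be space-, comma-, or tab-separated).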
def ca():
path='/home/louis/Documents/V4RFlight112'
name_list=os.listdir(path+'/data_seq')
name_list.sort()
a=len(name_list)
b=[]
for i in range(a):
b.append(name_list[i])
c=[]
for jj in range(a):
imgs=path+'/data_seq/'+str(name_list[jj])
txt=path+'/anno/'+str(name_list[jj])+'.txt'
bbox=[]
        f = open(txt)  # open the annotation file (returns a file object)
file= f.readlines()
li=os.listdir(imgs)
li.sort()
for ii in range(len(file)):
            try:
                li[ii] = name_list[jj] + '/' + li[ii]
            except IndexError:
                pass  # more annotation lines than images; skip the missing frame
line = file[ii].strip('\n').split(' ')
if len(line)!=4:
line = file[ii].strip('\n').split(',')
if len(line)!=4:
line = file[ii].strip('\n').split('\t')
try:
line[0]=int(line[0])
except:
line[0]=float(line[0])
try:
line[1]=int(line[1])
except:
line[1]=float(line[1])
try:
line[2]=int(line[2])
except:
line[2]=float(line[2])
try:
line[3]=int(line[3])
except:
line[3]=float(line[3])
bbox.append(line)
if len(bbox)!=len(li):
print (jj)
f.close()
c.append({'attr':[],'gt_rect':bbox,'img_names':li,'init_rect':bbox[0],'video_dir':name_list[jj]})
d=dict(zip(b,c))
return d
class UAVVideo(Video):
"""
Args:
name: video name
root: dataset root
video_dir: video directory
init_rect: init rectangle
img_names: image names
gt_rect: groundtruth rectangle
attr: attribute of video
"""
def __init__(self, name, root, video_dir, init_rect, img_names,
gt_rect, attr, load_img=False):
super(UAVVideo, self).__init__(name, root, video_dir,
init_rect, img_names, gt_rect, attr, load_img)
class V4RDataset(Dataset):
"""
Args:
name: dataset name, should be 'UAV123', 'UAV20L'
dataset_root: dataset root
load_img: wether to load all imgs
"""
def __init__(self, name, dataset_root, load_img=False):
super(V4RDataset, self).__init__(name, dataset_root)
meta_data = ca()
# load videos
pbar = tqdm(meta_data.keys(), desc='loading '+name, ncols=100)
self.videos = {}
for video in pbar:
pbar.set_postfix_str(video)
self.videos[video] = UAVVideo(video,
dataset_root,
meta_data[video]['video_dir'],
meta_data[video]['init_rect'],
meta_data[video]['img_names'],
meta_data[video]['gt_rect'],
meta_data[video]['attr'])
# set attr
attr = []
for x in self.videos.values():
attr += x.attr
attr = set(attr)
self.attr = {}
self.attr['ALL'] = list(self.videos.keys())
for x in attr:
self.attr[x] = []
for k, v in self.videos.items():
for attr_ in v.attr:
self.attr[attr_].append(k)
|
[
"17721296636@163.com"
] |
17721296636@163.com
|
652f83eed911d77706a697850b58219dc87667dd
|
cd86185c0370a641f6c5f9ef20b0be56d9eb60bf
|
/docs/conf.py
|
575bb97827b864c0b46eac3ddcd5d7ad576c6831
|
[
"MIT"
] |
permissive
|
lesamouraipourpre/CircuitPython_Candlesticks
|
f61a7622d91aab3e85ab1aa40f342279d2a98a21
|
70057ffd12c5a2064d94c637b1fcc113a3202128
|
refs/heads/main
| 2023-06-16T04:55:29.135765
| 2021-07-05T13:57:45
| 2021-07-05T13:57:45
| 385,964,651
| 0
| 0
|
MIT
| 2021-07-14T14:13:54
| 2021-07-14T14:13:53
| null |
UTF-8
|
Python
| false
| false
| 5,645
|
py
|
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2017 Scott Shawcroft, written for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
]
# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
autodoc_mock_imports = ["displayio", "vectorio"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3.4", None),
"CircuitPython": ("https://circuitpython.readthedocs.io/en/latest/", None),
}
# Show the docstring from both the class and its __init__() method.
autoclass_content = "both"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = " CircuitPython Candlesticks Library"
copyright = "2021 Jose David"
author = "Jose David"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = [
"_build",
"Thumbs.db",
".DS_Store",
".env",
"CODE_OF_CONDUCT.md",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
    except ImportError:
html_theme = "default"
html_theme_path = ["."]
else:
html_theme_path = ["."]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = "_static/favicon.ico"
# Output file base name for HTML help builder.
htmlhelp_basename = "CircuitPython_CandlesticksLibrarydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"CircuitPython_CandlesticksLibrary.tex",
"CircuitPython Candlesticks Library Documentation",
author,
"manual",
),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"CircuitPython_CandlesticksLibrary",
"CircuitPython Candlesticks Library Documentation",
[author],
1,
),
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"CircuitPython_CandlesticksLibrary",
"CircuitPython Candlesticks Library Documentation",
author,
"CircuitPython_CandlesticksLibrary",
"One line description of project.",
"Miscellaneous",
),
]
|
[
"jquintana202020@gmail.com"
] |
jquintana202020@gmail.com
|
2620861cf54ff2d1ae44aed67d936dd807aed01f
|
f6e561772157156db860284aac46c2a596686a4d
|
/PyQt4 IDE/microhope_lib/__init__.py
|
e75a29d31a77730446531eebe0e3b4e0ba82b5c1
|
[] |
no_license
|
JosekuttyMJ/microHOPE
|
5cb1c9fda4ff353caa34c915e7ac1390fe52aa1d
|
fce58402bf5fbf4fc6ed980bb7721823a431174f
|
refs/heads/master
| 2020-12-03T09:33:38.599012
| 2015-01-28T06:56:11
| 2015-01-28T06:56:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27
|
py
|
__all__ = ["mh_latest_ui"]
|
[
"arunjayan32@gmail.com"
] |
arunjayan32@gmail.com
|
af65bdd5edd78745f8c63e9d570bc61d19f4926b
|
9e5977a915b669c62085c0c5d806a27e7c7481d7
|
/advent_of_code/2021/day15/main.py
|
73a4d117398067dda20f099d6ff5c3e56ae69004
|
[] |
no_license
|
antoinemadec/test
|
f9ddecc6c045170e3dfa63c6a230bc3939c8cf02
|
c0865b71fc167fc0085fd8e564628753d9aa0b94
|
refs/heads/master
| 2023-08-02T20:44:57.288423
| 2023-07-26T19:29:41
| 2023-07-26T19:29:41
| 93,432,340
| 1
| 0
| null | 2023-04-18T13:59:41
| 2017-06-05T18:01:16
|
Verilog
|
UTF-8
|
Python
| false
| false
| 2,125
|
py
|
#!/usr/bin/env python3
class Pos:
pos_dict = {}
def __init__(self, pos, weight):
self.pos = pos
self.weight = weight
self.lowest_risk = None
self.pos_dict[pos] = self
def get_ripple_positions(size, i, reverse=False):
positions = []
for x in range(size):
if x > i:
positions.append((i, x))
for x in range(size):
if x > i:
positions.append((x, i))
positions.reverse()
positions.append((i, i))
if reverse:
positions.reverse()
return positions
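# Dynamic-programming pass: sweep anti-diagonal "ripples" in from the
# bottom-right corner, so each cell's lowest_risk only depends on its
# already-computed right/down neighbours.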
def compute_lowest_risks(d, size):
for i in range(size):
for pos in get_ripple_positions(size, (size-i)-1):
point = d[pos]
risks = []
# FIXME: missing up case when on left, and left case when on top
            for di, dj in [(0, 1), (1, 0)]:  # right and down neighbours
                x, y = pos[0]+di, pos[1]+dj
if (x, y) not in d:
continue
risks.append(d[(x, y)].lowest_risk)
if len(risks) == 0:
point.lowest_risk = point.weight
else:
point.lowest_risk = min(risks) + point.weight
def build_meta_grid(lines, meta_grid_size, size):
for meta_i in range(meta_grid_size):
for meta_pos in get_ripple_positions(meta_grid_size, meta_i, reverse=True):
for i, row in enumerate(lines):
for j, weight in enumerate(row):
x = i + meta_pos[0]*size
y = j + meta_pos[1]*size
weight += meta_pos[0] + meta_pos[1]
if weight > 9:
weight -= 9
Pos((x, y), weight)
def part12(lines, p2=False):
size = len(lines)
meta_grid_size = (1, 5)[p2]
build_meta_grid(lines, meta_grid_size, size)
compute_lowest_risks(Pos.pos_dict, size * meta_grid_size)
return Pos.pos_dict[(0, 0)].lowest_risk - Pos.pos_dict[(0, 0)].weight
with open('input.txt', 'r') as f:
lines = [[int(c) for c in l.strip()] for l in f.readlines()]
print(part12(lines))
print(part12(lines, True))
|
[
"ajamadec@gmail.com"
] |
ajamadec@gmail.com
|
6b78cdf809dea0a3f7a9ae6213b26cbed2fb32f6
|
d411624ab2f176b9b475c91af4eaeca30b309723
|
/wordcount/settings.py
|
df82733a196449d6c1b46185be029f5f9398407e
|
[] |
no_license
|
sumanta-ghosh/wordcount
|
bcb8e4c0f1629a0960066b3afe8d62c741b864f8
|
65bc5844338b97da12e33e78cd4452597f87c65b
|
refs/heads/master
| 2021-01-04T18:34:13.167015
| 2020-02-15T12:58:34
| 2020-02-15T12:58:34
| 240,711,119
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,097
|
py
|
"""
Django settings for wordcount project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yhl)dpm7y0+ow17i#8o()@-r&)9v^7_)o&djz-v1b$@i(++a9#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'wordcount.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wordcount.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"sumanta.ghosh.mit@gmail.com"
] |
sumanta.ghosh.mit@gmail.com
|
30591b5a81ad213a23a3274b5b6e3b178d939eb7
|
578ff132c94d9f423951f16168e18de5e845d50e
|
/worktestauthproblem/models/permissions.py
|
ea142b192197c9c989466617beb8440690bd2c84
|
[] |
no_license
|
shsaraswat06/application-security-toptracer
|
fe7b27778d7f8a1ed3bef4ef851b9774dea9040c
|
a1452d9d1db7c05e9d2feb766bcc2b46a00b2bf9
|
refs/heads/master
| 2023-06-10T07:07:25.324402
| 2021-06-30T10:19:42
| 2021-06-30T10:19:42
| 381,659,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 819
|
py
|
from .orm import Orm
class Permissions:
def __init__(self, service_name):
self.service_name = service_name
self.resource = 'permissions'
self.orm = Orm(resource_name=self.resource)
self.permissions = self.orm.get()
def get_permissions(self):
permissions = next(
(
permission['permissions']
for permission in self.permissions
if permission["service"] == self.service_name
),
None,
)
return permissions
def add_permission(self, service_name, applications):
permissions = [{app: {'can-access-data': True}} for app in applications]
permissions_data = {'service': service_name, 'permissions': permissions}
self.orm.create(data=permissions_data)
|
[
"shubham.saraswat@oneflow.com"
] |
shubham.saraswat@oneflow.com
|
7c4343aef9f3c59310652592b24a9c9ec8231a1c
|
b9e715b6a6ec1ade7f5e73e4b1f93b2425ca6cf0
|
/gemini-FT-proposal-5-2017/get_galex_cutouts.py
|
5ed3deea3550fa8b6378e8f19d3c9853f5449223
|
[] |
no_license
|
johnnygreco/notebooks
|
1449bffbb17ac2e7c52713fadfe12a69c33a6fc5
|
fb9e56d1f0f217287c3b2a23c26f89c53055db4f
|
refs/heads/master
| 2021-01-17T07:49:06.672665
| 2017-09-29T23:10:07
| 2017-09-29T23:10:07
| 59,698,038
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
import sys
sys.path.append('../../../projects')
import cutouts
import hhana
from astropy.table import Table
galex = Table.read('data/gemini-ft-sample.csv')
targets = [139, 118, 230, 27]
for ID in targets:
prefix = 'figures/hugs-'+str(ID)
ra, dec = galex[galex['hugs-id']==ID]['MatchRA', 'MatchDEC'][0]
cutouts.galex.getGalexCutout(
ra, dec, size=35, name=prefix, label_survey=False)
|
[
"jgreco@astro.princeton.edu"
] |
jgreco@astro.princeton.edu
|
2f944d70bb962407accb344b9ede5c40b2f7a3a7
|
cd7166c7b6332b4c331f9bb10986d7af298a605a
|
/Config.py
|
5c50cb5ed342bf9368be64c712354e695b96d045
|
[] |
no_license
|
tuanquanghpvn/socket4py
|
a795ff3bbb64e41f72e42a619d70165fc77084fe
|
53e21524cfd97010ccbd0971b1b969467d09e193
|
refs/heads/master
| 2020-05-18T00:49:59.976982
| 2015-03-11T06:23:05
| 2015-03-11T06:23:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 95
|
py
|
__author__ = 'JsAndPy'
class Config:
# Init Socket
HOST = 'localhost'
PORT = 2222
|
[
"tuanquanghpvn@gmail.com"
] |
tuanquanghpvn@gmail.com
|
8792fc4efc127501c899b6e063144b028ff7beab
|
6071a7d86d3a6ad2a4253278b5b79d3c59c3849c
|
/bitcodin/test/output/testcase_get_s3_output.py
|
998de6608ed7f6a05c7b3d054d2056660245b711
|
[
"Unlicense"
] |
permissive
|
pavanpalli/bitcodin-python
|
a3145aae4e4bd6600611401b834f76d9e62df917
|
53b534dfb9cb23d9fb409e38cb4207277aee957f
|
refs/heads/master
| 2020-12-25T05:53:57.483768
| 2015-08-05T06:49:28
| 2015-08-05T06:49:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,999
|
py
|
__author__ = 'Dominic Miglar <dominic.miglar@bitmovin.net>'
import unittest
from bitcodin import create_output
from bitcodin import delete_output
from bitcodin import get_output
from bitcodin import S3Output
from bitcodin.test.settings import aws_config
from bitcodin.test.bitcodin_test_case import BitcodinTestCase
class GetS3OutputTestCase(BitcodinTestCase):
def setUp(self):
super(GetS3OutputTestCase, self).setUp()
self.s3_configuration = {
'name': 'Python API Test Output',
'host': aws_config.get('host', None),
'access_key': aws_config.get('access_key', None),
'secret_key': aws_config.get('secret_key', None),
'bucket': aws_config.get('bucket', None),
'prefix': aws_config.get('prefix', None),
'region': aws_config.get('region', None),
'make_public': False
}
output = S3Output(
access_key=self.s3_configuration.get('access_key'),
secret_key=self.s3_configuration.get('secret_key'),
name=self.s3_configuration.get('name'),
host=self.s3_configuration.get('host'),
bucket=self.s3_configuration.get('bucket'),
prefix=self.s3_configuration.get('prefix'),
region=self.s3_configuration.get('region'),
make_public=self.s3_configuration.get('make_public')
)
self.output = create_output(output)
def runTest(self):
output = get_output(self.output.output_id)
        self.assertEqual(self.output.name, output.name)
        self.assertEqual(self.output.bucket, output.bucket)
        self.assertEqual(self.output.prefix, output.prefix)
        self.assertEqual(self.output.region, output.region)
        self.assertEqual(self.output.make_public, output.make_public)
def tearDown(self):
delete_output(self.output.output_id)
super(GetS3OutputTestCase, self).tearDown()
if __name__ == '__main__':
unittest.main()
|
[
"dominic.miglar@w1r3.net"
] |
dominic.miglar@w1r3.net
|
b444b19a3bf1da0d3d18e4fd044a8a7ef7641a4a
|
3e607e43f264a4020c010422f0b2f9919c298033
|
/start.py
|
1fdf1a1cb2c657528449253971922a2165d8f301
|
[] |
no_license
|
MagnusBordewich/Pibot
|
a74529371d93950bdecc9e73aa4254038a443a74
|
484dc97d5b8cf7f770cd6dabd1896cf29ba1564f
|
refs/heads/master
| 2021-01-10T16:17:21.072658
| 2016-03-01T12:48:34
| 2016-03-01T12:48:34
| 52,875,832
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,976
|
py
|
import numpy as np
import copy
import cv2
import time
import os
from BrickPi import * #import BrickPi.py file to use BrickPi operations
import threading
os.system('sudo modprobe bcm2835-v4l2')
side = 0
w=80
h=60
turning_rate = 60
running = True
time_limit=60
startTime=time.time()
BrickPiSetup() # setup the serial port for communication
BrickPi.MotorEnable[PORT_A] = 1 #Enable the Motor A
BrickPi.MotorEnable[PORT_D] = 1 #Enable the Motor D
#This thread is used for keeping the motors running
class myThread (threading.Thread):
def __init__(self, threadID, name, counter):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.counter = counter
def run(self):
while running and (time.time()<startTime+time_limit):
BrickPiUpdateValues() # Ask BrickPi to update values for senso
# time.sleep(.1) # sleep for 200 ms
thread1 = myThread(1, "Thread-1", 1) #Setup and start the thread
thread1.setDaemon(True)
thread1.start()
# This sets up the video capture
cap = cv2.VideoCapture(0)
cap.set(3,w)
cap.set(4,h)
time.sleep(2)
#cap.set(15,-80.0)
seeking=True
seekTime=time.time()
# Main loop
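# Track the largest pink blob: threshold in HSV, clean the mask with
# erode/dilate, then steer toward the blob centroid; if the blob is lost,
# spin toward the side it was last seen on for up to 10 seconds.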
while time.time() < startTime + time_limit:
try:
found = False
ret, image = cap.read()
image = cv2.flip(image,-1)
#image2 = copy.deepcopy(image)
#image2 = cv2.cvtColor(image2,cv2.COLOR_RGB2BGR)
binary = cv2.GaussianBlur(image,(5,5),0)
binary = cv2.cvtColor(binary,cv2.COLOR_BGR2HSV)
lower_pink = np.array([164,50,50])
upper_pink = np.array([176,255,255])
kernel = np.ones((5,5),np.uint8)
mask = cv2.inRange(binary,lower_pink,upper_pink)
mask = cv2.erode(mask,kernel,iterations=1)
mask = cv2.dilate(mask,kernel,iterations=1)
contours, hierarchy = cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
blob_x = w/2
area = 0
if len(contours)>0:
largest = 0
area = cv2.contourArea(contours[0])
if len(contours)>1:
for i in range(1,len(contours)):
temp_area = cv2.contourArea(contours[i])
if temp_area>area:
area=temp_area
largest = i
if area > 100:
found=True
coords = cv2.moments(contours[largest])
blob_x = int(coords['m10']/coords['m00'])
#blob_y = int(coords['m01']/coords['m00'])
#diam = int(np.sqrt(area)/4)
#cv2.circle(image,(blob_x,blob_y),diam,(0,255,0),1)
#cv2.line(image,(blob_x-2*diam,blob_y),(blob_x+2*diam,blob_y),(0,255,0),1)
#cv2.line(image,(blob_x,blob_y-2*diam),(blob_x,blob_y+2*diam),(0,255,0),1)
#cv2.drawContours(image,contours,largest,(255,0,0),3)
if not found:
if seeking == False:
L_motor_speed=0
R_motor_speed=0
else:
if time.time()<seekTime+10:
if side == -1:
L_motor_speed=-70
R_motor_speed=70
else:
L_motor_speed=70
R_motor_speed=-70
else:
seeking=False
else:
seeking=True
seekTime=time.time()
direction = blob_x -w/2
if direction <0:
side = -1
else:
side = 1
L_motor_speed=220*side*direction/w
R_motor_speed=-220*side*direction/w
BrickPi.MotorSpeed[PORT_A] = L_motor_speed
BrickPi.MotorSpeed[PORT_D] = R_motor_speed
found = False
except KeyboardInterrupt:
break
key_pressed = cv2.waitKey(33)
if key_pressed ==27:
break
cap.release()
|
[
"m.j.r.bordewich@durham.ac.uk"
] |
m.j.r.bordewich@durham.ac.uk
|
55e82a61b41e201ad163f255e9179fc6a9b7ce58
|
a4c3eb6ce6e1a04d2d4909a808e61fe22d6c7817
|
/Supplier/migrations/0033_auto_20200826_1232.py
|
38233fdf5ea709c812d7344e611f1c9b6598257e
|
[] |
no_license
|
AIMsnet/Backend
|
2522d192ef1e1a95e49a7d4c0b3a83c77660dcf1
|
42a32850a88eebd038df3fee47f94309c76e005a
|
refs/heads/master
| 2023-02-22T13:51:54.531918
| 2021-01-28T09:08:44
| 2021-01-28T09:08:44
| 267,277,083
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,212
|
py
|
# Generated by Django 3.0.6 on 2020-08-26 07:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Supplier', '0032_auto_20200825_1354'),
]
operations = [
migrations.AddField(
model_name='product',
name='description_eight',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='product',
name='description_five',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='product',
name='description_four',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='product',
name='description_nine',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='product',
name='description_one',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='product',
name='description_seven',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='product',
name='description_six',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='product',
name='description_ten',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='product',
name='description_three',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='product',
name='description_two',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='product',
name='specification_eight',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='product',
name='specification_five',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='product',
name='specification_four',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='product',
name='specification_nine',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='product',
name='specification_one',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='product',
name='specification_seven',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='product',
name='specification_six',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='product',
name='specification_ten',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='product',
name='specification_three',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='product',
name='specification_two',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
]
|
[
"hasnainshaikh31@gmail.com"
] |
hasnainshaikh31@gmail.com
|
c0c758ec3f45045fd732d1505955fd973d3253de
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/abc036/D/4119191.py
|
5214b136ffa3fbda10cfeb4ddda4f643d5080a9d
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,269
|
py
|
import sys
stdin = sys.stdin
sys.setrecursionlimit(10**5)
def li(): return map(int, stdin.readline().split())
def li_(): return map(lambda x: int(x)-1, stdin.readline().split())
def lf(): return map(float, stdin.readline().split())
def ls(): return stdin.readline().split()
def ns(): return stdin.readline().rstrip()
def lc(): return list(ns())
def ni(): return int(stdin.readline())
def nf(): return float(stdin.readline())
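# Tree DP: dfs counts the black/white colourings of each subtree in which
# no two adjacent vertices are both black, returning the pair
# (total colourings, colourings where cur is white), modulo mod.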
def dfs(graph:list, par:int, cur:int, mod:int):
children = []
for child in graph[cur]:
if child == par:
continue
children.append(child)
if len(children) == 0:
return 2, 1
else:
topall = 1
topwht = 1
for child in children:
topallchild, topwhtchild = dfs(graph, cur, child, mod)
topwht *= topallchild
topwht %= mod
topall *= topwhtchild
topall %= mod
return (topall+topwht)%mod, topwht
n = ni()
graph = [[] for _ in range(n)]
MOD = 10**9+7
for _ in range(n-1):
a,b = li_()
graph[a].append(b)
graph[b].append(a)
ans, _ = dfs(graph, 0, 0, MOD)
print(ans)
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
60597c73a482cbddf64360493f4c0f493476a87e
|
8b0b2711dd95943a3623e6cdd63a4bc4aa906cb0
|
/app/config.py
|
141f98655d89fa601206eb482c4cf3564399780f
|
[] |
no_license
|
raycursive/overwatch_stats
|
18a71934cbf1bb219e807978aafe5fb98ee9fafe
|
99f95f84548846336c3d0449a41a68dc34e98102
|
refs/heads/master
| 2020-06-22T14:36:12.302372
| 2019-07-19T08:26:00
| 2019-07-19T08:26:00
| 197,730,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 49
|
py
|
MONGO_URL = 'mongodb://127.0.0.1:27017/ow_stats'
|
[
"m@raycursive.com"
] |
m@raycursive.com
|
12b7d32876e7618f91c79671a447d6a02a6f5d9a
|
4be92d2ea9ede0084f31cda1f51acdadf1392043
|
/dags/src/authorization.py
|
0672971540249f899e58b98d999d9e7947038a58
|
[] |
no_license
|
rohitamale18/airflow_demo
|
3c5a55d4d40197090e42f2081577f746abb1dc96
|
6c6eca90c148179f577483258a4efa9f85e8af67
|
refs/heads/master
| 2022-12-07T12:02:27.390489
| 2020-07-28T19:08:07
| 2020-07-28T19:08:07
| 283,300,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,038
|
py
|
import os
from src.logger import get_logger
import boto3
from airflow.models import Variable
logger = get_logger(__name__)
class Authorization(object):
def __init__(self) -> None:
self.value = None
def set_key(self, key):
self.value = Variable.get(key)
def get_key(self):
return self.value
@staticmethod
def validate_env_existence(key) -> bool:
if not Variable.get(key):
logger.info("Environment variable, " + key + " not present in system")
return False
else:
return True
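# AWSClient reads AWS credentials from Airflow Variables, opens a boto3
# session in us-east-1 and exposes a Firehose client.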
class AWSClient(Authorization):
    def __init__(self) -> None:
        super().__init__()
        self.access_key_value = None
self.secret_access_key_value = None
self.client = None
self.aws_session = boto3.Session
self.access_key_name = "AWS_ACCESS_KEY_ID"
self.secret_access_key_name = "AWS_SECRET_ACCESS_KEY"
self.get_aws_client()
def set_aws_keys(self) -> None:
if self.validate_env_existence(self.access_key_name):
self.set_key(self.access_key_name)
self.access_key_value = self.get_key()
if self.validate_env_existence(self.secret_access_key_name):
self.set_key(self.secret_access_key_name)
self.secret_access_key_value = self.get_key()
def set_aws_session(self) -> None:
try:
self.aws_session = boto3.Session(aws_access_key_id=self.access_key_value,
aws_secret_access_key=self.secret_access_key_value,
region_name="us-east-1")
except Exception as e:
logger.error("Failed to set AWS Session. Error: " + str(e))
def get_aws_client(self):
self.set_aws_keys()
self.set_aws_session()
try:
self.client = self.aws_session.client('firehose')
except Exception as e:
logger.info("Exception occurred while initiating AWS Connection: " + str(e))
|
[
"rohit.amale@woodmac.com"
] |
rohit.amale@woodmac.com
|
fb3af5de3c27d3150ece8709175402b61b6aeb73
|
08dcd0424d8ea0ae357d75b69a25c4b90653dec7
|
/DRP/migrations/0025_auto_20160524_1613.py
|
17dcd2cca4980f60fc0f8d0c5fc94471283384ed
|
[] |
no_license
|
zhaojhao/DRP
|
fb2a834ce8297855bd8e74e41140940db5501b68
|
eae2009eadf87ffd2378233f3e153d385f4654d2
|
refs/heads/master
| 2021-01-18T10:33:26.395991
| 2016-06-20T17:31:50
| 2016-06-20T17:31:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,496
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('DRP', '0024_auto_20160512_1658'),
]
operations = [
migrations.AlterField(
model_name='performedreaction',
name='convertedLegacyRef',
field=models.CharField(blank=True, max_length=40, null=True, validators=[django.core.validators.RegexValidator(
b'^[a-z0-9._]*[a-z][a-z0-9._]*$', b'Please include only values which are limited to alphanumeric characters, underscores, periods, and must include at least one alphabetic character.')]),
),
migrations.AlterField(
model_name='performedreaction',
name='performedDateTime',
field=models.DateTimeField(default=None, help_text=b'Timezone assumed EST, Date in format YYYY-MM-DD',
null=True, verbose_name=b'Date Reaction Performed', blank=True),
),
migrations.AlterField(
model_name='performedreaction',
name='reference',
field=models.CharField(max_length=40, validators=[django.core.validators.RegexValidator(
b'^[a-z0-9\\._]*[a-z][a-z0-9\\._]*$', b'Please include only values which are limited to alphanumeric characters, underscores, periods, and must include at least one alphabetic character.')]),
),
]
|
[
"padler1@haverford.edu"
] |
padler1@haverford.edu
|
f58c19c5218fc279438b07e3ca1976d176013a3a
|
2868a3f3bca36328b4fcff5cce92f8adeb25b033
|
/+100ns/Co_optimized/step1_dc/set.py
|
25b40663a2257d720ef9bd0d368b0791db804c94
|
[] |
no_license
|
linfranksong/TM-enzyme_input
|
1c2a5e12e69c48febd5b5900aa00fe2339d42298
|
6e46a5b2c451efb93761707b77917a98ca0bfedc
|
refs/heads/master
| 2022-03-19T19:49:09.373397
| 2019-12-04T00:11:59
| 2019-12-04T00:11:59
| 205,220,795
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,131
|
py
|
import os
dir = os.path.dirname(os.path.realpath(__file__)) + '/'
#for a in [150,200,250,300,350,400,450,500,550,600]:
for a in [150]:
#for a in [200,250,300,350,400,450,500,550,600]:
os.system("rm -r %s_dc_repe"%(a))
os.system("cp -r temp/ %s_dc_repe"%(a))
adir=dir+ "%s_dc_repe/"%(a)
os.chdir(adir)
os.system("sed -i 's/MMM/%s/g' */*pbs"%(a))
array= [0,0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078,1.0]
for n in range(1,len(array)-1):
i=array[n]
os.system("rm -r %s"%(i))
os.system("cp -r files %s"%(i))
wdir=adir+"%s/"%(i)
os.chdir(wdir)
os.system("mv eq.in %s_eq.in"%(i))
os.system("mv us.in %s_us.in"%(i))
os.system("sed -i 's/XXX/%s/g' %s_eq.in"%(i,i))
os.system("sed -i 's/XXX/%s/g' %s_us.in"%(i,i))
os.system("sed -i 's/MMM/%s/g' dis.RST"%(a))
os.system("mv eq.pbs %s_eq.pbs"%(i))
os.system("mv us.pbs %s_us.pbs"%(i))
os.system("sed -i 's/XXX/%s/g' *.pbs"%(i))
os.system("sed -i 's/NNN/%s/g' *.pbs"%(array[n+1]))
os.system("sed -i 's/PPP/%s/g' *.pbs"%(array[n-1]))
os.chdir(adir)
sdir=adir+"0/"
os.chdir(sdir)
i=0
os.system("cp /mnt/gs18/scratch/users/songlin3/run/glx-0904/+100ns/Co_optimized/step0_fep/%s_fep/1.0/%s_1.0_eq_center.rst ."%(a,a))
os.system("mv eq.in %s_eq.in"%(i))
os.system("mv us.in %s_us.in"%(i))
os.system("sed -i 's/XXX/%s/g' %s_eq.in"%(i,i))
os.system("sed -i 's/XXX/%s/g' %s_us.in"%(i,i))
os.system("mv eq.pbs %s_eq.pbs"%(i))
os.system("mv us.pbs %s_us.pbs"%(i))
os.system("sed -i 's/XXX/%s/g' *.pbs"%(i))
os.system("sed -i 's/MMM/%s/g' dis.RST"%(a))
os.system("sbatch 0_eq.pbs")
sdir=adir+"1.0/"
os.chdir(sdir)
i=1.0
os.system("mv eq.in %s_eq.in"%(i))
os.system("mv us.in %s_us.in"%(i))
os.system("sed -i 's/XXX/%s/g' %s_eq.in"%(i,i))
os.system("sed -i 's/XXX/%s/g' %s_us.in"%(i,i))
os.system("mv eq.pbs %s_eq.pbs"%(i))
os.system("mv us.pbs %s_us.pbs"%(i))
os.system("sed -i 's/XXX/%s/g' *.pbs"%(i))
os.system("sed -i 's/MMM/%s/g' dis.RST"%(a))
os.system("sed -i 's/MMM/%s/g' center.in"%(a))
os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
ffbebed7bde154343f092f82297bf03461a5e933
|
d1a118e40bd725a4ceac57abefed5e2d69b6bc9d
|
/request_sign/signature.py
|
b10cf7a02495973e9b19861087dc2ae34129400a
|
[] |
no_license
|
yongzhengw/django-request-sign
|
a9cc8f4b4f9940f3dea8bdd10f407305a15b97df
|
812aa26ea8a40db8d2820f42a52f1c05e363a61d
|
refs/heads/master
| 2023-07-17T05:17:24.863434
| 2021-08-17T09:40:14
| 2021-08-17T09:40:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,249
|
py
|
"""
@author: liyao
@contact: liyao2598330@126.com
@time: 2020/8/14 4:39 PM
"""
import json.decoder
import re
import hashlib
import logging
from datetime import datetime
from urllib.parse import unquote
from django.core.cache import cache
from request_sign.settings import (
SIGNATURE_METHOD,
SIGNATURE_SECRET,
SIGNATURE_ALLOW_TIME_ERROR,
NONCE_CACHE_KEY,
SIGNATURE_PASS_URL,
SIGNATURE_DEBUG,
SIGNATURE_PASS_URL_REGULAR
)
DELETE_KEY_MAP = [[], {}, None, '']
logger = logging.getLogger("default")
def signature_parameters(nonce: str, parameters: list):
parameters_str = ''.join(re.findall(r"[A-Za-z0-9]", "".join(parameters) + SIGNATURE_SECRET)) + \
nonce
    # sort parameter names by ASCII code, ascending
parameters_sort = "".join(sorted(list(parameters_str))).split("_")
parameters_sort[0], parameters_sort[1] = parameters_sort[1], parameters_sort[0]
    # MD5 digest
m = hashlib.md5()
m.update("".join(parameters_sort).encode('UTF-8'))
return m.hexdigest()
def check_pass_url_regular(path):
for r in SIGNATURE_PASS_URL_REGULAR:
if re.search(r, path):
return True
return False
class Log:
def __init__(self, debug=False):
self.debug = debug
self.prefix = "#MIDDLEWARE# request_sign -> "
def info(self, message):
if self.debug:
logger.info(self.prefix + message)
def error(self, message):
if self.debug:
logger.error(self.prefix + message)
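# The client sends three headers: T (timestamp), N (nonce) and S (signature).
# Replays are rejected via the nonce cache and stale timestamps, then the
# signature is recomputed from the request parameters and compared to S.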
def check_signature(request):
"""
    Check whether the request signature is valid
"""
if request.method.lower() not in SIGNATURE_METHOD or \
request.path in SIGNATURE_PASS_URL or \
check_pass_url_regular(request.path):
return True
timestamp = request.META.get("HTTP_T")
nonce = request.META.get("HTTP_N")
sign = request.META.get("HTTP_S")
log = Log(debug=SIGNATURE_DEBUG)
log.info(("timestamp, nonce, sign=[%s, %s, %s]" % (timestamp, nonce, sign)))
if not all([timestamp, nonce, sign]):
log.error("required parameter missing, no pass")
return False
    # check that the cache backend is usable
if hasattr(cache, 'get') and hasattr(cache, 'set'):
if cache.get(NONCE_CACHE_KEY.format(nonce=nonce)):
log.error("nonce:%s repeat, no pass" % nonce)
return False
else:
cache.set(NONCE_CACHE_KEY.format(nonce=nonce), True, 300)
try:
timestamp = int(timestamp)
except:
log.error("timestamp format error, no pass")
return False
now_timestamp = datetime.now().timestamp()
if (now_timestamp - SIGNATURE_ALLOW_TIME_ERROR) > timestamp or timestamp > (
now_timestamp + SIGNATURE_ALLOW_TIME_ERROR):
log.error("request timestamp expired, not pass")
return False
get_parameters = request.GET.dict()
post_parameters = request.POST.dict()
try:
body_parameters = json.loads(request.body.decode("utf-8")) if request.body else None
except json.decoder.JSONDecodeError:
body_parameters = None
log.info(
"get_parameters, post_parameters, body_parameters=[%s, %s, %s]" % (
get_parameters, post_parameters, body_parameters
)
)
parameters = handle_parameter(get_parameters,
post_parameters, body_parameters, str(timestamp))
log.info("after parameters process: %s" % parameters)
result = signature_parameters(nonce, parameters)
log.info("get sign:%s, origin:%s -> %s" % (
result, sign, result == sign
))
return sign == result
def handle_parameter(get_parameters, post_parameters, body_parameters, timestamp) -> list:
parameter_list = []
for p in [get_parameters, post_parameters, body_parameters, timestamp]:
if isinstance(p, dict):
t = {}
for key, value in p.items():
if value not in DELETE_KEY_MAP:
t[key] = value
parameter_list.append(json.dumps(t))
elif isinstance(p, str):
parameter_list.append(unquote(p))
elif isinstance(p, bytes):
parameter_list.append(str(p, encoding="utf-8"))
return parameter_list
|
[
"liyao2598330@126.com"
] |
liyao2598330@126.com
|
eb04f67d1f28d2896c4f7cf56d21b890101b979e
|
dea576f4d05f91eba81c31a0a98f2d2bad93a7a5
|
/pandasDataFrame.py
|
2e5678ab5cf10173f09fcbf0b0effc83e5943765
|
[] |
no_license
|
bastianposey/PANDAS
|
f4d896500dcdb3cfb319fae1c1ffc7387c91a74a
|
26a59bcda0c9ed91f492f7a06ba279191cfc3af7
|
refs/heads/master
| 2023-09-05T01:44:44.324899
| 2021-10-13T22:18:13
| 2021-10-13T22:18:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,554
|
py
|
import pandas as pd
grades_dict = { 'Wally': [87,96,70],
'Eva':[100,87,90],
'Sam':[94,77,90],
'Katie':[100,81,82],
'Bob':[83,65,85]}
grades = pd.DataFrame(grades_dict)
grades.index = ['Test1', 'Test2', 'Test3']
#print(grades)
#print(grades['Eva'])
#print(grades.Sam)
# using the loc and iloc methods
#print(grades.loc['Test2'])
#print(grades.iloc[1])
# For consecutive rows
#print(grades.loc['Test1':'Test3'])
#print(grades.iloc[:3])
#for non-consecutive rows
#print(grades.loc[['Test1','Test3']])
#print(grades.iloc[[0,2]])
#View only Eva's and Katie's grades for Test1 and Test2
#print(grades.Eva.loc[['Test1','Test2']])
#print(grades.Katie.loc[['Test1','Test2']])
#print("")
#View only Sam's THRU Bob's grades for Test1 and Test3
print(grades.Sam.loc["Test1":"Test3"])
#print(grades.Bob.loc["Test1":"Test3"])
print(grades.loc[['Test1', 'Test3'], "Sam":"Bob"])
grades_A = grades[grades >= 90]
print(grades_A)
# create a dataframe for everyone with a B grade
grades_B = grades[(grades >= 80) & (grades < 90)]
print(grades_B)
# create a dataframe for everyone with an A or B grade
grades_A_or_B = grades[(grades >= 90) | (grades >= 80)]
print(grades_A_or_B)
pd.set_option('precision',2)
#print(grades.T.describe())
print(grades.sort_index(ascending=False))
print(grades.sort_index(axis=1, ascending=False))
print(grades.sort_values(by='Test1', axis=1,ascending=False))
print(grades.T.sort_values(by='Test1', ascending=False))
print(grades.loc['Test1'].sort_values(ascending=False))
|
[
"bastian_posey2@baylor.edu"
] |
bastian_posey2@baylor.edu
|
acd65c46ffa5dd3f4fa612a415887f694e67e27f
|
9a6c5607ae6f6305f1427fe5ee37ab8a0aa9b710
|
/0 Python Fundamental/25.c.filter.py
|
bf848a8ad7d8b4c95625bf195a090ed00fc3af2e
|
[] |
no_license
|
raviitsoft/Python_Fundamental_DataScience
|
3796b957751a6d9125452bcf2aa409e64d7c8d8a
|
6f99fdd187646f0d28ffd4ddbe3ace4597c47967
|
refs/heads/master
| 2020-12-22T19:39:46.814043
| 2020-01-28T09:04:55
| 2020-01-28T09:04:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 560
|
py
|
ages = [5, 12, 17, 18, 24, 32]
def myFunc(x):
if x < 18:
return False
else:
return True
adults = filter(myFunc, ages)
# print(adults)
# print(list(adults))
#############################
z = filter(lambda a: True if a >= 18 else False, ages)
print(list(z))
z = filter(lambda a: a >= 18, ages)
print(list(z))
############################
x = [1, 2, 3, 4, 5, 99]
y = [1, 2, 6, 7, 8, 99]
z = list(filter(lambda a: a in x, y))
# print(z)
z = list(filter(lambda x: True if x<3 else False, x))
print(z)
z = list(filter(lambda x: x<3, x))
print(z)
|
[
"lintangwisesa@ymail.com"
] |
lintangwisesa@ymail.com
|
2a6b93697a823699f907bd04a3d16ae2b742d3dd
|
8b683dd48ad3021990ca5133ec24a1ab260b687c
|
/worm_plates/collect/refine_coords.py
|
c86eb3c3b422cbf802411536855a272433f692d0
|
[] |
no_license
|
ver228/worm-eggs
|
fd4afa13cba12f6553c0e8225fb591d9ea3806f1
|
0b2db08d9d81c3b31d9ebcd593059db02b3ee2fe
|
refs/heads/master
| 2022-04-01T06:29:56.358944
| 2020-02-14T15:55:39
| 2020-02-14T15:55:39
| 240,544,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,710
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 1 16:22:37 2019
@author: avelinojaver
"""
from pathlib import Path
import pandas as pd
import tables
import tqdm
import cv2
import numpy as np
from skimage.feature import peak_local_max
from scipy.spatial.distance import cdist
from scipy.optimize import linear_sum_assignment
#%%
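# Snap each annotated coordinate to a nearby local intensity maximum:
# unambiguous coord/peak pairs are assigned greedily, and the ambiguous
# remainder is resolved with the Hungarian algorithm.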
def correct_coords(img_, coords_, min_distance = 1, max_dist = 5):
#%%
peaks = peak_local_max(img_, min_distance = min_distance)
peaks = peaks[:, ::-1]
    #remove `peaks` that are not within `max_dist` of any `coord`
D = cdist(coords_, peaks)
#peaks with an intensity smaller than the coords intensities will be spurious
peaks_ints = img_[peaks[:, 1], peaks[:, 0]]
cc = coords_.astype(np.int)
coords_int = img_[cc[:, 1], cc[:, 0]]
good = (D <= max_dist).any(axis=0)
good &= peaks_ints >= coords_int.min()
D = D[:, good]
valid_peaks = peaks[good]
#find the closest peaks
closest_indexes = np.argmin(D, axis=1)
    #treat it as an easy assignment if the closest peak is claimed by only one coord
u_indexes = np.unique(closest_indexes)
counts = np.bincount(closest_indexes)[u_indexes]
easy_assigments = u_indexes[counts == 1]
valid_pairs = [(ii, x) for ii, x in enumerate(closest_indexes) if x in easy_assigments]
if len(valid_pairs) > 0:
easy_rows, easy_cols = map(np.array, zip(*valid_pairs))
easy_cost = D[easy_rows, easy_cols]
good = easy_cost<max_dist
easy_rows = easy_rows[good]
easy_cols = easy_cols[good]
assert (D[easy_rows, easy_cols] <= max_dist).all()
    #the hard assignments are peaks claimed by more than one coord
ambigous_rows = np.ones(D.shape[0], np.bool)
ambigous_rows[easy_rows] = False
ambigous_rows, = np.where(ambigous_rows)
ambigous_cols = np.ones(D.shape[1], np.bool)
ambigous_cols[easy_cols] = False
ambigous_cols, = np.where(ambigous_cols)
else:
ambigous_rows = np.arange(D.shape[0])
ambigous_cols = np.arange(D.shape[1])
easy_rows = np.array([], dtype=np.int)
easy_cols = np.array([], dtype=np.int)
D_r = D[ambigous_rows][:, ambigous_cols]
good = (D_r <= max_dist).any(axis=0)
D_r = D_r[:, good]
ambigous_cols = ambigous_cols[good]
    #use the Hungarian algorithm for the remaining assignment; it is too slow to run over the whole matrix
ri, ci = linear_sum_assignment(D_r)
hard_rows, hard_cols = ambigous_rows[ri], ambigous_cols[ci]
assert (D_r[ri, ci] == D[hard_rows, hard_cols]).all()
hard_cost = D[hard_rows, hard_cols]
good = hard_cost<max_dist
hard_rows = hard_rows[good]
hard_cols = hard_cols[good]
#let's combine both and assign the corresponding peak
rows = np.concatenate((easy_rows, hard_rows))
cols = np.concatenate((easy_cols, hard_cols))
new_coords = coords_.copy()
new_coords[rows] = valid_peaks[cols] #coords that do not satisfy the close peak condition will not be changed
return new_coords
#%%
if __name__ == '__main__':
_debug = False
min_distance = 2
max_dist = 5
r = max_dist*2+1
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(r, r))
src_root_dir = Path.home() / 'workspace/localization/data/worm_eggs_adam/'
dst_root_dir = Path.home() / 'workspace/localization/data/worm_eggs_adam_refined/'
src_files = [x for x in src_root_dir.rglob('*.hdf5') if not x.name.startswith('.')]
for src_file in tqdm.tqdm(src_files):
with pd.HDFStore(src_file, 'r') as fid:
df = fid['/coords']
img = fid.get_node('/img')[:]
#%%
#create a mask using the known coordinates
valid_mask = np.zeros_like(img)
cols = df['cx'].astype(np.int)
rows = df['cy'].astype(np.int)
valid_mask[rows, cols] = 1
valid_mask = cv2.dilate(valid_mask, kernel) > 0
        #then invert the image to create local maxima corresponding to the refined egg peaks
img_peaks = ~img
img_peaks -= img_peaks[valid_mask].min()
img_peaks[~valid_mask] = 0
#img_peaks = cv2.blur(img_peaks, (1,1))
#%%
        #finally use the correct_coords function to assign each labelled coord to a local maximum
cc = df[['cx','cy']].values
new_coords = correct_coords(img_peaks, cc, min_distance, max_dist)
coords = pd.DataFrame({'type_id':1, 'cx':new_coords[:,0], 'cy':new_coords[:,1]})
coords = coords.to_records(index=False)
dst_file = str(src_file).replace(str(src_root_dir), str(dst_root_dir))
dst_file = Path(dst_file)
dst_file.parent.mkdir(exist_ok=True, parents=True)
with tables.File(str(dst_file), 'w') as fid:
fid.create_carray('/', 'img', obj = img)
fid.create_table('/', 'coords', obj = coords)
#%%
if _debug:
#%%
import matplotlib.pylab as plt
fig, axs = plt.subplots(1, 2, sharex=True, sharey=True)
axs[0].imshow(img, cmap = 'gray')
axs[1].imshow(img_peaks, cmap = 'gray')
for ax in axs:
ax.plot(df['cx'], df['cy'], '.r')
ax.plot(coords['cx'], coords['cy'], '.g')
plt.show()
#%%
break
|
[
"ver228@gmail.com"
] |
ver228@gmail.com
|
2cc01a5bcad3b82724cee5c8768f5a5ce0700964
|
29929e5236a44fb315792f0aa21f7d81f8ce3b3e
|
/autos/migrations/0001_initial.py
|
f32828470630a008050b8685082931282f63540a
|
[] |
no_license
|
hell0ut/DjangoTesting
|
fa175ec48b3e420f7f5324d9211830ff0c695d80
|
a00e8bbc947cd7522144945c3f8b2f1d72fca37f
|
refs/heads/master
| 2023-08-16T07:00:19.149461
| 2021-09-25T23:02:50
| 2021-09-25T23:02:50
| 284,971,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,302
|
py
|
# Generated by Django 3.1 on 2020-08-08 14:21
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Make',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Enter a make (e.g. Dodge)', max_length=200, validators=[django.core.validators.MinLengthValidator(2, 'Make must be greater than 1 character')])),
],
),
migrations.CreateModel(
name='Auto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nickname', models.CharField(max_length=200, validators=[django.core.validators.MinLengthValidator(2, 'Nickname must be greater than 1 character')])),
('mileage', models.PositiveIntegerField()),
('comments', models.CharField(max_length=300)),
('make', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='autos.make')),
],
),
]
|
[
"hell0ut@users.noreply.github.com"
] |
hell0ut@users.noreply.github.com
|
bd7b287b4684f03a23b44c0db107bd72b40c3163
|
747ec225125c19acb0838ce4270c990deb0e1a15
|
/src/utils/writecsv.py
|
2496cb08a7e972a273c4c1fe994eb39de2bd6e58
|
[] |
no_license
|
juanjo3ns/gridworldRL
|
c2d01d9b3add464e22ea6a7687774a9967ca009d
|
e119d97c9b9a0d401f16be45b90e1d1fe7dfe52d
|
refs/heads/master
| 2022-07-10T04:16:53.167556
| 2019-08-10T19:11:06
| 2019-08-10T19:11:06
| 188,305,100
| 0
| 0
| null | 2022-06-21T22:03:04
| 2019-05-23T20:54:09
|
Python
|
UTF-8
|
Python
| false
| false
| 583
|
py
|
import csv
import os
class CSV:
def __init__(self,type, version , file):
self.path = '/data/src/csvdata/'
if not os.path.exists(os.path.join(self.path,type)):
os.mkdir(os.path.join(self.path,type))
if not os.path.exists(os.path.join(self.path,type,version)):
os.mkdir(os.path.join(self.path,type,version))
self.file = file
self.csvfile = open(os.path.join(self.path, type, version, self.file + '.csv'), 'w')
self.csvwriter = csv.writer(self.csvfile, delimiter=',')
def write(self, row):
self.csvwriter.writerow(row)
def close(self):
self.csvfile.close()
|
[
"juanjo.3ns@gmail.com"
] |
juanjo.3ns@gmail.com
|
b47a4a5a30e4c6a69d65b10c5957b45ee232cebb
|
f1a313e07e512649d513de1cbf43a680222a2a08
|
/classes/fan.py
|
debb1a32ab2a6e57fed56479d558dcc74e6158a9
|
[] |
no_license
|
ambrogio-galbusera/gr0g
|
04ba61ad8c33ed563e4dc8cac3fdd5c57a05f537
|
78658513c052e89f7823ff4027f7ae7e6fbd8f39
|
refs/heads/master
| 2023-02-04T04:16:13.656160
| 2020-12-20T08:37:38
| 2020-12-20T08:37:38
| 309,800,223
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
import gertutil as gu
class Fan:
fan_channel = 2
fan_freq = 1000
def __init__ (self) :
print("[FAN ] PWM brushed on board {}, channel {}".format(gu.board, self.fan_channel))
gu.pwm_init(self.fan_channel)
gu.pwm_set(self.fan_channel,self.fan_freq,0)
def set (self,dc) :
print("[FAN ] Setting PWM to {}".format(dc))
gu.pwm_set(self.fan_channel,self.fan_freq,dc)
def off (self) :
print("[FAN ] Power off")
gu.pwm_off(self.fan_channel)
|
[
"amgalbu@gmail.com"
] |
amgalbu@gmail.com
|
3402c917b6adeac02e37b98b2e7fb951b691c3b0
|
7797913bc78cfa0cfdb30813d4268e02770722e0
|
/Section 1/Q13.py
|
6c52afab4b9f16447181fdd6bccb59c92347316f
|
[] |
no_license
|
pkoi5088/CodeFestival
|
ecdf5dbdbc78793fd093a614f60d2639a68bffb3
|
cdc3065f6a2557936432f09344d5bfa09ff8b651
|
refs/heads/main
| 2023-03-19T14:49:02.488625
| 2021-03-14T03:20:15
| 2021-03-14T03:20:15
| 345,632,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
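# Korean planet names, keyed 1-8: Mercury, Venus, Earth, Mars,
# Jupiter, Saturn, Uranus, Neptune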
planet={1:'수성',2:'금성',3:'지구',4:'화성',5:'목성',6:'토성',7:'천왕성',8:'해왕성'}
N=int(input())
print(planet[N])
|
[
"pkoi5088@gmail.com"
] |
pkoi5088@gmail.com
|
e4a1d05206a0a5d258b62ecda07b2e6abc7290b0
|
dd5e7feb8fd98f77c6a5597e37c666cf97b71ee3
|
/py/cnn_miracle_check.py
|
1066651cc740b90cf87689b119953f30fbdb5c6a
|
[] |
no_license
|
N-31V/robotic-software
|
1d6706baa746cabf404cd0c17fc5a14ad03f1088
|
7aa1933394978cf07f8d62b915088c1b7c8cc940
|
refs/heads/master
| 2021-07-21T22:47:01.740444
| 2020-05-30T19:06:37
| 2020-05-30T19:06:37
| 173,922,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
#!/usr/bin/env python3
from cnn_miracle import *
import cv2 as cv
import sys
import argparse
def createParser():
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--old', action='store_true', default=False)
    return parser
if __name__ == '__main__':
parser = createParser()
namespace = parser.parse_args(sys.argv[1:])
model = make_model()
if namespace.old:
test_data = np.load('test_data.npy', allow_pickle=True)
else:
test_data = process_test_data()
    for elem in test_data[:30]:
        img = elem[0]
        img_np = np.array(img)
        data = img_np.reshape(IMG_WIDTH, IMG_HEIGHT, 3)
        model_out = model.predict([data])[0]
        if np.argmax(model_out) == 1:
            str_label = "Isn't miracle"
        else:
            str_label = 'Miracle'
        cv.imshow(str_label, img)
        cv.waitKey(0)
cv.destroyAllWindows()
|
[
"litvintseva.ann@gmail.com"
] |
litvintseva.ann@gmail.com
|
b4648464a36f3eb658eb2eb905a4d93fc19c34f4
|
d0f0cad6484b70838078efc01f6ec4122b489be4
|
/dz7/3.py
|
8b6508ae1f7036b0a49273111a114549c2771f21
|
[] |
no_license
|
w2kzx80/py
|
9543f6e3d89ee546adfd0e8d48a1accbefb18409
|
0f54a95a2e72eb5cba0442d3be233f23722529f6
|
refs/heads/main
| 2023-01-28T17:54:47.662471
| 2020-12-02T18:05:00
| 2020-12-02T18:05:00
| 308,981,748
| 0
| 0
| null | 2020-12-02T18:05:02
| 2020-10-31T22:20:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,512
|
py
|
class Cell:
def __init__(self, ccnt:int):
self.ccnt=ccnt
def __str__(self):
return f"{self.ccnt}"
def __add__(self, other):
if type(self) == Cell and type(other) == Cell:
return Cell(self.ccnt + other.ccnt)
raise TypeError
def __sub__(self, other):
if type(self) == Cell and type(other) == Cell:
if self.ccnt > other.ccnt:
return Cell(self.ccnt - other.ccnt)
print("second cell too big")
return self
raise TypeError
def __mul__(self, other):
if type(self) == Cell and type(other) == Cell:
return Cell(self.ccnt * other.ccnt)
raise TypeError
def __truediv__(self, other):
if type(self) == Cell and type(other) == Cell:
return Cell(self.ccnt // other.ccnt)
raise TypeError
    def make_order(self, rlen):
        rows = []
        rsum = self.ccnt
        while rsum > rlen:
            rows.append("*" * rlen)
            rsum -= rlen
        rows.append("*" * rsum)
        return "\n".join(rows)
myCell1 = Cell(12)
myCell2 = Cell(5)
myCell3 = Cell(7)
print(myCell1 + myCell2 + myCell3)
print(myCell1 / myCell2)
print(myCell2 * myCell3)
print(myCell3 - myCell2)
print(myCell2 - myCell3)
print(f"ORDER c1:\n{myCell1.make_order(6)}\n\n")
print(f"ORDER c2:\n{myCell2.make_order(6)}\n\n")
print(f"ORDER c3:\n{myCell3.make_order(6)}\n\n")
try:
    print(myCell1 + 12)
except TypeError:
    print("a Cell can only be added to another Cell")
|
[
"w2kzx80@gmail.com"
] |
w2kzx80@gmail.com
|
ea192e4b23bffcdc8beec6046e91bd58a61df8fb
|
168dfe4755fe68bc3ec43a70d87197645b5ed98c
|
/Crossin/test7.py
|
9ce0228e86d593ec9c9fefe674a21485d47e5c65
|
[] |
no_license
|
cjmking2010/python_study
|
e6364109be508dbbf34508999a72dd714b0c3986
|
48c365aece079218f0f78b1f0f90f0c2d5a12eec
|
refs/heads/master
| 2021-01-01T16:15:14.012244
| 2018-01-11T08:53:55
| 2018-01-11T08:53:55
| 97,796,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Filename: test7.py
# Author: peter.chen
from random import randint
a = randint(5,10)
b = randint(1,100)
print(a)
print(b)
|
[
"cjm20008@126.com"
] |
cjm20008@126.com
|
c45fd4f5a8e15cd035acf90dac7bafd543d04c90
|
4c88b359375197bbfd96028a08ded14a48bb60ad
|
/Conditionals.py
|
9f45513fcde2632e826850e618c85cfd85c9fc04
|
[] |
no_license
|
Joexder/Python-Curse
|
6a8247f98760ba331a9e25973c73d97b90348b17
|
ac3fb169da78da48599024755a0a909a6b932f3c
|
refs/heads/master
| 2020-08-03T11:28:08.448505
| 2019-09-29T23:00:14
| 2019-09-29T23:00:14
| 211,736,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
x = input("Enter your age: ")
if int(x) > 26:
    print("Over the limit")
elif int(x) == 26:
    print("Right at the limit")
else:
    print("Under the limit")
y = input("Enter a color: ")
if y in ('red', 'blue', 'yellow'):
    print("Primary colors")
else:
    print("Secondary colors")
|
[
"joel96terce@gmail.com"
] |
joel96terce@gmail.com
|
fadd8b2d4391ccc33a5c230fc0d56aaf7713c9fb
|
e247fb17dcdce92f3cb439880b0c7df7a0b0f869
|
/rooms/migrations/0019_auto_20210731_1450.py
|
9600f52897a28df09f19198d0465011ecb6a8755
|
[] |
no_license
|
hongsoojong/airbnb-clone-2021
|
6807bc3cec5ff70039c71ac44c9c28b13841c894
|
17a46b31d808f6e531c5c4b2f13aa7204cb51293
|
refs/heads/master
| 2023-07-15T12:36:00.429074
| 2021-08-24T07:42:00
| 2021-08-24T07:42:00
| 384,682,316
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
# Generated by Django 2.2.5 on 2021-07-31 05:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('rooms', '0018_auto_20210731_1441'),
]
operations = [
migrations.AlterField(
model_name='photo',
name='file',
field=models.ImageField(upload_to='room_photos'),
),
migrations.AlterField(
model_name='room',
name='room_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='rooms', to='rooms.RoomType'),
),
]
|
[
"84722193+hongsoojong@users.noreply.github.com"
] |
84722193+hongsoojong@users.noreply.github.com
|
6f7ccfa4d4f3f222e1b912cf39b3a94c834653d2
|
04b07d92102301b14aaed0b1dbef85dd0c8ed59d
|
/code/tinyos/apps/RxDecoder/Decoded.py
|
6625f3ec8ae823d72f8a42554e10ac925cf4c7d4
|
[] |
no_license
|
ab39826/IndexCoding
|
8fb03bb6152b7e47905f7b463c201182bc04bfb1
|
f8e36a9baefef46d7c7be858eabc35b0384cb0cc
|
refs/heads/master
| 2016-09-03T07:29:56.648775
| 2015-09-04T14:03:01
| 2015-09-04T14:03:01
| 41,918,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,776
|
py
|
#!/usr/bin/python
import time
import numpy as np
import sys
#tos stuff
from DecodedMsg import *
from tinyos.message import MoteIF
class MyClass:
def __init__(self,N):
self.prevtime = time.time()
self.N = N
self.A = make_A_matrix(self.N)
        self.counter = 0
        self.current_row = 0
# Create a MoteIF
self.mif = MoteIF.MoteIF()
# Attach a source to it
self.source = self.mif.addSource("sf@localhost:9002")
# SomeMessageClass.py would be generated by MIG
self.mif.addListener(self, DecodedMsg)
# Called by the MoteIF's receive thread when a new message
# is received
def receive(self, src, msg):
time.sleep(1)
m = DecodedMsg(msg.dataGet())
self.counter = m.get_counter()
timeformat = '%Y/%d/%m %H:%M:%S'
print 'Received message %s: counter: %d' % (time.strftime(timeformat), self.counter)
print ' current row: ', m.get_current_row()
print ' true current row: ', self.current_row
z = np.array(m.get_V_row())
z = z[0:self.current_row+1]
print z
V = self.A[:m.get_current_row()+1]
#U, S, W = np.linalg.svd(V.T)
#print S
Vnull = V[ :, [1,3,5,7] ]
#U,S,V = np.linalg.svd(Vnull.T)
#print S
print np.matrix(Vnull).T*np.matrix(z).T
#U, s, W = np.linalg.svd(Vnull.T)
#print W.T
#print self.A[m.get_current_row()][:]
#print m.get_current_row()
#print S
#V_null = self.A[0:self.current_row+1,[1,3, 9, 14]]
#U, S, W = np.linalg.svd(V_null)
#print S
#if m.get_perform_svd() == self.N:
##print ' svd received:'
#Svals = m.get_W()
#print 'Rx svd: ', Svals
#U,S,V = np.linalg.svd(self.A)
##S = [s**2 for s in S]
##print ' svd check:'
#print 'PC svd: ', S
#self.perform_svd = 0
#self.A = make_A_matrix(self.N)
#print 'MSE: ', np.linalg.norm(np.array(S)-np.array(Svals),2)
#proctime = time.time() - self.prevtime
#print 'Elapsed time: %f seconds' % proctime
#else:
#self.prevtime = time.time()
#self.perform_svd += 1
self.counter += 1
self.current_row = (self.current_row + 1) % self.N
#if self.current_row == 0:
#self.A = make_A_matrix(self.N)
self.send()
def send(self):
smsg = DecodedMsg()
smsg.set_counter(self.counter)
smsg.set_current_row(self.current_row)
smsg.set_V_row(self.A[self.current_row])
self.mif.sendMsg(self.source, 0xFFFF, smsg.get_amType(), 0, smsg)
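# Build an N x N Gaussian matrix whose columns 1, 3, 5 and 7 (rows 0-3) are
# filled from a rank-deficient 4x4 block B (its smallest singular value is
# zeroed, and column 7 repeats column 1).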
def make_A_matrix(N):
A = np.random.randn(N,N)
B = np.matrix(np.random.randn(4,4))
U, s, W = np.linalg.svd(B)
s[-1] = 0
B = np.array(U*np.diag(s)*W)
A[0:4,1] = B[:,0]
A[0:4,3] = B[:,1]
A[0:4,5] = B[:,2]
A[0:4,7] = B[:,0]
print A
return A
if __name__ == "__main__":
print "Running"
np.set_printoptions(precision=3)
np.set_printoptions(suppress=True)
if len(sys.argv) > 1:
N = int(sys.argv[1])
else:
N = 6
m = MyClass(N)
time.sleep(1)
m.send()
|
[
"Anurag.Banerjee@utexas.edu"
] |
Anurag.Banerjee@utexas.edu
|
a429a7f8ebb10dd8c7961a52873a18b1be3fc2e5
|
0156c5bb165ee4017c46f62db2a70455b7dca63a
|
/dir_for_bind/counter.py
|
94f08a89e9fb52da153538483f943e0dcbc0db64
|
[] |
no_license
|
KotovVitaliy/LearnQA_Docker
|
ba8206b14a7ab2d130d49bddf763a6cd5374d1c5
|
e6afc4c7bf11bf6dea579a2777c9aeb70c4e1a18
|
refs/heads/master
| 2022-06-19T18:12:51.577052
| 2020-05-11T12:02:58
| 2020-05-11T12:02:58
| 253,278,981
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30
|
py
|
for i in range(1, 6): print(i)
|
[
"v.kotov@corp.badoo.com"
] |
v.kotov@corp.badoo.com
|
643b2ad8db2c458d77f96dff2374d2efa0c66723
|
a00ed711e3e08b50ad6e91cc07a2cddc4a1de5ea
|
/airflow/api_connexion/schemas/dag_warning_schema.py
|
9531eb6b36bc3833a39d24bcef895f01444f9bb6
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
ishiis/airflow
|
4305794e36b611d01f49e3f2401be3dc49782670
|
292440d54f4db84aaf0c5a98cf5fcf34303f2fa8
|
refs/heads/master
| 2022-07-30T00:51:28.806940
| 2022-07-14T12:07:11
| 2022-07-14T12:07:11
| 209,801,072
| 1
| 0
|
Apache-2.0
| 2019-09-20T13:47:26
| 2019-09-20T13:47:26
| null |
UTF-8
|
Python
| false
| false
| 1,705
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List, NamedTuple
from marshmallow import Schema, fields
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from airflow.models.dagwarning import DagWarning
class DagWarningSchema(SQLAlchemySchema):
"""Import error schema"""
class Meta:
"""Meta"""
model = DagWarning
dag_id = auto_field(data_key="dag_id", dump_only=True)
warning_type = auto_field()
message = auto_field()
timestamp = auto_field(format="iso")
class DagWarningCollection(NamedTuple):
"""List of dag warnings with metadata"""
dag_warnings: List[DagWarning]
total_entries: int
class DagWarningCollectionSchema(Schema):
"""Import error collection schema"""
dag_warnings = fields.List(fields.Nested(DagWarningSchema))
total_entries = fields.Int()
dag_warning_schema = DagWarningSchema()
dag_warning_collection_schema = DagWarningCollectionSchema()
|
[
"noreply@github.com"
] |
ishiis.noreply@github.com
|
09ce754a548f68f3ade391cfe3187a6d981e4cb8
|
af8d5f01fb4d77d997f1c7fc3a677261a9f16763
|
/ML/folder_wise/Decision tree/decision.py
|
1eb83d10275d90a37e59b223af025b42abb35ae3
|
[] |
no_license
|
ashishsalunkhe/BE-Comp-Sem-8
|
e6660ac09553dc927e6ff6edf006598f42beb442
|
1fe3df171cd3dcd732baa010aac59c8cd6abe1db
|
refs/heads/master
| 2022-09-30T19:31:59.440717
| 2020-06-05T08:21:30
| 2020-06-05T08:21:30
| 269,570,770
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 838
|
py
|
import pandas as pd
import numpy as np
#reading Dataset
dataset=pd.read_csv("dt.csv")
X=dataset.iloc[:,:-1]
y=dataset.iloc[:,5]
#Perform Label encoding
from sklearn.preprocessing import LabelEncoder
le=LabelEncoder()
X=X.apply(le.fit_transform)
print(X)  # preview the label-encoded feature matrix (the original printed the literal string "X")
from sklearn.tree import DecisionTreeClassifier
regressor=DecisionTreeClassifier()
regressor.fit(X.iloc[:,1:5],y)
#Predict value for the given Expression
X_in=np.array([1,1,0,0])
y_pred=regressor.predict([X_in])
print("Prediction:", y_pred)
from sklearn.externals.six import StringIO
from IPython.display import Image
from sklearn.tree import export_graphviz
import pydotplus
dot_data=StringIO()
export_graphviz(regressor,out_file=dot_data,filled=True,rounded=True,special_characters=True)
graph=pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png('tree.png')
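# Caveat (added note): X.apply(le.fit_transform) refits the single LabelEncoder
# on every column, so `le` only retains the last column's mapping afterwards.
# A sketch that keeps one encoder per feature column instead (assumption,
# for encoding/decoding new inputs later):
#
#   encoders = {c: LabelEncoder().fit(dataset[c]) for c in dataset.columns[:-1]}
#   X_encoded = dataset.iloc[:, :-1].apply(lambda col: encoders[col.name].transform(col))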
|
[
"avsalunkhe98@gmail.com"
] |
avsalunkhe98@gmail.com
|
f84fa5685ebf64f3dc5afa3a2688e45e99336d77
|
b4e830b837f556b026d30c77c98bff8aa8de761e
|
/profile_project/accounts/migrations/0003_auto_20190606_1432.py
|
ebfe422e57d2d8d1bd8b5d8fcf363854641d77f9
|
[] |
no_license
|
DavidJMcGarvey/User-Profile-with-Django
|
c8f1b415c587a6fe5f40b5f0e4f1d0c2a01bf679
|
fbcb709d5b40569ba3d1899e0fef71fbb42a50c6
|
refs/heads/master
| 2022-11-26T19:25:38.394320
| 2019-07-03T23:12:23
| 2019-07-03T23:12:23
| 189,677,682
| 0
| 0
| null | 2022-11-22T03:14:49
| 2019-06-01T01:33:53
|
CSS
|
UTF-8
|
Python
| false
| false
| 2,290
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2019-06-06 21:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20190604_1438'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='avatar',
field=models.ImageField(default=None, upload_to=''),
),
migrations.AlterField(
model_name='userprofile',
name='bio',
field=models.TextField(default='I am a nerd for love.'),
),
migrations.AlterField(
model_name='userprofile',
name='birthday',
field=models.DateField(default='1990-01-15', max_length=255),
),
migrations.AlterField(
model_name='userprofile',
name='confirm_email',
field=models.EmailField(default='dave@email.com', max_length=254),
),
migrations.AlterField(
model_name='userprofile',
name='email',
field=models.EmailField(default='dave@email.com', max_length=254),
),
migrations.AlterField(
model_name='userprofile',
name='favorite_animal',
field=models.CharField(default='Gianni', max_length=255),
),
migrations.AlterField(
model_name='userprofile',
name='favorite_color',
field=models.CharField(default='Blue', max_length=255),
),
migrations.AlterField(
model_name='userprofile',
name='first_name',
field=models.CharField(default='Dave', max_length=255),
),
migrations.AlterField(
model_name='userprofile',
name='hobby',
field=models.CharField(default='Buckets', max_length=255),
),
migrations.AlterField(
model_name='userprofile',
name='hometown',
field=models.CharField(default='Denver', max_length=255),
),
migrations.AlterField(
model_name='userprofile',
name='last_name',
field=models.CharField(default='McGarvey', max_length=255),
),
]
|
[
"davidjmcgarvey@gmail.com"
] |
davidjmcgarvey@gmail.com
|
4d3bc7d3165b2029c9fb1ec9a5e3389bde0e2870
|
4aee52a4375c4792f5f79c8d44376d21ee24a485
|
/basics/hello.py
|
4d976f1ba7ff8dadc2aada3a19cbad03fe305f88
|
[] |
no_license
|
python-practice-04-2019/repo-1.1.1
|
4e5450f046653eb7faea5e3bce109ce7496a3326
|
937885fe9ad65754c11cd09adfa6dfa561d5d3a8
|
refs/heads/master
| 2020-05-15T15:56:17.279287
| 2019-04-20T19:55:11
| 2019-04-20T19:55:11
| 182,381,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 57
|
py
|
print("hello world")
x = 2
if(x == 2):
print("test")
|
[
"RabindraPatra@corp.fairisaac.com"
] |
RabindraPatra@corp.fairisaac.com
|
122c197fb6d6d84f46359e2ff7088f11282e764d
|
eb26a760a3e2074666df980cf9f482fae029e0ce
|
/venv/Scripts/pip3-script.py
|
5da2c1c87277b8fc3b07c9a681e69f2c7fc117ec
|
[] |
no_license
|
netanelm7/Work_Hours_Project
|
024cc6528ef1643f9fc1a000795b2f8e0614bc44
|
3bedd7ee6c92817bbd8731240d7c313746b7b92d
|
refs/heads/master
| 2020-04-02T11:09:44.258428
| 2018-10-23T18:00:04
| 2018-10-23T18:00:04
| 154,374,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
#!L:\Pycharm\Work_Hours_Project\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
|
[
"39493783+netanelm7@users.noreply.github.com"
] |
39493783+netanelm7@users.noreply.github.com
|
bf1e15e32502c294cb2398b0ca3de70499a04222
|
8b5d58fc22888d2fb051a3c59936659ca347043a
|
/NtupleAnalysis/src/Hplus2tbAnalysis/work/plotting/plotHistograms.py
|
35ff6c7ca74f8f56b0ceca013609826b3d9da715
|
[] |
no_license
|
attikis/HplusHW
|
c54f4429dd48e99b7e597043fa6d442d7a3573ba
|
e62ce79c914c6b5bfd1faa44ff94356fb55fe561
|
refs/heads/master
| 2020-06-15T22:00:43.733407
| 2019-07-05T10:30:07
| 2019-07-05T10:30:07
| 195,402,507
| 1
| 0
| null | 2019-07-05T12:02:22
| 2019-07-05T12:02:22
| null |
UTF-8
|
Python
| false
| false
| 9,600
|
py
|
#!/usr/bin/env python
'''
Usage (single plot):
./plotHistograms.py -m <pseudo_mcrab_directory> <jsonfile>
Usage (multiple plots):
./plotHistograms.py -m <pseudo_mcrab_directory> json/AfterAllSelections/*.json
or
./plotHistograms.py -m <pseudo_mcrab_directory> json/AfterAllSelections/*.json json/AfterStandardSelections/*.json
Last Used:
./plotHistograms.py -m Hplus2tbAnalysis_161128_082955/ json/AfterAllSelections/BjetPt.json
or
./plotHistograms.py -m Hplus2tbAnalysis_161128_082955/ json/AfterAllSelections/*.json
or
./plotHistograms.py -m Hplus2tbAnalysis_161128_082955/ json/AfterAllSelections/*/*.json
'''
#================================================================================================
# Imports
#================================================================================================
import os
import sys
from optparse import OptionParser
import getpass
import socket
import json
import HiggsAnalysis.NtupleAnalysis.tools.dataset as dataset
import HiggsAnalysis.NtupleAnalysis.tools.tdrstyle as tdrstyle
import HiggsAnalysis.NtupleAnalysis.tools.styles as styles
import HiggsAnalysis.NtupleAnalysis.tools.plots as plots
import HiggsAnalysis.NtupleAnalysis.tools.histograms as histograms
import HiggsAnalysis.NtupleAnalysis.tools.aux as aux
import ROOT
#================================================================================================
# Main
#================================================================================================
def Print(msg, printHeader=False):
fName = __file__.split("/")[-1]
if printHeader==True:
print "=== ", fName
print "\t", msg
else:
print "\t", msg
return
def Verbose(msg, printHeader=True, verbose=False):
if not opts.verbose:
return
Print(msg, printHeader)
return
def GetLumi(datasetsMgr):
Verbose("Determininig Integrated Luminosity")
lumi = 0.0
for d in datasetsMgr.getAllDatasets():
if d.isMC():
continue
else:
lumi += d.getLuminosity()
Verbose("Luminosity = %s (pb)" % (lumi), True )
return lumi
def GetDatasetsFromDir(opts, json):
Verbose("Getting datasets")
if len(json["samples"])<1:
Print("No samples defined in the JSON file. Exit", True)
print __doc__
sys.exit()
else:
return dataset.getDatasetsFromMulticrabDirs([opts.mcrab],
dataEra=json["dataEra"],
searchMode=json["searchMode"],
analysisName=json["analysis"],
includeOnlyTasks="|".join(json["samples"]),
optimizationMode=json["optMode"])
def Plot(jsonfile, opts):
Verbose("Plotting")
with open(os.path.abspath(jsonfile)) as jfile:
j = json.load(jfile)
Print("Plotting %s in %s" % (j["title"], j["saveDir"]), True)
# Setup the style
style = tdrstyle.TDRStyle()
style.setGridX(j["gridX"]=="True")
style.setGridY(j["gridY"]=="True")
# Set ROOT batch mode boolean
ROOT.gROOT.SetBatch(opts.batchMode)
# Setup & configure the dataset manager
datasetsMgr = GetDatasetsFromDir(opts, j)
#datasetsMgr.loadLuminosities()
datasetsMgr.updateNAllEventsToPUWeighted()
if opts.verbose:
datasetsMgr.PrintCrossSections()
datasetsMgr.PrintLuminosities()
# Set/Overwrite cross-sections
for d in datasetsMgr.getAllDatasets():
if "ChargedHiggs" in d.getName():
datasetsMgr.getDataset(d.getName()).setCrossSection(1.0)
plots.mergeRenameReorderForDataMC(datasetsMgr)
# Print dataset information
datasetsMgr.PrintInfo()
# Get Integrated Luminosity
lumi = GetLumi(datasetsMgr)
# Plot the histogram
DataMCPlot(datasetsMgr, j)
return
def DataMCPlot(datasetsMgr, json):
Verbose("Creating Data-MC plot")
# Create the Data-MC Plot
p = plots.DataMCPlot(datasetsMgr, json["histogram"])
# Customise histograms before drawing (before being converted to TGraphs)
if "drawStyle" in json:
p.histoMgr.setHistoDrawStyleAll(json["drawStyle"])
if "rebinx" in json:
p.histoMgr.forEachHisto(lambda h: h.getRootHisto().RebinX(json["rebinX"]))
if "rebiny" in json:
if json["rebinY"] != "None":
p.histoMgr.forEachHisto(lambda h: h.getRootHisto().RebinY(json["rebinY"]))
# Label size (optional. Commonly Used in counters)
xlabelSize = None
if "xlabelsize" in json:
xlabelSize = json["xlabelsize"]
ylabelSize = None
if "ylabelsize" in json:
ylabelSize = json["ylabelsize"]
# Draw a customised plot
saveName = os.path.join(json["saveDir"], json["title"])
plots.drawPlot(p,
saveName,
xlabel = json["xlabel"],
ylabel = json["ylabel"],
rebinX = json["rebinX"],
rebinY = json["rebinY"],
ratioYlabel = json["ratioYlabel"],
ratio = json["ratio"]=="True",
stackMCHistograms = json["stackMCHistograms"]=="True",
ratioInvert = json["ratioInvert"]=="True",
addMCUncertainty = json["addMCUncertainty"]=="True",
addLuminosityText = json["addLuminosityText"]=="True",
addCmsText = json["addCmsText"]=="True",
cmsExtraText = json["cmsExtraText"],
opts = json["opts"],
opts2 = json["ratioOpts"],
log = json["logY"]=="True",
errorBarsX = json["errorBarsX"]=="True",
moveLegend = json["moveLegend"],
# cutLine = json["cutValue"], #cannot have this and "cutBox" defined
cutBox = {"cutValue": json["cutValue"], "fillColor": json["cutFillColour"], "box": json["cutBox"]=="True", "line": json["cutLine"]=="True", "greaterThan": json["cutGreaterThan"]=="True"},
xlabelsize = xlabelSize,
ylabelsize = ylabelSize,
)
# Remove legend?
if json["removeLegend"] == "True":
p.removeLegend()
# Additional text
histograms.addText(json["extraText"].get("x"), json["extraText"].get("y"), json["extraText"].get("text"), json["extraText"].get("size") )
# Save in all formats chosen by user
saveFormats = json["saveFormats"]
for i, ext in enumerate(saveFormats):
Print("%s" % saveName + ext, i==0)
p.saveAs(saveName, formats=saveFormats)
return
def main(opts):
Verbose("main function")
jsonFiles = []
# For-loop: All system script arguments
for arg in sys.argv[1:]:
# Skip if not a json file
if ".json" not in arg:
continue
# Sanity check - File exists
if not os.path.exists(arg):
Print("The JSON file \"%s\" does not seem to be a valid path.. Please check that the file exists. Exit" % (arg), True)
sys.exit()
# Load & append json file
with open(os.path.abspath(arg)) as jsonFile:
try:
json.load(jsonFile)
jsonFiles.append(arg)
except ValueError, e:
Print("Problem loading JSON file %s. Please check the file" % (arg))
sys.exit()
# Sanity check - At least 1 json file found
if len(jsonFiles) == 0:
Print("No JSON files found. Please read the script instructions. Exit", True)
print __doc__
sys.exit()
# For-loop: All json files
for j in jsonFiles:
Print("Processing JSON file \"%s\"" % (j), True)
Plot(j, opts)
return
#================================================================================================
# Main
#================================================================================================
if __name__ == "__main__":
# Default Settings
global opts
BATCHMODE = True
VERBOSE = False
parser = OptionParser(usage="Usage: %prog [options]" , add_help_option=False,conflict_handler="resolve")
parser.add_option("-m", "--mcrab", dest="mcrab", action="store",
help="Path to the multicrab directory for input")
parser.add_option("-b", "--batchMode", dest="batchMode", action="store_false", default=BATCHMODE,
help="Enables batch mode (canvas creation NOT generates a window) [default: %s]" % BATCHMODE)
parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=VERBOSE,
help="Enables verbose mode (for debugging purposes) [default: %s]" % VERBOSE)
(opts, parseArgs) = parser.parse_args()
# Require at least two arguments (script-name, path to multicrab)
if opts.mcrab == None:
Print("Not enough arguments passed to script execution. Printing docstring & EXIT.")
print __doc__
sys.exit(0)
# Call the main function
main(opts)
if not opts.batchMode:
raw_input("=== plotHistograms.py: Press any key to quit ROOT ...")
|
[
"attikis@cern.ch"
] |
attikis@cern.ch
|
98c23c12da7c87e54e9d2ca110846a9658f7421f
|
c28df1b0bf09c1ce1cb6d9de219676827f899e8d
|
/person/models.py
|
15cd98731d76acb568c797b28b4da1531700cac6
|
[] |
no_license
|
frozyum/split-expenses-app-backend
|
36fc501c432b6508a7c34bc08b6ab27a6bff245d
|
55e23ea222396814a80512df3fab0d5710c88fdf
|
refs/heads/master
| 2023-01-01T08:56:16.145140
| 2020-10-27T09:17:14
| 2020-10-27T09:17:14
| 298,987,935
| 0
| 0
| null | 2020-09-28T16:49:28
| 2020-09-27T08:32:47
|
Python
|
UTF-8
|
Python
| false
| false
| 306
|
py
|
from django.db import models
# Create your models here.
from group.models import Group
class Person(models.Model):
group = models.ForeignKey(to=Group, on_delete=models.CASCADE)
name = models.CharField(max_length=12)
def __str__(self):
        return str(self.name) + " in " + str(self.group)
|
[
"smamu13@freeuni.edu.ge"
] |
smamu13@freeuni.edu.ge
|
4381790eeb2b2b365378271185ea7d4b8453afa4
|
0ddf984c1e64c937bb2864968002b86e4e4dd77a
|
/IE517_F20_HW4/regression housing2.py
|
1bd8949c029d2fc2b5ab3e44db59c9f9e00435d4
|
[] |
no_license
|
kanonlove/UIUC-IE-517
|
7ee3774c127f33a28d33e5eb8aadbf4e3dc1ebfe
|
853c537fbea5faf3b034f2f6c17dc4b01e8940d7
|
refs/heads/master
| 2022-12-25T14:04:40.094655
| 2020-10-10T03:31:08
| 2020-10-10T03:31:08
| 290,069,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,540
|
py
|
import pandas as pd
import seaborn as sns
import numpy as np
from pandas import DataFrame
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
import matplotlib.pyplot as plt
"""
Part 1: Exploratory Data Analysis
Describe the data sufficiently using the methods and visualizations that we used
previously in Module 3 and again this week. Include any output, graphs, tables, heatmaps,
box plots, etc. Label your figures and axes. DO NOT INCLUDE CODE!
Split data into training and test sets. Use random_state = 42. Use 80% of the data
for the training set. Use the same split for all models.
Part 2: Linear regression
Fit a linear model using SKlearn to all of the features of the dataset.
Describe the model (coefficients and y intercept), plot the residual errors,
calculate performance metrics: MSE and R2.
Part 3.1: Ridge regression
Fit a Ridge model using SKlearn to all of the features of the dataset.
Test some settings for alpha. Describe the model (coefficients and y intercept),
plot the residual errors, calculate performance metrics: MSE and R2. Which alpha gives the best performing model?
Part 3.2: LASSO regression
Fit a LASSO model using SKlearn to all of the features of the dataset.
Test some settings for alpha. Describe the model (coefficients and y intercept),
plot the residual errors, calculate performance metrics: MSE and R2. Which alpha gives the best performing model?
Part 4: Conclusions
Write a short paragraph summarizing your findings.
Part 5: Appendix
Link to github repo
"""
df=pd.read_csv("HW4_housing2.csv")
print(df.head())
#print(len(df))
print("\n")
"""
• CRIM: Per capita crime rate by town
• ZN: Proportion of residential land zoned for lots over 25,000 sq. ft.
• INDUS: Proportion of non-retail business acres per town
• CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
• NOX: Nitric oxide concentration (parts per 10 million)
• RM: Average number of rooms per dwelling
• AGE: Proportion of owner-occupied units built prior to 1940
• DIS: Weighted distances to five Boston employment centers
• RAD: Index of accessibility to radial highways
• TAX: Full-value property tax rate per $10,000
• PTRATIO: Pupil-teacher ratio by town
• B: 1000(Bk - 0.63)^2, where Bk is the proportion of [people of African American descent] by town
• LSTAT: Percentage of lower status of the population
• MEDV: Median value of owner-occupied homes in $1000s (TARGET)
"""
# heat map
cormat = DataFrame(df.corr())
#visualize correlations using heatmap
fig, ax = plt.subplots(figsize=(10,10))
sns.heatmap(cormat,linewidth=1,square=True,ax=ax,annot=True)
ax.set_ylim([27, 0])
plt.xlabel("Heat Map of Boston House Price")
plt.show()
print("\n")
#CRIM
plt.hist(df["CRIM"],edgecolor="black",bins=10)
plt.xlabel("per capita crime rate by town")
plt.ylabel("numbers")
plt.show()
#ZN
plt.hist(df["ZN"],edgecolor="black",bins=10)
plt.xlabel("ZProportion of residential land zoned for lots over 25,000 sq. ft")
plt.ylabel("numbers")
plt.show()
#INDUS
plt.hist(df["INDUS"],edgecolor="black",bins=[0,2.5,5,7.5,10,12.5,15,17.5,20,22.5,25,27.5,30])
plt.xlabel("Proportion of non-retail business acres per town")
plt.ylabel("numbers")
plt.show()
#CHAS
false_number=0
true_number=0
for value in df["CHAS"]:
if value== 0:
false_number=false_number+1
else:
true_number=true_number+1
plt.bar(["tract bounding river","tract not bounding river"],height=[true_number,false_number],width=0.4,edgecolor="black")
plt.xlabel("Charles River dummy variable")
plt.ylabel("Number")
plt.show()
#NOX
plt.hist(df["NOX"],edgecolor="black",bins=[0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1])
plt.xlabel("Nitric oxide concentration (parts per 10 million)")
plt.ylabel("numbers")
plt.show()
#RM
plt.hist(df["RM"],edgecolor="black",bins=[0,1,2,3,4,5,6,7,8,9,10])
plt.xlabel("Average number of rooms per dwelling")
plt.ylabel("numbers")
plt.show()
#AGE
plt.hist(df["AGE"],edgecolor="black",bins=[0,10,20,30,40,50,60,70,80,90,100,110])
plt.xlabel("Proportion of owner-occupied units built prior to 1940")
plt.ylabel("numbers")
plt.show()
#DIS
plt.hist(df["DIS"],edgecolor="black",bins=[0,2,4,6,8,10,12,14])
plt.xlabel("Weighted distances to five Boston employment centers")
plt.ylabel("numbers")
plt.show()
#RAD
#this one is more like binary data
#I am not sure what the "data" are
#I might also do a hist for the data below 10
below_10_number=0
index_24_number=0
for value in df["RAD"]:
if value == 24:
index_24_number=index_24_number+1
else:
below_10_number=below_10_number+1
plt.bar(["index below 10","index is 24"],height=[below_10_number,index_24_number],width=0.4,edgecolor="black")
plt.xlabel("Index of accessibility to radial highways")
plt.ylabel("Number")
plt.show()
plt.hist(df["RAD"],edgecolor="black",bins=[0,2,4,6,8,10,12,14,16,18,20,24,26])
plt.xlabel("Index of accessibility to radial highways")
plt.ylabel("numbers")
plt.show()
#TAX
plt.hist(df["TAX"],edgecolor="black",bins=[0,100,200,300,400,500,600,700,800])
plt.xlabel("Full-value property tax rate per $10,000")
plt.ylabel("numbers")
plt.show()
#PTRATIO
plt.hist(df["PTRATIO"],edgecolor="black",bins=[0,2.5,5,7.5,10,12.5,15,17.5,20,22.5,25])
plt.xlabel("Pupil-teacher ratio by town")
plt.ylabel("numbers")
plt.show()
#B
plt.hist(df["B"],edgecolor="black",bins=[0,50,100,150,200,250,300,350,400])
plt.xlabel("1000(Bk - 0.63)^2,Bk is the proportion of people of African American descent by town")
plt.ylabel("numbers")
plt.show()
#LSTAT
plt.hist(df["LSTAT"],edgecolor="black",bins=[0,5,10,15,20,25,30,35,40])
plt.xlabel("Percentage of lower status of the population")
plt.ylabel("numbers")
plt.show()
#MEDV
plt.hist(df["MEDV"],edgecolor="black",bins=[0,5,10,15,20,25,30,35,40,45,50,55,60])
plt.xlabel("Median value of owner-occupied homes in $1000s (TARGET)")
plt.ylabel("numbers")
plt.show()
#scatter plots for two most correlated features RM and LSAT
plt.scatter(df["LSTAT"],df["MEDV"])
plt.xlabel("LSTAT")
plt.ylabel("MEDV")
plt.show()
plt.scatter(df["RM"],df["MEDV"])
plt.xlabel("RM")
plt.ylabel("MEDV")
plt.xlim(0,10)
plt.show()
#exploratory ends here
print("\n")
print("Linear Regression without penalty")
print("\n")
#split the data
#standardize the data
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
X,y=df.iloc[:,0:26],df.iloc[:,26]
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.2, random_state=42)
#print( X_train.shape, y_train.shape)
scaler = preprocessing.StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
# linear regression
linear_reg=LinearRegression()
linear_reg.fit(X_train,y_train)
y_test_linear_predict=linear_reg.predict(X_test)
y_train_linear_predict=linear_reg.predict(X_train)
#coefficient and intercept for each column
counter=0
for value in linear_reg.coef_:
print(df.columns[counter],"coefficient:",value)
counter=counter+1
print("\n")
print("intercept is",linear_reg.intercept_)
#residual plot of linear regression
plt.scatter(y_train_linear_predict,y_train_linear_predict-y_train,color="red",edgecolor="white",label="Training Data")
plt.scatter(y_test_linear_predict,y_test_linear_predict-y_test,edgecolor="white",label="Testing Data")
plt.legend(loc="upper left")
plt.hlines(y=0,xmin=-50,xmax=100,lw=2)
plt.xlim(-10,50)
plt.xlabel("predicted values")
plt.ylabel("residuals")
plt.show()
#MSE and R^2
print("MSE Train:")
print (mean_squared_error(y_train_linear_predict,y_train))
print("MSE Test:")
print(mean_squared_error(y_test_linear_predict,y_test))
print("\n")
print("R^2 Train:")
print (r2_score(y_train_linear_predict,y_train))
print("R^2 Test:")
print (r2_score(y_test_linear_predict,y_test))
print("\n")
#Ridge Regression
print("Ridge Regression")
# use cross validation to pick the best alpha for Ridge Regression
from sklearn.linear_model import RidgeCV
ridgecv = RidgeCV(alphas=range(1,100),cv=5)
ridgecv.fit(X_train, y_train)
print("\n")
print("under cross validation")
print("the best alpha is",ridgecv.alpha_)
print("\n")
ridge_reg=Ridge(alpha=ridgecv.alpha_)
ridge_reg.fit(X_train,y_train)
y_test_ridge_predict=ridge_reg.predict(X_test)
y_train_ridge_predict=ridge_reg.predict(X_train)
#coefficient and intercept for each column
counter=0
for value in ridge_reg.coef_:
print(df.columns[counter],"coefficient:",value)
counter=counter+1
print("\n")
print("intercept is",ridge_reg.intercept_)
#residual plot of ridge regression
plt.scatter(y_train_ridge_predict,y_train_ridge_predict-y_train,color="red",edgecolor="white",label="Training Data")
plt.scatter(y_test_ridge_predict,y_test_ridge_predict-y_test,edgecolor="white",label="Testing Data")
plt.legend(loc="upper left")
plt.hlines(y=0,xmin=-50,xmax=100,lw=2)
plt.xlim(-10,50)
plt.xlabel("predicted values")
plt.ylabel("residuals")
plt.show()
#MSE and R^2
print("MSE Train:")
print (mean_squared_error(y_train_ridge_predict,y_train))
print("MSE Test:")
print(mean_squared_error(y_test_ridge_predict,y_test))
print("\n")
print("R^2 Train:")
print (r2_score(y_train,y_train_ridge_predict))
print("R^2 Test:")
print (r2_score(y_test,y_test_ridge_predict))
print("\n")
#Lasso Regression
print("Lasso Regression")
# use cross validation to pick the best alpha for Lasso Regression
from sklearn.linear_model import LassoCV
lassocv = LassoCV(alphas=[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0],cv=5)
lassocv.fit(X_train, y_train)
print("\n")
print("under cross validation")
print("the best alpha is",lassocv.alpha_)
print("\n")
lasso_reg=Lasso(alpha=lassocv.alpha_)
lasso_reg.fit(X_train,y_train)
y_test_lasso_predict=lasso_reg.predict(X_test)
y_train_lasso_predict=lasso_reg.predict(X_train)
#coefficient and intercept for each column
counter=0
for value in lasso_reg.coef_:
print(df.columns[counter],"coefficient:",value)
counter=counter+1
print("\n")
print("intercept is",lasso_reg.intercept_)
#residual plot of ridge regression
plt.scatter(y_train_lasso_predict,y_train_lasso_predict-y_train,color="red",edgecolor="white",label="Training Data")
plt.scatter(y_test_lasso_predict,y_test_lasso_predict-y_test,edgecolor="white",label="Testing Data")
plt.legend(loc="upper left")
plt.hlines(y=0,xmin=-50,xmax=100,lw=2)
plt.xlim(-10,50)
plt.xlabel("predicted values")
plt.ylabel("residuals")
plt.show()
#MSE and R^2
print("MSE Train:")
print (mean_squared_error(y_train_lasso_predict,y_train))
print("MSE Test:")
print(mean_squared_error(y_test_lasso_predict,y_test))
print("\n")
print("R^2 Train:")
print (r2_score(y_train_lasso_predict,y_train))
print("R^2 Test:")
print (r2_score(y_test_lasso_predict,y_test))
print("\n")
print("My name is Yi Zhou")
print("My NetID is: yizhou16")
print("I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.")
|
[
"noreply@github.com"
] |
kanonlove.noreply@github.com
|
4feb08a787d4150afc715d7edaf517188ec7e248
|
4f406cd3e9b2600f4e581408ca01f5ceb14deaa7
|
/gps_logger.py
|
369058ceb00efe2a56e224099eb751253dc01d99
|
[
"CC-BY-4.0"
] |
permissive
|
bortek/gps_tracker
|
40babfb9d6c6e39a535e73a951d2ece6f9accd93
|
51c36e26dfeacacbbe8c9d7c45a9e048df7b6656
|
refs/heads/master
| 2020-04-26T00:26:46.575836
| 2019-05-01T03:54:20
| 2019-05-01T03:54:20
| 173,178,275
| 0
| 0
| null | 2019-02-28T19:55:43
| 2019-02-28T19:55:43
| null |
UTF-8
|
Python
| false
| false
| 2,449
|
py
|
"""
gps_logger.py logs received gps points in a local file.
"""
__author__ = "Konstantinos Kagiampakis"
__license__ = """
Creative Commons Attribution 4.0 International
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
"""
import gpsd
import json
import time
import sys
STEP = 3
# Connect to the local gpsd
while True:
try:
print("Connecting on GPSD...")
gpsd.connect()
except:
print("Could not connect to GPSD.\nThis script is persistent and will try to reconnect to GPSD in 10 sec.",sys.exc_info()[0])
time.sleep(10)
else:
print("GPSD connected!")
break
filename = "/home/pi/gps_route.log"
try:
f = open(filename, 'a')
except:
raise
while True:
try:
try:
packet = gpsd.get_current()
if packet.mode > 1:
if packet.mode >= 2:
print("Latitude: " + str(packet.lat))
print("Longitude: " + str(packet.lon))
print("Track: " + str(packet.track))
print("Horizontal Speed: " + str(packet.hspeed))
print("Time: " + str(packet.time))
print("Error: " + str(packet.error))
if packet.mode == 3:
print("Altitude: " + str(packet.alt))
print("Climb: " + str(packet.climb))
point = {'lat': str(packet.lat), 'lon': str(packet.lon), 'track': str(packet.track), 'hspeed': str(packet.hspeed), 'time': str(packet.time)}
if packet.mode == 3:
point['alt'] = str(packet.alt)
point['climb'] = str(packet.climb)
str_point = json.dumps(point)
print("storing point to file:#"+str_point+"# str len:"+str(len(str_point)))
f.write(str_point+',\n')
else:
print("There is no GPS FIX yet. Packet mode 0.")
time.sleep(10)
except (NameError, KeyError):
print("There is no GPS FIX yet. Key or Name exception.")
time.sleep(3)
except:
print (sys.exc_info()[0])
time.sleep(10)
time.sleep(STEP)
except KeyboardInterrupt:
print(" Received KeyboardInterrupt")
try:
print("Closing file.")
f.close()
except:
raise
else:
print("File closed.")
break
except:
print(sys.exc_info()[0])
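# Read-back sketch (assumption: one JSON object per line with a trailing
# comma, exactly as written by f.write(str_point+',\n') above):
#
#   with open(filename) as fin:
#       points = [json.loads(line.rstrip(',\n')) for line in fin if line.strip()]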
|
[
"kostiskag@gmail.com"
] |
kostiskag@gmail.com
|
f3b97341a44d4fe1f94ed1617edef2d9b577cc13
|
e6fb8de1ecfafa787d826d3255029b4ad7776adf
|
/Class2/1181.py
|
e0189a35f15ce2c5fd3870c1f99820626382d7a9
|
[] |
no_license
|
minari1505/AlgorithmForH-M
|
2318e743558b21ea6295f100075d99e2bd6e2c58
|
e751acf0897f96640e0c3d85d8a7c37ed9202c44
|
refs/heads/main
| 2023-08-30T00:31:58.426096
| 2021-10-27T08:37:46
| 2021-10-27T08:37:46
| 386,270,328
| 0
| 1
| null | 2021-10-06T12:55:09
| 2021-07-15T11:48:28
|
Python
|
UTF-8
|
Python
| false
| false
| 300
|
py
|
import sys
n=int(input())
line = [sys.stdin.readline().strip() for i in range(n)]
set_line=set(line)
line = list(set_line)
line.sort()
line.sort(key=len)
for i in line:
print(i)
#dic = {}
#print(dic.items())
#print(sorted(dic.items(),key=lambda x : x[1]))
#for key in dic.keys():
# print(key)
|
[
"threeyearr@gmail.com"
] |
threeyearr@gmail.com
|
2c9e6b8271ba65dce0eadea3990a851e1695961a
|
aa6a439b97ad6a60d7f49b59f4c9bafbff1e7736
|
/tests/tests/meta_add_lazy.py
|
83905f9423f7c77c64042ab806242fdcd635677f
|
[
"MIT"
] |
permissive
|
yigarashi-9/mini_python
|
31b5d04a68a89f899e618a0884eab2b245bd43fb
|
d62b9040f8427057a20d18340a27bdf2dfc8c22e
|
refs/heads/master
| 2021-04-09T11:30:05.430724
| 2018-10-26T06:53:21
| 2018-10-26T06:53:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
class A:
def __init__(self, x):
self.x = x
class B(A):
def __add__(self, other):
return self.x + other.x + 1
class C(A):
pass = 1
def myadd(self, other):
return self.x + other.x + 100
a1 = A(0)
a2 = A(0)
b1 = B(0)
b2 = B(0)
c1 = C(0)
c2 = C(0)
A.__add__ = myadd
assert (a1 + a2) + (b1 + b2) + (c1 + c2) == 201
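# Why 201: a1 + a2 and c1 + c2 both resolve to the lazily attached myadd
# (100 each, since every x is 0), while b1 + b2 uses B.__add__ (1);
# 100 + 1 + 100 = 201.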
|
[
"yuu.igarashi.9@gmail.com"
] |
yuu.igarashi.9@gmail.com
|
86a0bc8d228e9bc61e282dd6bd5b4e84a35754b0
|
91005296393b602549f16e5827b49e5bb9d0c760
|
/fruit_store/fruit_store/settings.py
|
55f6787747e4e353b982dba2c60e6e52d838248d
|
[] |
no_license
|
nguyenvanhuybk99/django_rest_mongo
|
7498a2aed63fa2c8c38e05604b239ce332b41980
|
c114bb5f981b2eb29495e550d2c5aa0efbd377a8
|
refs/heads/master
| 2023-04-28T15:30:54.441411
| 2021-05-22T01:42:38
| 2021-05-22T01:42:38
| 369,690,614
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,615
|
py
|
"""
Django settings for fruit_store project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ij8sa@@26^2(7^uy3=7a$k54yq45o6o7gnkcu8+!-4&rlq$y4*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["0.0.0.0", "127.0.0.1"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'shop',
'customer',
'manager',
'cart'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'fruit_store.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fruit_store.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default': {
'ENGINE': 'djongo',
'NAME': 'fruit_store',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [],
'TEST_REQUEST_DEFAULT_FORMAT': 'json'
}
APPEND_SLASH=False
|
[
"nguyenvanhuybk99@gmail.com"
] |
nguyenvanhuybk99@gmail.com
|
162eb2ee34fdecebf7be87ac009e79c0a715e25f
|
77077a391973d1f8c05647d08fc135facd04fc5e
|
/xlsxwriter/test/app/test_app02.py
|
fa347d734560186995daf0fad3e57c79c5129178
|
[
"BSD-2-Clause-Views"
] |
permissive
|
DeltaEpsilon7787/XlsxWriter
|
28fb1012eaa42ea0f82e063f28c0c548ca016c5e
|
550b9c5bd678c861dcc9f6f4072b33a69566e065
|
refs/heads/main
| 2023-08-02T09:14:10.657395
| 2021-09-06T10:51:56
| 2021-09-06T10:51:56
| 384,948,081
| 0
| 0
|
NOASSERTION
| 2021-07-11T12:57:26
| 2021-07-11T12:57:25
| null |
UTF-8
|
Python
| false
| false
| 2,234
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from ..helperfunctions import _xml_to_list
from ...app import App
class TestAssembleApp(unittest.TestCase):
"""
Test assembling a complete App file.
"""
def test_assemble_xml_file(self):
"""Test writing an App file."""
self.maxDiff = None
fh = StringIO()
app = App()
app._set_filehandle(fh)
app._add_part_name('Sheet1')
app._add_part_name('Sheet2')
app._add_heading_pair(('Worksheets', 2))
app._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<Properties xmlns="http://schemas.openxmlformats.org/officeDocument/2006/extended-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocument/2006/docPropsVTypes">
<Application>Microsoft Excel</Application>
<DocSecurity>0</DocSecurity>
<ScaleCrop>false</ScaleCrop>
<HeadingPairs>
<vt:vector size="2" baseType="variant">
<vt:variant>
<vt:lpstr>Worksheets</vt:lpstr>
</vt:variant>
<vt:variant>
<vt:i4>2</vt:i4>
</vt:variant>
</vt:vector>
</HeadingPairs>
<TitlesOfParts>
<vt:vector size="2" baseType="lpstr">
<vt:lpstr>Sheet1</vt:lpstr>
<vt:lpstr>Sheet2</vt:lpstr>
</vt:vector>
</TitlesOfParts>
<Company>
</Company>
<LinksUpToDate>false</LinksUpToDate>
<SharedDoc>false</SharedDoc>
<HyperlinksChanged>false</HyperlinksChanged>
<AppVersion>12.0000</AppVersion>
</Properties>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
25e9604e708758b56d016d3c384898a1c74f8d6d
|
e2752da08870dc935d21d2157786aa919766c296
|
/zemiao/129.py
|
c1bf0b0cdc9d496e1e042931b7ba65dc6c27c091
|
[] |
no_license
|
Zichuanyun/go-shuati
|
87dc192da65a9f08ebe8530f623e473a2f3c2865
|
7c443f85217ab96ceac717ece7fc472271e1d3ab
|
refs/heads/master
| 2020-03-19T11:03:12.855384
| 2018-09-27T17:20:25
| 2018-09-27T17:20:25
| 136,425,571
| 9
| 6
| null | 2018-06-21T04:51:56
| 2018-06-07T05:22:35
|
Java
|
UTF-8
|
Python
| false
| false
| 742
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def sumNumbers(self, root):
"""
:type root: TreeNode
:rtype: int
"""
ans = 0
if not root:
return ans
stack = [(root, root.val)]
while stack:
node, temp = stack.pop()
if node.left:
stack += [(node.left, temp*10+node.left.val)]
if node.right:
stack += [(node.right, temp*10+node.right.val)]
if not (node.left or node.right):
ans += temp
return ans
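# A minimal usage sketch (TreeNode as in the commented definition above):
# the tree 1 -> (2, 3) encodes the root-to-leaf numbers 12 and 13, so the sum is 25.
#
#   root = TreeNode(1)
#   root.left, root.right = TreeNode(2), TreeNode(3)
#   assert Solution().sumNumbers(root) == 25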
|
[
"zemiaozhu@outlook.com"
] |
zemiaozhu@outlook.com
|
bf4e35a17cd505f3970d1ba493b1fbbd9b43493c
|
7113ecb49ec8c2f74ede2b14f2e04dd39c97c851
|
/apps/users/models.py
|
3e17c843928fdf82e4ee91cdaebe68df32ebea93
|
[] |
no_license
|
Anton-Karpenko/test4
|
a6429dd0632bdd53511ee533d2314196270b0d01
|
29ce45a0737a08fce284aa9f752eeb5a9a6a64df
|
refs/heads/master
| 2020-06-22T17:11:36.484175
| 2019-07-21T21:52:29
| 2019-07-21T21:52:29
| 197,752,225
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 640
|
py
|
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.models import TimeStampedModel
from apps.base.utils import custom_uuid
class User(AbstractUser, TimeStampedModel):
id = models.CharField(
max_length=11,
primary_key=True,
default=custom_uuid,
editable=False,
)
# First Name and Last Name do not cover name patterns around the globe.
name = models.CharField(
_("Name of User"),
blank=True,
max_length=255
)
class Meta:
db_table = 'users'
|
[
"antykkarp@gmail.com"
] |
antykkarp@gmail.com
|
6f714648f18ce1ce8143128be377d7d28c4191de
|
c54e219c257e5b81489cda16268e17137b0ca9b4
|
/fake_script.py
|
c8e09215684366fe70b2ad2efc016809fa5304a6
|
[] |
no_license
|
ericearl/playground
|
259c33e3878f5907f55bd612dda771d206a8b5af
|
3e88d37a2db0c0081d5a770afa792b38736a5641
|
refs/heads/master
| 2020-07-08T12:16:10.453382
| 2019-08-21T22:18:30
| 2019-08-21T22:18:30
| 203,669,587
| 0
| 0
| null | 2019-08-21T22:18:32
| 2019-08-21T21:51:37
|
Python
|
UTF-8
|
Python
| false
| false
| 32
|
py
|
import script
print('new one')
|
[
"eric.earl@gmail.com"
] |
eric.earl@gmail.com
|
4ecd369a2ed6d3164513285595b15ebc98e9de89
|
8ced7a54037616cede4ec2449c9e29aa2f1fb9d1
|
/prac_04/litst_comprehentions.py
|
72b4f07273368fe6e45111aa0726c299d8518694
|
[] |
no_license
|
micah-clarke/Practicals
|
d95ed42918c6c83827a37f0abf7d1c2c5e36d2df
|
b8e1965dddce4e635ca2f25bf1682efc79962a11
|
refs/heads/master
| 2020-05-04T21:43:20.857199
| 2019-04-11T04:16:03
| 2019-04-11T04:18:36
| 179,486,105
| 0
| 0
| null | 2019-04-11T04:36:55
| 2019-04-04T11:41:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,529
|
py
|
"""
CP1404/CP5632 Practical
List comprehensions
"""
names = ["Bob", "Angel", "Jimi", "Alan", "Ada"]
full_names = ["Bob Martin", "Angel Harlem", "Jimi Hendrix", "Alan Turing",
"Ada Lovelace"]
# for loop that creates a new list containing the first letter of each name
first_initials = []
for name in names:
first_initials.append(name[0])
print(first_initials)
# list comprehension that does the same thing as the loop above
first_initials = [name[0] for name in names]
print(first_initials)
# list comprehension that creates a list containing the initials
# splits each name and adds the first letters of each part to a string
full_initials = [name.split()[0][0] + name.split()[1][0] for name in
full_names]
print(full_initials)
# one more example, using filtering to select only the names that start with A
a_names = [name for name in names if name.startswith('A')]
print(a_names)
# List comprehension to create a list of all of the full_names
# in lowercase format
lowercase_full_names = [name.lower() for name in full_names]
print(lowercase_full_names)
almost_numbers = ['0', '10', '21', '3', '-7', '88', '9']
# List comprehension to create a list of integers
# from the above list of strings
numbers = [int(almost_number) for almost_number in almost_numbers]
print(numbers)
# List comprehension to create a list of only the numbers that are
# greater than 9 from the numbers (not strings) you just created
big_numbers = [number for number in numbers if number > 9]
print(big_numbers)
|
[
"48541196+micah-clarke@users.noreply.github.com"
] |
48541196+micah-clarke@users.noreply.github.com
|
e477eb40de85d06949607669ae61fe6137e832d2
|
37361d420b3ecb1636a7a80720ac0ca572e6480b
|
/front_sy.py
|
de9aee206653bc3529026c4aae3af0b0c61a0fd5
|
[] |
no_license
|
chlrhalsgur/2021_1_py_project
|
1d7169d0622902e8e19fa9c803924e4625d53eb8
|
14861bccb8d26fa2531b39239c5db282ac8a0644
|
refs/heads/master
| 2023-05-28T17:14:24.482214
| 2021-06-10T09:54:12
| 2021-06-10T09:54:12
| 370,598,243
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,276
|
py
|
from tkinter import *
#from tkhtmlview import HTMLLabel  # tried to use HTML to link Naver Movies and the Naver movie ranking, but it did not work
#https://remixicon.com/  # icon set originally intended for this app
win=Tk()
#Called when "Show Movie" is clicked
def Naver_movie_Ranking():
    window_naver=Toplevel(win)
    window_naver.geometry("1000x600")
#Second page
def create_window():
    window = Toplevel(win) #open a new window
    window.geometry("1000x600")
    window.configure(bg='#4682B4')
    btn = Button(window, text = "Show Movie", command=Naver_movie_Ranking)
    btn1 = Button(window, text = "Show Movie Ranking")
    btn2 = Button(window, text = "Our New Movie Ranking")
    btn.pack(pady=40)
    btn1.pack(pady=20)
    btn2.pack(pady=20)
win.geometry("1000x600") #window size, width x height (pixels)
win.title("Movie Ranking") #window title
win.option_add("*Font","Courier 40") #set the default font and text size
#First page
lab=Label(win, text = "Pop Corn Movie")
lab.pack(side=TOP, pady=60)
lab.configure(font=("Courier", 70, "italic"))
#First page button
btn = Button(win, text="Let's start", command=create_window)
btn.pack(side=BOTTOM, pady=50)
#Background color
win.configure(bg='#49A')
win.mainloop() #start the event loop (opens the window)
|
[
"sy91031@naver.com"
] |
sy91031@naver.com
|
884954af9fd64a0f3d0508d1272327e2ed3bedf5
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03013/s813541273.py
|
bdbeb4a2fbfb0352ad56b6b9937305511b1f8a7c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
N, M = map(int, input().split(' '))
broken_list = []
if M > 0:
for i in range(M):
broken_list.append(int(input()))
broken_set =set(broken_list)
nums = [0] * (N + 1)
nums[0] = 1
# Guard N >= 1 so nums[1] is not touched when N == 0.
if N >= 1 and 1 not in broken_set:
    nums[1] = 1
for i in range(2, N + 1):
nums[i] = nums[i - 1] + nums[i - 2]
if i in broken_set:
nums[i] = 0
print(nums[N] % 1000000007)
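# Hand-checked example of the recurrence above: N = 4 with step 2 broken
# gives nums = [1, 1, 0, 1, 1] -- a single valid path 0 -> 1 -> 3 -> 4.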
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
ad75ec26cf69addbf6442ccd275abe67170dd61d
|
c0b0b50d52cb0d4f10f91595709a1e1815637e60
|
/ephr_module/migrations/0019_auto_20170402_0942.py
|
5d6ddc377bb8a61a6db3b8ecc8028e79b2970bdb
|
[] |
no_license
|
namkan/patientvault
|
50bdcea0c2f746533a1c11794fc14f76e838fa6a
|
5617a3686fe48325a5a77372c7bd29f30fe8a175
|
refs/heads/master
| 2022-11-25T20:10:56.884810
| 2017-05-01T12:18:23
| 2017-05-01T12:18:23
| 89,921,210
| 0
| 0
| null | 2022-11-22T01:26:28
| 2017-05-01T12:13:03
|
HTML
|
UTF-8
|
Python
| false
| false
| 411
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ephr_module', '0018_pvradorders_order_name'),
]
operations = [
migrations.RenameField(
model_name='pvradorders',
old_name='shared_yesno',
new_name='is_sharable',
),
]
|
[
"naman.kansal@vyalatech.com"
] |
naman.kansal@vyalatech.com
|
276a93f98115025f9db6a4c5e6df42b82e9feccc
|
db274b14aa63f4cf40b1e496ffeef918d8654f69
|
/manage.py
|
d6262ef4e1dd3c3abaf89b085aa2ffe2f3d672f2
|
[] |
no_license
|
mahmudgithub/demo_pactics_project_seven
|
02f98d8373dfa3e9b5d8e06d2e5f01a030d48291
|
4a8aa330a6abfb5e12916c368bd849190788127a
|
refs/heads/master
| 2022-03-28T11:58:34.185598
| 2020-01-29T06:35:22
| 2020-01-29T06:35:22
| 236,919,480
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project_g.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"mahmudhossain838@gmail.com"
] |
mahmudhossain838@gmail.com
|
0ae3c143fd0a34bdfeeca9add4677386aeca66be
|
e8ef988b5e13e39e430a72f5b99092d7f565bf2b
|
/app.py
|
a9b81f979691b63e4461db03ed5f153c15318593
|
[] |
no_license
|
gdeepakdeepuml/pomodoro-app
|
03c1c0969dce3c99e0627ac8aad53b68ef5b0099
|
bc78b3c78c77c54315d826f76aa515a45e91431e
|
refs/heads/main
| 2023-02-26T19:08:32.188016
| 2021-02-01T13:10:32
| 2021-02-01T13:10:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,744
|
py
|
# App created by Data Professor http://youtube.com/dataprofessor
# GitHub repo of this app
# Demo of this app
import streamlit as st
import time
# CSS by andfanilo
# Source: https://discuss.streamlit.io/t/creating-a-nicely-formatted-search-field/1804
def local_css(file_name):
with open(file_name) as f:
st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
#def remote_css(url):
# st.markdown(f'<link href="{url}" rel="stylesheet">', unsafe_allow_html=True)
#def icon(icon_name):
# st.markdown(f'<i class="material-icons">{icon_name}</i>', unsafe_allow_html=True)
local_css("style.css")
#remote_css('https://fonts.googleapis.com/icon?family=Material+Icons')
#---------------------------------#
st.write("""
# The Pomodoro App
Let's do some focus work in data science with this app.
Developed by: [Data Professor](http://youtube.com/dataprofessor)
""")
# Timer
# Created by adapting from:
# https://www.geeksforgeeks.org/how-to-create-a-countdown-timer-using-python/
# https://docs.streamlit.io/en/latest/api.html#lay-out-your-app
button_clicked = st.button("Start")
t1 = 1500
t2 = 300
if button_clicked:
with st.empty():
while t1:
mins, secs = divmod(t1, 60)
timer = '{:02d}:{:02d}'.format(mins, secs)
st.header(f"⏳ {timer}")
time.sleep(1)
t1 -= 1
st.success("🔔 25 minutes is over! Time for a break!")
with st.empty():
while t2:
# Start the break
mins2, secs2 = divmod(t2, 60)
timer2 = '{:02d}:{:02d}'.format(mins2, secs2)
st.header(f"⏳ {timer2}")
time.sleep(1)
t2 -= 1
st.error("⏰ 5 minute break is over!")
|
[
"noreply@github.com"
] |
gdeepakdeepuml.noreply@github.com
|
4159cf0257ad3d20a29b9c1d3308026f6be5c1cf
|
1925c535d439d2d47e27ace779f08be0b2a75750
|
/leetcode/best_time_to_buy_and_sell_stock_4.py
|
1d58d8730fa45eba6ecf813ee448ef105a05236d
|
[] |
no_license
|
arthurDz/algorithm-studies
|
ee77d716041671c4b8bb757d8d96f3d10b6589f7
|
1e4d23dd0c40df34f58d71c7ca3e6491be732075
|
refs/heads/master
| 2023-04-27T12:17:06.209278
| 2021-04-30T20:16:18
| 2021-04-30T20:16:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,462
|
py
|
# Say you have an array for which the ith element is the price of a given stock on day i.
# Design an algorithm to find the maximum profit. You may complete at most k transactions.
# Note:
# You may not engage in multiple transactions at the same time (ie, you must sell the stock before you buy again).
# Example 1:
# Input: [2,4,1], k = 2
# Output: 2
# Explanation: Buy on day 1 (price = 2) and sell on day 2 (price = 4), profit = 4-2 = 2.
# Example 2:
# Input: [3,2,6,5,0,3], k = 2
# Output: 7
# Explanation: Buy on day 2 (price = 2) and sell on day 3 (price = 6), profit = 6-2 = 4.
# Then buy on day 5 (price = 0) and sell on day 6 (price = 3), profit = 3-0 = 3.
def maxProfit(k, prices):
    if not prices or k <= 0: return 0  # k == 0 transactions must also yield 0 profit
minimum = prices[0]
profit = 0
for key, v in enumerate(prices):
minimum = min(minimum, v)
if k == 1:
profit = max(profit, v - minimum)
else:
profit = max(profit, v - minimum + maxProfit(k - 1, prices[key + 1:]))
return profit
def maxProfit(k, prices):
n = len(prices)
if n < 2:
return 0
# k is big enougth to cover all ramps.
if k >= n / 2:
return sum(i - j
for i, j in zip(prices[1:], prices[:-1]) if i - j > 0)
globalMax = [[0] * n for _ in xrange(k + 1)]
for i in xrange(1, k + 1):
        # The max profit with i transactions and selling stock on day j.
localMax = [0] * n
for j in xrange(1, n):
profit = prices[j] - prices[j - 1]
localMax[j] = max(
                # We have made max profit with (i - 1) transactions in
                # (j - 1) days.
                # For the last transaction, we buy stock on day (j - 1)
                # and sell it on day j.
                globalMax[i - 1][j - 1] + profit,
                # We have made max profit with (i - 1) transactions in
                # (j - 1) days.
                # For the last transaction, we buy stock on day j and
                # sell it on the same day, so we have 0 profit, apparently
                # we do not have to add it.
globalMax[i - 1][j - 1], # + 0,
# We have made profit in (j - 1) days.
# We want to cancel the day (j - 1) sale and sell it on
# day j.
localMax[j - 1] + profit)
globalMax[i][j] = max(globalMax[i][j - 1], localMax[j])
return globalMax[k][-1]
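# Note: the second maxProfit definition above shadows the first recursive
# sketch, so the calls below exercise the DP version. Quick self-check
# against the examples quoted at the top of this file (illustrative
# addition; Python 2, since the DP solution uses xrange):
if __name__ == "__main__":
    assert maxProfit(2, [2, 4, 1]) == 2
    assert maxProfit(2, [3, 2, 6, 5, 0, 3]) == 7
    print("maxProfit examples OK")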
|
[
"yunfan.yang@minerva.kgi.edu"
] |
yunfan.yang@minerva.kgi.edu
|
66ac40d9cca324cb59d106f29f489141d55e0c6b
|
fe52adcccf82ea5e81ec882eafb9fa11d7312e52
|
/main.py
|
9bd7ddaa888c538f473569acdb5383d8cf7b608c
|
[] |
no_license
|
f18-os/producer-consumer-lab-alexmelz95
|
15f53a1187ab701b0dfc1f0dd374fdf9e784d9ad
|
a1f98ccfe76697e825185a63857731dfa2c8cc4d
|
refs/heads/master
| 2020-04-07T06:00:51.482106
| 2018-11-24T22:01:41
| 2018-11-24T22:01:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
import ConsProdThread
import queue
fileName = 'clip.mp4'
# shared queues
extractionQueue = queue.Queue(10)
grayscaleQueue = queue.Queue(10)
theHelp.finished1 = False
theHelp.finished2 = False
t1 = ConsProdThread('extractFrames',params1=fileName,params2=extractionQueue)
t2 = ConsProdThread('grayScale',params1=extractionQueue, params2=grayscaleQueue)
t3 = ConsProdThread('displayFrames', params1=grayscaleQueue)
|
[
"alex.melz95@gmail.com"
] |
alex.melz95@gmail.com
|
80fde7ee8b5e1fdbb6283256a002d905055f43f7
|
d007f0fceaaad349b7b8cb9eda5382919621e644
|
/core/helpers.py
|
2493da2b4a8abb732fe2e371edfa043a366f1da7
|
[] |
no_license
|
dsarrut/badminapp
|
fddcc1b8f79b536e9ca0e5e952dcd8584fcb292e
|
de1ffe0879b443227c6d7cca972664c26e133e4f
|
refs/heads/master
| 2022-04-29T01:47:08.397626
| 2020-05-01T09:45:38
| 2020-05-01T09:45:38
| 254,719,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 48
|
py
|
def raise_except(s):
raise Exception(s)
|
[
"david.sarrut@gmail.com"
] |
david.sarrut@gmail.com
|
731c66717f6fccb33365c99d8aac3d158051db66
|
d954e2f74d1186c8e35be8ea579656513d8d3b98
|
/rllib/algorithms/algorithm.py
|
9900c03202990821f5dfb9100ad1ead2f61353ee
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
vakker/ray
|
a865de214e60f9e62d61c03ae7ce55ad6030f84c
|
de238dd626a48a16c8b3cd006f3482db75f63a83
|
refs/heads/master
| 2023-01-23T22:30:44.839942
| 2022-10-23T01:05:48
| 2022-10-23T01:05:48
| 171,845,804
| 0
| 1
|
Apache-2.0
| 2023-01-14T08:01:04
| 2019-02-21T09:54:36
|
Python
|
UTF-8
|
Python
| false
| false
| 140,996
|
py
|
from collections import defaultdict
import concurrent
import copy
from datetime import datetime
import functools
import gym
import importlib
import json
import logging
import math
import numpy as np
import os
from packaging import version
import pkg_resources
import tempfile
import time
from typing import (
Callable,
Container,
DefaultDict,
Dict,
List,
Optional,
Set,
Tuple,
Type,
Union,
)
from ray.rllib.offline.offline_evaluator import OfflineEvaluator
import tree
import ray
from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag
from ray.actor import ActorHandle
from ray.air.checkpoint import Checkpoint
import ray.cloudpickle as pickle
from ray.exceptions import GetTimeoutError, RayActorError, RayError
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig
from ray.rllib.algorithms.callbacks import DefaultCallbacks
from ray.rllib.algorithms.registry import ALGORITHMS as ALL_ALGORITHMS
from ray.rllib.env.env_context import EnvContext
from ray.rllib.env.utils import _gym_env_creator
from ray.rllib.evaluation.episode import Episode
from ray.rllib.evaluation.metrics import (
collect_episodes,
collect_metrics,
summarize_episodes,
)
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.execution.common import (
STEPS_TRAINED_THIS_ITER_COUNTER, # TODO: Backward compatibility.
)
from ray.rllib.execution.parallel_requests import AsyncRequestsManager
from ray.rllib.execution.rollout_ops import synchronous_parallel_sample
from ray.rllib.execution.train_ops import multi_gpu_train_one_step, train_one_step
from ray.rllib.offline import get_offline_io_resource_bundles
from ray.rllib.offline.estimators import (
OffPolicyEstimator,
ImportanceSampling,
WeightedImportanceSampling,
DirectMethod,
DoublyRobust,
)
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID, SampleBatch, concat_samples
from ray.rllib.utils import deep_update, FilterManager, merge_dicts
from ray.rllib.utils.annotations import (
DeveloperAPI,
ExperimentalAPI,
OverrideToImplementCustomLogic,
OverrideToImplementCustomLogic_CallToSuperRecommended,
PublicAPI,
override,
)
from ray.rllib.utils.checkpoints import CHECKPOINT_VERSION, get_checkpoint_info
from ray.rllib.utils.debug import update_global_seed_if_necessary
from ray.rllib.utils.deprecation import (
DEPRECATED_VALUE,
Deprecated,
deprecation_warning,
)
from ray.rllib.utils.error import ERR_MSG_INVALID_ENV_DESCRIPTOR, EnvError
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.from_config import from_config
from ray.rllib.utils.metrics import (
NUM_AGENT_STEPS_SAMPLED,
NUM_AGENT_STEPS_SAMPLED_THIS_ITER,
NUM_AGENT_STEPS_TRAINED,
NUM_ENV_STEPS_SAMPLED,
NUM_ENV_STEPS_SAMPLED_THIS_ITER,
NUM_ENV_STEPS_TRAINED,
SYNCH_WORKER_WEIGHTS_TIMER,
TRAINING_ITERATION_TIMER,
)
from ray.rllib.utils.metrics.learner_info import LEARNER_INFO
from ray.rllib.utils.policy import validate_policy_id
from ray.rllib.utils.pre_checks.multi_agent import check_multi_agent
from ray.rllib.utils.replay_buffers import MultiAgentReplayBuffer
from ray.rllib.utils.spaces import space_utils
from ray.rllib.utils.typing import (
AgentID,
AlgorithmConfigDict,
EnvCreator,
EnvInfoDict,
EnvType,
EpisodeID,
PartialAlgorithmConfigDict,
PolicyID,
PolicyState,
ResultDict,
SampleBatchType,
TensorStructType,
TensorType,
)
from ray.tune.execution.placement_groups import PlacementGroupFactory
from ray.tune.experiment.trial import ExportFormat
from ray.tune.logger import Logger, UnifiedLogger
from ray.tune.registry import ENV_CREATOR, _global_registry
from ray.tune.resources import Resources
from ray.tune.result import DEFAULT_RESULTS_DIR
from ray.tune.trainable import Trainable
from ray.util import log_once
from ray.util.timer import _Timer
tf1, tf, tfv = try_import_tf()
logger = logging.getLogger(__name__)
@DeveloperAPI
def with_common_config(extra_config: PartialAlgorithmConfigDict) -> AlgorithmConfigDict:
"""Returns the given config dict merged with common agent confs.
Args:
extra_config: A user defined partial config
which will get merged with a default AlgorithmConfig() object and returned
as plain python dict.
Returns:
AlgorithmConfigDict: The merged config dict resulting from AlgorithmConfig()
plus `extra_config`.
"""
return Algorithm.merge_trainer_configs(
AlgorithmConfig().to_dict(), extra_config, _allow_unknown_configs=True
)
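# A minimal usage sketch (hypothetical values, for illustration only):
#
#   merged = with_common_config({"train_batch_size": 2000})
#   assert merged["train_batch_size"] == 2000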
@PublicAPI
class Algorithm(Trainable):
"""An RLlib algorithm responsible for optimizing one or more Policies.
Algorithms contain a WorkerSet under `self.workers`. A WorkerSet is
normally composed of a single local worker
(self.workers.local_worker()), used to compute and apply learning updates,
and optionally one or more remote workers (self.workers.remote_workers()),
used to generate environment samples in parallel.
Each worker (remotes or local) contains a PolicyMap, which itself
may contain either one policy for single-agent training or one or more
policies for multi-agent training. Policies are synchronized
automatically from time to time using ray.remote calls. The exact
synchronization logic depends on the specific algorithm used,
but this usually happens from local worker to all remote workers and
after each training update.
You can write your own Algorithm classes by sub-classing from `Algorithm`
or any of its built-in sub-classes.
This allows you to override the `execution_plan` method to implement
your own algorithm logic. You can find the different built-in
algorithms' execution plans in their respective main py files,
e.g. rllib.algorithms.dqn.dqn.py or rllib.algorithms.impala.impala.py.
The most important API methods a Algorithm exposes are `train()`,
`evaluate()`, `save()` and `restore()`.
"""
# Whether to allow unknown top-level config keys.
_allow_unknown_configs = False
# List of top-level keys with value=dict, for which new sub-keys are
# allowed to be added to the value dict.
_allow_unknown_subkeys = [
"tf_session_args",
"local_tf_session_args",
"env_config",
"model",
"optimizer",
"multiagent",
"custom_resources_per_worker",
"evaluation_config",
"exploration_config",
"replay_buffer_config",
"extra_python_environs_for_worker",
"input_config",
"output_config",
]
# List of top level keys with value=dict, for which we always override the
# entire value (dict), iff the "type" key in that value dict changes.
_override_all_subkeys_if_type_changes = [
"exploration_config",
"replay_buffer_config",
]
# List of keys that are always fully overridden if present in any dict or sub-dict
_override_all_key_list = ["off_policy_estimation_methods"]
_progress_metrics = [
"episode_reward_mean",
"evaluation/episode_reward_mean",
"num_env_steps_sampled",
"num_env_steps_trained",
]
@staticmethod
def from_checkpoint(
checkpoint: Union[str, Checkpoint],
policy_ids: Optional[Container[PolicyID]] = None,
policy_mapping_fn: Optional[Callable[[AgentID, EpisodeID], PolicyID]] = None,
policies_to_train: Optional[
Union[
Container[PolicyID],
Callable[[PolicyID, Optional[SampleBatchType]], bool],
]
] = None,
) -> "Algorithm":
"""Creates a new algorithm instance from a given checkpoint.
Note: This method must remain backward compatible from 2.0.0 on.
Args:
checkpoint: The path (str) to the checkpoint directory to use
or an AIR Checkpoint instance to restore from.
policy_ids: Optional list of PolicyIDs to recover. This allows users to
restore an Algorithm with only a subset of the originally present
Policies.
policy_mapping_fn: An optional (updated) policy mapping function
to use from here on.
policies_to_train: An optional list of policy IDs to be trained
or a callable taking PolicyID and SampleBatchType and
returning a bool (trainable or not?).
If None, will keep the existing setup in place. Policies
whose IDs are not in the list (or for which the callable
returns False) will not be updated.
Returns:
The instantiated Algorithm.
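Example:
A hedged sketch (assuming "/tmp/my_checkpoint", an illustrative path,
points to a checkpoint dir previously created via `algo.save()`):
>>> from ray.rllib.algorithms.algorithm import Algorithm # doctest: +SKIP
>>> algo = Algorithm.from_checkpoint("/tmp/my_checkpoint") # doctest: +SKIP
>>> algo.train() # doctest: +SKIP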
"""
checkpoint_info = get_checkpoint_info(checkpoint)
# Not possible for v0.1 checkpoints (algorithm class and config
# information missing or very hard to retrieve).
if checkpoint_info["checkpoint_version"] == version.Version("0.1"):
raise ValueError(
"Cannot restore a v0 checkpoint using `Algorithm.from_checkpoint()`!"
"In this case, do the following:\n"
"1) Create a new Algorithm object using your original config.\n"
"2) Call the `restore()` method of this algo object passing it"
" your checkpoint dir or AIR Checkpoint object."
)
if checkpoint_info["checkpoint_version"] < version.Version("1.0"):
raise ValueError(
"`checkpoint_info['checkpoint_version']` in `Algorithm.from_checkpoint"
"()` must be 1.0 or later! You are using a checkpoint with "
f"version v{checkpoint_info['checkpoint_version']}."
)
state = Algorithm._checkpoint_info_to_algorithm_state(
checkpoint_info=checkpoint_info,
policy_ids=policy_ids,
policy_mapping_fn=policy_mapping_fn,
policies_to_train=policies_to_train,
)
return Algorithm.from_state(state)
@staticmethod
def from_state(state: Dict) -> "Algorithm":
"""Recovers an Algorithm from a state object.
The `state` of an instantiated Algorithm can be retrieved by calling its
`get_state` method. It contains all information necessary
to create the Algorithm from scratch. No access to the original code (e.g.
configs, knowledge of the Algorithm's class, etc.) is needed.
Args:
state: The state to recover a new Algorithm instance from.
Returns:
A new Algorithm instance.
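Example:
A hedged sketch, round-tripping an existing Algorithm through its
state dict (see `get_state` mentioned above):
>>> state = algo.get_state() # doctest: +SKIP
>>> new_algo = Algorithm.from_state(state) # doctest: +SKIP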
"""
algorithm_class: Type[Algorithm] = state.get("algorithm_class")
if algorithm_class is None:
raise ValueError(
"No `algorithm_class` key was found in given `state`! "
"Cannot create new Algorithm."
)
# Create the new algo.
config = state.get("config")
if not config:
raise ValueError("No `config` found in given Algorithm state!")
new_algo = algorithm_class(config=config)
# Set the new algo's state.
new_algo.__setstate__(state)
# Return the new algo.
return new_algo
@PublicAPI
def __init__(
self,
config: Optional[Union[PartialAlgorithmConfigDict, AlgorithmConfig]] = None,
env: Optional[Union[str, EnvType]] = None,
logger_creator: Optional[Callable[[], Logger]] = None,
**kwargs,
):
"""Initializes an Algorithm instance.
Args:
config: Algorithm-specific configuration dict.
env: Name of the environment to use (e.g. a gym-registered str),
a full class path (e.g.
"ray.rllib.examples.env.random_env.RandomEnv"), or an Env
class directly. Note that this arg can also be specified via
the "env" key in `config`.
logger_creator: Callable that creates a ray.tune.Logger
object. If unspecified, a default logger is created.
**kwargs: Arguments passed to the Trainable base class.
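Example:
A minimal sketch (config values and env name are illustrative):
>>> from ray.rllib.algorithms.ppo import PPO # doctest: +SKIP
>>> algo = PPO(config={"num_workers": 2}, env="CartPole-v1") # doctest: +SKIP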
"""
# User provided (partial) config (this may be w/o the default
# Algorithm's Config object). Will get merged with AlgorithmConfig()
# in self.setup().
config = config or {}
# Resolve AlgorithmConfig into a plain dict.
# TODO: In the future, only support AlgorithmConfig objects here.
if isinstance(config, AlgorithmConfig):
config = config.to_dict()
# Convert `env` provided in config into a concrete env creator callable, which
# takes an EnvContext (config dict) as arg and returns an RLlib supported Env
# type (e.g. a gym.Env).
self._env_id, self.env_creator = self._get_env_id_and_creator(
env or config.get("env"), config
)
env_descr = (
self._env_id.__name__ if isinstance(self._env_id, type) else self._env_id
)
# Placeholder for a local replay buffer instance.
self.local_replay_buffer = None
# Create a default logger creator if no logger_creator is specified
if logger_creator is None:
# Default logdir prefix containing the agent's name and the
# env id.
timestr = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
logdir_prefix = "{}_{}_{}".format(str(self), env_descr, timestr)
if not os.path.exists(DEFAULT_RESULTS_DIR):
# Possible race condition if dir is created several times on
# rollout workers
os.makedirs(DEFAULT_RESULTS_DIR, exist_ok=True)
logdir = tempfile.mkdtemp(prefix=logdir_prefix, dir=DEFAULT_RESULTS_DIR)
# Allow users to more precisely configure the created logger
# via "logger_config.type".
if config.get("logger_config") and "type" in config["logger_config"]:
def default_logger_creator(config):
"""Creates a custom logger with the default prefix."""
cfg = config["logger_config"].copy()
cls = cfg.pop("type")
# Provide default for logdir, in case the user does
# not specify this in the "logger_config" dict.
logdir_ = cfg.pop("logdir", logdir)
return from_config(cls=cls, _args=[cfg], logdir=logdir_)
# If no `type` given, use tune's UnifiedLogger as last resort.
else:
def default_logger_creator(config):
"""Creates a Unified logger with the default prefix."""
return UnifiedLogger(config, logdir, loggers=None)
logger_creator = default_logger_creator
# Metrics-related properties.
self._timers = defaultdict(_Timer)
self._counters = defaultdict(int)
self._episode_history = []
self._episodes_to_be_collected = []
self._remote_workers_for_metrics = []
# Evaluation WorkerSet and metrics last returned by `self.evaluate()`.
self.evaluation_workers: Optional[WorkerSet] = None
# If evaluation duration is "auto", use an AsyncRequestsManager to be more
# robust against eval worker failures.
self._evaluation_async_req_manager: Optional[AsyncRequestsManager] = None
# Initialize common evaluation_metrics to nan, before they become
# available. We want to make sure the metrics are always present
# (although their values may be nan), so that Tune does not complain
# when we use these as stopping criteria.
self.evaluation_metrics = {
"evaluation": {
"episode_reward_max": np.nan,
"episode_reward_min": np.nan,
"episode_reward_mean": np.nan,
}
}
super().__init__(config=config, logger_creator=logger_creator, **kwargs)
# Check whether `training_iteration` is still a tune.Trainable property
# and has not been overridden by the user in an attempt to implement the
# algo's logic (this should now be done inside `training_step`).
try:
assert isinstance(self.training_iteration, int)
except AssertionError:
raise AssertionError(
"Your Algorithm's `training_iteration` seems to be overridden by your "
"custom training logic! To solve this problem, simply rename your "
"`self.training_iteration()` method into `self.training_step`."
)
@OverrideToImplementCustomLogic
@classmethod
def get_default_config(cls) -> AlgorithmConfigDict:
return AlgorithmConfig().to_dict()
@OverrideToImplementCustomLogic_CallToSuperRecommended
@override(Trainable)
def setup(self, config: PartialAlgorithmConfigDict):
# Set up our config: Merge the user-supplied config (which may be
# a partial config dict) with the class' default config.
self.config = self.merge_trainer_configs(
self.get_default_config(), config, self._allow_unknown_configs
)
self.config["env"] = self._env_id
# Validate the framework settings in config.
self.validate_framework(self.config)
# Set Algorithm's seed after we have - if necessary - enabled
# tf eager-execution.
update_global_seed_if_necessary(self.config["framework"], self.config["seed"])
self.validate_config(self.config)
self._record_usage(self.config)
self.callbacks = self.config["callbacks"]()
log_level = self.config.get("log_level")
if log_level in ["WARN", "ERROR"]:
logger.info(
"Current log_level is {}. For more information, "
"set 'log_level': 'INFO' / 'DEBUG' or use the -v and "
"-vv flags.".format(log_level)
)
if self.config.get("log_level"):
logging.getLogger("ray.rllib").setLevel(self.config["log_level"])
# Create local replay buffer if necessary.
self.local_replay_buffer = self._create_local_replay_buffer_if_necessary(
self.config
)
# Create a dict mapping ActorHandles to sets of open remote
# requests (object refs). This way, we keep track of how many
# (e.g. `sample()`) requests each actor inside this Algorithm
# (e.g. a remote RolloutWorker) has already been sent.
self.remote_requests_in_flight: DefaultDict[
ActorHandle, Set[ray.ObjectRef]
] = defaultdict(set)
self.workers: Optional[WorkerSet] = None
self.train_exec_impl = None
# Offline RL settings.
input_evaluation = self.config.get("input_evaluation")
if input_evaluation is not None and input_evaluation is not DEPRECATED_VALUE:
ope_dict = {str(ope): {"type": ope} for ope in input_evaluation}
deprecation_warning(
old="config.input_evaluation={}".format(input_evaluation),
new='config["evaluation_config"]'
'["off_policy_estimation_methods"]={}'.format(
ope_dict,
),
error=True,
help="Running OPE during training is not recommended.",
)
self.config["off_policy_estimation_methods"] = ope_dict
# Deprecated way of implementing Trainer sub-classes (or "templates"
# via the `build_trainer` utility function).
# Instead, sub-classes should override the Trainable's `setup()`
# method and call super().setup() from within that override at some
# point.
# Old design: Override `Trainer._init`.
_init = False
try:
self._init(self.config, self.env_creator)
_init = True
# New design: Override `Trainable.setup()` (as intended by tune.Trainable)
# and do or don't call `super().setup()` from within your override.
# By default, `super().setup()` will create both worker sets:
# "rollout workers" for collecting samples for training and - if
# applicable - "evaluation workers" for evaluation runs in between or
# parallel to training.
# TODO: Deprecate `_init()` and remove this try/except block.
except NotImplementedError:
pass
# Only if user did not override `_init()`:
if _init is False:
# - Create rollout workers here automatically.
# - Run the execution plan to create the local iterator to `next()`
# in each training iteration.
# This matches the behavior of using `build_trainer()`, which
# has been deprecated.
try:
self.workers = WorkerSet(
env_creator=self.env_creator,
validate_env=self.validate_env,
policy_class=self.get_default_policy_class(self.config),
trainer_config=self.config,
num_workers=self.config["num_workers"],
local_worker=True,
logdir=self.logdir,
)
# WorkerSet creation may fail if some (remote) workers cannot
# be initialized properly (due to errors in the RolloutWorker's
# constructor).
except RayActorError as e:
# In case of an actor (remote worker) init failure, the remote worker
# may still exist and be accessible; however, calling e.g. its
# `sample.remote()` would then result in strange "property not found"
# errors.
if e.actor_init_failed:
# Raise the original error here that the RolloutWorker raised
# during its construction process. This is to enforce transparency
# for the user (better to understand the real reason behind the
# failure).
# - e.args[0]: The RayTaskError (inside the caught RayActorError).
# - e.args[0].args[2]: The original Exception (e.g. a ValueError due
# to a config mismatch) thrown inside the actor.
raise e.args[0].args[2]
# In any other case, raise the RayActorError as-is.
else:
raise e
# By default, collect metrics for all remote workers.
self._remote_workers_for_metrics = self.workers.remote_workers()
# TODO (avnishn): Remove the execution plan API by q1 2023
# Function defining one single training iteration's behavior.
if self.config["_disable_execution_plan_api"]:
# Ensure remote workers are initially in sync with the local worker.
self.workers.sync_weights()
# LocalIterator-creating "execution plan".
# Only call this once here to create `self.train_exec_impl`,
# which is a ray.util.iter.LocalIterator that will be `next`'d
# on each training iteration.
else:
self.train_exec_impl = self.execution_plan(
self.workers, self.config, **self._kwargs_for_execution_plan()
)
# Now that workers have been created, update our policies
# dict in config[multiagent] (with the correct original/
# unpreprocessed spaces).
self.config["multiagent"][
"policies"
] = self.workers.local_worker().policy_dict
# Evaluation WorkerSet setup.
# User would like to set up a separate evaluation worker set.
# Update with evaluation settings:
user_eval_config = copy.deepcopy(self.config["evaluation_config"])
# Merge user-provided eval config with the base config. This makes sure
# the eval config is always complete, no matter whether we have eval
# workers or perform evaluation on the (non-eval) local worker.
eval_config = merge_dicts(self.config, user_eval_config)
self.config["evaluation_config"] = eval_config
if self.config.get("evaluation_num_workers", 0) > 0 or self.config.get(
"evaluation_interval"
):
logger.debug(f"Using evaluation_config: {user_eval_config}.")
# Validate evaluation config.
self.validate_config(eval_config)
# Set the `in_evaluation` flag.
eval_config["in_evaluation"] = True
# Evaluation duration unit: episodes.
# Switch on `complete_episodes` rollouts. Also, make sure
# rollout fragments are short so we never have more than one
# episode in one rollout.
if eval_config["evaluation_duration_unit"] == "episodes":
eval_config.update(
{
"batch_mode": "complete_episodes",
"rollout_fragment_length": 1,
}
)
# Evaluation duration unit: timesteps.
# - Set `batch_mode=truncate_episodes` so we don't perform rollouts
# strictly along episode borders.
# Set `rollout_fragment_length` such that desired steps are divided
# equally amongst workers or - in "auto" duration mode - set it
# to a reasonably small number (10), such that a single `sample()`
# call doesn't take too much time and we can stop evaluation as soon
# as possible after the train step is completed.
else:
eval_config.update(
{
"batch_mode": "truncate_episodes",
"rollout_fragment_length": 10
if self.config["evaluation_duration"] == "auto"
else int(
math.ceil(
self.config["evaluation_duration"]
/ (self.config["evaluation_num_workers"] or 1)
)
),
}
)
self.config["evaluation_config"] = eval_config
_, env_creator = self._get_env_id_and_creator(
eval_config.get("env"), eval_config
)
# Create a separate evaluation worker set for evaluation.
# If evaluation_num_workers=0, use the evaluation set's local
# worker for evaluation, otherwise, use its remote workers
# (parallelized evaluation).
self.evaluation_workers: WorkerSet = WorkerSet(
env_creator=env_creator,
validate_env=None,
policy_class=self.get_default_policy_class(self.config),
trainer_config=eval_config,
num_workers=self.config["evaluation_num_workers"],
# Don't even create a local worker if num_workers > 0.
local_worker=False,
logdir=self.logdir,
)
if self.config["enable_async_evaluation"]:
self._evaluation_async_req_manager = AsyncRequestsManager(
workers=self.evaluation_workers.remote_workers(),
max_remote_requests_in_flight_per_worker=1,
return_object_refs=True,
)
self._evaluation_weights_seq_number = 0
self.reward_estimators: Dict[str, OffPolicyEstimator] = {}
ope_types = {
"is": ImportanceSampling,
"wis": WeightedImportanceSampling,
"dm": DirectMethod,
"dr": DoublyRobust,
}
for name, method_config in self.config["off_policy_estimation_methods"].items():
method_type = method_config.pop("type")
if method_type in ope_types:
deprecation_warning(
old=method_type,
new=str(ope_types[method_type]),
error=True,
)
method_type = ope_types[method_type]
elif isinstance(method_type, str):
logger.log(0, "Trying to import from string: " + method_type)
mod, obj = method_type.rsplit(".", 1)
mod = importlib.import_module(mod)
method_type = getattr(mod, obj)
if isinstance(method_type, type) and issubclass(
method_type, OfflineEvaluator
):
# TODO(kourosh) : Add an integration test for all these
# offline evaluators.
policy = self.get_policy()
if issubclass(method_type, OffPolicyEstimator):
method_config["gamma"] = self.config["gamma"]
self.reward_estimators[name] = method_type(policy, **method_config)
else:
raise ValueError(
f"Unknown off_policy_estimation type: {method_type}! Must be "
"either a class path or a sub-class of ray.rllib."
"offline.estimators.off_policy_estimator::OffPolicyEstimator"
)
# Run `on_algorithm_init` callback after initialization is done.
self.callbacks.on_algorithm_init(algorithm=self)
# TODO: Deprecated: In your sub-classes of Trainer, override `setup()`
# directly and call super().setup() from within it if you would like the
# default setup behavior plus some own setup logic.
# If you don't need the env/workers/config/etc.. setup for you by super,
# simply do not call super().setup() from your overridden method.
def _init(self, config: AlgorithmConfigDict, env_creator: EnvCreator) -> None:
raise NotImplementedError
@OverrideToImplementCustomLogic
def get_default_policy_class(self, config: AlgorithmConfigDict) -> Type[Policy]:
"""Returns a default Policy class to use, given a config.
This class will be used inside RolloutWorkers' PolicyMaps in case
the policy class is not provided by the user in any single- or
multi-agent PolicySpec.
This method is experimental and currently only used, iff the Trainer
class was not created using the `build_trainer` utility and if
the Trainer sub-class does not override `_init()` and create it's
own WorkerSet in `_init()`.
"""
return getattr(self, "_policy_class", None)
@override(Trainable)
def step(self) -> ResultDict:
"""Implements the main `Trainer.train()` logic.
Takes n attempts to perform a single training step. Thereby
catches RayErrors resulting from worker failures. After n attempts,
fails gracefully.
Override this method in your Trainer sub-classes if you would like to
handle worker failures yourself.
Otherwise, override only `training_step()` to implement the core
algorithm logic.
Returns:
The results dict with stats/infos on sampling, training,
and - if required - evaluation.
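Example:
`step()` is normally not called directly, but via `train()`
(inherited from tune.Trainable), which wraps it:
>>> results = algo.train() # doctest: +SKIP
>>> results["episode_reward_mean"] # doctest: +SKIP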
"""
# Do we have to run `self.evaluate()` this iteration?
# `self.iteration` gets incremented after this function returns,
# meaning that e.g. the first time this function is called,
# self.iteration will be 0.
evaluate_this_iter = (
self.config["evaluation_interval"] is not None
and (self.iteration + 1) % self.config["evaluation_interval"] == 0
)
# Results dict for training (and, if applicable, evaluation).
results: ResultDict = {}
local_worker = (
self.workers.local_worker()
if hasattr(self.workers, "local_worker")
else None
)
# Parallel eval + training: Kick off evaluation-loop and parallel train() call.
if evaluate_this_iter and self.config["evaluation_parallel_to_training"]:
(
results,
train_iter_ctx,
) = self._run_one_training_iteration_and_evaluation_in_parallel()
# - No evaluation necessary, just run the next training iteration.
# - We have to evaluate in this training iteration, but no parallelism ->
# evaluate after the training iteration is entirely done.
else:
results, train_iter_ctx = self._run_one_training_iteration()
# Sequential: Train (already done above), then evaluate.
if evaluate_this_iter and not self.config["evaluation_parallel_to_training"]:
results.update(self._run_one_evaluation(train_future=None))
# Attach latest available evaluation results to train results,
# if necessary.
if not evaluate_this_iter and self.config["always_attach_evaluation_results"]:
assert isinstance(
self.evaluation_metrics, dict
), "Trainer.evaluate() needs to return a dict."
results.update(self.evaluation_metrics)
if hasattr(self, "workers") and isinstance(self.workers, WorkerSet):
# Sync filters on workers.
self._sync_filters_if_needed(
from_worker=self.workers.local_worker(),
workers=self.workers,
timeout_seconds=self.config[
"sync_filters_on_rollout_workers_timeout_s"
],
)
# TODO (avnishn): Remove the execution plan API by q1 2023
# Collect worker metrics and combine them with `results`.
if self.config["_disable_execution_plan_api"]:
episodes_this_iter, self._episodes_to_be_collected = collect_episodes(
local_worker,
self._remote_workers_for_metrics,
self._episodes_to_be_collected,
timeout_seconds=self.config["metrics_episode_collection_timeout_s"],
)
results = self._compile_iteration_results(
episodes_this_iter=episodes_this_iter,
step_ctx=train_iter_ctx,
iteration_results=results,
)
# Check `env_task_fn` for possible update of the env's task.
if self.config["env_task_fn"] is not None:
if not callable(self.config["env_task_fn"]):
raise ValueError(
"`env_task_fn` must be None or a callable taking "
"[train_results, env, env_ctx] as args!"
)
def fn(env, env_context, task_fn):
new_task = task_fn(results, env, env_context)
cur_task = env.get_task()
if cur_task != new_task:
env.set_task(new_task)
fn = functools.partial(fn, task_fn=self.config["env_task_fn"])
self.workers.foreach_env_with_context(fn)
return results
@PublicAPI
def evaluate(
self,
duration_fn: Optional[Callable[[int], int]] = None,
) -> dict:
"""Evaluates current policy under `evaluation_config` settings.
Note that the evaluation settings used here result from merging
`evaluation_config` with the normal trainer config.
Args:
duration_fn: An optional callable taking the already run
num episodes as only arg and returning the number of
episodes left to run. It's used to find out whether
evaluation should continue.
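Example:
A hedged sketch (assuming evaluation workers were configured, e.g.
via `evaluation_interval` and `evaluation_num_workers`):
>>> results = algo.evaluate() # doctest: +SKIP
>>> results["evaluation"]["episode_reward_mean"] # doctest: +SKIP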
"""
# Call the `_before_evaluate` hook.
self._before_evaluate()
# Sync weights to the evaluation WorkerSet.
if self.evaluation_workers is not None:
self.evaluation_workers.sync_weights(
from_worker=self.workers.local_worker()
)
self._sync_filters_if_needed(
from_worker=self.workers.local_worker(),
workers=self.evaluation_workers,
timeout_seconds=self.config[
"sync_filters_on_rollout_workers_timeout_s"
],
)
self.callbacks.on_evaluate_start(algorithm=self)
if self.config["custom_eval_function"]:
logger.info(
"Running custom eval function {}".format(
self.config["custom_eval_function"]
)
)
metrics = self.config["custom_eval_function"](self, self.evaluation_workers)
if not metrics or not isinstance(metrics, dict):
raise ValueError(
"Custom eval function must return "
"dict of metrics, got {}.".format(metrics)
)
else:
if (
self.evaluation_workers is None
and self.workers.local_worker().input_reader is None
):
raise ValueError(
"Cannot evaluate w/o an evaluation worker set in "
"the Trainer or w/o an env on the local worker!\n"
"Try one of the following:\n1) Set "
"`evaluation_interval` >= 0 to force creating a "
"separate evaluation worker set.\n2) Set "
"`create_env_on_driver=True` to force the local "
"(non-eval) worker to have an environment to "
"evaluate on."
)
# How many episodes/timesteps do we need to run?
# In "auto" mode (only for parallel eval + training): Run as long
# as training lasts.
unit = self.config["evaluation_duration_unit"]
eval_cfg = self.config["evaluation_config"]
rollout = eval_cfg["rollout_fragment_length"]
num_envs = eval_cfg["num_envs_per_worker"]
auto = self.config["evaluation_duration"] == "auto"
duration = (
self.config["evaluation_duration"]
if not auto
else (self.config["evaluation_num_workers"] or 1)
* (1 if unit == "episodes" else rollout)
)
agent_steps_this_iter = 0
env_steps_this_iter = 0
# The default duration_fn returns the number of units (episodes or
# timesteps) still left to be run.
if duration_fn is None:
def duration_fn(num_units_done):
return duration - num_units_done
logger.info(f"Evaluating current policy for {duration} {unit}.")
metrics = None
all_batches = []
# No evaluation worker set ->
# Do evaluation using the local worker. Expect error due to the
# local worker not having an env.
if self.evaluation_workers is None:
# If unit=episodes -> Run n times `sample()` (each sample
# produces exactly 1 episode).
# If unit=ts -> Run 1 `sample()` b/c the
# `rollout_fragment_length` is exactly the desired ts.
iters = duration if unit == "episodes" else 1
for _ in range(iters):
batch = self.workers.local_worker().sample()
agent_steps_this_iter += batch.agent_steps()
env_steps_this_iter += batch.env_steps()
if self.reward_estimators:
all_batches.append(batch)
metrics = collect_metrics(
self.workers.local_worker(),
keep_custom_metrics=eval_cfg["keep_per_episode_custom_metrics"],
timeout_seconds=eval_cfg["metrics_episode_collection_timeout_s"],
)
# Evaluation worker set only has local worker.
elif self.config["evaluation_num_workers"] == 0:
# If unit=episodes -> Run n times `sample()` (each sample
# produces exactly 1 episode).
# If unit=ts -> Run 1 `sample()` b/c the
# `rollout_fragment_length` is exactly the desired ts.
iters = duration if unit == "episodes" else 1
for _ in range(iters):
batch = self.evaluation_workers.local_worker().sample()
agent_steps_this_iter += batch.agent_steps()
env_steps_this_iter += batch.env_steps()
if self.reward_estimators:
all_batches.append(batch)
# Evaluation worker set has n remote workers.
else:
# How many episodes have we run (across all eval workers)?
num_units_done = 0
_round = 0
while True:
units_left_to_do = duration_fn(num_units_done)
if units_left_to_do <= 0:
break
_round += 1
try:
batches = ray.get(
[
w.sample.remote()
for i, w in enumerate(
self.evaluation_workers.remote_workers()
)
if i * (1 if unit == "episodes" else rollout * num_envs)
< units_left_to_do
],
timeout=self.config["evaluation_sample_timeout_s"],
)
except GetTimeoutError:
logger.warning(
"Calling `sample()` on your remote evaluation worker(s) "
"resulted in a timeout (after the configured "
f"{self.config['evaluation_sample_timeout_s']} seconds)! "
"Try to set `evaluation_sample_timeout_s` in your config"
" to a larger value."
+ (
" If your episodes don't terminate easily, you may "
"also want to set `evaluation_duration_unit` to "
"'timesteps' (instead of 'episodes')."
if unit == "episodes"
else ""
)
)
break
_agent_steps = sum(b.agent_steps() for b in batches)
_env_steps = sum(b.env_steps() for b in batches)
# 1 episode per returned batch.
if unit == "episodes":
num_units_done += len(batches)
# Make sure all batches are exactly one episode.
for ma_batch in batches:
ma_batch = ma_batch.as_multi_agent()
for batch in ma_batch.policy_batches.values():
assert np.sum(batch[SampleBatch.DONES])
# n timesteps per returned batch.
else:
num_units_done += (
_agent_steps if self._by_agent_steps else _env_steps
)
if self.reward_estimators:
# TODO: (kourosh) This approach will cause an OOM issue when
# the dataset gets huge (should be ok for now).
all_batches.extend(batches)
agent_steps_this_iter += _agent_steps
env_steps_this_iter += _env_steps
logger.info(
f"Ran round {_round} of parallel evaluation "
f"({num_units_done}/{duration if not auto else '?'} "
f"{unit} done)"
)
if metrics is None:
metrics = collect_metrics(
self.evaluation_workers.local_worker(),
self.evaluation_workers.remote_workers(),
keep_custom_metrics=self.config["keep_per_episode_custom_metrics"],
timeout_seconds=eval_cfg["metrics_episode_collection_timeout_s"],
)
metrics[NUM_AGENT_STEPS_SAMPLED_THIS_ITER] = agent_steps_this_iter
metrics[NUM_ENV_STEPS_SAMPLED_THIS_ITER] = env_steps_this_iter
# TODO: Remove this key at some point. Here for backward compatibility.
metrics["timesteps_this_iter"] = env_steps_this_iter
# Compute off-policy estimates
estimates = defaultdict(list)
# For each batch, run the estimator's forward pass.
for name, estimator in self.reward_estimators.items():
for batch in all_batches:
estimate_result = estimator.estimate(
batch,
split_batch_by_episode=self.config[
"ope_split_batch_by_episode"
],
)
estimates[name].append(estimate_result)
# Collate estimates from all batches.
if estimates:
metrics["off_policy_estimator"] = {}
for name, estimate_list in estimates.items():
avg_estimate = tree.map_structure(
lambda *x: np.mean(x, axis=0), *estimate_list
)
metrics["off_policy_estimator"][name] = avg_estimate
# Evaluation does not run for every step.
# Save evaluation metrics on trainer, so it can be attached to
# subsequent step results as latest evaluation result.
self.evaluation_metrics = {"evaluation": metrics}
# Trigger `on_evaluate_end` callback.
self.callbacks.on_evaluate_end(
algorithm=self, evaluation_metrics=self.evaluation_metrics
)
# Also return the results here for convenience.
return self.evaluation_metrics
@ExperimentalAPI
def _evaluate_async(
self,
duration_fn: Optional[Callable[[int], int]] = None,
) -> dict:
"""Evaluates current policy under `evaluation_config` settings.
Uses the AsyncRequestsManager to send frequent `sample.remote()`
requests to the evaluation RolloutWorkers and collect the results of these
calls. Handles worker failures (or slowdowns) gracefully due to the
asynchronous nature of these requests and the fact that other eval
RolloutWorkers can cover the workload.
Important Note: This will replace the current `self.evaluate()` method as the
default in the future.
Args:
duration_fn: An optional callable taking the already run
num episodes as only arg and returning the number of
episodes left to run. It's used to find out whether
evaluation should continue.
"""
# How many episodes/timesteps do we need to run?
# In "auto" mode (only for parallel eval + training): Run as long
# as training lasts.
unit = self.config["evaluation_duration_unit"]
eval_cfg = self.config["evaluation_config"]
rollout = eval_cfg["rollout_fragment_length"]
num_envs = eval_cfg["num_envs_per_worker"]
auto = self.config["evaluation_duration"] == "auto"
duration = (
self.config["evaluation_duration"]
if not auto
else (self.config["evaluation_num_workers"] or 1)
* (1 if unit == "episodes" else rollout)
)
# Call the `_before_evaluate` hook.
self._before_evaluate()
# Put weights only once into object store and use same object
# ref to synch to all workers.
self._evaluation_weights_seq_number += 1
weights_ref = ray.put(self.workers.local_worker().get_weights())
# TODO(Jun): Make sure this cannot block for e.g. 1h. Implement solution via
# connectors.
self._sync_filters_if_needed(
from_worker=self.workers.local_worker(),
workers=self.evaluation_workers,
timeout_seconds=eval_cfg.get("sync_filters_on_rollout_workers_timeout_s"),
)
if self.config["custom_eval_function"]:
raise ValueError(
"`custom_eval_function` not supported in combination "
"with `enable_async_evaluation=True` config setting!"
)
if self.evaluation_workers is None and (
self.workers.local_worker().input_reader is None
or self.config["evaluation_num_workers"] == 0
):
raise ValueError(
"Evaluation w/o eval workers (calling Algorithm.evaluate() w/o "
"evaluation specifically set up) OR evaluation without input reader "
"OR evaluation with only a local evaluation worker "
"(`evaluation_num_workers=0`) not supported in combination "
"with `enable_async_evaluation=True` config setting!"
)
agent_steps_this_iter = 0
env_steps_this_iter = 0
logger.info(f"Evaluating current policy for {duration} {unit}.")
all_batches = []
# The default duration_fn returns the number of units (episodes or
# timesteps) still left to be run.
if duration_fn is None:
def duration_fn(num_units_done):
return duration - num_units_done
def remote_fn(worker, w_ref, w_seq_no):
# Pass in seq-no so that eval workers may ignore this call if no update has
# happened since the last call to `remote_fn` (sample).
worker.set_weights(weights=w_ref, weights_seq_no=w_seq_no)
batch = worker.sample()
metrics = worker.get_metrics()
return batch, metrics, w_seq_no
rollout_metrics = []
# How many episodes have we run (across all eval workers)?
num_units_done = 0
_round = 0
errors = []
while len(self._evaluation_async_req_manager.workers) > 0:
units_left_to_do = duration_fn(num_units_done)
if units_left_to_do <= 0:
break
_round += 1
# Use the AsyncRequestsManager to get ready evaluation results and
# metrics.
self._evaluation_async_req_manager.call_on_all_available(
remote_fn=remote_fn,
fn_args=[weights_ref, self._evaluation_weights_seq_number],
)
ready_requests = self._evaluation_async_req_manager.get_ready()
batches = []
i = 0
for actor, requests in ready_requests.items():
for req in requests:
try:
batch, metrics, seq_no = ray.get(req)
# Ignore results, if the weights seq-number does not match (is
# from a previous evaluation step) OR if we have already reached
# the configured duration (e.g. number of episodes to evaluate
# for).
if seq_no == self._evaluation_weights_seq_number and (
i * (1 if unit == "episodes" else rollout * num_envs)
< units_left_to_do
):
batches.append(batch)
rollout_metrics.extend(metrics)
except RayError as e:
errors.append(e)
self._evaluation_async_req_manager.remove_workers(actor)
i += 1
_agent_steps = sum(b.agent_steps() for b in batches)
_env_steps = sum(b.env_steps() for b in batches)
# 1 episode per returned batch.
if unit == "episodes":
num_units_done += len(batches)
# Make sure all batches are exactly one episode.
for ma_batch in batches:
ma_batch = ma_batch.as_multi_agent()
for batch in ma_batch.policy_batches.values():
assert np.sum(batch[SampleBatch.DONES])
# n timesteps per returned batch.
else:
num_units_done += _agent_steps if self._by_agent_steps else _env_steps
if self.reward_estimators:
all_batches.extend(batches)
agent_steps_this_iter += _agent_steps
env_steps_this_iter += _env_steps
logger.info(
f"Ran round {_round} of parallel evaluation "
f"({num_units_done}/{duration if not auto else '?'} "
f"{unit} done)"
)
num_recreated_workers = 0
if errors:
num_recreated_workers = self.try_recover_from_step_attempt(
error=errors[0],
worker_set=self.evaluation_workers,
ignore=eval_cfg.get("ignore_worker_failures"),
recreate=eval_cfg.get("recreate_failed_workers"),
)
metrics = summarize_episodes(
rollout_metrics,
keep_custom_metrics=eval_cfg["keep_per_episode_custom_metrics"],
)
metrics["num_recreated_workers"] = num_recreated_workers
metrics[NUM_AGENT_STEPS_SAMPLED_THIS_ITER] = agent_steps_this_iter
metrics[NUM_ENV_STEPS_SAMPLED_THIS_ITER] = env_steps_this_iter
# TODO: Remove this key at some point. Here for backward compatibility.
metrics["timesteps_this_iter"] = env_steps_this_iter
if self.reward_estimators:
# Compute off-policy estimates
metrics["off_policy_estimator"] = {}
total_batch = concat_samples(all_batches)
for name, estimator in self.reward_estimators.items():
estimates = estimator.estimate(total_batch)
metrics["off_policy_estimator"][name] = estimates
# Evaluation does not run for every step.
# Save evaluation metrics on trainer, so it can be attached to
# subsequent step results as latest evaluation result.
self.evaluation_metrics = {"evaluation": metrics}
# Trigger `on_evaluate_end` callback.
self.callbacks.on_evaluate_end(
algorithm=self, evaluation_metrics=self.evaluation_metrics
)
# Return evaluation results.
return self.evaluation_metrics
@OverrideToImplementCustomLogic
@DeveloperAPI
def training_step(self) -> ResultDict:
"""Default single iteration logic of an algorithm.
- Collect on-policy samples (SampleBatches) in parallel using the
Trainer's RolloutWorkers (@ray.remote).
- Concatenate collected SampleBatches into one train batch.
- Note that we may have more than one policy in the multi-agent case:
Call the different policies' `learn_on_batch` (simple optimizer) OR
`load_batch_into_buffer` + `learn_on_loaded_batch` (multi-GPU
optimizer) methods to calculate loss and update the model(s).
- Return all collected metrics for the iteration.
Returns:
The results dict from executing the training iteration.
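Example:
A minimal sketch of a custom override (`MyAlgo` is a hypothetical
sub-class name):
>>> class MyAlgo(Algorithm): # doctest: +SKIP
...     def training_step(self) -> ResultDict: # doctest: +SKIP
...         # Custom pre-/post-processing could be added here.
...         return super().training_step() # doctest: +SKIP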
"""
# Collect SampleBatches from sample workers until we have a full batch.
if self._by_agent_steps:
train_batch = synchronous_parallel_sample(
worker_set=self.workers, max_agent_steps=self.config["train_batch_size"]
)
else:
train_batch = synchronous_parallel_sample(
worker_set=self.workers, max_env_steps=self.config["train_batch_size"]
)
train_batch = train_batch.as_multi_agent()
self._counters[NUM_AGENT_STEPS_SAMPLED] += train_batch.agent_steps()
self._counters[NUM_ENV_STEPS_SAMPLED] += train_batch.env_steps()
# Use simple optimizer (only for multi-agent or tf-eager; all other
# cases should use the multi-GPU optimizer, even if only using 1 GPU).
# TODO: (sven) rename MultiGPUOptimizer into something more
# meaningful.
if self.config.get("simple_optimizer") is True:
train_results = train_one_step(self, train_batch)
else:
train_results = multi_gpu_train_one_step(self, train_batch)
# Update weights and global_vars - after learning on the local worker - on all
# remote workers.
global_vars = {
"timestep": self._counters[NUM_ENV_STEPS_SAMPLED],
}
with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]:
self.workers.sync_weights(global_vars=global_vars)
return train_results
@staticmethod
def execution_plan(workers, config, **kwargs):
raise NotImplementedError(
"It is not longer recommended to use Trainer's `execution_plan` method/API."
" Set `_disable_execution_plan_api=True` in your config and override the "
"`Trainer.training_step()` method with your algo's custom "
"execution logic."
)
@PublicAPI
def compute_single_action(
self,
observation: Optional[TensorStructType] = None,
state: Optional[List[TensorStructType]] = None,
*,
prev_action: Optional[TensorStructType] = None,
prev_reward: Optional[float] = None,
info: Optional[EnvInfoDict] = None,
input_dict: Optional[SampleBatch] = None,
policy_id: PolicyID = DEFAULT_POLICY_ID,
full_fetch: bool = False,
explore: Optional[bool] = None,
timestep: Optional[int] = None,
episode: Optional[Episode] = None,
unsquash_action: Optional[bool] = None,
clip_action: Optional[bool] = None,
# Deprecated args.
unsquash_actions=DEPRECATED_VALUE,
clip_actions=DEPRECATED_VALUE,
# Kwargs placeholder for future compatibility.
**kwargs,
) -> Union[
TensorStructType,
Tuple[TensorStructType, List[TensorType], Dict[str, TensorType]],
]:
"""Computes an action for the specified policy on the local worker.
Note that you can also access the policy object through
self.get_policy(policy_id) and call compute_single_action() on it
directly.
Args:
observation: Single (unbatched) observation from the
environment.
state: List of all RNN hidden (single, unbatched) state tensors.
prev_action: Single (unbatched) previous action value.
prev_reward: Single (unbatched) previous reward value.
info: Env info dict, if any.
input_dict: An optional SampleBatch that holds all the values
for: obs, state, prev_action, and prev_reward, plus maybe
custom defined views of the current env trajectory. Note
that only one of `observation` or `input_dict` may be non-None.
policy_id: Policy to query (only applies to multi-agent).
Default: "default_policy".
full_fetch: Whether to return extra action fetch results.
This is always set to True if `state` is specified.
explore: Whether to apply exploration to the action.
Default: None -> use self.config["explore"].
timestep: The current (sampling) time step.
episode: This provides access to all of the internal episodes'
state, which may be useful for model-based or multi-agent
algorithms.
unsquash_action: Should actions be unsquashed according to the
env's/Policy's action space? If None, use the value of
self.config["normalize_actions"].
clip_action: Should actions be clipped according to the
env's/Policy's action space? If None, use the value of
self.config["clip_actions"].
Keyword Args:
kwargs: forward compatibility placeholder
Returns:
The computed action if full_fetch=False, or a tuple of (action,
RNN state(s), extra action fetches), i.e. the full output of
policy.compute_single_action(), if full_fetch=True or we have an
RNN-based Policy.
Raises:
KeyError: If the `policy_id` cannot be found in this Trainer's
local worker.
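Example:
A hedged sketch (env name is illustrative; assumes the algo was
trained on this env):
>>> import gym # doctest: +SKIP
>>> env = gym.make("CartPole-v1") # doctest: +SKIP
>>> obs = env.reset() # doctest: +SKIP
>>> action = algo.compute_single_action(obs) # doctest: +SKIP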
"""
if clip_actions != DEPRECATED_VALUE:
deprecation_warning(
old="Trainer.compute_single_action(`clip_actions`=...)",
new="Trainer.compute_single_action(`clip_action`=...)",
error=True,
)
clip_action = clip_actions
if unsquash_actions != DEPRECATED_VALUE:
deprecation_warning(
old="Trainer.compute_single_action(`unsquash_actions`=...)",
new="Trainer.compute_single_action(`unsquash_action`=...)",
error=True,
)
unsquash_action = unsquash_actions
# `unsquash_action` is None: Use value of config['normalize_actions'].
if unsquash_action is None:
    unsquash_action = self.config["normalize_actions"]
# `clip_action` is None: Use value of config['clip_actions']. Note: This is
# checked independently of `unsquash_action` (the two defaults are not
# mutually exclusive), matching the docstring above.
if clip_action is None:
    clip_action = self.config["clip_actions"]
# User provided an input-dict: Assert that `obs`, `prev_a|r`, `state`
# are all None.
err_msg = (
"Provide either `input_dict` OR [`observation`, ...] as "
"args to Trainer.compute_single_action!"
)
if input_dict is not None:
assert (
observation is None
and prev_action is None
and prev_reward is None
and state is None
), err_msg
observation = input_dict[SampleBatch.OBS]
else:
assert observation is not None, err_msg
# Get the policy to compute the action for (in the multi-agent case,
# Trainer may hold more than one policy).
policy = self.get_policy(policy_id)
if policy is None:
raise KeyError(
f"PolicyID '{policy_id}' not found in PolicyMap of the "
f"Trainer's local worker!"
)
local_worker = self.workers.local_worker()
# Check the preprocessor and preprocess, if necessary.
pp = local_worker.preprocessors[policy_id]
if pp and type(pp).__name__ != "NoPreprocessor":
observation = pp.transform(observation)
observation = local_worker.filters[policy_id](observation, update=False)
# Input-dict.
if input_dict is not None:
input_dict[SampleBatch.OBS] = observation
action, state, extra = policy.compute_single_action(
input_dict=input_dict,
explore=explore,
timestep=timestep,
episode=episode,
)
# Individual args.
else:
action, state, extra = policy.compute_single_action(
obs=observation,
state=state,
prev_action=prev_action,
prev_reward=prev_reward,
info=info,
explore=explore,
timestep=timestep,
episode=episode,
)
# If we work in normalized action space (normalize_actions=True),
# we re-translate here into the env's action space.
if unsquash_action:
action = space_utils.unsquash_action(action, policy.action_space_struct)
# Clip, according to env's action space.
elif clip_action:
action = space_utils.clip_action(action, policy.action_space_struct)
# Return 3-Tuple: Action, states, and extra-action fetches.
if state or full_fetch:
return action, state, extra
# Ensure backward compatibility.
else:
return action
@PublicAPI
def compute_actions(
self,
observations: TensorStructType,
state: Optional[List[TensorStructType]] = None,
*,
prev_action: Optional[TensorStructType] = None,
prev_reward: Optional[TensorStructType] = None,
info: Optional[EnvInfoDict] = None,
policy_id: PolicyID = DEFAULT_POLICY_ID,
full_fetch: bool = False,
explore: Optional[bool] = None,
timestep: Optional[int] = None,
episodes: Optional[List[Episode]] = None,
unsquash_actions: Optional[bool] = None,
clip_actions: Optional[bool] = None,
# Deprecated.
normalize_actions=None,
**kwargs,
):
"""Computes an action for the specified policy on the local Worker.
Note that you can also access the policy object through
self.get_policy(policy_id) and call compute_actions() on it directly.
Args:
observations: Multi-agent dict mapping agent IDs to observations
from the environment.
state: RNN hidden states, if any. If state is not None, the
full tuple (computed actions, rnn state(s), extra fetches) is
returned; otherwise, only the computed actions are returned.
prev_action: Previous action value, if any.
prev_reward: Previous reward, if any.
info: Env info dict, if any.
policy_id: Policy to query (only applies to multi-agent).
full_fetch: Whether to return extra action fetch results.
This is always set to True if RNN state is specified.
explore: Whether to pick an exploitation or exploration
action (default: None -> use self.config["explore"]).
timestep: The current (sampling) time step.
episodes: This provides access to all of the internal episodes'
state, which may be useful for model-based or multi-agent
algorithms.
unsquash_actions: Should actions be unsquashed according
to the env's/Policy's action space? If None, use
self.config["normalize_actions"].
clip_actions: Should actions be clipped according to the
env's/Policy's action space? If None, use
self.config["clip_actions"].
Keyword Args:
kwargs: forward compatibility placeholder
Returns:
The computed action if full_fetch=False, or a tuple consisting of
the full output of policy.compute_actions_from_input_dict() if
full_fetch=True or we have an RNN-based Policy.
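Example:
A hedged multi-agent sketch (agent IDs and observation variables
are illustrative):
>>> actions = algo.compute_actions( # doctest: +SKIP
...     {"agent_0": obs_0, "agent_1": obs_1}) # doctest: +SKIP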
"""
if normalize_actions is not None:
deprecation_warning(
old="Trainer.compute_actions(`normalize_actions`=...)",
new="Trainer.compute_actions(`unsquash_actions`=...)",
error=True,
)
unsquash_actions = normalize_actions
# `unsquash_actions` is None: Use value of config['normalize_actions'].
if unsquash_actions is None:
    unsquash_actions = self.config["normalize_actions"]
# `clip_actions` is None: Use value of config['clip_actions']. Note: This is
# checked independently of `unsquash_actions` (the two defaults are not
# mutually exclusive), matching the docstring above.
if clip_actions is None:
    clip_actions = self.config["clip_actions"]
# Preprocess obs and states.
state_defined = state is not None
policy = self.get_policy(policy_id)
filtered_obs, filtered_state = [], []
for agent_id, ob in observations.items():
worker = self.workers.local_worker()
preprocessed = worker.preprocessors[policy_id].transform(ob)
filtered = worker.filters[policy_id](preprocessed, update=False)
filtered_obs.append(filtered)
if state is None:
continue
elif agent_id in state:
filtered_state.append(state[agent_id])
else:
filtered_state.append(policy.get_initial_state())
# Batch obs and states
obs_batch = np.stack(filtered_obs)
if state is None:
state = []
else:
state = list(zip(*filtered_state))
state = [np.stack(s) for s in state]
input_dict = {SampleBatch.OBS: obs_batch}
# prev_action and prev_reward can be None, np.ndarray, or tensor-like structure.
# Explicitly check for None here to avoid the error message "The truth value of
# an array with more than one element is ambiguous.", when np arrays are passed
# as arguments.
if prev_action is not None:
input_dict[SampleBatch.PREV_ACTIONS] = prev_action
if prev_reward is not None:
input_dict[SampleBatch.PREV_REWARDS] = prev_reward
if info:
input_dict[SampleBatch.INFOS] = info
for i, s in enumerate(state):
input_dict[f"state_in_{i}"] = s
# Batch compute actions
actions, states, infos = policy.compute_actions_from_input_dict(
input_dict=input_dict,
explore=explore,
timestep=timestep,
episodes=episodes,
)
# Unbatch actions for the environment into a multi-agent dict.
single_actions = space_utils.unbatch(actions)
actions = {}
for key, a in zip(observations, single_actions):
# If we work in normalized action space (normalize_actions=True),
# we re-translate here into the env's action space.
if unsquash_actions:
a = space_utils.unsquash_action(a, policy.action_space_struct)
# Clip, according to env's action space.
elif clip_actions:
a = space_utils.clip_action(a, policy.action_space_struct)
actions[key] = a
# Unbatch states into a multi-agent dict.
unbatched_states = {}
for idx, agent_id in enumerate(observations):
unbatched_states[agent_id] = [s[idx] for s in states]
# Return only actions or full tuple
if state_defined or full_fetch:
return actions, unbatched_states, infos
else:
return actions
@PublicAPI
def get_policy(self, policy_id: PolicyID = DEFAULT_POLICY_ID) -> Policy:
"""Return policy for the specified id, or None.
Args:
policy_id: ID of the policy to return.
"""
return self.workers.local_worker().get_policy(policy_id)
@PublicAPI
def get_weights(self, policies: Optional[List[PolicyID]] = None) -> dict:
"""Return a dictionary of policy ids to weights.
Args:
policies: Optional list of policies to return weights for,
or None for all policies.
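Example:
A hedged sketch of copying weights between two algos assumed to
share the same policy setup:
>>> weights = algo1.get_weights() # doctest: +SKIP
>>> algo2.set_weights(weights) # doctest: +SKIP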
"""
return self.workers.local_worker().get_weights(policies)
@PublicAPI
def set_weights(self, weights: Dict[PolicyID, dict]):
"""Set policy weights by policy id.
Args:
weights: Map of policy ids to weights to set.
"""
self.workers.local_worker().set_weights(weights)
@PublicAPI
def add_policy(
self,
policy_id: PolicyID,
policy_cls: Optional[Type[Policy]] = None,
policy: Optional[Policy] = None,
*,
observation_space: Optional[gym.spaces.Space] = None,
action_space: Optional[gym.spaces.Space] = None,
config: Optional[PartialAlgorithmConfigDict] = None,
policy_state: Optional[PolicyState] = None,
policy_mapping_fn: Optional[Callable[[AgentID, EpisodeID], PolicyID]] = None,
policies_to_train: Optional[
Union[
Container[PolicyID],
Callable[[PolicyID, Optional[SampleBatchType]], bool],
]
] = None,
evaluation_workers: bool = True,
workers: Optional[List[Union[RolloutWorker, ActorHandle]]] = None,
) -> Optional[Policy]:
"""Adds a new policy to this Algorithm.
Args:
policy_id: ID of the policy to add.
IMPORTANT: Must not contain characters that
are not allowed in Unix/Win filesystems, such as `<>:"/\|?*`,
and must not end in a dot `.` or a space ` `.
policy_cls: The Policy class to use for constructing the new Policy.
Note: Only one of `policy_cls` or `policy` must be provided.
policy: The Policy instance to add to this algorithm. If not None, the
given Policy object will be directly inserted into the Algorithm's
local worker and clones of that Policy will be created on all remote
workers as well as all evaluation workers.
Note: Only one of `policy_cls` or `policy` must be provided.
observation_space: The observation space of the policy to add.
If None, try to infer this space from the environment.
action_space: The action space of the policy to add.
If None, try to infer this space from the environment.
config: The config overrides for the policy to add.
policy_state: Optional state dict to apply to the new
policy instance, right after its construction.
policy_mapping_fn: An optional (updated) policy mapping function
to use from here on. Note that already ongoing episodes will
not change their mapping but will use the old mapping till
the end of the episode.
policies_to_train: An optional list of policy IDs to be trained
or a callable taking PolicyID and SampleBatchType and
returning a bool (trainable or not?).
If None, will keep the existing setup in place. Policies
whose IDs are not in the list (or for which the callable
returns False) will not be updated.
evaluation_workers: Whether to add the new policy also
to the evaluation WorkerSet.
workers: A list of RolloutWorker/ActorHandles (remote
RolloutWorkers) to add this policy to. If defined, will only
add the given policy to these workers.
Returns:
The newly added policy (the copy that got added to the local
worker). If `workers` was provided, None is returned.
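Example:
A hedged sketch (the policy ID "new_policy" and the mapping fn are
illustrative):
>>> new_pol = algo.add_policy( # doctest: +SKIP
...     policy_id="new_policy", # doctest: +SKIP
...     policy_cls=type(algo.get_policy()), # doctest: +SKIP
...     policy_mapping_fn=lambda agent_id, episode, **kw: "new_policy", # doctest: +SKIP
... ) # doctest: +SKIP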
"""
validate_policy_id(policy_id, error=True)
# Worker list is explicitly provided -> Use only those workers (local or remote)
# specified.
if workers is not None:
# Call static utility method.
WorkerSet.add_policy_to_workers(
workers,
policy_id,
policy_cls,
policy,
observation_space=observation_space,
action_space=action_space,
config=config,
policy_state=policy_state,
policy_mapping_fn=policy_mapping_fn,
policies_to_train=policies_to_train,
)
# Add to all our regular RolloutWorkers and maybe also all evaluation workers.
else:
self.workers.add_policy(
policy_id,
policy_cls,
policy,
observation_space=observation_space,
action_space=action_space,
config=config,
policy_state=policy_state,
policy_mapping_fn=policy_mapping_fn,
policies_to_train=policies_to_train,
)
# Add to evaluation workers, if necessary.
if evaluation_workers is True and self.evaluation_workers is not None:
self.evaluation_workers.add_policy(
policy_id,
policy_cls,
policy,
observation_space=observation_space,
action_space=action_space,
config=config,
policy_state=policy_state,
policy_mapping_fn=policy_mapping_fn,
policies_to_train=policies_to_train,
)
# Return newly added policy (from the local rollout worker).
return self.get_policy(policy_id)
@PublicAPI
def remove_policy(
self,
policy_id: PolicyID = DEFAULT_POLICY_ID,
*,
policy_mapping_fn: Optional[Callable[[AgentID], PolicyID]] = None,
policies_to_train: Optional[
Union[
Container[PolicyID],
Callable[[PolicyID, Optional[SampleBatchType]], bool],
]
] = None,
evaluation_workers: bool = True,
) -> None:
"""Removes a new policy from this Algorithm.
Args:
policy_id: ID of the policy to be removed.
policy_mapping_fn: An optional (updated) policy mapping function
to use from here on. Note that already ongoing episodes will
not change their mapping but will use the old mapping till
the end of the episode.
policies_to_train: An optional list of policy IDs to be trained
or a callable taking PolicyID and SampleBatchType and
returning a bool (trainable or not?).
If None, will keep the existing setup in place. Policies
whose IDs are not in the list (or for which the callable
returns False) will not be updated.
evaluation_workers: Whether to also remove the policy from the
evaluation WorkerSet.
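Example:
A hedged sketch (the policy ID is illustrative):
>>> algo.remove_policy("new_policy") # doctest: +SKIP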
"""
def fn(worker):
worker.remove_policy(
policy_id=policy_id,
policy_mapping_fn=policy_mapping_fn,
policies_to_train=policies_to_train,
)
self.workers.foreach_worker(fn)
if evaluation_workers and self.evaluation_workers is not None:
self.evaluation_workers.foreach_worker(fn)
@DeveloperAPI
def export_policy_model(
self,
export_dir: str,
policy_id: PolicyID = DEFAULT_POLICY_ID,
onnx: Optional[int] = None,
) -> None:
"""Exports policy model with given policy_id to a local directory.
Args:
export_dir: Writable local directory.
policy_id: Optional policy id to export.
onnx: If given, will export model in ONNX format. The
value of this parameter sets the ONNX OpSet version to use.
If None, the output format will be DL framework specific.
Example:
>>> from ray.rllib.algorithms.ppo import PPO
>>> # Use an Algorithm from RLlib or define your own.
>>> algo = PPO(...) # doctest: +SKIP
>>> for _ in range(10): # doctest: +SKIP
>>> algo.train() # doctest: +SKIP
>>> algo.export_policy_model("/tmp/dir") # doctest: +SKIP
>>> algo.export_policy_model("/tmp/dir/onnx", onnx=1) # doctest: +SKIP
"""
self.get_policy(policy_id).export_model(export_dir, onnx)
@DeveloperAPI
def export_policy_checkpoint(
self,
export_dir: str,
filename_prefix=DEPRECATED_VALUE, # deprecated arg, do not use anymore
policy_id: PolicyID = DEFAULT_POLICY_ID,
) -> None:
"""Exports Policy checkpoint to a local directory and returns an AIR Checkpoint.
Args:
export_dir: Writable local directory to store the AIR Checkpoint
information into.
policy_id: Optional policy ID to export. If not provided, will export
"default_policy". If `policy_id` does not exist in this Algorithm,
will raise a KeyError.
Raises:
KeyError if `policy_id` cannot be found in this Algorithm.
Example:
>>> from ray.rllib.algorithms.ppo import PPO
>>> # Use an Algorithm from RLlib or define your own.
>>> algo = PPO(...) # doctest: +SKIP
>>> for _ in range(10): # doctest: +SKIP
>>> algo.train() # doctest: +SKIP
>>> algo.export_policy_checkpoint("/tmp/export_dir") # doctest: +SKIP
"""
# `filename_prefix` should no longer be used as new Policy checkpoints
# contain more than one file with a fixed filename structure.
if filename_prefix != DEPRECATED_VALUE:
deprecation_warning(
old="Algorithm.export_policy_checkpoint(filename_prefix=...)",
error=True,
)
policy = self.get_policy(policy_id)
if policy is None:
raise KeyError(f"Policy with ID {policy_id} not found in Algorithm!")
policy.export_checkpoint(export_dir)
@DeveloperAPI
def import_policy_model_from_h5(
self,
import_file: str,
policy_id: PolicyID = DEFAULT_POLICY_ID,
) -> None:
"""Imports a policy's model with given policy_id from a local h5 file.
Args:
import_file: The h5 file to import from.
policy_id: Optional policy id to import into.
Example:
>>> from ray.rllib.algorithms.ppo import PPO
>>> algo = PPO(...) # doctest: +SKIP
>>> algo.import_policy_model_from_h5("/tmp/weights.h5") # doctest: +SKIP
>>> for _ in range(10): # doctest: +SKIP
>>> algo.train() # doctest: +SKIP
"""
self.get_policy(policy_id).import_model_from_h5(import_file)
# Sync new weights to remote workers.
self._sync_weights_to_workers(worker_set=self.workers)
@override(Trainable)
def save_checkpoint(self, checkpoint_dir: str) -> str:
"""Exports AIR Checkpoint to a local directory and returns its directory path.
The structure of an Algorithm checkpoint dir will be as follows::
policies/
pol_1/
policy_state.pkl
pol_2/
policy_state.pkl
rllib_checkpoint.json
algorithm_state.pkl
Note: `rllib_checkpoint.json` contains a "version" key (e.g. with value 0.1)
that helps RLlib remain backward compatible when restoring from checkpoints
created with Ray 2.0 or later.
Args:
checkpoint_dir: The directory where the checkpoint files will be stored.
Returns:
The path to the created AIR Checkpoint directory.
"""
state = self.__getstate__()
# Extract policy states from worker state (Policies get their own
# checkpoint sub-dirs).
policy_states = {}
if "worker" in state and "policy_states" in state["worker"]:
policy_states = state["worker"].pop("policy_states", {})
# Add RLlib checkpoint version.
state["checkpoint_version"] = CHECKPOINT_VERSION
# Write state (w/o policies) to disk.
state_file = os.path.join(checkpoint_dir, "algorithm_state.pkl")
with open(state_file, "wb") as f:
pickle.dump(state, f)
# Write rllib_checkpoint.json.
with open(os.path.join(checkpoint_dir, "rllib_checkpoint.json"), "w") as f:
json.dump(
{
"type": "Algorithm",
"checkpoint_version": str(state["checkpoint_version"]),
"ray_version": ray.__version__,
"ray_commit": ray.__commit__,
},
f,
)
# Write individual policies to disk, each in their own sub-directory.
for pid, policy_state in policy_states.items():
# From here on, disallow policyIDs that would not work as directory names.
validate_policy_id(pid, error=True)
policy_dir = os.path.join(checkpoint_dir, "policies", pid)
os.makedirs(policy_dir, exist_ok=True)
policy = self.get_policy(pid)
policy.export_checkpoint(policy_dir, policy_state=policy_state)
return checkpoint_dir
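# Minimal usage sketch (illustrative only; `algo` is any trained Algorithm
# instance and the target directory is hypothetical):
#   ckpt_dir = algo.save_checkpoint("/tmp/my_ckpt")
#   # /tmp/my_ckpt now holds algorithm_state.pkl, rllib_checkpoint.json,
#   # and one policies/<pid>/ sub-directory per policy.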
@override(Trainable)
def load_checkpoint(self, checkpoint: Union[Dict, str]) -> None:
# Checkpoint is provided as a directory name.
# Restore from the checkpoint file or dir.
if isinstance(checkpoint, str):
checkpoint_info = get_checkpoint_info(checkpoint)
checkpoint_data = Algorithm._checkpoint_info_to_algorithm_state(
checkpoint_info
)
# Checkpoint is a checkpoint-as-dict -> Restore state from it as-is.
else:
checkpoint_data = checkpoint
self.__setstate__(checkpoint_data)
@override(Trainable)
def log_result(self, result: ResultDict) -> None:
# Log after the callback is invoked, so that the user has a chance
# to mutate the result.
# TODO: Remove `trainer` arg at some point to fully deprecate the old signature.
self.callbacks.on_train_result(algorithm=self, result=result)
# Then log according to Trainable's logging logic.
Trainable.log_result(self, result)
@override(Trainable)
def cleanup(self) -> None:
# Stop all workers.
if hasattr(self, "workers") and self.workers is not None:
self.workers.stop()
if hasattr(self, "evaluation_workers") and self.evaluation_workers is not None:
self.evaluation_workers.stop()
@OverrideToImplementCustomLogic
@classmethod
@override(Trainable)
def default_resource_request(
cls, config: PartialAlgorithmConfigDict
) -> Union[Resources, PlacementGroupFactory]:
# Default logic for RLlib Algorithms:
# Create one bundle per individual worker (local or remote).
# Use `num_cpus_for_driver` and `num_gpus` for the local worker and
# `num_cpus_per_worker` and `num_gpus_per_worker` for the remote
# workers to determine their CPU/GPU resource needs.
# Convenience config handles.
cf = dict(cls.get_default_config(), **config)
eval_cf = cf["evaluation_config"]
local_worker = {
"CPU": cf["num_cpus_for_driver"],
"GPU": 0 if cf["_fake_gpus"] else cf["num_gpus"],
}
rollout_workers = [
{
"CPU": cf["num_cpus_per_worker"],
"GPU": cf["num_gpus_per_worker"],
**cf["custom_resources_per_worker"],
}
for _ in range(cf["num_workers"])
]
bundles = [local_worker] + rollout_workers
if cf["evaluation_interval"]:
# Evaluation workers.
# Note: The local eval worker is located on the driver CPU.
bundles += [
{
"CPU": eval_cf.get(
"num_cpus_per_worker", cf["num_cpus_per_worker"]
),
"GPU": eval_cf.get(
"num_gpus_per_worker", cf["num_gpus_per_worker"]
),
**eval_cf.get(
"custom_resources_per_worker", cf["custom_resources_per_worker"]
),
}
for _ in range(cf["evaluation_num_workers"])
]
# In case our I/O reader/writer requires compute resources.
bundles += get_offline_io_resource_bundles(cf)
# Return PlacementGroupFactory containing all needed resources
# (already properly defined as device bundles).
return PlacementGroupFactory(
bundles=bundles,
strategy=config.get("placement_strategy", "PACK"),
)
@DeveloperAPI
def _before_evaluate(self):
"""Pre-evaluation callback."""
pass
@staticmethod
def _get_env_id_and_creator(
env_specifier: Union[str, EnvType, None], config: PartialAlgorithmConfigDict
) -> Tuple[Optional[str], EnvCreator]:
"""Returns env_id and creator callable given original env id from config.
Args:
env_specifier: An env class, an already tune registered env ID, a known
gym env name, or None (if no env is used).
config: The Algorithm's (maybe partial) config dict.
Returns:
Tuple consisting of a) env ID string and b) env creator callable.
"""
# Environment is specified via a string.
if isinstance(env_specifier, str):
# An already registered env.
if _global_registry.contains(ENV_CREATOR, env_specifier):
return env_specifier, _global_registry.get(ENV_CREATOR, env_specifier)
# A class path specifier.
elif "." in env_specifier:
def env_creator_from_classpath(env_context):
try:
env_obj = from_config(env_specifier, env_context)
except ValueError:
raise EnvError(
ERR_MSG_INVALID_ENV_DESCRIPTOR.format(env_specifier)
)
return env_obj
return env_specifier, env_creator_from_classpath
# Try gym/PyBullet/Vizdoom.
else:
return env_specifier, functools.partial(
_gym_env_creator, env_descriptor=env_specifier
)
elif isinstance(env_specifier, type):
env_id = env_specifier # .__name__
if config.get("remote_worker_envs"):
# Check gym version (0.22 or higher?).
# If > 0.21, can't perform auto-wrapping of the given class as this
# would lead to a pickle error.
gym_version = pkg_resources.get_distribution("gym").version
if version.parse(gym_version) >= version.parse("0.22"):
raise ValueError(
"Cannot specify a gym.Env class via `config.env` while setting "
"`config.remote_worker_env=True` AND your gym version is >= "
"0.22! Try installing an older version of gym or set `config."
"remote_worker_env=False`."
)
@ray.remote(num_cpus=1)
class _wrapper(env_specifier):
# Add convenience `_get_spaces` and `_is_multi_agent`
# methods:
def _get_spaces(self):
return self.observation_space, self.action_space
def _is_multi_agent(self):
from ray.rllib.env.multi_agent_env import MultiAgentEnv
return isinstance(self, MultiAgentEnv)
return env_id, lambda cfg: _wrapper.remote(cfg)
else:
return env_id, lambda cfg: env_specifier(cfg)
# No env -> Env creator always returns None.
elif env_specifier is None:
return None, lambda env_config: None
else:
raise ValueError(
"{} is an invalid env specifier. ".format(env_specifier)
+ "You can specify a custom env as either a class "
'(e.g., YourEnvCls) or a registered env id (e.g., "your_env").'
)
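# Illustrative specifier forms handled above (class/env names are hypothetical):
#   "CartPole-v1"          -> known gym env name
#   "my_pkg.envs.MyEnv"    -> class-path string, resolved via `from_config`
#   MyEnvCls               -> env class (auto-wrapped as a remote actor if
#                             `remote_worker_envs=True` and gym < 0.22)
#   None                   -> env creator always returns None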
def _sync_filters_if_needed(
self,
from_worker: RolloutWorker,
workers: WorkerSet,
timeout_seconds: Optional[float] = None,
):
if (
from_worker
and self.config.get("observation_filter", "NoFilter") != "NoFilter"
):
FilterManager.synchronize(
from_worker.filters,
workers.remote_workers(),
update_remote=self.config["synchronize_filters"],
timeout_seconds=timeout_seconds,
)
logger.debug("synchronized filters: {}".format(from_worker.filters))
@DeveloperAPI
def _sync_weights_to_workers(
self,
*,
worker_set: Optional[WorkerSet] = None,
workers: Optional[List[RolloutWorker]] = None,
) -> None:
"""Sync "main" weights to given WorkerSet or list of workers."""
assert worker_set is not None
# Broadcast the new policy weights to all evaluation workers.
logger.info("Synchronizing weights to workers.")
weights = ray.put(self.workers.local_worker().get_state())
worker_set.foreach_worker(lambda w: w.set_state(ray.get(weights)))
@classmethod
@override(Trainable)
def resource_help(cls, config: AlgorithmConfigDict) -> str:
return (
"\n\nYou can adjust the resource requests of RLlib agents by "
"setting `num_workers`, `num_gpus`, and other configs. See "
"the DEFAULT_CONFIG defined by each agent for more info.\n\n"
"The config of this agent is: {}".format(config)
)
@classmethod
def merge_trainer_configs(
cls,
config1: AlgorithmConfigDict,
config2: PartialAlgorithmConfigDict,
_allow_unknown_configs: Optional[bool] = None,
) -> AlgorithmConfigDict:
"""Merges a complete Algorithm config dict with a partial override dict.
Respects nested structures within the config dicts. The values in the
partial override dict take priority.
Args:
config1: The complete Algorithm's dict to be merged (overridden)
with `config2`.
config2: The partial override config dict to merge on top of
`config1`.
_allow_unknown_configs: If True, keys in `config2` that don't exist
in `config1` are allowed and will be added to the final config.
Returns:
The merged full algorithm config dict.
"""
config1 = copy.deepcopy(config1)
if "callbacks" in config2 and type(config2["callbacks"]) is dict:
deprecation_warning(
"callbacks dict interface",
"a class extending rllib.algorithms.callbacks.DefaultCallbacks; "
"see `rllib/examples/custom_metrics_and_callbacks.py` for an example.",
error=True,
)
if _allow_unknown_configs is None:
_allow_unknown_configs = cls._allow_unknown_configs
return deep_update(
config1,
config2,
_allow_unknown_configs,
cls._allow_unknown_subkeys,
cls._override_all_subkeys_if_type_changes,
cls._override_all_key_list,
)
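# Minimal usage sketch (values are made up); nested dicts merge deeply and
# `config2` wins on conflicts:
#   merged = Algorithm.merge_trainer_configs(
#       {"model": {"fcnet_hiddens": [256, 256]}, "lr": 1e-3},
#       {"model": {"fcnet_hiddens": [64]}},
#   )
#   assert merged["model"]["fcnet_hiddens"] == [64] and merged["lr"] == 1e-3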
@staticmethod
def validate_framework(config: PartialAlgorithmConfigDict) -> None:
"""Validates the config dictionary wrt the framework settings.
Args:
config: The config dictionary to be validated.
"""
_tf1, _tf, _tfv = None, None, None
_torch = None
framework = config["framework"]
tf_valid_frameworks = {"tf", "tf2", "tfe"}
if framework not in tf_valid_frameworks and framework != "torch":
return
elif framework in tf_valid_frameworks:
_tf1, _tf, _tfv = try_import_tf()
else:
_torch, _ = try_import_torch()
def check_if_correct_nn_framework_installed():
"""Check if tf/torch experiment is running and tf/torch installed."""
if framework in tf_valid_frameworks:
if not (_tf1 or _tf):
raise ImportError(
(
"TensorFlow was specified as the 'framework' "
"inside of your config dictionary. However, there was "
"no installation found. You can install TensorFlow "
"via `pip install tensorflow`"
)
)
elif framework == "torch":
if not _torch:
raise ImportError(
(
"PyTorch was specified as the 'framework' inside "
"of your config dictionary. However, there was no "
"installation found. You can install PyTorch via "
"`pip install torch`"
)
)
def resolve_tf_settings():
"""Check and resolve tf settings."""
if _tf1 and config["framework"] in ["tf2", "tfe"]:
if config["framework"] == "tf2" and _tfv < 2:
raise ValueError(
"You configured `framework`=tf2, but your installed "
"pip tf-version is < 2.0! Make sure your TensorFlow "
"version is >= 2.x."
)
if not _tf1.executing_eagerly():
_tf1.enable_eager_execution()
# Recommend setting tracing to True for speedups.
logger.info(
f"Executing eagerly (framework='{config['framework']}'),"
f" with eager_tracing={config['eager_tracing']}. For "
"production workloads, make sure to set eager_tracing=True"
" in order to match the speed of tf-static-graph "
"(framework='tf'). For debugging purposes, "
"`eager_tracing=False` is the best choice."
)
# Tf-static-graph (framework=tf): Recommend upgrading to tf2 and
# enabling eager tracing for similar speed.
elif _tf1 and config["framework"] == "tf":
logger.info(
"Your framework setting is 'tf', meaning you are using "
"static-graph mode. Set framework='tf2' to enable eager "
"execution with tf2.x. You may also then want to set "
"eager_tracing=True in order to reach similar execution "
"speed as with static-graph mode."
)
check_if_correct_nn_framework_installed()
resolve_tf_settings()
@OverrideToImplementCustomLogic_CallToSuperRecommended
@DeveloperAPI
def validate_config(self, config: AlgorithmConfigDict) -> None:
"""Validates a given config dict for this Algorithm.
Users should override this method to implement custom validation
behavior. It is recommended to call `super().validate_config()` in
this override.
Args:
config: The given config dict to check.
Raises:
ValueError: If there is something wrong with the config.
"""
model_config = config.get("model")
if model_config is None:
config["model"] = model_config = {}
# Use DefaultCallbacks class, if callbacks is None.
if config["callbacks"] is None:
config["callbacks"] = DefaultCallbacks
# Check, whether given `callbacks` is a callable.
if not callable(config["callbacks"]):
raise ValueError(
"`callbacks` must be a callable method that "
"returns a subclass of DefaultCallbacks, got "
f"{config['callbacks']}!"
)
# Multi-GPU settings.
simple_optim_setting = config.get("simple_optimizer", DEPRECATED_VALUE)
if simple_optim_setting != DEPRECATED_VALUE:
deprecation_warning(old="simple_optimizer", error=False)
# Validate "multiagent" sub-dict and convert policy 4-tuples to
# PolicySpec objects.
policies, is_multi_agent = check_multi_agent(config)
framework = config.get("framework")
# Multi-GPU setting: Must use MultiGPUTrainOneStep.
if config.get("num_gpus", 0) > 1:
if framework in ["tfe", "tf2"]:
raise ValueError(
"`num_gpus` > 1 not supported yet for "
"framework={}!".format(framework)
)
elif simple_optim_setting is True:
raise ValueError(
"Cannot use `simple_optimizer` if `num_gpus` > 1! "
"Consider not setting `simple_optimizer` in your config."
)
config["simple_optimizer"] = False
# Auto-setting: Use simple-optimizer for tf-eager or multiagent,
# otherwise: MultiGPUTrainOneStep (if supported by the algo's execution
# plan).
elif simple_optim_setting == DEPRECATED_VALUE:
# tf-eager: Must use simple optimizer.
if framework not in ["tf", "torch"]:
config["simple_optimizer"] = True
# Multi-agent case: Try using MultiGPU optimizer (only
# if all policies used are DynamicTFPolicies or TorchPolicies).
elif is_multi_agent:
from ray.rllib.policy.dynamic_tf_policy import DynamicTFPolicy
from ray.rllib.policy.torch_policy import TorchPolicy
default_policy_cls = self.get_default_policy_class(config)
if any(
(p.policy_class or default_policy_cls) is None
or not issubclass(
p.policy_class or default_policy_cls,
(DynamicTFPolicy, TorchPolicy),
)
for p in config["multiagent"]["policies"].values()
):
config["simple_optimizer"] = True
else:
config["simple_optimizer"] = False
else:
config["simple_optimizer"] = False
# User manually set simple-optimizer to False -> Error if tf-eager.
elif simple_optim_setting is False:
if framework in ["tfe", "tf2"]:
raise ValueError(
"`simple_optimizer=False` not supported for "
"framework={}!".format(framework)
)
# Check model config.
# If no preprocessing, propagate into model's config as well
# (so model will know, whether inputs are preprocessed or not).
if config["_disable_preprocessor_api"] is True:
model_config["_disable_preprocessor_api"] = True
# If no action flattening, propagate into model's config as well
# (so model will know, whether action inputs are already flattened or
# not).
if config["_disable_action_flattening"] is True:
model_config["_disable_action_flattening"] = True
# Prev_a/r settings.
prev_a_r = model_config.get("lstm_use_prev_action_reward", DEPRECATED_VALUE)
if prev_a_r != DEPRECATED_VALUE:
deprecation_warning(
"model.lstm_use_prev_action_reward",
"model.lstm_use_prev_action and model.lstm_use_prev_reward",
error=True,
)
model_config["lstm_use_prev_action"] = prev_a_r
model_config["lstm_use_prev_reward"] = prev_a_r
# Check batching/sample collection settings.
if config["batch_mode"] not in ["truncate_episodes", "complete_episodes"]:
raise ValueError(
"`batch_mode` must be one of [truncate_episodes|"
"complete_episodes]! Got {}".format(config["batch_mode"])
)
# Store multi-agent batch count mode.
self._by_agent_steps = (
self.config["multiagent"].get("count_steps_by") == "agent_steps"
)
# Metrics settings.
if (
config.get("metrics_smoothing_episodes", DEPRECATED_VALUE)
!= DEPRECATED_VALUE
):
deprecation_warning(
old="metrics_smoothing_episodes",
new="metrics_num_episodes_for_smoothing",
error=True,
)
config["metrics_num_episodes_for_smoothing"] = config[
"metrics_smoothing_episodes"
]
if config.get("min_iter_time_s", DEPRECATED_VALUE) != DEPRECATED_VALUE:
deprecation_warning(
old="min_iter_time_s",
new="min_time_s_per_iteration",
error=True,
)
config["min_time_s_per_iteration"] = config["min_iter_time_s"] or 0
if config.get("min_time_s_per_reporting", DEPRECATED_VALUE) != DEPRECATED_VALUE:
deprecation_warning(
old="min_time_s_per_reporting",
new="min_time_s_per_iteration",
error=True,
)
config["min_time_s_per_iteration"] = config["min_time_s_per_reporting"] or 0
if (
config.get("min_sample_timesteps_per_reporting", DEPRECATED_VALUE)
!= DEPRECATED_VALUE
):
deprecation_warning(
old="min_sample_timesteps_per_reporting",
new="min_sample_timesteps_per_iteration",
error=True,
)
config["min_sample_timesteps_per_iteration"] = (
config["min_sample_timesteps_per_reporting"] or 0
)
if (
config.get("min_train_timesteps_per_reporting", DEPRECATED_VALUE)
!= DEPRECATED_VALUE
):
deprecation_warning(
old="min_train_timesteps_per_reporting",
new="min_train_timesteps_per_iteration",
error=True,
)
config["min_train_timesteps_per_iteration"] = (
config["min_train_timesteps_per_reporting"] or 0
)
if config.get("collect_metrics_timeout", DEPRECATED_VALUE) != DEPRECATED_VALUE:
# TODO: Warn once all algos use the `training_iteration` method.
# deprecation_warning(
# old="collect_metrics_timeout",
# new="metrics_episode_collection_timeout_s",
# error=False,
# )
config["metrics_episode_collection_timeout_s"] = config[
"collect_metrics_timeout"
]
if config.get("timesteps_per_iteration", DEPRECATED_VALUE) != DEPRECATED_VALUE:
deprecation_warning(
old="timesteps_per_iteration",
new="`min_sample_timesteps_per_iteration` OR "
"`min_train_timesteps_per_iteration`",
error=True,
)
config["min_sample_timesteps_per_iteration"] = (
config["timesteps_per_iteration"] or 0
)
config["timesteps_per_iteration"] = DEPRECATED_VALUE
# Evaluation settings.
# Deprecated setting: `evaluation_num_episodes`.
if config.get("evaluation_num_episodes", DEPRECATED_VALUE) != DEPRECATED_VALUE:
deprecation_warning(
old="evaluation_num_episodes",
new="`evaluation_duration` and `evaluation_duration_unit=episodes`",
error=True,
)
config["evaluation_duration"] = config["evaluation_num_episodes"]
config["evaluation_duration_unit"] = "episodes"
config["evaluation_num_episodes"] = DEPRECATED_VALUE
# If `evaluation_num_workers` > 0, warn if `evaluation_interval` is
# None (also set `evaluation_interval` to 1).
if config["evaluation_num_workers"] > 0 and not config["evaluation_interval"]:
logger.warning(
f"You have specified {config['evaluation_num_workers']} "
"evaluation workers, but your `evaluation_interval` is None! "
"Therefore, evaluation will not occur automatically with each"
" call to `Algorithm.train()`. Instead, you will have to call "
"`Algorithm.evaluate()` manually in order to trigger an "
"evaluation run."
)
# If `evaluation_num_workers=0` and
# `evaluation_parallel_to_training=True`, warn that you need
# at least one remote eval worker for parallel training and
# evaluation, and set `evaluation_parallel_to_training` to False.
elif config["evaluation_num_workers"] == 0 and config.get(
"evaluation_parallel_to_training", False
):
logger.warning(
"`evaluation_parallel_to_training` can only be done if "
"`evaluation_num_workers` > 0! Setting "
"`evaluation_parallel_to_training` to False."
)
config["evaluation_parallel_to_training"] = False
# If `evaluation_duration=auto`, error if
# `evaluation_parallel_to_training=False`.
if config["evaluation_duration"] == "auto":
if not config["evaluation_parallel_to_training"]:
raise ValueError(
"`evaluation_duration=auto` not supported for "
"`evaluation_parallel_to_training=False`!"
)
# Make sure, it's an int otherwise.
elif (
not isinstance(config["evaluation_duration"], int)
or config["evaluation_duration"] <= 0
):
raise ValueError(
"`evaluation_duration` ({}) must be an int and "
">0!".format(config["evaluation_duration"])
)
@staticmethod
@ExperimentalAPI
def validate_env(env: EnvType, env_context: EnvContext) -> None:
"""Env validator function for this Algorithm class.
Override this in child classes to define custom validation
behavior.
Args:
env: The (sub-)environment to validate. This is normally a
single sub-environment (e.g. a gym.Env) within a vectorized
setup.
env_context: The EnvContext to configure the environment.
Raises:
Exception in case something is wrong with the given environment.
"""
pass
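# Override sketch (the subclass and the concrete check are hypothetical):
#   class MyAlgo(Algorithm):
#       @staticmethod
#       def validate_env(env, env_context):
#           if not hasattr(env, "observation_space"):
#               raise ValueError("Env must expose an observation_space!")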
def try_recover_from_step_attempt(self, error, worker_set, ignore, recreate) -> int:
"""Try to identify and remove any unhealthy workers (incl. eval workers).
This method is called after an unexpected remote error is encountered
from a worker during the call to `self.step()`. It issues check requests to
all current workers and removes any that respond with error. If no healthy
workers remain, an error is raised.
Returns:
The number of remote workers recreated.
"""
# @ray.remote RolloutWorker failure.
if isinstance(error, RayError):
# Try to recover w/o the failed worker.
if ignore or recreate:
logger.exception(
"Error in training or evaluation attempt! Trying to recover."
)
# Error out.
else:
logger.warning(
"Worker crashed during training or evaluation! "
"To try to continue without failed "
"worker(s), set `ignore_worker_failures=True`. "
"To try to recover the failed worker(s), set "
"`recreate_failed_workers=True`."
)
raise error
# Any other exception.
else:
# Allow log messages to propagate.
time.sleep(0.5)
raise error
removed_workers, new_workers = [], []
# Search for failed workers and try to recover (restart) them.
if recreate:
removed_workers, new_workers = worker_set.recreate_failed_workers(
local_worker_for_synching=self.workers.local_worker()
)
elif ignore:
removed_workers = worker_set.remove_failed_workers()
# If `worker_set` is the main training WorkerSet: `self.workers`.
if worker_set is getattr(self, "workers", None):
# Call the `on_worker_failures` callback.
self.on_worker_failures(removed_workers, new_workers)
# Recreate execution_plan iterator.
if not self.config.get("_disable_execution_plan_api") and callable(
self.execution_plan
):
logger.warning("Recreating execution plan after failure")
self.train_exec_impl = self.execution_plan(
worker_set, self.config, **self._kwargs_for_execution_plan()
)
elif self._evaluation_async_req_manager is not None and worker_set is getattr(
self, "evaluation_workers", None
):
self._evaluation_async_req_manager.remove_workers(removed_workers)
self._evaluation_async_req_manager.add_workers(new_workers)
return len(new_workers)
def on_worker_failures(
self, removed_workers: List[ActorHandle], new_workers: List[ActorHandle]
):
"""Called after a worker failure is detected.
Args:
removed_workers: List of removed workers.
new_workers: List of new workers.
"""
pass
@override(Trainable)
def _export_model(
self, export_formats: List[str], export_dir: str
) -> Dict[str, str]:
ExportFormat.validate(export_formats)
exported = {}
if ExportFormat.CHECKPOINT in export_formats:
path = os.path.join(export_dir, ExportFormat.CHECKPOINT)
self.export_policy_checkpoint(path)
exported[ExportFormat.CHECKPOINT] = path
if ExportFormat.MODEL in export_formats:
path = os.path.join(export_dir, ExportFormat.MODEL)
self.export_policy_model(path)
exported[ExportFormat.MODEL] = path
if ExportFormat.ONNX in export_formats:
path = os.path.join(export_dir, ExportFormat.ONNX)
self.export_policy_model(path, onnx=int(os.getenv("ONNX_OPSET", "11")))
exported[ExportFormat.ONNX] = path
return exported
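# Illustrative call (the format strings mirror the ExportFormat constants
# used above; the output dir is hypothetical):
#   exported = algo._export_model(["checkpoint", "model"], "/tmp/exports")
#   # -> {"checkpoint": "/tmp/exports/checkpoint",
#   #     "model": "/tmp/exports/model"}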
def import_model(self, import_file: str):
"""Imports a model from import_file.
Note: Currently, only h5 files are supported.
Args:
import_file: The file to import the model from.
Returns:
None. The model weights are loaded in place and synced to the workers.
"""
# Check for existence.
if not os.path.exists(import_file):
raise FileNotFoundError(
"`import_file` '{}' does not exist! Can't import Model.".format(
import_file
)
)
# Get the format of the given file.
import_format = "h5" # TODO(sven): Support checkpoint loading.
ExportFormat.validate([import_format])
if import_format != ExportFormat.H5:
raise NotImplementedError
else:
return self.import_policy_model_from_h5(import_file)
@PublicAPI
def __getstate__(self) -> Dict:
"""Returns current state of Algorithm, sufficient to restore it from scratch.
Returns:
The current state dict of this Algorithm, which can be used to sufficiently
restore the algorithm from scratch without any other information.
"""
# Add config to state so complete Algorithm can be reproduced w/o it.
state = {
"algorithm_class": type(self),
"config": self.config,
}
if hasattr(self, "workers"):
state["worker"] = self.workers.local_worker().get_state()
# TODO: Experimental functionality: Store contents of replay buffer
# to checkpoint, only if user has configured this.
if self.local_replay_buffer is not None and self.config.get(
"store_buffer_in_checkpoints"
):
state["local_replay_buffer"] = self.local_replay_buffer.get_state()
if self.train_exec_impl is not None:
state["train_exec_impl"] = self.train_exec_impl.shared_metrics.get().save()
else:
state["counters"] = self._counters
return state
@PublicAPI
def __setstate__(self, state) -> None:
"""Sets the algorithm to the provided state.
Args:
state: The state dict to restore this Algorithm instance to. `state` may
have been returned by a call to an Algorithm's `__getstate__()` method.
"""
# TODO (sven): Validate that our config and the config in state are compatible.
# For example, the model architectures may differ.
# Also, what should the behavior be if e.g. some training parameter
# (e.g. lr) changed?
if hasattr(self, "workers") and "worker" in state:
self.workers.local_worker().set_state(state["worker"])
remote_state = ray.put(state["worker"])
for r in self.workers.remote_workers():
r.set_state.remote(remote_state)
if self.evaluation_workers:
# If evaluation workers are used, also restore the policies
# there in case they are used for evaluation purpose.
for r in self.evaluation_workers.remote_workers():
r.set_state.remote(remote_state)
# If necessary, restore replay data as well.
if self.local_replay_buffer is not None:
# TODO: Experimental functionality: Restore contents of replay
# buffer from checkpoint, only if user has configured this.
if self.config.get("store_buffer_in_checkpoints"):
if "local_replay_buffer" in state:
self.local_replay_buffer.set_state(state["local_replay_buffer"])
else:
logger.warning(
"`store_buffer_in_checkpoints` is True, but no replay "
"data found in state!"
)
elif "local_replay_buffer" in state and log_once(
"no_store_buffer_in_checkpoints_but_data_found"
):
logger.warning(
"`store_buffer_in_checkpoints` is False, but some replay "
"data found in state!"
)
if self.train_exec_impl is not None:
self.train_exec_impl.shared_metrics.get().restore(state["train_exec_impl"])
elif "counters" in state:
self._counters = state["counters"]
@staticmethod
def _checkpoint_info_to_algorithm_state(
checkpoint_info: dict,
policy_ids: Optional[Container[PolicyID]] = None,
policy_mapping_fn: Optional[Callable[[AgentID, EpisodeID], PolicyID]] = None,
policies_to_train: Optional[
Union[
Container[PolicyID],
Callable[[PolicyID, Optional[SampleBatchType]], bool],
]
] = None,
) -> Dict:
"""Converts a checkpoint info or object to a proper Algorithm state dict.
The returned state dict can be used inside self.__setstate__().
Args:
checkpoint_info: A checkpoint info dict as returned by
`ray.rllib.utils.checkpoints.get_checkpoint_info(
[checkpoint dir or AIR Checkpoint])`.
policy_ids: Optional list/set of PolicyIDs. If not None, only those policies
listed here will be included in the returned state. Note that
state items such as filters, the `is_policy_to_train` function, as
well as the multi-agent `policy_ids` dict will be adjusted as well,
based on this arg.
policy_mapping_fn: An optional (updated) policy mapping function
to include in the returned state.
policies_to_train: An optional list of policy IDs to be trained
or a callable taking PolicyID and SampleBatchType and
returning a bool (trainable or not?) to include in the returned state.
Returns:
The state dict usable within the `self.__setstate__()` method.
"""
if checkpoint_info["type"] != "Algorithm":
raise ValueError(
"`checkpoint` arg passed to "
"`Algorithm._checkpoint_info_to_algorithm_state()` must be an "
f"Algorithm checkpoint (but is {checkpoint_info['type']})!"
)
with open(checkpoint_info["state_file"], "rb") as f:
state = pickle.load(f)
# New checkpoint format: Policies are in separate sub-dirs.
# Note: Algorithms like ES/ARS don't have a WorkerSet, so we just return
# the plain state here.
if (
checkpoint_info["checkpoint_version"] > version.Version("0.1")
and state.get("worker") is not None
):
worker_state = state["worker"]
# Retrieve the set of all required policy IDs.
policy_ids = set(
policy_ids if policy_ids is not None else worker_state["policy_ids"]
)
# Remove those policies entirely from filters that are not in
# `policy_ids`.
worker_state["filters"] = {
pid: filter
for pid, filter in worker_state["filters"].items()
if pid in policy_ids
}
# Remove policies from multiagent dict that are not in `policy_ids`.
policies_dict = state["config"]["multiagent"]["policies"]
policies_dict = {
pid: spec for pid, spec in policies_dict.items() if pid in policy_ids
}
state["config"]["multiagent"]["policies"] = policies_dict
# Prepare local `worker` state to add policies' states into it,
# read from separate policy checkpoint files.
worker_state["policy_states"] = {}
for pid in policy_ids:
policy_state_file = os.path.join(
checkpoint_info["checkpoint_dir"],
"policies",
pid,
"policy_state.pkl",
)
if not os.path.isfile(policy_state_file):
raise ValueError(
"Given checkpoint does not seem to be valid! No policy "
f"state file found for PID={pid}. "
f"The file not found is: {policy_state_file}."
)
with open(policy_state_file, "rb") as f:
worker_state["policy_states"][pid] = pickle.load(f)
if policy_mapping_fn is not None:
worker_state["policy_mapping_fn"] = policy_mapping_fn
if policies_to_train is not None:
worker_state["is_policy_to_train"] = policies_to_train
return state
@DeveloperAPI
def _create_local_replay_buffer_if_necessary(
self, config: PartialAlgorithmConfigDict
) -> Optional[MultiAgentReplayBuffer]:
"""Create a MultiAgentReplayBuffer instance if necessary.
Args:
config: Algorithm-specific configuration data.
Returns:
MultiAgentReplayBuffer instance based on algorithm config.
None, if local replay buffer is not needed.
"""
if not config.get("replay_buffer_config") or config["replay_buffer_config"].get(
"no_local_replay_buffer"
) or config.get("no_local_replay_buffer"):
return None
buffer_type = config["replay_buffer_config"]["type"]
return from_config(buffer_type, config["replay_buffer_config"])
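# Illustrative `replay_buffer_config` consumed above (keys are common RLlib
# buffer options; concrete defaults vary per algorithm):
#   {
#       "type": "MultiAgentReplayBuffer",
#       "capacity": 50000,
#   }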
@DeveloperAPI
def _kwargs_for_execution_plan(self):
kwargs = {}
if self.local_replay_buffer is not None:
kwargs["local_replay_buffer"] = self.local_replay_buffer
return kwargs
def _run_one_training_iteration(self) -> Tuple[ResultDict, "TrainIterCtx"]:
"""Runs one training iteration (self.iteration will be +1 after this).
Calls `self.training_step()` repeatedly until the minimum time (sec),
sample- or training steps have been reached.
Returns:
The results dict from the training iteration.
"""
# In case we are training (in a thread) parallel to evaluation,
# we may have to re-enable eager mode here (gets disabled in the
# thread).
if (
self.config.get("framework") in ["tf2", "tfe"]
and not tf.executing_eagerly()
):
tf1.enable_eager_execution()
results = None
# Create a step context ...
with TrainIterCtx(algo=self) as train_iter_ctx:
# .. so we can query it whether we should stop the iteration loop (e.g.
# when we have reached `min_time_s_per_iteration`).
num_recreated = 0
while not train_iter_ctx.should_stop(results):
# Try to train one step.
try:
# TODO (avnishn): Remove the execution plan API by q1 2023
with self._timers[TRAINING_ITERATION_TIMER]:
if self.config["_disable_execution_plan_api"]:
results = self.training_step()
else:
results = next(self.train_exec_impl)
# In case of any failures, try to ignore/recover the failed workers.
except Exception as e:
num_recreated += self.try_recover_from_step_attempt(
error=e,
worker_set=self.workers,
ignore=self.config["ignore_worker_failures"],
recreate=self.config["recreate_failed_workers"],
)
results["num_recreated_workers"] = num_recreated
return results, train_iter_ctx
def _run_one_evaluation(
self,
train_future: Optional[concurrent.futures.ThreadPoolExecutor] = None,
) -> ResultDict:
"""Runs evaluation step via `self.evaluate()` and handling worker failures.
Args:
train_future: In case, we are training and avaluating in parallel,
this arg carries the currently running ThreadPoolExecutor
object that runs the training iteration
Returns:
The results dict from the evaluation call.
"""
eval_results = {
"evaluation": {
"episode_reward_max": np.nan,
"episode_reward_min": np.nan,
"episode_reward_mean": np.nan,
}
}
eval_func_to_use = (
self._evaluate_async
if self.config["enable_async_evaluation"]
else self.evaluate
)
num_recreated = 0
try:
if self.config["evaluation_duration"] == "auto":
assert (
train_future is not None
and self.config["evaluation_parallel_to_training"]
)
unit = self.config["evaluation_duration_unit"]
eval_results = eval_func_to_use(
duration_fn=functools.partial(
self._automatic_evaluation_duration_fn,
unit,
self.config["evaluation_num_workers"],
self.config["evaluation_config"],
train_future,
)
)
# Run `self.evaluate()` only once per training iteration.
else:
eval_results = eval_func_to_use()
# In case of any failures, try to ignore/recover the failed evaluation workers.
except Exception as e:
num_recreated = self.try_recover_from_step_attempt(
error=e,
worker_set=self.evaluation_workers,
ignore=self.config["evaluation_config"].get("ignore_worker_failures"),
recreate=self.config["evaluation_config"].get(
"recreate_failed_workers"
),
)
# `self._evaluate_async` handles its own worker failures and already adds
# this metric, but `self.evaluate` doesn't.
if "num_recreated_workers" not in eval_results["evaluation"]:
eval_results["evaluation"]["num_recreated_workers"] = num_recreated
# Add number of healthy evaluation workers after this iteration.
eval_results["evaluation"]["num_healthy_workers"] = (
len(self.evaluation_workers.remote_workers())
if self.evaluation_workers is not None
else 0
)
return eval_results
def _run_one_training_iteration_and_evaluation_in_parallel(
self,
) -> Tuple[ResultDict, "TrainIterCtx"]:
"""Runs one training iteration and one evaluation step in parallel.
First starts the training iteration (via `self._run_one_training_iteration()`)
within a ThreadPoolExecutor, then runs the evaluation step in parallel.
In auto-duration mode (config.evaluation_duration=auto), makes sure the
evaluation step takes roughly the same time as the training iteration.
Returns:
The accumulated training and evaluation results.
"""
with concurrent.futures.ThreadPoolExecutor() as executor:
train_future = executor.submit(lambda: self._run_one_training_iteration())
# Pass the train_future into `self._run_one_evaluation()` to allow it
# to run exactly as long as the training iteration takes in case
# evaluation_duration=auto.
results = self._run_one_evaluation(train_future)
# Collect the training results from the future.
train_results, train_iter_ctx = train_future.result()
results.update(train_results)
return results, train_iter_ctx
@staticmethod
def _automatic_evaluation_duration_fn(
unit, num_eval_workers, eval_cfg, train_future, num_units_done
):
# Training is done and we already ran at least one
# evaluation -> Nothing left to run.
if num_units_done > 0 and train_future.done():
return 0
# Count by episodes. -> Run n more
# (n=num eval workers).
elif unit == "episodes":
return num_eval_workers
# Count by timesteps. -> Run n*m*p more
# (n=num eval workers; m=rollout fragment length;
# p=num-envs-per-worker).
else:
return (
num_eval_workers
* eval_cfg["rollout_fragment_length"]
* eval_cfg["num_envs_per_worker"]
)
def _compile_iteration_results(
self, *, episodes_this_iter, step_ctx, iteration_results=None
):
# Return dict.
results: ResultDict = {}
iteration_results = iteration_results or {}
# Evaluation results.
if "evaluation" in iteration_results:
results["evaluation"] = iteration_results.pop("evaluation")
# Custom metrics and episode media.
results["custom_metrics"] = iteration_results.pop("custom_metrics", {})
results["episode_media"] = iteration_results.pop("episode_media", {})
results["num_recreated_workers"] = iteration_results.pop(
"num_recreated_workers", 0
)
# Learner info.
results["info"] = {LEARNER_INFO: iteration_results}
# Calculate how many (if any) older, historical episodes we have to add to
# `episodes_this_iter` in order to reach the required smoothing window.
episodes_for_metrics = episodes_this_iter[:]
missing = self.config["metrics_num_episodes_for_smoothing"] - len(
episodes_this_iter
)
# We have to add some older episodes to reach the smoothing window size.
if missing > 0:
episodes_for_metrics = self._episode_history[-missing:] + episodes_this_iter
assert (
len(episodes_for_metrics)
<= self.config["metrics_num_episodes_for_smoothing"]
)
# Note that when there are more than `metrics_num_episodes_for_smoothing`
# episodes in `episodes_for_metrics`, leave them as-is. In this case, we'll
# compute the stats over that larger number.
# Add new episodes to our history and make sure it doesn't grow larger than
# needed.
self._episode_history.extend(episodes_this_iter)
self._episode_history = self._episode_history[
-self.config["metrics_num_episodes_for_smoothing"] :
]
results["sampler_results"] = summarize_episodes(
episodes_for_metrics,
episodes_this_iter,
self.config["keep_per_episode_custom_metrics"],
)
# TODO: Don't dump sampler results into top-level.
results.update(results["sampler_results"])
results["num_healthy_workers"] = len(self.workers.remote_workers())
# Train-steps- and env/agent-steps this iteration.
for c in [
NUM_AGENT_STEPS_SAMPLED,
NUM_AGENT_STEPS_TRAINED,
NUM_ENV_STEPS_SAMPLED,
NUM_ENV_STEPS_TRAINED,
]:
results[c] = self._counters[c]
if self._by_agent_steps:
results[NUM_AGENT_STEPS_SAMPLED + "_this_iter"] = step_ctx.sampled
results[NUM_AGENT_STEPS_TRAINED + "_this_iter"] = step_ctx.trained
# TODO: For CQL and other algos, count by trained steps.
results["timesteps_total"] = self._counters[NUM_AGENT_STEPS_SAMPLED]
# TODO: Backward compatibility.
results[STEPS_TRAINED_THIS_ITER_COUNTER] = step_ctx.trained
else:
results[NUM_ENV_STEPS_SAMPLED + "_this_iter"] = step_ctx.sampled
results[NUM_ENV_STEPS_TRAINED + "_this_iter"] = step_ctx.trained
# TODO: For CQL and other algos, count by trained steps.
results["timesteps_total"] = self._counters[NUM_ENV_STEPS_SAMPLED]
# TODO: Backward compatibility.
results[STEPS_TRAINED_THIS_ITER_COUNTER] = step_ctx.trained
# TODO: Backward compatibility.
results["agent_timesteps_total"] = self._counters[NUM_AGENT_STEPS_SAMPLED]
# Process timer results.
timers = {}
for k, timer in self._timers.items():
timers["{}_time_ms".format(k)] = round(timer.mean * 1000, 3)
if timer.has_units_processed():
timers["{}_throughput".format(k)] = round(timer.mean_throughput, 3)
results["timers"] = timers
# Process counter results.
counters = {}
for k, counter in self._counters.items():
counters[k] = counter
results["counters"] = counters
# TODO: Backward compatibility.
results["info"].update(counters)
return results
def __repr__(self):
return type(self).__name__
def _record_usage(self, config):
"""Record the framework and algorithm used.
Args:
config: Algorithm config dict.
"""
record_extra_usage_tag(TagKey.RLLIB_FRAMEWORK, config["framework"])
record_extra_usage_tag(TagKey.RLLIB_NUM_WORKERS, str(config["num_workers"]))
alg = self.__class__.__name__
# We do not want to collect user defined algorithm names.
if alg not in ALL_ALGORITHMS:
alg = "USER_DEFINED"
record_extra_usage_tag(TagKey.RLLIB_ALGORITHM, alg)
@Deprecated(new="Algorithm.compute_single_action()", error=True)
def compute_action(self, *args, **kwargs):
return self.compute_single_action(*args, **kwargs)
@Deprecated(new="construct WorkerSet(...) instance directly", error=False)
def _make_workers(
self,
*,
env_creator: EnvCreator,
validate_env: Optional[Callable[[EnvType, EnvContext], None]],
policy_class: Type[Policy],
config: AlgorithmConfigDict,
num_workers: int,
local_worker: bool = True,
) -> WorkerSet:
return WorkerSet(
env_creator=env_creator,
validate_env=validate_env,
policy_class=policy_class,
trainer_config=config,
num_workers=num_workers,
local_worker=local_worker,
logdir=self.logdir,
)
@staticmethod
@Deprecated(new="Algorithm.validate_config()", error=True)
def _validate_config(config, trainer_or_none):
assert trainer_or_none is not None
return trainer_or_none.validate_config(config)
# TODO: Create a dict that throws a deprecation warning once we have fully
# moved to AlgorithmConfig() objects (some algos still missing).
COMMON_CONFIG: AlgorithmConfigDict = AlgorithmConfig(Algorithm).to_dict()
class TrainIterCtx:
def __init__(self, algo: Algorithm):
self.algo = algo
def __enter__(self):
# Before first call to `step()`, `results` is expected to be None ->
# Start with self.failures=-1 -> set to 0 before the very first call
# to `self.step()`.
self.failures = -1
self.time_start = time.time()
self.sampled = 0
self.trained = 0
self.init_env_steps_sampled = self.algo._counters[NUM_ENV_STEPS_SAMPLED]
self.init_env_steps_trained = self.algo._counters[NUM_ENV_STEPS_TRAINED]
self.init_agent_steps_sampled = self.algo._counters[NUM_AGENT_STEPS_SAMPLED]
self.init_agent_steps_trained = self.algo._counters[NUM_AGENT_STEPS_TRAINED]
self.failure_tolerance = self.algo.config[
"num_consecutive_worker_failures_tolerance"
]
return self
def __exit__(self, *args):
pass
def should_stop(self, results):
# Before first call to `step()`.
if results is None:
# Fail after n retries.
self.failures += 1
if self.failures > self.failure_tolerance:
raise RuntimeError(
"More than `num_consecutive_worker_failures_tolerance="
f"{self.failure_tolerance}` consecutive worker failures! "
"Exiting."
)
# Continue to very first `step()` call or retry `step()` after
# a (tolerable) failure.
return False
# Stopping criteria: Only when using the `training_iteration`
# API, b/c for the `exec_plan` API, the logic to stop is
# already built into the execution plans via the
# `StandardMetricsReporting` op.
elif self.algo.config["_disable_execution_plan_api"]:
if self.algo._by_agent_steps:
self.sampled = (
self.algo._counters[NUM_AGENT_STEPS_SAMPLED]
- self.init_agent_steps_sampled
)
self.trained = (
self.algo._counters[NUM_AGENT_STEPS_TRAINED]
- self.init_agent_steps_trained
)
else:
self.sampled = (
self.algo._counters[NUM_ENV_STEPS_SAMPLED]
- self.init_env_steps_sampled
)
self.trained = (
self.algo._counters[NUM_ENV_STEPS_TRAINED]
- self.init_env_steps_trained
)
min_t = self.algo.config["min_time_s_per_iteration"]
min_sample_ts = self.algo.config["min_sample_timesteps_per_iteration"]
min_train_ts = self.algo.config["min_train_timesteps_per_iteration"]
# Repeat if not enough time has passed or if not enough
# env|train timesteps have been processed (or these min
# values are not provided by the user).
if (
(not min_t or time.time() - self.time_start >= min_t)
and (not min_sample_ts or self.sampled >= min_sample_ts)
and (not min_train_ts or self.trained >= min_train_ts)
):
return True
else:
return False
# No errors (we got results != None) -> Return True
# (meaning: yes, should stop -> no further step attempts).
else:
return True
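# Usage sketch (mirrors `_run_one_training_iteration` above; `algo` is an
# Algorithm instance):
#   with TrainIterCtx(algo=algo) as ctx:
#       results = None
#       while not ctx.should_stop(results):
#           results = algo.training_step()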
|
[
"noreply@github.com"
] |
vakker.noreply@github.com
|
17c7afc8245e0de1833dc9c9650a56e655b84873
|
1dc210fbb89aa82ce16e27756f4c6bda624024ab
|
/blog/migrations/0002_post_insert_date.py
|
a885948673607242952b80e2729b4e762b8ad64c
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
aabele/kasmanzvana.lv
|
68f3a118a9081639578fe29842abf50ab8c8af6c
|
409df7a776d17ea7fff5519fe270f73c02428a41
|
refs/heads/master
| 2022-11-28T00:07:24.900787
| 2018-03-21T07:23:51
| 2018-03-21T07:23:51
| 126,135,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-30 21:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='post',
name='insert_date',
field=models.DateTimeField(auto_now_add=True, null=True),
),
]
|
[
"aabele@gmail.com"
] |
aabele@gmail.com
|
8faf2068a458cf72abd7903f35a6d54f9f5fb74c
|
cdd878d50c423a8e6b282d6b1fb177ac35210880
|
/config.py
|
aeecf863e06726014ab4cbc8756829a475e1ad53
|
[] |
no_license
|
alexmochu/WeConnect-API-oop-
|
83dbb66656342ecf1aa9dd74b702fe0bbdb873bc
|
bfe48150f05b447d432819e90bc385d870b31ee2
|
refs/heads/master
| 2021-01-24T02:23:33.609665
| 2018-05-18T12:44:19
| 2018-05-18T12:44:19
| 122,846,674
| 1
| 1
| null | 2019-10-18T16:37:33
| 2018-02-25T15:06:06
|
Python
|
UTF-8
|
Python
| false
| false
| 383
|
py
|
# config.py
class Config(object):
""" Common or Parent configuration class."""
DEBUG = True
class DevelopmentConfig(Config):
""" Development Configurations"""
DEBUG = True
class TestingConfig(Config):
""" Testing Configurations"""
TESTING = True
DEBUG = True
class ProductionConfig(Config):
""" Configurations for Production."""
DEBUG = False
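# Usage sketch (assumes a Flask-style `app` object; Flask itself is not
# imported in this module):
#   app.config.from_object('config.DevelopmentConfig')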
|
[
"Mochualex4@gmail.com"
] |
Mochualex4@gmail.com
|
6345ca5f9e91bd0fbd6aebb9ade41315802962db
|
f6bebd025f62621478401dbb75e0cf852171058c
|
/twitter-reply-notification.py
|
e53654e47062e878817a783d7d2ae4f68c280286
|
[] |
no_license
|
chmouel/twitter-reply-notification
|
9a0e683e35e9156b6a291d1f092cf6a509274732
|
6bc8d32b20f8daabe963eef825e48b17275820e7
|
refs/heads/master
| 2020-05-29T10:19:26.559288
| 2010-01-07T15:13:04
| 2010-01-07T15:13:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,515
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
#
# Chmouel Boudjnah <chmouel@chmouel.com>
import twitter
import rfc822
import datetime
import os
import ConfigParser
import stat
import sys
import urllib2
CONFIG_FILE = os.path.expanduser("~/.config/twitter-reply-notification/config.ini")
CACHE_FILE = os.path.expanduser("~/.cache/twitter-reply-notification/cache")
def setup_dir():
if not os.path.exists(os.path.dirname(CONFIG_FILE)):
os.mkdir(os.path.dirname(CONFIG_FILE), 0755)
if not os.path.exists(os.path.dirname(CACHE_FILE)):
os.mkdir(os.path.dirname(CACHE_FILE), 0755)
def send_mail(config, text):
p = os.popen("%s -t" % config['sendmail_location'], "w")
p.write("From: %s\n" % config['mail_from'])
p.write("To: %s\n" % config['mail_to'])
p.write("Subject: New Twit (%d)\n" % (len(text)))
p.write("\n") #
p.write("\n".join(text))
status = p.close()
if status == 256:
sys.exit(1)
def parse_config(config_file):
if not os.path.exists(config_file):
config = ConfigParser.ConfigParser()
config.add_section("auth")
config.set("auth", "username", "")
config.set("auth", "password", "")
config.add_section("mail")
config.set("mail", "from", "")
config.set("mail", "to", "")
config.set("mail", "sendmail_location", "/usr/sbin/sendmail")
config.write(open(config_file, 'w'))
return
filemode = stat.S_IMODE(os.stat(config_file).st_mode) & 0777
if filemode != 384:  # 384 == 0600 in octal
os.chmod(config_file, 0600)
config = ConfigParser.ConfigParser()
cfh = config.read(config_file)
if not cfh:
return
username = config.get('auth', 'username').strip()
password = config.get('auth', 'password').strip()
mail_from = config.get('mail', 'from').strip()
mail_to = config.get('mail', 'to').strip()
sendmail_location = config.get('mail', 'sendmail_location').strip()
if not all([username, password, mail_from, mail_to]):
return
return {
'username' : username,
'password' : password,
'mail_from' : mail_from,
'mail_to' : mail_to,
'sendmail_location' : sendmail_location,
}
def get_replies(config):
ret = []
api = twitter.Api(config['username'], config['password'])
replies = api.GetReplies(since_id=int(config['last_seen_id']))
if not replies:
return (None, None)
last = replies[0].id
for reply in replies:
fdate = datetime.datetime(*rfc822.parsedate(
reply.created_at)[:-2]).strftime("%H:%M")
ret.append("%s - %s: %s" % (fdate, reply.user.name, reply.text))
return (ret, last)
def parse_last_seen_id():
if not os.path.exists(CACHE_FILE):
return
last_seen_id = open(CACHE_FILE, 'r').read().strip()
if last_seen_id:
return last_seen_id
return None
def main():
setup_dir()
config = parse_config(CONFIG_FILE)
if not config:
print "Configuration is missing"
sys.exit(1)
return
config['last_seen_id'] = parse_last_seen_id()
try:
text, last_seen_id = get_replies(config)
except urllib2.URLError:
return
if not text or not last_seen_id:
return
if config['last_seen_id'] and \
(int(last_seen_id) == int(config['last_seen_id'])):
return
open(CACHE_FILE, "w").write(str(last_seen_id))
send_mail(config, text)
if __name__ == '__main__':
main()
|
[
"chmouel.boudjnah@rackspace.co.uk"
] |
chmouel.boudjnah@rackspace.co.uk
|
3c82d59c15b5c2b692af3bef9c303e878b6a2a8a
|
af2dee4ab4df778801dbe16346c76b295bcad6be
|
/Projects/bom_words/test.py
|
afe55c0eac6c0861d4f768825bc9743cad2833e5
|
[] |
no_license
|
cpalm9/PythonDataStructures
|
7420881c11bc6003fc97359653260ee028e1d555
|
ee40fe85d3ac56a7e448ba5e598ea1c55cff0233
|
refs/heads/master
| 2021-08-31T09:37:02.925765
| 2017-12-15T02:21:59
| 2017-12-15T02:21:59
| 102,979,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,238
|
py
|
from sort_api import SortMethods
from worddata import WordData
import unittest
class UnitTest(unittest.TestCase):
test_list = []
def test_bubble_sort(self):
s = SortMethods()
test_list = [5,4,2,7,6,9,1]
s.bubble_sort_test(test_list)
self.assertEqual(test_list[6], 9)
test_list = ['a', 'c', 'd', 'b', 'f', 'g', 'i', 'h', 'm', 'j']
s.bubble_sort_test(test_list)
self.assertEqual(test_list[2], 'c')
self.assertEqual(test_list[5], 'g')
def test_insert_sort(self):
s = SortMethods()
test_list = [5,4,2,7,6,9,1, 100, -1, 55]
s.insert_sort_test(test_list)
self.assertEqual(test_list[0], -1)
self.assertEqual(test_list[3], 4)
test_list = ['a', 'c', 'd', 'b', 'f', 'g', 'i', 'h', 'm', 'j']
s.insert_sort_test(test_list)
self.assertEqual(test_list[9], 'm')
def test_select_sort(self):
s = SortMethods()
test_list = [5,4,2,7,6,9,1]
s.select_sort(test_list)
self.assertEqual(test_list[1], 2)
test_list = ['a', 'c', 'd', 'b']
s.select_sort(test_list)
self.assertEqual(test_list[2], 'c')
if __name__ == '__main__':
unittest.main()
|
[
"christian_palmer@byu.edu"
] |
christian_palmer@byu.edu
|
d6405350eccf438965119c55bf9bfb85bebcba7f
|
f709a86ee1eff46e84d00ef1820f27d0d9641aba
|
/8,9일차/random_실습2.py
|
2e6edda394afd4668117db2dd7cb99c3ecc95b33
|
[] |
no_license
|
limsehui/likelionstudy
|
a697e3a306edb49fbc09700d4390f56109e4f7a5
|
10a77a2f503283eb48a314652debfa57ed2d2a78
|
refs/heads/master
| 2020-04-13T05:50:49.106004
| 2019-01-18T09:45:43
| 2019-01-18T09:45:43
| 163,004,861
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 73
|
py
|
import random
random_number = random.randint(2, 5)
print(random_number)
|
[
"tpgml944@gmail.com"
] |
tpgml944@gmail.com
|
6a09c9dd986fd82c23206712f58db29e1d2ab891
|
2dfad29c6c93e420ebe20168ef18c402f9a5c636
|
/tests/test_media_utils.py
|
51508d0cb0e0d4f4e4654c6ec23629e48c1eb516
|
[
"MIT",
"LicenseRef-scancode-ubuntu-font-1.0"
] |
permissive
|
susantabiswas/realtime-facial-emotion-analyzer
|
b9c3c80342df7c79ad5297407e2eb0214a0fa307
|
e9d5ee19aebe7602de91ba57ade15a6655944ed6
|
refs/heads/master
| 2021-12-15T00:42:51.190038
| 2021-12-10T15:50:10
| 2021-12-10T15:50:10
| 129,482,642
| 93
| 29
|
MIT
| 2021-12-05T17:20:50
| 2018-04-14T04:47:12
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,470
|
py
|
# ---- coding: utf-8 ----
# ===================================================
# Author: Susanta Biswas
# ===================================================
"""Description: Tests for media utils."""
# ===================================================
from emotion_analyzer.exceptions import InvalidImage
import pytest
from emotion_analyzer.media_utils import (
convert_to_dlib_rectangle,
convert_to_rgb,
load_image_path,
)
import numpy as np
import cv2
import dlib
def test_convert_to_dlib_rectangle():
""" Check if dlib rectangle is created properly"""
bbox = [1, 2, 3, 4]
dlib_box = dlib.rectangle(bbox[0], bbox[1], bbox[2], bbox[3])
assert convert_to_dlib_rectangle(bbox) == dlib_box
def test_load_image_path():
""" Check if exception is thrown when an invalid array is given"""
path = "data/sample/1.jpg"
img = cv2.imread(path)
img = convert_to_rgb(img)
loaded_img = load_image_path(path)
assert np.all(loaded_img == img) == True
def test_convert_to_rgb_exception():
""" Check if exception is thrown when an invalid array is given"""
# create a dummy image
img = np.zeros((100, 100, 5))
with pytest.raises(InvalidImage):
convert_to_rgb(img)
def test_convert_to_rgb(img1_data):
""" Check if RGB conversion happens correctly"""
rgb = cv2.cvtColor(img1_data, cv2.COLOR_BGR2RGB)
converted_img = convert_to_rgb(img1_data)
assert np.all(rgb == converted_img) == True
|
[
"17782600+susantabiswas@users.noreply.github.com"
] |
17782600+susantabiswas@users.noreply.github.com
|
70ef858cdea40eb5fbf28ecf09e21e2cda639394
|
0499444beef10cefa5f071d652f8ac73f5436bd1
|
/simplemooc/courses/admin.py
|
aa6c08ebee37ae82a0fa6aa01ca730831fe593c7
|
[] |
no_license
|
iagoid/Django-SimpleMooc
|
42fc239a12a11f143fa86e765e3936ebdc47170b
|
0fe810230fb55af0bc41e56bae5849fea5710769
|
refs/heads/master
| 2022-11-16T11:13:45.187485
| 2020-07-11T13:28:26
| 2020-07-11T13:28:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 755
|
py
|
from django.contrib import admin
from .models import Course, Enrollment, Announcement, Comment, Lesson, Material
class CourseAdmin(admin.ModelAdmin):
list_display = ['name', 'slug', 'start_date', 'created_at']
search_fields = ['name', 'slug']
prepopulated_fields = {'slug': ('name',)}
class MaterialInlineAdmin(admin.TabularInline):
model = Material
class LessonAdmin(admin.ModelAdmin):
list_display = ['name', 'number', 'course', 'release_date']
search_fields = ['name', 'description']
list_filter = ['created_at']
inlines = [
MaterialInlineAdmin
]
admin.site.register(Course, CourseAdmin)
admin.site.register([Enrollment, Announcement, Comment, Material])
admin.site.register(Lesson, LessonAdmin)
|
[
"iagoid01@gmail.com"
] |
iagoid01@gmail.com
|
f2af3503bf7206c6d28a8f29b727061a682f9706
|
3bafaed1d12e4e1fb221a11998a7b9a858b04644
|
/App/migrations/0013_auto_20201230_1553.py
|
fb1ff2ce8bdd568a36fb4d395ecb6cc782160ba0
|
[] |
no_license
|
nian-20/AtroBOM
|
8c96e9247292b5f4a3a4f22b7d93a8749f7ed80c
|
0370636238e722489b3fddc3a65d4e9ceb7cbfb0
|
refs/heads/master
| 2023-08-15T09:13:10.042024
| 2021-09-30T19:12:03
| 2021-09-30T19:12:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
# Generated by Django 3.1.4 on 2020-12-30 12:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('App', '0012_auto_20201230_1544'),
]
operations = [
migrations.AlterField(
model_name='rate',
name='rate',
# Persian verbose_name; it translates to "consumption factor".
field=models.CharField(blank=True, max_length=10, null=True, verbose_name=' ضریب مصرف '),
),
]
|
[
"nimadorostkar97@gmail.com"
] |
nimadorostkar97@gmail.com
|
228ef04b63368a02967ba7ea70cc55c1e3c00a81
|
bb62611c991456e694f00ad3229d530fd34c3ee4
|
/ch7_rvmclass.py
|
5d1da5b0ead796c45a3a02c7422b9ea78a063622
|
[] |
no_license
|
aralhekimoglu/PRMLAlgorithms
|
1afb8024181eb3d1afe4f50efdaecd86677fb7a6
|
7889cc275c456a8d8d12e9890c59dfdf88a358ce
|
refs/heads/master
| 2022-10-25T18:52:30.143037
| 2022-10-16T06:20:25
| 2022-10-16T06:20:25
| 126,392,197
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,531
|
py
|
import numpy as np
import matplotlib.pyplot as plt
class Kernel(object):
"""
Base class for kernel function
"""
def _pairwise(self, x, y):
"""
all pairs of x and y
Parameters
----------
x : (sample_size, n_features)
input
y : (sample_size, n_features)
another input
Returns
-------
output : tuple
two array with shape (sample_size, sample_size, n_features)
"""
return (
np.tile(x, (len(y), 1, 1)).transpose(1, 0, 2),
np.tile(y, (len(x), 1, 1))
)
class RBF(Kernel):
def __init__(self, params):
"""
construct Radial basis kernel function
Parameters
----------
params : (ndim + 1,) ndarray
parameters of radial basis function
Attributes
----------
ndim : int
dimension of expected input data
"""
assert params.ndim == 1
self.params = params
self.ndim = len(params) - 1
def __call__(self, x, y, pairwise=True):
"""
calculate radial basis function
k(x, y) = c0 * exp(-0.5 * c1 * (x1 - y1) ** 2 ...)
Parameters
----------
x : ndarray [..., ndim]
input of this kernel function
y : ndarray [..., ndim]
another input
Returns
-------
output : ndarray
output of this radial basis function
"""
assert x.shape[-1] == self.ndim
assert y.shape[-1] == self.ndim
if pairwise:
x, y = self._pairwise(x, y)
d = self.params[1:] * (x - y) ** 2
return self.params[0] * np.exp(-0.5 * np.sum(d, axis=-1))
def derivatives(self, x, y, pairwise=True):
if pairwise:
x, y = self._pairwise(x, y)
d = self.params[1:] * (x - y) ** 2
delta = np.exp(-0.5 * np.sum(d, axis=-1))
deltas = -0.5 * (x - y) ** 2 * (delta * self.params[0])[:, :, None]
return np.concatenate((np.expand_dims(delta, 0), deltas.T))
def update_parameters(self, updates):
self.params += updates
class RelevanceVectorClassifier(object):
def __init__(self, kernel, alpha=1.):
"""
construct relevance vector classifier
Parameters
----------
kernel : Kernel
kernel function to compute components of feature vectors
alpha : float
initial precision of prior weight distribution
"""
self.kernel = kernel
self.alpha = alpha
def _sigmoid(self, a):
return np.tanh(a * 0.5) * 0.5 + 0.5
def _map_estimate(self, X, t, w, n_iter=10):
for _ in range(n_iter):
y = self._sigmoid(X.dot(w))  # predicted probabilities
g = X.T.dot(y - t) + self.alpha * w  # gradient of the negative log posterior
H = (X.T * y * (1 - y)).dot(X) + np.diag(self.alpha)  # Hessian
w -= np.linalg.solve(H, g)  # Newton-Raphson step
return w, np.linalg.inv(H)
def fit(self, X, t, iter_max=100):
"""
maximize the evidence with respect to the hyperparameters
Parameters
----------
X : (sample_size, n_features) ndarray
input
t : (sample_size,) ndarray
corresponding target
iter_max : int
maximum number of iterations
Attributes
----------
X : (N, n_features) ndarray
relevance vector
t : (N,) ndarray
corresponding target
alpha : (N,) ndarray
hyperparameter for each weight or training sample
cov : (N, N) ndarray
covariance matrix of weight
mean : (N,) ndarray
mean of each weight
"""
if X.ndim == 1:
X = X[:, None]
assert X.ndim == 2
assert t.ndim == 1
Phi = self.kernel(X, X)
N = len(t)
self.alpha = np.zeros(N) + self.alpha
mean = np.zeros(N)
for _ in range(iter_max):
param = np.copy(self.alpha)
mean, cov = self._map_estimate(Phi, t, mean, 10)
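# evidence re-estimation (MacKay update): gamma_i = 1 - alpha_i * Sigma_ii measures
# how well-determined weight i is, and alpha_i is re-set to gamma_i / m_i^2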
gamma = 1 - self.alpha * np.diag(cov)
self.alpha = gamma / np.square(mean)
np.clip(self.alpha, 0, 1e10, out=self.alpha)
if np.allclose(param, self.alpha):
break
mask = self.alpha < 1e8
self.X = X[mask]
self.t = t[mask]
self.alpha = self.alpha[mask]
Phi = self.kernel(self.X, self.X)
mean = mean[mask]
self.mean, self.covariance = self._map_estimate(Phi, self.t, mean, 100)
def predict(self, X):
"""
predict class label
Parameters
----------
X : (sample_size, n_features)
input
Returns
-------
label : (sample_size,) ndarray
predicted label
"""
if X.ndim == 1:
X = X[:, None]
assert X.ndim == 2
phi = self.kernel(X, self.X)
label = (phi.dot(self.mean) > 0).astype(int)  # np.int is removed in modern NumPy
return label
def predict_proba(self, X):
"""
probability of input belonging class one
Parameters
----------
X : (sample_size, n_features) ndarray
input
Returns
-------
proba : (sample_size,) ndarray
probability of predictive distribution p(C1|x)
"""
if X.ndim == 1:
X = X[:, None]
assert X.ndim == 2
phi = self.kernel(X, self.X)
mu_a = phi.dot(self.mean)
var_a = np.sum(phi.dot(self.covariance) * phi, axis=1)
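# probit approximation to the convolution of a logistic sigmoid with a Gaussian:
# sigma(mu / sqrt(1 + pi * var / 8))  (Bishop, PRML, section 4.5.2)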
return self._sigmoid(mu_a / np.sqrt(1 + np.pi * var_a / 8))
def create_toy_data():
x0 = np.random.normal(size=100).reshape(-1, 2) - 1.
x1 = np.random.normal(size=100).reshape(-1, 2) + 1.
x = np.concatenate([x0, x1])
y = np.concatenate([np.zeros(50), np.ones(50)]).astype(int)
return x, y
x_train, y_train = create_toy_data()
model = RelevanceVectorClassifier(RBF(np.array([1., 0.5, 0.5])))
model.fit(x_train, y_train)
x0, x1 = np.meshgrid(np.linspace(-4, 4, 100), np.linspace(-4, 4, 100))
x = np.array([x0, x1]).reshape(2, -1).T
plt.scatter(x_train[:, 0], x_train[:, 1], s=40, c=y_train, marker="x")
plt.scatter(model.X[:, 0], model.X[:, 1], s=100, facecolor="none", edgecolor="g")
plt.contourf(x0, x1, model.predict_proba(x).reshape(100, 100), np.linspace(0, 1, 5), alpha=0.2)
plt.colorbar()
plt.xlim(-4, 4)
plt.ylim(-4, 4)
plt.gca().set_aspect("equal", adjustable="box")
|
[
"noreply@github.com"
] |
aralhekimoglu.noreply@github.com
|
7cab64eb481e0b3a7c9d90be43880cc7cb881492
|
b7139c6f5a4419326e0b856f34639f64271c5a3d
|
/cloud-backup.py
|
09350d23d8eb2884f7c4029f891878551475b27c
|
[] |
no_license
|
todokku/cloud-backup
|
3f7a90bf6db42285f1d9993e9903f7b97c2e134c
|
7a2eaae1e141da91c5dbe8dec73b0c87be8c7406
|
refs/heads/master
| 2022-07-22T13:33:16.791842
| 2020-05-22T18:55:49
| 2020-05-22T18:55:49
| 266,587,310
| 1
| 0
| null | 2020-05-24T17:07:13
| 2020-05-24T17:07:12
| null |
UTF-8
|
Python
| false
| false
| 105
|
py
|
import os
import sys
os.system('chmod +x cloud-backup.sh')
os.system('./cloud-backup.sh')
print('Fine.')
|
[
"lazzaronipaolo@outlook.it"
] |
lazzaronipaolo@outlook.it
|
4d23ee33155b9c450ac2527ebbca1bc314bb0740
|
32647bb5265c91f68dcb92cdf298c4b38d6b60ec
|
/test/test_parsing.py
|
7faeaaf57b3f10d9c7f4b164f22786765fb13b31
|
[] |
no_license
|
kevin-brown/SmokeDetector
|
a1a7b887640acad3a460c9bb7550f9ccc772b14e
|
f1c1ad69e715100a2ec64ec8ca00d6010818d9d0
|
refs/heads/master
| 2021-01-18T01:33:37.203878
| 2015-12-27T17:04:46
| 2015-12-27T17:04:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,798
|
py
|
from parsing import *
import pytest
test_data_inputs = []
with open("test/data_test_parsing.txt", "r") as f:
test_data_inputs = f.readlines()
# Large inputs should go to that file.
# Only inputs should go there, not the parsing method and the expected output,
# because the input is always a string and `parse_method` and `expected` are not.
@pytest.mark.parametrize("input_data, parse_method, expected", [
('Testing * escaping of ] special [ characters', escape_special_chars_in_title, 'Testing \* escaping of \] special \[ characters'),
('HTML &#39; unescaping&lt;', unescape_title, 'HTML \' unescaping<'),
('http://physics.stackexchange.com/users/7433/manishearth', get_user_from_url, ('7433', 'physics.stackexchange.com')),
('http://softwarerecs.stackexchange.com/users/46/undo', get_user_from_url, ('46', 'softwarerecs.stackexchange.com')),
('http://earthscience.stackexchange.com/users/20/hichris123', get_user_from_url, ('20', 'earthscience.stackexchange.com')),
('http://codegolf.stackexchange.com/users/9275/programfox', get_user_from_url, ('9275', 'codegolf.stackexchange.com')),
('http://stackoverflow.com/users/1/jeff-atwood', get_user_from_url, ('1', 'stackoverflow.com')),
('http://mathoverflow.net/users/66/ben-webster', get_user_from_url, ('66', 'mathoverflow.net')),
('http://codegolf.stackexchange.com/u/9275', get_user_from_url, ('9275', 'codegolf.stackexchange.com')),
('http://codegolf.stackexchange.com/u/9275/', get_user_from_url, ('9275', 'codegolf.stackexchange.com')),
('http://codegolf.stackexchange.com/users/9275', get_user_from_url, ('9275', 'codegolf.stackexchange.com')),
('http://codegolf.stackexchange.com/users/9275/', get_user_from_url, ('9275', 'codegolf.stackexchange.com')),
('//stackoverflow.com/users/1/jeff-atwood', get_user_from_url, ('1', 'stackoverflow.com')),
('!!/addblu http://stackoverflow.com/users/0/test', get_user_from_list_command, ('0', 'stackoverflow.com')),
('!!/rmblu http://stackoverflow.com/users/0/test', get_user_from_list_command, ('0', 'stackoverflow.com')),
('!!/addwlu http://stackoverflow.com/users/0/test', get_user_from_list_command, ('0', 'stackoverflow.com')),
('!!/rmwlu http://stackoverflow.com/users/0/test', get_user_from_list_command, ('0', 'stackoverflow.com')),
('!!/addwlu http://codegolf.stackexchange.com/users/9275/programfox', get_user_from_list_command, ('9275', 'codegolf.stackexchange.com')),
('!!/addwlu http://mathoverflow.net/users/66/ben-webster', get_user_from_list_command, ('66', 'mathoverflow.net')),
('!!/rmblu 1234 stackoverflow.com', get_user_from_list_command, ('1234', 'stackoverflow.com')),
('!!/rmwlu 4321 communitybuilding.stackexchange.com', get_user_from_list_command, ('4321', 'communitybuilding.stackexchange.com')),
('!!/addblu 1 stackoverflow', get_user_from_list_command, ('1', 'stackoverflow.com')),
('http://stackoverflow.com/questions/1/title-here', url_to_shortlink, 'http://stackoverflow.com/questions/1'),
('http://stackoverflow.com/questions/1/title-here/2#2', url_to_shortlink, 'http://stackoverflow.com/a/2'),
('http://writers.stackexchange.com/questions/1/%2f%2f', url_to_shortlink, 'http://writers.stackexchange.com/questions/1'),
('http://writers.stackexchange.com/questions/1/%2f%2f/2#2', url_to_shortlink, 'http://writers.stackexchange.com/a/2'),
('http://mathoverflow.net/q/1', url_to_shortlink, 'http://mathoverflow.net/questions/1'),
('http://stackexchange.com', to_protocol_relative, '//stackexchange.com'),
('https://stackexchange.com', to_protocol_relative, '//stackexchange.com'),
('//stackexchange.com', to_protocol_relative, '//stackexchange.com'),
('sd 2tpu', preprocess_shortcut_command, 'sd tpu tpu'),
('sd - 3tpu fp', preprocess_shortcut_command, 'sd - tpu tpu tpu fp'),
('sd 3- 2fp', preprocess_shortcut_command, 'sd - - - fp fp'),
('sd tpu fp ignore delete', preprocess_shortcut_command, 'sd tpu fp ignore delete'),
('sd 5-', preprocess_shortcut_command, 'sd - - - - -'),
('sd tpu', preprocess_shortcut_command, 'sd tpu'),
('sd 2 tpu', preprocess_shortcut_command, 'sd tpu tpu'),
('sd fp 3 tpu', preprocess_shortcut_command, 'sd fp tpu tpu tpu'),
(test_data_inputs[0], fetch_post_id_and_site_from_msg_content, ('246651', 'meta.stackexchange.com', 'question')),
(test_data_inputs[0], fetch_owner_url_from_msg_content, 'http://meta.stackexchange.com/users/279263/lisa-usher'),
(test_data_inputs[0], fetch_title_from_msg_content, 'Best Weight Loss Tips For Fast Results'),
(test_data_inputs[1], fetch_post_url_from_msg_content, 'http://stackoverflow.com/questions/0/test-test'),
(test_data_inputs[1], fetch_post_id_and_site_from_msg_content, ('0', 'stackoverflow.com', 'question')),
(test_data_inputs[1], fetch_owner_url_from_msg_content, 'http://stackoverflow.com/users/0/test-test'),
(test_data_inputs[1], fetch_title_from_msg_content, 'TEST TEST TEST ]]])))'),
(test_data_inputs[2], fetch_post_url_from_msg_content, 'http://stackoverflow.com/questions/0/test-test/42#42'),
(test_data_inputs[2], fetch_post_id_and_site_from_msg_content, ('42', 'stackoverflow.com', 'answer')),
(test_data_inputs[2], fetch_owner_url_from_msg_content, 'http://stackoverflow.com/users/0/test-test'),
(test_data_inputs[3], fetch_post_id_and_site_from_msg_content, ('27954020', 'stackoverflow.com', 'question')),
(test_data_inputs[3], fetch_owner_url_from_msg_content, 'http://stackoverflow.com/users/3754535/user3754535'),
(test_data_inputs[3], fetch_title_from_msg_content, "Why I can't insert data in a model from a custom controller?"),
(test_data_inputs[4], fetch_post_id_and_site_from_msg_content, ('27954020', 'stackoverflow.com', 'question')),
(test_data_inputs[4], fetch_owner_url_from_msg_content, None),
(test_data_inputs[4], fetch_title_from_msg_content, "Why I can't insert data in a model from a custom controller?"),
(test_data_inputs[5], fetch_post_id_and_site_from_msg_content, ('246651', 'meta.stackexchange.com', 'question')),
(test_data_inputs[5], fetch_owner_url_from_msg_content, 'http://meta.stackexchange.com/users/279263/lisa-usher'),
(test_data_inputs[5], fetch_title_from_msg_content, 'Best Weight Loss Tips For Fast Results'),
(test_data_inputs[6], fetch_post_url_from_msg_content, 'http://stackoverflow.com/q/0'),
(test_data_inputs[6], fetch_post_id_and_site_from_msg_content, ('0', 'stackoverflow.com', 'question')),
(test_data_inputs[6], fetch_owner_url_from_msg_content, 'http://stackoverflow.com/users/0/test-test'),
(test_data_inputs[6], fetch_title_from_msg_content, 'TEST TEST TEST ]]])))'),
(test_data_inputs[7], fetch_post_url_from_msg_content, 'http://stackoverflow.com/a/42'),
(test_data_inputs[7], fetch_post_id_and_site_from_msg_content, ('42', 'stackoverflow.com', 'answer')),
(test_data_inputs[7], fetch_owner_url_from_msg_content, 'http://stackoverflow.com/users/0/test-test'),
(test_data_inputs[8], fetch_post_id_and_site_from_msg_content, ('27954020', 'stackoverflow.com', 'question')),
(test_data_inputs[8], fetch_owner_url_from_msg_content, 'http://stackoverflow.com/users/3754535/user3754535'),
(test_data_inputs[8], fetch_title_from_msg_content, "Why I can't insert data in a model from a custom controller?"),
(test_data_inputs[9], fetch_post_id_and_site_from_msg_content, ('27954020', 'stackoverflow.com', 'question')),
(test_data_inputs[9], fetch_owner_url_from_msg_content, None),
(test_data_inputs[9], fetch_title_from_msg_content, "Why I can't insert data in a model from a custom controller?"),
(test_data_inputs[10], fetch_post_id_and_site_from_msg_content, ('27954020', 'stackoverflow.com', 'question')),
(test_data_inputs[10], fetch_owner_url_from_msg_content, '//stackoverflow.com/users/3754535/user3754535'),
(test_data_inputs[10], fetch_title_from_msg_content, "Why I can't insert data in a model from a custom controller?"),
(test_data_inputs[11], fetch_post_id_and_site_from_msg_content, ('27954020', 'stackoverflow.com', 'question')),
(test_data_inputs[11], fetch_owner_url_from_msg_content, '//stackoverflow.com/users/3754535/user3754535'),
(test_data_inputs[11], fetch_title_from_msg_content, "Why I can't insert data in a model from a custom controller?"),
(test_data_inputs[12], fetch_post_id_and_site_from_msg_content, ('458053', 'ru.stackoverflow.com', 'question')),
(test_data_inputs[12], fetch_owner_url_from_msg_content, '//ru.stackoverflow.com/users/20555/ni55an'),
(test_data_inputs[12], fetch_title_from_msg_content, '-----------------------------'),
(test_data_inputs[13], fetch_post_id_and_site_from_msg_content, ('458053', 'ru.stackoverflow.com', 'question')),
(test_data_inputs[13], fetch_owner_url_from_msg_content, '//ru.stackoverflow.com/users/20555/ni55an'),
(test_data_inputs[13], fetch_title_from_msg_content, '-----------------------------'),
(test_data_inputs[14], fetch_post_id_and_site_from_msg_content, ('27954020', 'stackoverflow.com', 'question')),
(test_data_inputs[14], fetch_owner_url_from_msg_content, '//stackoverflow.com/users/3754535/user3754535'),
(test_data_inputs[14], fetch_title_from_msg_content, "Why I can't insert data in a model from a custom controller?"),
(test_data_inputs[15], fetch_post_id_and_site_from_msg_content, ('27954020', 'stackoverflow.com', 'question')),
(test_data_inputs[15], fetch_owner_url_from_msg_content, '//stackoverflow.com/users/3754535/user3754535'),
(test_data_inputs[15], fetch_title_from_msg_content, "Why I can't insert data in a model from a custom controller?")
])
def test_parsing(input_data, parse_method, expected):
assert parse_method(input_data.strip()) == expected
|
[
"programfox@hotmail.be"
] |
programfox@hotmail.be
|
babc707cd3d0eba6a38dddd6d813e56e3a003465
|
044c3ad600995b80aee0a8733cc518f0e5177f85
|
/Accounts1/scheduler/migrations/0002_auto_20160214_0355.py
|
fab102ae95cf5a54239757e332c6d6ab84102643
|
[] |
no_license
|
connieli4/WeddingApp
|
83c6fdb96c22be33b69f2c874df914fe22ee4ab6
|
b87a1905ec2268a405bae2d949922a3f190d89a0
|
refs/heads/master
| 2020-04-01T12:17:31.522920
| 2018-12-14T23:36:45
| 2018-12-14T23:36:45
| 153,200,142
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 564
|
py
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('scheduler', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='activity',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.DeleteModel(
name='User',
),
]
|
[
"noreply@github.com"
] |
connieli4.noreply@github.com
|
319088b0fb5a6e5458a9fe0225ff0b44d1d38204
|
eb656e579b59f93fdef965d1fe303f766fad19bd
|
/scripts/EpsilonResnetBase.py
|
92d3cd6a1117f7b4e0a7ed57c9416aba90218b3b
|
[] |
no_license
|
yuxwind/epsilonResnet
|
f8f961251f7f45ff63ed7c5ce14afd7935031e89
|
eaf5e33b767b31d60dd528b850faed919aadadc5
|
refs/heads/master
| 2021-06-26T17:22:41.644909
| 2020-09-27T22:57:11
| 2020-09-27T22:57:11
| 133,599,192
| 27
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,809
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: EpsilonResnetBase.py
# Author: Xin Yu <yuxwind@gmail.com>
import sys
sys.path.append('../../tensorpack')
from tensorpack import *
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
import tensorflow as tf
"""
Implementation of strict identity and side output in the following paper:
Learning Strict Identity Mappings in Deep Residual Networks
(https://arxiv.org/pdf/1804.01661.pdf)
"""
# implement the sparsity-promoting function with 4 ReLUs
# Usually, l is a 4-dimensional tensor: Batch_size x Width x Height x Channel
# returns 0.0 only if the absolute values of all elements in l are smaller than EPSILON
def strict_identity(l, EPSILON):
add_moving_summary(tf.reduce_max(tf.abs(l), name='response_abs_max'))
add_moving_summary(tf.reduce_mean(tf.abs(l), name='response_mean_max'))
l = tf.to_float(l)
s = tf.reduce_max(tf.nn.relu(l - EPSILON) +\
tf.nn.relu(-l - EPSILON))
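# Nested ReLU gate: when s == 0 (every response within +/- EPSILON) the inner
# ReLU yields 1 and the outer one collapses to 0; any s above ~1e-6 flips the
# gate to 1, so identity_w switches the residual branch on or off.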
identity_w = tf.nn.relu(tf.nn.relu(s * (-1000000) + 1.0) * (-1000000) + 1.0)
return identity_w
# implement side supervision at the intermediate of the network
# get cross entropy loss after layer l
def side_output(name, l, label, outdim):
prefix = 'side_output/'+name
with tf.variable_scope(prefix) as scope:
l = BNReLU('bnlast', l)
l = GlobalAvgPooling('gap', l)
logits = FullyConnected('linear', l, out_dim=outdim, nl=tf.identity)
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss')
wrong = prediction_incorrect(logits, label)
# monitor training error
add_moving_summary(tf.reduce_mean(wrong, name='train_error'))
return cost
|
[
"yuxwind@gmail.com"
] |
yuxwind@gmail.com
|
d1ea3793a74e4332420872c9502e50e122bf9b35
|
481389bd83c40bf153d9fbad904926282ebd0cfe
|
/pylib/_mypkg_foo/__init__.py
|
494a71d652ce909f16585dab7b756602d8d0552b
|
[] |
no_license
|
leisquare/study_bigdata
|
e1cd952df484c4595d8892fef779226a31ec2ea2
|
f0a048e8b3a6ffc4507efe7ee2df40cb1ac4bb34
|
refs/heads/main
| 2023-05-06T16:09:45.903354
| 2021-05-29T07:23:36
| 2021-05-29T07:23:36
| 371,617,971
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 112
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 25 16:46:19 2021
@author: tjoeun
"""
print("_mypkg_foo loaded")
|
[
"75233145+leisquare@users.noreply.github.com"
] |
75233145+leisquare@users.noreply.github.com
|
bff3c146e033147360882ee7d2fd2224e7febb18
|
f66473c5f184e7e7fe047aed7b2902338bd9a12d
|
/app/util/model.py
|
7af3e43521093e9a8905ce2faccebe67d9f5eb7e
|
[
"LicenseRef-scancode-mulanpsl-2.0-en",
"MulanPSL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
nacei/h3blog-master
|
7a220cf549fb869d602ba421e4bfd5e1b17bd2bb
|
e433c93f56be63883538b4e03198e6e4e056ecb6
|
refs/heads/master
| 2023-07-12T05:47:30.845678
| 2021-08-20T13:23:03
| 2021-08-20T13:23:03
| 398,283,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 712
|
py
|
from flask import request
def get_obj_fields(obj):
"""获取模型对象的表字段, obj或model均可"""
if obj is None:
return []
return [column.name for column in obj.__table__.columns]
def request_form_auto_fill(model) -> None:
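    # Keep only the POSTed keys that match the model's columns, then set them
    # on the model instance in place.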
data = request.form.to_dict()
if data is not None:
data = {key: value for key, value in data.items()
if key in get_obj_fields(model)}
        for key, value in data.items():
            setattr(model, key, value)
def get_request_valid_data(obj):
data = request.get_json()
if data is not None:
data = {key: value for key, value in request.get_json().items()
if key in get_obj_fields(obj)}
return data
|
[
"hanxj@foxmail.com"
] |
hanxj@foxmail.com
|
a08ec483cb563c809d060d26db913d295ae9ee97
|
d7aaa8b5ccb20cd4ed9ba81c52d276191d52afad
|
/aws_nsm_interface/structs.py
|
de665057427b3f492599e4b3fa5e5aca6a69dd68
|
[
"MIT"
] |
permissive
|
russelmahmud/aws-nsm-interface
|
edf550ddcfa903f7c8becf27154d2b3d15b0fbb1
|
ab80440abb72d2c69c755c377f72d064b0514d07
|
refs/heads/main
| 2023-02-16T03:51:33.793457
| 2021-01-19T05:22:40
| 2021-01-19T05:22:40
| 330,871,762
| 0
| 0
|
MIT
| 2021-01-19T05:16:30
| 2021-01-19T05:16:29
| null |
UTF-8
|
Python
| false
| false
| 1,103
|
py
|
"""Structs for NSM API."""
# Standard library imports
import ctypes
class IoVec(ctypes.Structure):
"""
IoVec struct for use in the NsmMessage struct.
The IoVec struct has two fields: iov_base, which is a pointer to a buffer,
and iov_len, which defines the length of the contents in the buffer.
The IoVec is used both to send data to /dev/nsm (in which case the length
of the buffer is defined by the sender) and to receive data from /dev/nsm
(in which case the length is set by /dev/nsm).
"""
iov_base: ctypes.c_void_p
iov_len: ctypes.c_size_t
_fields_ = [
('iov_base', ctypes.c_void_p),
('iov_len', ctypes.c_size_t)
]
class NsmMessage(ctypes.Structure):
"""
NsmMessage struct to interface with /dev/nsm.
The NsmMessage struct has two fields: request, which contains the data
sent to /dev/nsm, and response, which contains the data returned by /dev/nsm
after the call has completed.
"""
request: IoVec
response: IoVec
_fields_ = [
('request', IoVec),
('response', IoVec)
]
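# Illustrative sketch (not part of the original module): one way these structs
# might be populated around ctypes buffers. The helper name and the response
# capacity are assumptions; the actual ioctl call to /dev/nsm lives elsewhere.
def _example_build_message(request_bytes: bytes, response_capacity: int = 0x3000) -> NsmMessage:
    """Wrap a request payload and an empty response buffer in an NsmMessage."""
    request_buf = ctypes.create_string_buffer(request_bytes, len(request_bytes))
    response_buf = ctypes.create_string_buffer(response_capacity)
    message = NsmMessage()
    message.request = IoVec(ctypes.cast(request_buf, ctypes.c_void_p), len(request_bytes))
    message.response = IoVec(ctypes.cast(response_buf, ctypes.c_void_p), response_capacity)
    return message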
|
[
"lucvandonkersgoed@gmail.com"
] |
lucvandonkersgoed@gmail.com
|
7d721c03caa26629e29120c9c88caf4b817914fe
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/codeInsight/smartEnter/colonAfterFinalCaseClauseWithPrecedingIncompleteCaseClause.py
|
ff245238744da24b5bebf8391bf5e8c4d1ab488c
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 83
|
py
|
match x:
case 1:
pass
case
case 3:
pass
case<caret>
|
[
"intellij-monorepo-bot-no-reply@jetbrains.com"
] |
intellij-monorepo-bot-no-reply@jetbrains.com
|
6aa7e3d975d5bf066350200717a911882e17e7eb
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02572/s151845218.py
|
31aa5234e9d20d7b4ae01fd2cf130eac5d0d9908
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
# Computes sum_{i<j} A_i * A_j modulo 1e9+7 using suffix sums:
# for each i, add A_i * (A_{i+1} + ... + A_N).
N = int(input())  # number of elements
A = list(map(int, input().split()))  # input sequence A
SUMA = sum(A)  # running sum of the sequence
MOD = 10**9 + 7  # modulus
C = [0] * (N-1)  # suffix-sum array
for i in range(N-1):  # store sum_{j=i+1}^{N} A_j
    SUMA -= A[i]
    C[i] = SUMA
ans = 0  # the answer
for i in range(N-1):
    ans += A[i]*C[i]
    ans %= MOD  # reduce modulo MOD at each step
print(ans)  # output the answer
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
421db7ccdd9e41909059dc08bf8ce7640d20deee
|
65f469808cd1d408524ff70ff42d2a8f7276e805
|
/swagger-gen/python/swagger_client/models/order_res_base.py
|
ec564b12fb79e83230663a35133b7dc7b76bd787
|
[] |
no_license
|
CryptoGnome/api-connectors
|
94cabc32b8af7d9a35d72336eb4e8f919adc69ad
|
c4f5dba196b0d2bb321b5a068481f34d02ae40fa
|
refs/heads/master
| 2020-09-26T23:00:49.861037
| 2019-12-06T08:38:15
| 2019-12-06T08:38:15
| 226,361,891
| 5
| 0
| null | 2019-12-06T15:52:50
| 2019-12-06T15:52:49
| null |
UTF-8
|
Python
| false
| false
| 6,300
|
py
|
# coding: utf-8
"""
Bybit API
## REST API for the Bybit Exchange. # noqa: E501
OpenAPI spec version: 1.0.0
Contact: support@bybit.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class OrderResBase(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'ret_code': 'float',
'ret_msg': 'str',
'ext_code': 'str',
'ext_info': 'str',
'result': 'object',
'time_now': 'str'
}
attribute_map = {
'ret_code': 'ret_code',
'ret_msg': 'ret_msg',
'ext_code': 'ext_code',
'ext_info': 'ext_info',
'result': 'result',
'time_now': 'time_now'
}
def __init__(self, ret_code=None, ret_msg=None, ext_code=None, ext_info=None, result=None, time_now=None): # noqa: E501
"""OrderResBase - a model defined in Swagger""" # noqa: E501
self._ret_code = None
self._ret_msg = None
self._ext_code = None
self._ext_info = None
self._result = None
self._time_now = None
self.discriminator = None
if ret_code is not None:
self.ret_code = ret_code
if ret_msg is not None:
self.ret_msg = ret_msg
if ext_code is not None:
self.ext_code = ext_code
if ext_info is not None:
self.ext_info = ext_info
if result is not None:
self.result = result
if time_now is not None:
self.time_now = time_now
@property
def ret_code(self):
"""Gets the ret_code of this OrderResBase. # noqa: E501
:return: The ret_code of this OrderResBase. # noqa: E501
:rtype: float
"""
return self._ret_code
@ret_code.setter
def ret_code(self, ret_code):
"""Sets the ret_code of this OrderResBase.
:param ret_code: The ret_code of this OrderResBase. # noqa: E501
:type: float
"""
self._ret_code = ret_code
@property
def ret_msg(self):
"""Gets the ret_msg of this OrderResBase. # noqa: E501
:return: The ret_msg of this OrderResBase. # noqa: E501
:rtype: str
"""
return self._ret_msg
@ret_msg.setter
def ret_msg(self, ret_msg):
"""Sets the ret_msg of this OrderResBase.
:param ret_msg: The ret_msg of this OrderResBase. # noqa: E501
:type: str
"""
self._ret_msg = ret_msg
@property
def ext_code(self):
"""Gets the ext_code of this OrderResBase. # noqa: E501
:return: The ext_code of this OrderResBase. # noqa: E501
:rtype: str
"""
return self._ext_code
@ext_code.setter
def ext_code(self, ext_code):
"""Sets the ext_code of this OrderResBase.
:param ext_code: The ext_code of this OrderResBase. # noqa: E501
:type: str
"""
self._ext_code = ext_code
@property
def ext_info(self):
"""Gets the ext_info of this OrderResBase. # noqa: E501
:return: The ext_info of this OrderResBase. # noqa: E501
:rtype: str
"""
return self._ext_info
@ext_info.setter
def ext_info(self, ext_info):
"""Sets the ext_info of this OrderResBase.
:param ext_info: The ext_info of this OrderResBase. # noqa: E501
:type: str
"""
self._ext_info = ext_info
@property
def result(self):
"""Gets the result of this OrderResBase. # noqa: E501
:return: The result of this OrderResBase. # noqa: E501
:rtype: object
"""
return self._result
@result.setter
def result(self, result):
"""Sets the result of this OrderResBase.
:param result: The result of this OrderResBase. # noqa: E501
:type: object
"""
self._result = result
@property
def time_now(self):
"""Gets the time_now of this OrderResBase. # noqa: E501
:return: The time_now of this OrderResBase. # noqa: E501
:rtype: str
"""
return self._time_now
@time_now.setter
def time_now(self, time_now):
"""Sets the time_now of this OrderResBase.
:param time_now: The time_now of this OrderResBase. # noqa: E501
:type: str
"""
self._time_now = time_now
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(OrderResBase, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OrderResBase):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"simonzgx@gmail.com"
] |
simonzgx@gmail.com
|
22921a548069e69a88ed036d29bfcc83ba21a4ef
|
cd208b4a40be8bf166da79fdc126dbcb71e95a7d
|
/app/states/state_expiration_date.py
|
71b993036530fc25dce01aef9c241befa95b0714
|
[
"MIT"
] |
permissive
|
Moirted/MyPersonalKitchenBot
|
63a2b1be6e21e90ed908c9f3162bd085162cd83f
|
03de0beeaf2665e8b3ddd1709da3d4edcd422b80
|
refs/heads/main
| 2023-04-21T12:17:52.486113
| 2021-05-16T13:00:22
| 2021-05-16T13:00:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
from aiogram.dispatcher.filters.state import State, StatesGroup
class ExpirationDateState(StatesGroup):
number = State()
|
[
"nickshel@yandex.ru"
] |
nickshel@yandex.ru
|
d58122e4b9eb67fb24fb0ea49da965e4144d324c
|
8944fdc8b6fc8ab8aac7d7a69b30630dded3aac8
|
/BTS.py
|
8968a9487d0b9c1562178dd5cb964d3481cbd60a
|
[] |
no_license
|
d4nielix/BTS_simulator
|
ae62b67f3d5f355ca454afcc0fabefdf9e2b42a3
|
3858732b1d8552861e2cafaf0cd3b0e4741e3f42
|
refs/heads/master
| 2023-08-14T17:32:50.518556
| 2021-10-06T06:50:02
| 2021-10-06T06:50:02
| 414,099,451
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,498
|
py
|
from ResourceBlock import ResourceBlock as rb
from User import User
from typing import List
import logging
# from numpy.random import randint, exponential
import time
import matplotlib.pyplot as plt
from Generator import Generator
gnrt = Generator()
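# Draw from an exponential distribution, rejecting samples that round to <= 0.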
def nonzero(p: int):
tmp = round(gnrt.rndexp(p))
while tmp <= 0:
tmp = round(gnrt.rndexp(p))
return tmp
class BTS:
def __init__(self, epsilon_: float, k_: int, s_: int, l_: int, logs: logging, iterations: bool, runtime: int,
clock: int):
self.log: logging.Logger = logs.getChild(__name__)
self.iterations = iterations
self.epsilon: float = epsilon_
self.k: int = k_
self.taken_ks: int = 0
self.s: int = s_
self.l: int = l_
self.tau: float = gnrt.randminmax(1, 10)
self.lambda1 = 2
self.lambda2 = 2
self.t: float = nonzero(self.lambda1)
self.t2: float = nonzero(self.lambda2)
self.cycles: int = 0
self.user_list: List[User] = list()
self.users_served: int = 0
self.users_counter: int = 0
self.runtime = runtime * 1000
self.clock = clock
self.user_avg_time = 0
self.transmitted_data = 0
self.wait_list = []
self.user_throughput = 0
self.throughput_list = []
self.retransmissions_time = 0
self.retransmissions_time_list = []
self.retransmitted_data = 0
self.retransmissions_counter = 0
self.log.log(msg='BTS has been created', level=1)
def iteration(self):
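        # One simulation tick: spawn users on the two arrival clocks, redistribute
        # resource blocks every s cycles, refresh bitrates every tau cycles, then
        # transmit/retransmit per resource block and retire users whose data is done.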
self.log.log(msg='Number of users in user list: ' + str(len(self.user_list)), level=1)
if not self.cycles % self.t:
self.user_add()
if not self.cycles % self.t2:
self.user_add()
if not self.cycles % self.s and self.user_list:
self.distribute_resources()
self.log.log(msg='Resources has been distributed.', level=1)
if not self.cycles % self.tau and self.user_list:
self.users_update_bitrate()
self.log.log(msg='Bitrate has been updated.', level=1)
for user in self.user_list:
for block in user.user_rb_list:  # renamed from `rb` to avoid shadowing the imported class
    if block.issent:
        user.data -= block.bitrate
        self.transmitted_data += block.bitrate
        self.user_throughput += block.bitrate
        self.log.log(msg='Data has been transmitted.', level=1)
    else:
        self.retransmissions_counter += 1
        self.retransmissions_time = time.time()
        block.rb_issent()
        self.retransmitted_data += block.bitrate
        self.retransmissions_time_list.append(time.time() - self.retransmissions_time)
        self.retransmissions_time = 0
        self.log.log(msg='Updating "issent"', level=1)
if user.data <= 0:
self.users_served += 1
self.taken_ks -= len(user.user_rb_list)
self.user_avg_time += time.time() - user.start_time
self.wait_list.append(time.time() - user.start_time)
self.throughput_list.append(self.user_throughput)
self.user_throughput = 0
self.user_remove(user)
self.cycles += self.clock
def run(self):
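        # Drive the simulation until `runtime` elapses in simulated cycles, then
        # print, plot, and persist the summary statistics.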
logging.getLogger('matplotlib.font_manager').disabled = True
now = time.localtime()
sim_start = time.strftime("%H:%M:%S", now)
start_time = time.time()
while self.cycles < self.runtime:
self.iteration()
if self.iterations:
c = input("Press ENTER for next step, or type 'quit' to quit the simulator.")
if c == "quit":
break
print("Simulation started: " + sim_start)
sim_time = round((time.time() - start_time), 2)
print("Simulation took: %s seconds." % sim_time)
print("Users at all: " + str(self.users_counter))
print("Users served: " + str(self.users_served))
print("Ratio of served users to users at all: " + str(
round(self.users_served / self.users_counter * 100, 2)) + "%")
print("Finished cycles: " + str(self.cycles))
print("Average wait time for being served: %s ms." % round(self.user_avg_time / self.users_served * 1000, 2))
print("Average system throughput: " + str(round(self.transmitted_data / sim_time, 2)))
print("Average user throughput: " + str(round(self.transmitted_data / sim_time / self.users_served, 2)))
print("Average retransmissions counter: " + str(round(self.retransmissions_counter / self.users_counter, 2)))
print("Average user retransmitted data: " + str(round(self.retransmitted_data / self.users_counter, 2)))
plt.title('Average wait time for being served')
plt.xlabel('Average wait time')
plt.ylabel('Count')
plt.hist(self.wait_list, 10)
plt.show()
plt.close()
plt.title('Average user throughput')
plt.xlabel('Average user throughput')
plt.ylabel('Count')
plt.hist(self.throughput_list, bins=10)
plt.show()
plt.close()
plt.title('Average retransmissions time')
plt.xlabel('Average retransmissions time')
plt.ylabel('Count')
plt.plot(self.retransmissions_time_list)
# plt.show()
# plt.close()
with open("simdata.csv", "a", newline='') as simdata:
simdata.write(str(sim_start) + ";" + str(sim_time) + ";" + str(self.users_counter) + ";" + str(
self.users_served) + ";" + str(
round(self.users_served / self.users_counter * 100, 2)) + "%;" + str(self.cycles) + ";" + str(
round(self.user_avg_time / self.users_served * 1000, 2)) + ";" + str(
round(self.transmitted_data / sim_time, 2)) + ";" + str(
round(self.transmitted_data / sim_time / self.users_served, 2)) + ";" + str(
round(self.retransmissions_counter / self.users_counter, 2)) + ";" + str(
round(self.retransmitted_data / self.users_counter, 2)) + "\r\n")
# with open("lambdas.csv", "a", newline='') as lambdas: lambdas.write(str(self.lambda1) + ";" + str(
# self.lambda2) + ";" + str(round(self.user_avg_time / self.users_served * 1000, 2)) + "\r\n")
with open("throughput.csv", "a", newline='') as throughputfile:
throughputfile.write(str(self.throughput_list) + "\r\n")
with open("waittime.csv", "a", newline='') as waittime:
waittime.write(str(self.wait_list) + "\r\n")
def user_add(self):
self.user_list.append(User(logs=self.log, _epsilon=self.epsilon))
self.log.log(msg='New user has appeared', level=1)
self.users_counter += 1
def users_update_bitrate(self):
for user in self.user_list:
user.rb_update()
def user_remove(self, user: User):
self.user_list.remove(user)
self.log.log(msg='User has been deleted', level=1)
def distribute_resources(self):
for user in self.user_list:
if not user.has_rb():
for _ in range(self.l):
if self.taken_ks < self.k:
user.rb_add_to_list(rb(_epsilon=self.epsilon, logs=user.log))
self.taken_ks += 1
|
[
"daniel.cichosz1@gmail.com"
] |
daniel.cichosz1@gmail.com
|
79c19d888d893e972115162a390efd937500f92b
|
90f39575e1164e928359dd9afb602999bf68a71c
|
/valuenode.py
|
b747c749b403c752a05962ee9c650c90a253b6e9
|
[] |
no_license
|
sachinjose/Programming_Language
|
bcf4cbaa147f236b29be4b97936d3540b6e399fe
|
1d5749e7339a95b25ce37a93987b447e7e46e85c
|
refs/heads/main
| 2023-06-01T23:56:39.824175
| 2021-06-17T04:39:55
| 2021-06-17T04:39:55
| 374,157,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,381
|
py
|
from strings_with_arrows import *
import string
import os
import math
import constants
from error import *
from position import *
from lexer import *
from nodes import *
from rtresult import *
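# NOTE: Context, SymbolTable, Interpreter and run() are referenced below but not
# defined in this file; in the original project they appear to be supplied by the
# wildcard imports above or by the interpreter's entry module.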
########################################################
## Value
########################################################
## Base class for runtime values; tracks position/context and defines the operation interface.
class Value:
def __init__(self):
self.set_pos()
self.set_context()
def set_pos(self, pos_start = None, pos_end = None): ## track the value's position in the source for error reporting
self.pos_start = pos_start
self.pos_end = pos_end
return self
def set_context(self, context=None):##Context for error handling
self.context = context
return self
def added_to(self, other):
return None, self.illegal_operation(other)
def subbed_by(self, other):
return None, self.illegal_operation(other)
def multed_by(self, other):
return None, self.illegal_operation(other)
def dived_by(self, other):
return None, self.illegal_operation(other)
def powed_by(self, other):
return None, self.illegal_operation(other)
def get_comparison_eq(self, other):
return None, self.illegal_operation(other)
def get_comparison_ne(self, other):
return None, self.illegal_operation(other)
def get_comparison_lt(self, other):
return None, self.illegal_operation(other)
def get_comparison_gt(self, other):
return None, self.illegal_operation(other)
def get_comparison_lte(self, other):
return None, self.illegal_operation(other)
def get_comparison_gte(self, other):
return None, self.illegal_operation(other)
def anded_by(self, other):
return None, self.illegal_operation(other)
def ored_by(self, other):
return None, self.illegal_operation(other)
def notted(self):
        return None, self.illegal_operation()
def execute(self, args):
return RTResult().failure(self.illegal_operation())
def copy(self):
raise Exception('No copy method defined')
def is_true(self):
return False
def illegal_operation(self, other=None):
if not other:
other = self
return RTError(self.pos_start, other.pos_end,'Illegal operation',self.context)
class String(Value):
def __init__(self, value):
super().__init__()
self.value = value
def added_to(self, other): ##concatenate
if isinstance(other, String):
return String(self.value + other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def multed_by(self, other):##repeat the string other.values number of time
if isinstance(other, Number):
return String(self.value * other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def is_true(self):
return len(self.value) > 0
def copy(self):
copy = String(self.value)
copy.set_pos(self.pos_start, self.pos_end)
copy.set_context(self.context)
return copy
def __str__(self):
return self.value
def __repr__(self):
return f'"{self.value}"'
class Number(Value):
def __init__(self,value):
self.value = value
self.set_pos()
self.set_context()
def added_to(self,other):
if isinstance(other, Number): ##check if the value that we are operating on is a number
return Number(self.value + other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def subbed_by(self,other):
if isinstance(other, Number): ##check if the value that we are operating on is a number
return Number(self.value - other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def multed_by(self,other):
if isinstance(other, Number): ##check if the value that we are operating on is a number
return Number(self.value * other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def dived_by(self,other):
if isinstance(other, Number): ##check if the value that we are operating on is a number
if other.value == 0:
    return None, RTError(other.pos_start, other.pos_end, 'Division by zero', self.context)
return Number(self.value / other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def powed_by(self, other):
if isinstance(other, Number): ## return the power
return Number(self.value ** other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_eq(self, other):
if isinstance(other, Number): ## comparison operator ==
return Number(int(self.value == other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_ne(self, other):
if isinstance(other, Number): ## comparison for !=
return Number(int(self.value != other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_lt(self, other):
if isinstance(other, Number): ## comparison for <
return Number(int(self.value < other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_gt(self, other):
if isinstance(other, Number): ## comparison for >
return Number(int(self.value > other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_lte(self, other):
if isinstance(other, Number): ##comparison for less than or equal to <=
return Number(int(self.value <= other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_gte(self, other):
if isinstance(other, Number): ##comparison for greater than or equal to >=
return Number(int(self.value >= other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def anded_by(self, other):
if isinstance(other, Number): ##comparison for and
return Number(int(self.value and other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def ored_by(self, other):
if isinstance(other, Number): ##comparison for or
return Number(int(self.value or other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def notted(self): ##comparison for not function
if self.value == 0:
return Number(1).set_context(self.context), None
else:
return Number(0).set_context(self.context), None
def is_true(self):
return self.value != 0
def copy(self):
copy = Number(self.value)
copy.set_pos(self.pos_start, self.pos_end)
copy.set_context(self.context)
return copy
def __repr__(self):
return str(self.value)
Number.null = Number(0)
Number.true = Number(1)
Number.false = Number(0)
Number.math_PI = Number(math.pi)
class BaseFunction(Value):
def __init__(self, name):
super().__init__()
self.name = name or "<anonymous>" ##anonymous if it doesnt have a name
def generate_new_context(self): ##new context for new function
new_context = Context(self.name, self.context, self.pos_start)
new_context.symbol_table = SymbolTable(new_context.parent.symbol_table)
return new_context
def check_args(self, arg_names, args): ##check if there are the correct numbr of arguments
res = RTResult()
if len(args) > len(arg_names):
return res.failure(RTError(self.pos_start, self.pos_end,f"{len(args) - len(arg_names)} too many args passed into {self}",self.context))
if len(args) < len(arg_names):
return res.failure(RTError(self.pos_start, self.pos_end,f"{len(arg_names) - len(args)} too few args passed into {self}",self.context))
return res.success(None)
def populate_args(self, arg_names, args, exec_ctx): ##put all arguments to symbol table
for i in range(len(args)):
arg_name = arg_names[i]
arg_value = args[i]
arg_value.set_context(exec_ctx)
exec_ctx.symbol_table.set(arg_name, arg_value)
def check_and_populate_args(self, arg_names, args, exec_ctx): ##check the args and populate them
res = RTResult()
res.register(self.check_args(arg_names, args))
if res.error:
return res
self.populate_args(arg_names, args, exec_ctx)
return res.success(None)
class Function(BaseFunction):
def __init__(self, name, body_node, arg_names, should_auto_return):
super().__init__(name)
self.body_node = body_node
self.arg_names = arg_names
self.should_auto_return = should_auto_return
def execute(self, args): ##execute functions
res = RTResult()
interpreter = Interpreter()
exec_ctx = self.generate_new_context()
res.register(self.check_and_populate_args(self.arg_names, args, exec_ctx))
if res.error:
return res
value = res.register(interpreter.visit(self.body_node, exec_ctx))
if res.should_return() and res.func_return_value is None: return res
ret_value = (value if self.should_auto_return else None) or res.func_return_value or Number.null
return res.success(ret_value)
def copy(self):
copy = Function(self.name, self.body_node, self.arg_names, self.should_auto_return)
copy.set_context(self.context)
copy.set_pos(self.pos_start, self.pos_end)
return copy
def __repr__(self):
return f"<function {self.name}>"
class BuiltInFunction(BaseFunction):
def __init__(self, name):
super().__init__(name)
def execute(self, args):
res = RTResult()
exec_ctx = self.generate_new_context() ##create new exec context
method_name = f'execute_{self.name}' ##create seperate function for every
method = getattr(self, method_name, self.no_visit_method)
res.register(self.check_and_populate_args(method.arg_names, args, exec_ctx))
if res.should_return():
return res
return_value = res.register(method(exec_ctx))
if res.should_return():
return res
return res.success(return_value)
def no_visit_method(self, node, context): ## fallback when no matching execute_<name> method is defined
raise Exception(f'No execute_{self.name} method defined')
def copy(self):
copy = BuiltInFunction(self.name)
copy.set_context(self.context)
copy.set_pos(self.pos_start, self.pos_end)
return copy
def __repr__(self):
return f"<built-in function {self.name}>"
#####################################
def execute_print(self, exec_ctx):
print(str(exec_ctx.symbol_table.get('value'))) ##print from symbol table
return RTResult().success(Number.null)
execute_print.arg_names = ['value'] ## we get the arg_name methods from the method
def execute_print_ret(self, exec_ctx):
return RTResult().success(String(str(exec_ctx.symbol_table.get('value')))) ##return value that should be printed
execute_print_ret.arg_names = ['value']
def execute_input(self, exec_ctx): ## read a line of input from the user
text = input()
return RTResult().success(String(text))
execute_input.arg_names = []
def execute_input_int(self, exec_ctx):
while True:
text = input()
try:
number = int(text)
break
except ValueError:
print(f"'{text}' must be an integer. Try again!")
return RTResult().success(Number(number))
execute_input_int.arg_names = []
def execute_clear(self, exec_ctx):
os.system('cls' if os.name == 'nt' else 'clear') ## clear the terminal (cls on Windows, clear elsewhere)
return RTResult().success(Number.null)
execute_clear.arg_names = []
def execute_is_number(self, exec_ctx):
is_number = isinstance(exec_ctx.symbol_table.get("value"), Number)
return RTResult().success(Number.true if is_number else Number.false)
execute_is_number.arg_names = ["value"]
def execute_is_string(self, exec_ctx):
is_number = isinstance(exec_ctx.symbol_table.get("value"), String)
return RTResult().success(Number.true if is_number else Number.false)
execute_is_string.arg_names = ["value"]
def execute_is_list(self, exec_ctx):
is_number = isinstance(exec_ctx.symbol_table.get("value"), List)
return RTResult().success(Number.true if is_number else Number.false)
execute_is_list.arg_names = ["value"]
def execute_is_function(self, exec_ctx):
is_number = isinstance(exec_ctx.symbol_table.get("value"), BaseFunction)
return RTResult().success(Number.true if is_number else Number.false)
execute_is_function.arg_names = ["value"]
def execute_append(self, exec_ctx):
list_ = exec_ctx.symbol_table.get("list")
value = exec_ctx.symbol_table.get("value")
if not isinstance(list_, List):
return RTResult().failure(RTError(self.pos_start, self.pos_end,"First argument must be list",exec_ctx))
list_.elements.append(value)
return RTResult().success(Number.null)
execute_append.arg_names = ["list", "value"]
def execute_pop(self, exec_ctx):
list_ = exec_ctx.symbol_table.get("list")
index = exec_ctx.symbol_table.get("index")
if not isinstance(list_, List):
return RTResult().failure(RTError(
self.pos_start, self.pos_end,
"First argument must be list",
exec_ctx
))
if not isinstance(index, Number):
return RTResult().failure(RTError(
self.pos_start, self.pos_end,
"Second argument must be number",
exec_ctx
))
try:
element = list_.elements.pop(index.value)
except:
return RTResult().failure(RTError(
self.pos_start, self.pos_end,
'Element at this index could not be removed from list because index is out of bounds',
exec_ctx
))
return RTResult().success(element)
execute_pop.arg_names = ["list", "index"]
def execute_extend(self, exec_ctx):
listA = exec_ctx.symbol_table.get("listA")
listB = exec_ctx.symbol_table.get("listB")
if not isinstance(listA, List):
return RTResult().failure(RTError(
self.pos_start, self.pos_end,
"First argument must be list",
exec_ctx
))
if not isinstance(listB, List):
return RTResult().failure(RTError(
self.pos_start, self.pos_end,
"Second argument must be list",
exec_ctx
))
listA.elements.extend(listB.elements)
return RTResult().success(Number.null)
execute_extend.arg_names = ["listA", "listB"]
def execute_len(self, exec_ctx): ##length of a list
list_ = exec_ctx.symbol_table.get("list")
if not isinstance(list_, List):
return RTResult().failure(RTError(
self.pos_start, self.pos_end,
"Argument must be list",
exec_ctx
))
return RTResult().success(Number(len(list_.elements)))
execute_len.arg_names = ["list"]
def execute_run(self, exec_ctx):
fn = exec_ctx.symbol_table.get("fn") ##get file name from symbol table
if not isinstance(fn, String):##raise error if it isnt a string
return RTResult().failure(RTError(self.pos_start, self.pos_end,"Second argument must be string",exec_ctx))
fn = fn.value
try:
with open(fn, "r") as f: ##open file in readmode and assign it to variable f
script = f.read() ##script content of faile
except Exception as e:
return RTResult().failure(RTError(self.pos_start, self.pos_end,f"Failed to load script \"{fn}\"\n" + str(e),exec_ctx))
_, error = run(fn, script)
if error:
return RTResult().failure(RTError(self.pos_start, self.pos_end,f"Failed to finish executing script \"{fn}\"\n" +error.as_string(),exec_ctx))
return RTResult().success(Number.null)
execute_run.arg_names = ["fn"]
BuiltInFunction.print = BuiltInFunction("print")
BuiltInFunction.print_ret = BuiltInFunction("print_ret")
BuiltInFunction.input = BuiltInFunction("input")
BuiltInFunction.input_int = BuiltInFunction("input_int")
BuiltInFunction.clear = BuiltInFunction("clear")
BuiltInFunction.is_number = BuiltInFunction("is_number")
BuiltInFunction.is_string = BuiltInFunction("is_string")
BuiltInFunction.is_list = BuiltInFunction("is_list")
BuiltInFunction.is_function = BuiltInFunction("is_function")
BuiltInFunction.append = BuiltInFunction("append")
BuiltInFunction.pop = BuiltInFunction("pop")
BuiltInFunction.extend = BuiltInFunction("extend")
BuiltInFunction.len = BuiltInFunction("len")
BuiltInFunction.run = BuiltInFunction("run")
class List(Value):
def __init__(self, elements):
super().__init__()
self.elements = elements
def added_to(self, other):
new_list = self.copy()
new_list.elements.append(other)
return new_list, None
def subbed_by(self, other):
if isinstance(other, Number):
new_list = self.copy()
try: ## the element at this index may not exist
new_list.elements.pop(other.value)
return new_list, None
except:
return None, RTError(other.pos_start, other.pos_end,'Element at this index could not be removed from list because index is out of bounds',self.context)
else:
return None, Value.illegal_operation(self, other)
def dived_by(self, other):
if isinstance(other, Number):
try:
return self.elements[other.value], None
except:
return None, RTError(other.pos_start, other.pos_end,'Element at this index could not be retrieved from list because index is out of bounds',self.context)
else:
return None, Value.illegal_operation(self, other)
def copy(self):
copy = List(self.elements)
copy.set_pos(self.pos_start, self.pos_end)
copy.set_context(self.context)
return copy
def __repr__(self):
return f'[{", ".join([str(x) for x in self.elements])}]'
|
[
"sachinjose16@gmail.com"
] |
sachinjose16@gmail.com
|
d13d6cb9808efd0098b69557877ebcab3b1fb584
|
94a3afadfc4d89cf19cca0ee966007ae09f7f81c
|
/lnk.py
|
01c62bc706a7ba3f4163aae85cb7187a76a58f2b
|
[] |
no_license
|
devendermathu/Rahul-Amantya
|
db354ffa3d70d924030988bccf61c41b251e9662
|
70d3a2b56d2b5b604a0af7dfc8a2b1b5dd5e6d91
|
refs/heads/main
| 2023-04-17T08:58:47.469047
| 2021-04-18T12:55:18
| 2021-04-18T12:55:18
| 323,076,878
| 0
| 0
| null | 2021-04-18T13:07:28
| 2020-12-20T13:21:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,238
|
py
|
from bs4 import BeautifulSoup as bs
from requests import Session
import csv
from lxml import html
s = Session()
s.headers['User-Agent']='Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:82.0) Gecko/20100101 Firefox/82.0'
f = open('fnl_lnk', 'r',encoding='utf-8').read().split('\n')
fw = open('toyoto_data.csv','w',encoding='utf-8')
erro_lnk = open('erro_links.txt','w',encoding='utf-8')
# l = ['4RUNNER(2)','ALLEX(58)','bB(351)','C-HR(210)','DUET(3)','ESQUIRE(168)','FJ CRUISER(46)','GAIA(1)','HARRIER(543)','IPSUM(74)','KLUGER(52)','LAND CRUISER(237)','MAJESTA(1)','NADIA(2)','OPA(1)','PASSO(692)','RACTIS(598)','SAI(133)','TANK(31)','URBAN CRUISER(1)','VANGUARD(130)','WILL CYPHA(11)','YARIS(43)']
# url = 'https://www.sbtjapan.com/used-cars/toyota/{}}#listbox'
# url = "https://www.sbtjapan.com/used-cars/nissan/note/?model_code=&steering=all&drive=0&year_f=&month_f=&year_t=&month_t=&price_f=&price_t=&cc_f=0&cc_t=0&mile_f=0&mile_t=0&trans=0&savel=0&saveu=0&fuel=0&color=0&bodyLength=0&loadClass=0&engineType=0&truck_size=&location=&port=0&search_box=1&sold=&p_years=&bid_code=&pdate_f=&pdate_t=&locationIds=0&stock_ids=&d_country=76&d_port=119&ship_type=0&FreightChk=yes¤cy=2&insurance=1&sort=0&psize=100&custom_search=&p_num={}#listbox"
# url = 'https://www.sbtjapan.com/used-cars/toyota/86#listbox'
row = csv.writer(fw)
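# Scrape each listing page and pull the spec table via XPath; failed URLs are
# recorded in erro_links.txt for retry.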
for i in f[5000:]:
try:
r = s.get(i)
# soup = bs(r.content, 'html.parser')
tree = html.fromstring(r.content)
full_name = ''.join(tree.xpath('//h1//text()')).strip()
full2_name = ''.join(tree.xpath('//ul[@class="title"]//p//text()')).strip()
tran = ''.join(tree.xpath("""//th[contains(string(),"Transmission:")]//following-sibling::td//text()""")[0]).strip().replace('\n','')
year = ''.join(tree.xpath("""//th[contains(string(),"Year:")]//following-sibling::td//text()""")).strip().replace('\n','')
location = ''.join(tree.xpath("""//th[contains(string(),"Location:")]//following-sibling::td//text()""")).strip().replace('\n','')
drive = ''.join(tree.xpath("""//th[contains(string(),"Drive:")]//following-sibling::td//text()""")[0]).strip().replace('\n','')
doors = ''.join(tree.xpath("""//th[contains(string(),"Door:")]//following-sibling::td//text()""")).strip().replace('\n','')
steering = ''.join(tree.xpath("""//th[contains(string(),"Steering:")]//following-sibling::td//text()""")[0]).strip().replace('\n','')
seats = ''.join(tree.xpath("""//th[contains(string(),"Seats:")]//following-sibling::td//text()""")).strip().replace('\n','')
engine_type = ''.join(tree.xpath("""//th[contains(string(),"Engine Type:")]//following-sibling::td//text()""")[0]).strip().replace('\n','')
drive_chain = ''.join(tree.xpath("""//th[contains(string(),"Body Type:")]//following-sibling::td//text()""")).strip().replace('\n','')
fuel = ''.join(tree.xpath("""//th[contains(string(),"Fuel:")]//following-sibling::td//text()""")[0]).strip().replace('\n','')
mileage = ''.join(tree.xpath("""//th[contains(string(),"Mileage:")]//following-sibling::td//text()""")).strip().replace('\n','')
cars_weight = ''.join(tree.xpath("""//th[contains(string(),"Gross Vehicle Weight:")]//following-sibling::td//text()""")).strip().replace('\n','')
max_cars_weight = ''.join(tree.xpath("""//th[contains(string(),"Max Loading Capacity:")]//following-sibling::td//text()""")).strip().replace('\n','')
color = ''.join(tree.xpath("""//th[contains(string(),"Color:")]//following-sibling::td//text()""")).strip().replace('\n','')
model_code = ''.join(tree.xpath("""//th[contains(string(),"Model Code:")]//following-sibling::td//text()""")[0]).strip().replace('\n','')
row.writerow([
full_name,
full2_name,
tran,
year,
location,
drive,
doors,
steering,
seats,
engine_type,
drive_chain,
fuel,
mileage,
cars_weight,
max_cars_weight,
color,
model_code,
i
])
print(i)
except Exception:
    print("exception!!")
    erro_lnk.write(i + '\n')
|
[
"rahulmath846@gmail.com"
] |
rahulmath846@gmail.com
|