| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3–281) | content_id (string, len 40) | detected_licenses (list, len 0–57) | license_type (string, 2 classes) | repo_name (string, len 6–116) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 313 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 18.2k–668M, nullable) | star_events_count (int64, 0–102k) | fork_events_count (int64, 0–38.2k) | gha_license_id (string, 17 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 107 classes) | src_encoding (string, 20 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 4–6.02M) | extension (string, 78 classes) | content (string, len 2–6.02M) | authors (list, len 1) | author (string, len 0–175) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0179e7a8a04e4b16368086eadecffb7dd7768d15
|
d51010a7f51a9cb8bf307f7d6ebed8a9903cd7be
|
/backend/base/urls/product_urls.py
|
6bf8f12f47dc186906b94797e0489eb0facebea2
|
[] |
no_license
|
seidiv/ecommerce
|
d435fed53187316baf944f54632e7579372ea075
|
b5c7de1f635ec2f12213dbbe6367f890465f2f7b
|
refs/heads/master
| 2023-07-13T19:30:14.831156
| 2021-08-24T06:25:01
| 2021-08-24T06:25:01
| 392,608,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
py
|
from django.urls import path
from base.views import product_views as views
urlpatterns = [
    path('', views.getProducts, name="products"),
    path('create/', views.createProduct, name="product-create"),
    path('upload/', views.uploadImage, name="image-upload"),
    path('<str:pk>/reviews/', views.createProductReview, name="create-review"),
    path('top/', views.getTopProducts, name='top-products'),
    path('<str:pk>/', views.getProduct, name="product"),
    path('update/<str:pk>/', views.updateProduct, name="product-update"),
    path('delete/<str:pk>/', views.deleteProduct, name="product-delete"),
]
|
[
"sajadeydi8@gmail.com"
] |
sajadeydi8@gmail.com
|
6900fdaae92eb7e538bb2dc5b81957fb00c5b18e
|
b7449f1162b5fb8ea371b80ef0d99154fac35620
|
/Users/migrations/0001_initial.py
|
bf5f8dbe0ee72f9f6b0b3fab5414812eb9576641
|
[] |
no_license
|
shimaa3434/SafeBook
|
93f69e5228adeae33adfb5a21d2c666b47d1a2b6
|
8ede2f9da4f6daf224fe203454525ff3d811ed51
|
refs/heads/master
| 2022-12-27T02:01:14.987227
| 2020-10-16T18:12:49
| 2020-10-16T18:12:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,143
|
py
|
# Generated by Django 2.2.5 on 2019-10-23 00:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Friend',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('current_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='owner', to=settings.AUTH_USER_MODEL)),
                ('users', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"30625967+Shreyansh7499@users.noreply.github.com"
] |
30625967+Shreyansh7499@users.noreply.github.com
|
2d26d02539e1e4894e20095347999b7d5f48a0bd
|
bbe96b7552494b6baf4e84d6cd84fe9cb8192eb8
|
/src/data/data_class.py
|
074a6af2b213eb5e5033db83c39726fa314db432
|
[
"MIT"
] |
permissive
|
FFFinale/DeepFeatureIV
|
889fe63a5b38f92474cbf7845c9a2a920406dcdd
|
54b04e9e9e4c88d4859ea65d34ceb69dd1b58bc2
|
refs/heads/master
| 2022-12-29T16:24:50.134938
| 2020-10-17T06:26:55
| 2020-10-17T06:26:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,588
|
py
|
from typing import NamedTuple, Optional
import numpy as np
import torch
class TrainDataSet(NamedTuple):
    treatment: np.ndarray
    instrumental: np.ndarray
    covariate: Optional[np.ndarray]
    outcome: np.ndarray
    structural: np.ndarray


class TestDataSet(NamedTuple):
    treatment: np.ndarray
    covariate: Optional[np.ndarray]
    structural: np.ndarray


class TrainDataSetTorch(NamedTuple):
    treatment: torch.Tensor
    instrumental: torch.Tensor
    covariate: torch.Tensor
    outcome: torch.Tensor
    structural: torch.Tensor

    @classmethod
    def from_numpy(cls, train_data: TrainDataSet):
        covariate = None
        if train_data.covariate is not None:
            covariate = torch.tensor(train_data.covariate, dtype=torch.float32)
        return TrainDataSetTorch(treatment=torch.tensor(train_data.treatment, dtype=torch.float32),
                                 instrumental=torch.tensor(train_data.instrumental, dtype=torch.float32),
                                 covariate=covariate,
                                 outcome=torch.tensor(train_data.outcome, dtype=torch.float32),
                                 structural=torch.tensor(train_data.structural, dtype=torch.float32))

    def to_gpu(self):
        covariate = None
        if self.covariate is not None:
            covariate = self.covariate.cuda()
        return TrainDataSetTorch(treatment=self.treatment.cuda(),
                                 instrumental=self.instrumental.cuda(),
                                 covariate=covariate,
                                 outcome=self.outcome.cuda(),
                                 structural=self.structural.cuda())


class TestDataSetTorch(NamedTuple):
    treatment: torch.Tensor
    covariate: torch.Tensor
    structural: torch.Tensor

    @classmethod
    def from_numpy(cls, test_data: TestDataSet):
        covariate = None
        if test_data.covariate is not None:
            covariate = torch.tensor(test_data.covariate, dtype=torch.float32)
        return TestDataSetTorch(treatment=torch.tensor(test_data.treatment, dtype=torch.float32),
                                covariate=covariate,
                                structural=torch.tensor(test_data.structural, dtype=torch.float32))

    def to_gpu(self):
        covariate = None
        if self.covariate is not None:
            covariate = self.covariate.cuda()
        return TestDataSetTorch(treatment=self.treatment.cuda(),
                                covariate=covariate,
                                structural=self.structural.cuda())
|
[
""
] | |
07770f3574d74405c9660790d89873ae61cebd92
|
b2e2277208f22fdd1654e7a2a19d49a0bdcb0ef6
|
/twitterstream3.py
|
0e5a9245d51619a2176e62ef1002a82e392e7b3c
|
[] |
no_license
|
13537875570/General-Urban-Evaluation
|
504d3fa3c32f69940c454f13ac401be12d3d03ea
|
513922d01d5b23ba9244f3704dab5d0793ecf165
|
refs/heads/master
| 2020-10-02T10:25:24.572538
| 2019-12-13T05:19:05
| 2019-12-13T05:19:05
| 227,756,183
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,094
|
py
|
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
import time
consumerkey='qgEq1bQqaPBtE9MUe9iXjel5J'
consumersecret='gZOzN5oQswfcfqkdTzLd49DgibiCKdVNY2hYuzQakwX4GYCnIR'
accesstoken='2780294182-MvbzCoYYsdiCgr5I2tzT9FSbqObkQhaYfbNlSA9'
accesssecret='kR7TQ3yNkCkArHVwrzxgNUUjGelDejEfJBocMB0gw2ke1'
class listener(StreamListener):
    def on_data(self, data):
        try:
            if 'virginia' in data:
                print(data)
                saveFile = open('twitDB3.csv', 'a')
                saveFile.write(data)
                saveFile.write('\n')
                saveFile.close()
            return True
        except BaseException as e:
            print('failed ondata,', str(e))
            time.sleep(5)

    def on_error(self, status):
        print(status)
auth=OAuthHandler(consumerkey,consumersecret)
auth.set_access_token(accesstoken,accesssecret)
twitterstream=Stream(auth,listener())
twitterstream.filter(track=["car"])
|
[
"noreply@github.com"
] |
noreply@github.com
|
2da5ce9852293d22aeae8c7605f8082ca24e70ee
|
1ba58b17f33122abf4236e9e430a51d375e0eb53
|
/km73/Zeleniy_Dmytro/4/task9.py
|
8b6465f8fc2d6fdbe15585e505253054fa9dbeed
|
[] |
no_license
|
igortereshchenko/amis_python
|
c4f8d86b88ab036d08ff0ce35c9b42ebeabecc42
|
c6f0f2a70c82d5f269b3078eb296f82271b5bb10
|
refs/heads/master
| 2021-10-22T16:21:19.990650
| 2017-11-01T07:26:54
| 2017-11-01T07:26:54
| 104,785,028
| 0
| 139
| null | 2020-04-21T21:27:09
| 2017-09-25T18:11:42
|
Python
|
UTF-8
|
Python
| false
| false
| 635
|
py
|
start_row = int(input("Enter start row: "))
start_column = int(input("Enter start column: "))
finish_row = int(input("Enter finish row: "))
finish_column = int(input("Enter finish column: "))
# Check that both squares are on the 8x8 board and lie on a common diagonal.
if (start_row > 0 and start_row <= 8
        and start_column > 0 and start_column <= 8
        and finish_row > 0 and finish_row <= 8
        and finish_column > 0 and finish_column <= 8):
    if (abs(start_row - start_column) == abs(finish_row - finish_column)
            or (start_column + start_row) == (finish_column + finish_row)):
        answer = "Yes"
    else:
        answer = "No"
else:
    answer = "NOT CORRECT DATA!"
print(answer)
|
[
"dzeleniy9@gmail.com"
] |
dzeleniy9@gmail.com
|
df82e709433df0b153edd7d9aea14060851ad2cf
|
c31c8095ce4d4e9686e3e7ad6b004342e49671fa
|
/forum/classes/archives/CLASS_Lieu.py
|
c5b8db114583e2f045264fd8b45f2735706e116e
|
[] |
no_license
|
Lionalisk/arrakambre
|
7bcc96dea2ca2a471572bfb1646256f1382ce25b
|
2caece9be5eebf21ddfa87a6c821c32b5d5019a2
|
refs/heads/master
| 2020-12-07T19:31:24.471090
| 2020-01-09T10:14:29
| 2020-01-09T10:14:29
| 232,782,172
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,184
|
py
|
from django.db import models
from forum.models import Maison
from forum.classes.CLASS_Perso import *
print('BBBB')
class Lieu(models.Model):
    nom = models.CharField(max_length=100, unique=True)
    description = models.TextField(default='')
    image = models.CharField(max_length=40, default='lieu_none.jpg')
    maison = models.ForeignKey(Maison, verbose_name="Maison", null=True, on_delete=models.SET_NULL, blank=True)
    passages = models.ManyToManyField('self', blank=True)
    lieu_parent = models.ForeignKey('self', verbose_name="Lieu", null=True, on_delete=models.DO_NOTHING, blank=True)
    dissimulation = models.SmallIntegerField(default=0)
    defense_garde = models.SmallIntegerField(default=0)
    defense_assault = models.SmallIntegerField(default=0)
    defense_intrusion = models.SmallIntegerField(default=0)
    perso_autorise = models.ManyToManyField('Perso', blank=True, related_name='persos_autorises')  # list of characters the master of the place has authorized to enter
    secret = models.BooleanField(default=False)
    proprietaire = models.ForeignKey('Perso', null=True, on_delete=models.SET_NULL, blank=True, related_name='proprietaire')
    #action =

    def __str__(self):
        return self.nom
|
[
"lionel.varaire@free.fr"
] |
lionel.varaire@free.fr
|
e3a72bd363e5d37a2c58181444706b9ab6a4d68f
|
6170d8451dffcbf0b0a3d5606b33ab4467070640
|
/Python/SCRIPTS/Python/UPDATA/host_seedfile.py
|
c6df67df9f72f0cd44fdc4e8b81b7ffb5d7cca35
|
[] |
no_license
|
jb26444/lazynet
|
5ab848d998e0ddb23dc8362596ac06b47c0315cb
|
6a39ed09e36e5deeca21714ce133f938dec7bf3d
|
refs/heads/master
| 2021-05-01T16:18:25.804995
| 2018-03-17T21:39:24
| 2018-03-17T21:39:24
| 121,050,914
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
#
#
# Add the host addresses you want to log into
#
#network_devices = ['x.x.x.1', 'x.x.x.2', 'x.x.x.3', 'x.x.x.4']
#
#network_devices = ['10.205.205.1', '10.205.205.2', '10.205.205.3', '10.205.205.4', '10.205.205.5', '10.205.205.6','10.205.205.7','10.205.205.8', '10.205.205.9', '10.205.205.10']
network_devices = ['10.205.7.10', '10.205.7.11']
|
[
"jan.blahuta@gmail.com"
] |
jan.blahuta@gmail.com
|
25358c69ffe54ebd92674d7e9523c7316fbd6bfc
|
aa86784b429c01778342dc60e29d3b3a1356d481
|
/Exam2675.py
|
18930967c09b0007213753c803bc5e3288396149
|
[] |
no_license
|
kayjayk/algorithm-solution
|
7997be6143c84a72ab9ff3bb59d03e9037bc1b29
|
e529b7aa1cfa2c261a3adaedc2d8c3809003bf39
|
refs/heads/master
| 2022-12-10T17:53:17.033617
| 2020-09-05T17:19:18
| 2020-09-05T17:19:18
| 269,100,954
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
import sys
T = int(sys.stdin.readline())
for i in range(T):
    tmp = sys.stdin.readline().split(sep=' ')
    R = int(tmp[0])
    S = tmp[1].rstrip()
    P = ''
    for j in range(len(S)):
        P += S[j]*R
    print(P)
|
[
"noreply@github.com"
] |
noreply@github.com
|
fca24cecd75975e7ff41a3ea139467d5f9774921
|
7177a8a9eb2030fa871f19f36144b7d055d5c5b3
|
/main.py
|
e19f7da7cfa27a35074ea8d14b9b789db4f37925
|
[] |
no_license
|
Ziyu98/YOLOv3
|
5efb2bc809917041093cf61bfb7d52acbacb9fd7
|
4dfe50cf4a83bf0dde83ec3de8f6995461d6ce12
|
refs/heads/master
| 2021-01-06T19:26:56.256263
| 2020-02-18T20:32:17
| 2020-02-18T20:32:17
| 241,459,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,454
|
py
|
from __future__ import division, print_function
import tensorflow as tf
import numpy as np
import argparse
import cv2
import time
import shapely.geometry as sg
import shapely.ops as so
import math
import os
from utils.misc_utils import parse_anchors, read_class_names
from utils.nms_utils import gpu_nms
from utils.plot_utils import get_color_table, plot_one_box
from utils.data_aug import letterbox_resize
from shapely.geometry import Polygon
from model import yolov3
parser = argparse.ArgumentParser(description="YOLO-V3 video test procedure.")
parser.add_argument("input_video", type=str,
help="The path of the input video.")
parser.add_argument("--anchor_path", type=str, default="./data/yolo_anchors.txt",
help="The path of the anchor txt file.")
parser.add_argument("--new_size", nargs='*', type=int, default=[416, 416],
help="Resize the input image with `new_size`, size format: [width, height]")
parser.add_argument("--letterbox_resize", type=lambda x: (str(x).lower() == 'true'), default=True,
help="Whether to use the letterbox resize.")
parser.add_argument("--class_name_path", type=str, default="./data/coco.names",
help="The path of the class names.")
parser.add_argument("--restore_path", type=str, default="./data/darknet_weights/yolov3.ckpt",
help="The path of the weights to restore.")
parser.add_argument("--save_video", type=lambda x: (str(x).lower() == 'true'), default=False,
help="Whether to save the video detection results.")
args = parser.parse_args()
args.anchors = parse_anchors(args.anchor_path)
args.classes = read_class_names(args.class_name_path)
args.num_class = len(args.classes)
color_table = get_color_table(args.num_class)
vid = cv2.VideoCapture(args.input_video)
video_frame_cnt = int(vid.get(7))
video_width = int(vid.get(3))
video_height = int(vid.get(4))
video_fps = int(vid.get(5))
if args.save_video:
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
videoWriter = cv2.VideoWriter('video_result.mp4', fourcc, video_fps, (video_width, video_height))
#if os.path.exists("percentage.txt"):
# os.remove("percentage.txt")
#if os.path.exists("info_black_width_100_v1.txt"):
# os.remove("info_black_width_100_v1.txt")
with tf.Session() as sess:
input_data = tf.placeholder(tf.float32, [1, args.new_size[1], args.new_size[0], 3], name='input_data')
yolo_model = yolov3(args.num_class, args.anchors)
with tf.variable_scope('yolov3'):
l1, l3, l5, l7, l9, l11, f_m_1, f_m_2, f_m_3 = yolo_model.forward(input_data, False)
pred_feature_maps = f_m_1, f_m_2, f_m_3
pred_boxes, pred_confs, pred_probs = yolo_model.predict(pred_feature_maps)
pred_scores = pred_confs * pred_probs
boxes, scores, labels = gpu_nms(pred_boxes, pred_scores, args.num_class, max_boxes=200, score_thresh=0.3, nms_thresh=0.45)
saver = tf.train.Saver()
saver.restore(sess, args.restore_path)
#fileper=open("percentage.txt","a")
info_new=open("verify_file.txt","a")
for i in range(video_frame_cnt):
ret, img_ori = vid.read()
height_ori, width_ori = img_ori.shape[:2]
size=height_ori*width_ori
if args.letterbox_resize:
img, resize_ratio, dw, dh = letterbox_resize(img_ori, args.new_size[0], args.new_size[1])
else:
height_ori, width_ori = img_ori.shape[:2]
img = cv2.resize(img_ori, tuple(args.new_size))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.asarray(img, np.float32)
img = img[np.newaxis, :] / 255.
start_time = time.time()
filen1=open('res_n1/n1_{}.txt'.format(i+1),'a')
filen3=open('res_n3/n3_{}.txt'.format(i+1),'a')
filen5=open('res_n5/n5_{}.txt'.format(i+1),'a')
filer1=open('res_r1/r1_{}.txt'.format(i+1),'a')
filer2=open('res_r2/r2_{}.txt'.format(i+1),'a')
filer3=open('res_r3/r3_{}.txt'.format(i+1),'a')
filef1=open('res_f1/f1_{}.txt'.format(i+1),'a')
filef2=open('res_f2/f2_{}.txt'.format(i+1),'a')
filef3=open('res_f3/f3_{}.txt'.format(i+1),'a')
print("********",i,"-th frame")
n1, n3, n5, r1, r2, r3, f1, f2, f3 = sess.run([l1, l3, l5, l7, l9, l11, f_m_1, f_m_2, f_m_3],feed_dict={input_data: img})
f_total = f1, f2, f3
data1=n1[0]
filen1.write('# Array shape: {0}'.format(data1.shape))
for data_slice in data1:
np.savetxt(filen1,data_slice,fmt='%.3f')
filen1.write('# New slice')
data3=n3[0]
filen3.write('# Array shape: {0}'.format(data3.shape))
for data_slice in data3:
np.savetxt(filen3,data_slice,fmt='%.3f')
filen3.write('# New slice')
data5=n5[0]
filen5.write('# Array shape: {0}'.format(data5.shape))
for data_slice in data5:
np.savetxt(filen5,data_slice,fmt='%.3f')
filen5.write('# New slice')
data7=r1[0]
filer1.write('# Array shape: {0}'.format(data7.shape))
for data_slice in data7:
np.savetxt(filer1,data_slice,fmt='%.3f')
filer1.write('# New slice')
data9=r2[0]
filer2.write('# Array shape: {0}'.format(data9.shape))
for data_slice in data9:
np.savetxt(filer2,data_slice,fmt='%.3f')
filer2.write('# New slice')
data11=r3[0]
filer3.write('# Array shape: {0}'.format(data11.shape))
for data_slice in data11:
np.savetxt(filer3,data_slice,fmt='%.3f')
filer3.write('# New slice')
data_f1=f1[0]
filef1.write('# Array shape: {0}'.format(data_f1.shape))
for data_slice in data_f1:
np.savetxt(filef1,data_slice,fmt='%.3f')
filef1.write('# New slice')
data_f2=f2[0]
filef2.write('# Array shape: {0}'.format(data_f2.shape))
for data_slice in data_f2:
np.savetxt(filef2,data_slice,fmt='%.3f')
filef2.write('# New slice')
data_f3=f3[0]
filef3.write('# Array shape: {0}'.format(data_f3.shape))
for data_slice in data_f3:
np.savetxt(filef3,data_slice,fmt='%.3f')
filef3.write('# New slice')
filen1.close()
filen3.close()
filen5.close()
filer1.close()
filer2.close()
filer3.close()
filef1.close()
filef2.close()
filef3.close()
boxes_, scores_, labels_ = sess.run([boxes, scores, labels], feed_dict={input_data: img})
#boxes_, scores_, labels_ = [], [] ,[] #sess.run([boxes, scores, labels], feed_dict={input_data: img})
end_time = time.time()
# rescale the coordinates to the original image
if args.letterbox_resize:
boxes_[:, [0, 2]] = (boxes_[:, [0, 2]] - dw) / resize_ratio
boxes_[:, [1, 3]] = (boxes_[:, [1, 3]] - dh) / resize_ratio
else:
boxes_[:, [0, 2]] *= (width_ori/float(args.new_size[0]))
boxes_[:, [1, 3]] *= (height_ori/float(args.new_size[1]))
boxes_[boxes_< 0] = 0
count=i+1
#get information on boxes
res=np.arange(len(labels_)*7).reshape(len(labels_), 7)
res=res.astype(np.float32)
res[:,0]=np.around(np.ones(len(labels_))*count,decimals=0)
res[:,1]=np.around(labels_,decimals=0)
res[:,2]=np.around(scores_,decimals=3)
res[:,3:7]=np.around(boxes_,decimals=3)
#print(res)
np.savetxt(info_new,res,fmt='%.3f')
#height_ori, width_ori = img_ori.shape[:2]
#print("Loop Time:", (end_time_loop - start_time_loop) * 1000)
#print("scores:")
#print(scores_)
"""print(r1)"""
"""for i in range(len(boxes_)):
x0, y0, x1, y1 = boxes_[i]
plot_one_box(img_ori, [x0, y0, x1, y1], label=args.classes[labels_[i]] + ', {:.2f}%'.format(scores_[i] * 100), color=color_table[labels_[i]])
cv2.putText(img_ori, '{:.2f}ms'.format((end_time - start_time) * 1000), (40, 40), 0,
fontScale=1, color=(0, 255, 0), thickness=2)
cv2.imshow('image', img_ori)"""
if args.save_video:
videoWriter.write(img_ori)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
#fileper.close()
info_new.close()
vid.release()
if args.save_video:
videoWriter.release()
|
[
"noreply@github.com"
] |
noreply@github.com
|
c04720b7f2c90ddef000767741021aff00156ee6
|
f05a08881b606d593bb76fa725d62187fb8e6cc0
|
/cache_ensembl/cache_ensembl_version.py
|
ddb8c6109f3c0db85deb10e5082eaa4b9b65cad7
|
[] |
no_license
|
bunbun/cache-ensembl
|
6cf109dd0a9f6dad15744d4583ab701f7bda5a35
|
02ce50016321fecb5f9f784c63ce4f8e5066d74b
|
refs/heads/master
| 2021-01-23T13:58:36.493124
| 2011-12-06T21:45:04
| 2011-12-06T21:45:04
| 32,793,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,355
|
py
|
#!/usr/bin/env python
################################################################################
#
# version.py
#
#
# Copyright (c) 11/3/2010 Leo Goodstadt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#################################################################################
__version__ = "1.0"
|
[
"bunbun68@localhost"
] |
bunbun68@localhost
|
13fc2d742161aea7d1a51f351cac30e21bcd181e
|
172eb751b879d36d95b04d81db87a501cd18d8a1
|
/ImageNetGroundTruth/utils.py
|
ade17f70a1d804e602c97056b666102575e5f3e0
|
[] |
no_license
|
byh1321/Pruning_Quantization_Estimation
|
447bd3d806fe17611d665e56d7796af4e05ee400
|
772969c5a58259e387c88829dd936274199212e8
|
refs/heads/master
| 2023-05-03T19:25:29.957732
| 2021-06-03T17:53:38
| 2021-06-03T17:53:38
| 289,804,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,298
|
py
|
'''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
- msr_init: net parameter initialization.
- progress_bar: progress bar mimic xlua.progress.
'''
import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.nn.init as init
import scipy.misc
from scipy import ndimage
import numpy as np
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:,i,:,:].mean()
std[i] += inputs[:,i,:,:].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
def init_params(net):
'''Init layer parameters.'''
for m in net.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal(m.weight, mode='fan_out')
if m.bias:
init.constant(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant(m.weight, 1)
init.constant(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal(m.weight, std=1e-3)
if m.bias:
init.constant(m.bias, 0)
_, term_width = os.popen('stty size', 'r').read().split()
term_width = int(term_width)
TOTAL_BAR_LENGTH = 40.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(' Step: %s' % format_time(step_time))
L.append(' | Tot: %s' % format_time(tot_time))
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
sys.stdout.write(' ')
# Go back to the center of the bar.
for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
##########################################################################
# Codes under this line is written by YH.Byun
def print_4Dtensor_to_png(tensor, filename):
npimg = np.array(tensor,dtype=float)
npimg = npimg.squeeze(0)
scipy.misc.toimage(npimg).save(filename+".png")
def genblurkernel(sigma):
order = 0
radius = int(4 * float(sigma) + 0.5)
kernel = scipy.ndimage.filters._gaussian_kernel1d(sigma, order, radius)
return kernel
def setMask(net, area, val):
mask = maskGen(net)
for i in range(len(mask)):
num_filter = mask[i].size()[0]
depth = mask[i].size()[1]
if len(mask[i].size()) == 2:
if i == (len(mask)-1):
mask[i][:,0:int(depth*area)] = val
#print(mask[i].size())
#print('0, ',depth*area)
else:
mask[i][0:int(num_filter*area),0:int(depth*area)] = val
#print(mask[i].size())
#print(num_filter*area,',',depth*area)
elif len(mask[i].size()) == 4:
if i == 0:
mask[i][0:int(num_filter*area),:,:,:] = val
#print(mask[i].size())
#print(num_filter*area,',0,0,0')
else:
mask[i][0:int(num_filter*area),0:int(depth*area),:,:] = val
#print(mask[i].size())
#print(num_filter*area,',',depth*area,',0,0')
return mask
def saveInitialParameter(net, initparam):
net_param = []
for m in net.modules():
if isinstance(m, nn.Conv2d):
net_param.append(m.weight.data)
elif isinstance(m, nn.Linear):
net_param.append(m.weight.data)
torch.save(net_param, initparam)
print("saving initial parameters")
def quantize(net, pprec):
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.weight.data = torch.round(m.weight.data / (2 ** -(pprec))) * (2 ** -(pprec))
m.weight.data = torch.clamp(m.weight.data, -2, 2 - 2**(-pprec))
elif isinstance(m, nn.Linear):
m.weight.data = torch.round(m.weight.data / (2 ** -(pprec))) * (2 ** -(pprec))
m.weight.data = torch.clamp(m.weight.data, -2, 2 - 2**(-pprec))
return net
def printLayers(net):
for m in net.modules():
if isinstance(m, nn.Conv2d):
print(m)
elif isinstance(m, nn.Linear):
print(m)
def maskGen(net, isbias=0, isempty = 1):
mask = []
if isempty:
for m in net.modules():
if isinstance(m, nn.Conv2d):
mask.append(torch.zeros(m.weight.data.size()))
if isbias == 1:
mask.append(torch.zeros(m.bias.data.size()))
#print(torch.zeros(m.weight.data.size()).size())
elif isinstance(m, nn.Linear):
mask.append(torch.zeros(m.weight.data.size()))
if isbias == 1:
mask.append(torch.zeros(m.bias.data.size()))
#print(torch.zeros(m.weight.data.size()).size())
else:
for m in net.modules():
if isinstance(m, nn.Conv2d):
mask.append(torch.ones(m.weight.data.size()))
if isbias == 1:
mask.append(torch.ones(m.bias.data.size()))
#print(torch.ones(m.weight.data.size()).size())
elif isinstance(m, nn.Linear):
mask.append(torch.ones(m.weight.data.size()))
if isbias == 1:
mask.append(torch.zeros(m.bias.data.size()))
#print(torch.ones(m.weight.data.size()).size())
return mask
def pruneNetwork(net, mask):
index = 0
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.weight.grad.data = torch.mul(m.weight.grad.data,mask[index].cuda())
m.weight.data = torch.mul(m.weight.data,mask[index].cuda())
index += 1
elif isinstance(m, nn.Linear):
m.weight.grad.data = torch.mul(m.weight.grad.data,mask[index].cuda())
m.weight.data = torch.mul(m.weight.data,mask[index].cuda())
index += 1
return net
def paramsGet(net):
index = 0
for m in net.modules():
if isinstance(m, nn.Conv2d):
if index == 0:
params = m.weight.view(-1,)
index += 1
else:
params = torch.cat((params,m.weight.view(-1,)),0)
index += 1
elif isinstance(m, nn.Linear):
params = torch.cat((params,m.weight.view(-1,)),0)
index += 1
return params
def findThreshold(params, pr):
thres=0
while 1:
tmp = (torch.abs(params.data)<thres).type(torch.FloatTensor)
result = torch.sum(tmp)/params.size()[0]
if (pr/100)<result:
#print("threshold : {}".format(thres))
return thres
else:
thres += 0.0001
#def findThreshold(params, pr):
# params_sorted, indice = torch.sort(params)
# index = int(pr * params.size()[0] / 100)
# print(params_sorted[13228760])
# print(params.size())
# print(index)
# return params_sorted[index].item()
def getPruningMask(net, thres):
index = 0
mask = []
for m in net.modules():
if isinstance(m, nn.Conv2d):
mask.append((torch.abs(m.weight.data)>thres).type(torch.FloatTensor))
index += 1
elif isinstance(m, nn.Linear):
mask.append((torch.abs(m.weight.data)>thres).type(torch.FloatTensor))
index += 1
return mask
def netMaskMul(net, mask, isbias=0, isbatch=0):
index = 0
if isbatch:
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
m.weight.data = torch.mul(m.weight.data,mask[index].cuda())
index += 1
m.bias.data = torch.mul(m.bias.data,mask[index].cuda())
index += 1
else:
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.weight.data = torch.mul(m.weight.data,mask[index].cuda())
index += 1
if isbias == 1:
m.bias.data = torch.mul(m.bias.data,mask[index].cuda())
index += 1
elif isinstance(m, nn.Linear):
m.weight.data = torch.mul(m.weight.data,mask[index].cuda())
index += 1
if isbias == 1:
m.bias.data = torch.mul(m.bias.data,mask[index].cuda())
index += 1
return net
def addNetwork(net, net2, isbias=0):
index = 0
mask = saveNetwork(net2, isbias)
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.weight.data = torch.add(m.weight.data,mask[index].cuda())
index += 1
if isbias:
m.bias.data = torch.add(m.bias.data,mask[index].cuda())
index += 1
elif isinstance(m, nn.Linear):
m.weight.data = torch.add(m.weight.data,mask[index].cuda())
index += 1
if isbias:
m.bias.data = torch.add(m.bias.data,mask[index].cuda())
index += 1
return net
def netMaskAdd(net, mask, isbias=0, isbatch=0):
index = 0
if isbatch:
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
m.weight.data = torch.add(m.weight.data,mask[index].cuda())
index += 1
m.bias.data = torch.add(m.bias.data,mask[index].cuda())
index += 1
else:
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.weight.data = torch.add(m.weight.data,mask[index].cuda())
index += 1
if isbias == 1:
m.bias.data = torch.add(m.bias.data,mask[index].cuda())
index += 1
elif isinstance(m, nn.Linear):
m.weight.data = torch.add(m.weight.data,mask[index].cuda())
index += 1
if isbias == 1:
m.bias.data = torch.add(m.bias.data,mask[index].cuda())
index += 1
return net
def saveNetwork(net, isbias=0):
mask = []
for m in net.modules():
if isinstance(m, nn.Conv2d):
mask.append(m.weight.data)
if isbias:
mask.append(m.bias.data)
elif isinstance(m, nn.Linear):
mask.append(m.weight.data)
if isbias:
mask.append(m.bias.data)
return mask
def saveBatch(net, isempty=0):
mask = []
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
if isempty:
mask.append(torch.zeros(m.weight.size()))
mask.append(torch.zeros(m.bias.size()))
else:
mask.append(m.weight.data)
mask.append(m.bias.data)
return mask
def printLayerName(net):
index = 0
for m in net.modules():
if isinstance(m, nn.Conv2d):
print(index, " : Conv2d layer, ", m.weight.size())
index += 1
elif isinstance(m, nn.Linear):
print(index, " : FC layer, ", m.weight.size())
index += 1
elif isinstance(m, nn.BatchNorm2d):
print(index, " : BatchNorm2d layer, ", m.weight.size())
index += 1
return net
def freezeNetwork(net):
for m in net.modules():
if isinstance(m, nn.Conv2d):
for param in m.parameters():
param.requires_grad = False
elif isinstance(m, nn.Linear):
for param in m.parameters():
param.requires_grad = False
elif isinstance(m, nn.BatchNorm2d):
for param in m.parameters():
param.requires_grad = False
return net
def absorb_bn(module, bn_module):
w = module.weight.data
if module.bias is None:
zeros = torch.Tensor(module.out_channels).zero_().type(w.type())
module.bias = nn.Parameter(zeros)
b = module.bias.data
invstd = bn_module.running_var.clone().add_(bn_module.eps).pow_(-0.5)
w.mul_(invstd.view(w.size(0), 1, 1, 1).expand_as(w))
b.add_(-bn_module.running_mean).mul_(invstd)
if bn_module.affine:
w.mul_(bn_module.weight.data.view(w.size(0), 1, 1, 1).expand_as(w))
b.mul_(bn_module.weight.data).add_(bn_module.bias.data)
bn_module.register_buffer('running_mean', torch.zeros(module.out_channels).cuda())
bn_module.register_buffer('running_var', torch.ones(module.out_channels).cuda())
bn_module.register_parameter('weight', None)
bn_module.register_parameter('bias', None)
bn_module.affine = False
def is_bn(m):
return isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d)
def is_absorbing(m):
return isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear)
def search_absorbe_bn(model):
prev = None
for m in model.children():
if is_bn(m) and is_absorbing(prev):
m.absorbed = True
absorb_bn(prev, m)
search_absorbe_bn(m)
prev = m
#swap bias in net with bias in net2
def swapBias(net, net2):
mask_bias = saveBias(net2)
mask_bias_null = saveBias(net2, isempty=1)
index = 0
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.bias.data = torch.mul(m.bias.data,mask_bias_null[index].cuda())
m.bias.data = torch.add(m.bias.data,mask_bias[index].cuda())
index += 1
elif isinstance(m, nn.Linear):
m.bias.data = torch.mul(m.bias.data,mask_bias_null[index].cuda())
m.bias.data = torch.add(m.bias.data,mask_bias[index].cuda())
index += 1
return net
def saveBias(net, isempty=0):
mask = []
for m in net.modules():
if isinstance(m, nn.Conv2d):
if isempty:
mask.append(torch.zeros(m.bias.data.size()))
else:
mask.append(m.bias.data)
elif isinstance(m, nn.Linear):
if isempty:
mask.append(torch.zeros(m.bias.data.size()))
else:
mask.append(m.bias.data)
return mask
def concatMask(mask1, mask2):
index = 0
for i in range(len(mask1)):
mask1[index] = ((mask1[index] + mask2[index]) != 0).type(torch.FloatTensor)
index += 1
return mask1
def getExtendedMask(mask):
index = torch.FloatTensor()
for i in range(len(mask)):
if mask[i].dim() == 4:
mask_size = mask[i].size()[0] * mask[i].size()[1] * mask[i].size()[2] * mask[i].size()[3]
if mask[i].size()[2] == 1:
if mask[i].size()[1] % 3 == 1:
index_for_print = torch.zeros(mask[i].size()[0], mask[i].size()[1]+2,1,1)
index_for_print[:,:-2,:,:] = mask[i].data
elif mask[i].size()[1] % 3 == 2:
index_for_print = torch.zeros(mask[i].size()[0], mask[i].size()[1]+1,1,1)
index_for_print[:,:-1,:,:] = mask[i].data
else:
index_for_print = mask[i].data
index_for_print = index_for_print.view(-1,3)
index_for_print = (torch.sum(index_for_print, dim=1) != 0).type(torch.FloatTensor)
index = torch.cat((index, index_for_print),0)
else:
index_for_print = mask[i].data
index_for_print = index_for_print.view(-1,3)
index_for_print = (torch.sum(index_for_print, dim=1) != 0).type(torch.FloatTensor)
index = torch.cat((index, index_for_print),0)
else:
mask_size = mask[i].size()[0] * mask[i].size()[1]
index_for_print = torch.zeros(mask[i].size()[0], mask[i].size()[1] + 1)
index_for_print[:,:-1] = mask[i].data
index_for_print = index_for_print.view(-1,3)
index_for_print = (torch.sum(index_for_print, dim=1) != 0).type(torch.FloatTensor)
index = torch.cat((index, index_for_print),0)
return index
def quantBatch(net, intbit, pprec):
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
m.running_var.data = torch.round(m.running_var.data / (2 ** -(pprec))) * (2 ** -(pprec))
m.running_var.data = torch.clamp(m.running_var.data, max=1, min=2**(-intbit))
m.weight.data = torch.round(m.weight.data / (2 ** -(15))) * (2 ** -(15))
m.weight.data = torch.clamp(m.weight.data,-(2) ** intbit, 2 ** intbit)
m.bias.data = torch.round(m.bias.data / (2 ** -(pprec))) * (2 ** -(pprec))
m.bias.data = torch.clamp(m.bias.data,-(2) ** intbit, 2 ** intbit)
m.running_mean.data = torch.round(m.running_mean.data / (2 ** -(pprec))) * (2 ** -(pprec))
m.running_mean.data = torch.clamp(m.running_mean.data,-(2) ** intbit, 2 ** intbit)
return net
def swapBiasandBatch(net, net2):
mask_bias = saveBias(net2, isbatch=1)
mask_bias_null = saveBias(net2, isempty=1, isbatch=1)
index = 0
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.bias.data = torch.mul(m.bias.data,mask_bias_null[index].cuda())
m.bias.data = torch.add(m.bias.data,mask_bias[index].cuda())
index += 1
elif isinstance(m, nn.Linear):
m.bias.data = torch.mul(m.bias.data,mask_bias_null[index].cuda())
m.bias.data = torch.add(m.bias.data,mask_bias[index].cuda())
index += 1
elif isinstance(m, nn.BatchNorm2d):
m.weight.data = torch.mul(m.weight.data,mask_weight_null[index].cuda())
m.weight.data = torch.add(m.weight.data,mask_weight[index].cuda())
m.bias.data = torch.mul(m.bias.data,mask_bias_null[index].cuda())
m.bias.data = torch.add(m.bias.data,mask_bias[index].cuda())
m.running_mean.data = torch.mul(m.running_mean.data,mask_running_mean_null[index].cuda())
m.running_mean.data = torch.add(m.running_mean.data,mask_running_mean[index].cuda())
m.running_var.data = torch.mul(m.running_var.data,mask_running_var_null[index].cuda())
m.running_var.data = torch.add(m.running_var.data,mask_running_var[index].cuda())
return net
def swapBatch(net, net2):
mask_batch = saveBatch(net2)
mask_batch_null = saveBatch(net2, isempty=1)
index = 0
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
m.weight.data = torch.mul(m.weight.data,mask_batch_null[index].cuda())
m.weight.data = torch.add(m.weight.data,mask_batch[index].cuda())
index += 1
m.bias.data = torch.mul(m.bias.data,mask_batch_null[index].cuda())
m.bias.data = torch.add(m.bias.data,mask_batch[index].cuda())
index += 1
m.running_mean.data = torch.mul(m.running_mean.data,mask_batch_null[index].cuda())
m.running_mean.data = torch.add(m.running_mean.data,mask_batch[index].cuda())
index += 1
m.running_var.data = torch.mul(m.running_var.data,mask_batch_null[index].cuda())
m.running_var.data = torch.add(m.running_var.data,mask_batch[index].cuda())
index += 1
return net
def saveBatch(net, isempty=0):
mask = []
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
if isempty:
mask.append(torch.zeros(m.weight.data.size()))
mask.append(torch.zeros(m.bias.data.size()))
mask.append(torch.zeros(m.running_mean.data.size()))
mask.append(torch.zeros(m.running_var.data.size()))
else:
mask.append(m.weight.data)
mask.append(m.bias.data)
mask.append(m.running_mean.data)
mask.append(m.running_var.data)
return mask
def printFeature(feature, filename):
f = open(filename, 'w')
for i in range(feature.data.size()[1]):
for j in range(feature.data.size()[2]):
for k in range(feature.data.size()[3]):
print(feature.data[0,i,j,k].item(), file=f, end=',')
print('',file=f)
print('',file=f)
f.close()
return
def printconv1_0(net):
for m in net.modules():
if isinstance(m, nn.Conv2d):
print(m.weight[0])
try:
print(m.bias[0])
except:
print("There is no bias")
pass
return
def printbatch1(net):
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
print(m.weight)
print(m.bias)
print(m.running_mean)
print(m.running_var)
return
def printlinear1_0(net):
for m in net.modules():
if isinstance(m, nn.Linear):
print(m.weight[0])
try:
print(m.bias[0])
except:
print("There is no bias")
pass
return
def float_to_hex(float_):
temp = float_ * 2**7 # Scale the number up.
temp = torch.round(temp) # Turn it into an integer.
temp = int(temp)
temp = temp & 0xff
return '{:02x}'.format(temp)
def float_to_hex_16(float_):
temp = float_ * 2**8 # Scale the number up.
temp = torch.round(temp) # Turn it into an integer.
temp = int(temp)
temp = temp & 0xffff
return '{:04x}'.format(temp)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
from math import cos, pi
def adjust_learning_rate(optimizer, epoch, iteration, num_iter, ne, init_lr):
lr = optimizer.param_groups[0]['lr']
warmup_epoch = 5
warmup_iter = warmup_epoch * num_iter
current_iter = iteration + epoch * num_iter
max_iter = ne * num_iter
lr = init_lr * (1 + cos(pi * (current_iter - warmup_iter) / (max_iter - warmup_iter))) / 2
if epoch < warmup_epoch:
lr = init_lr * current_iter / warmup_iter
for param_group in optimizer.param_groups:
param_group['lr'] = lr
|
[
"byh1321@naver.com"
] |
byh1321@naver.com
|
da6fa81c852b746e1fded343f4e04a7e146e335e
|
39b8aa964883b2bde4349e0c9c38e3233c310548
|
/src/Power of Four.py
|
96d2db9a48b59d6376e2dbcb8be1027d9d34085f
|
[] |
no_license
|
orifake/leetcode-python
|
053b82491e0b8d6197dd12d92eec5883211285db
|
8e375ebebe0a0285efefc33ed61afb22f41d0c75
|
refs/heads/master
| 2023-03-09T14:32:17.833456
| 2021-02-26T16:09:31
| 2021-02-26T16:09:31
| 264,466,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 473
|
py
|
import math
class Solution(object):
    def isPowerOfFour(self, num):
        """
        :type num: int
        :rtype: bool
        """
        return num > 0 and (num & (num - 1)) == 0 and \
            ((num & 0b01010101010101010101010101010101) == num)


class Solution2:
    def isPowerOfFour(self, num: int) -> bool:
        if num <= 0:
            return False
        return (math.log10(num) / math.log10(4)) % 1 == 0
t = Solution()
print(t.isPowerOfFour(4))
|
[
"349758699@qq.com"
] |
349758699@qq.com
|
abc2fe52b390b7c640ccb2ff87cb1d20b07a358a
|
f4c39ea03255886185d72f4871f92cc9538b2ad3
|
/crm/admin.py
|
52199d99d740927aa64e82448255195775028a41
|
[] |
no_license
|
lgkiemde/Maverick-Food-Service
|
359430b99588a2077736f81a50c4c663b7e65637
|
38a17e515941ae7471a5ca9cabd8cad9228e68d7
|
refs/heads/main
| 2023-08-30T15:12:23.512109
| 2021-10-23T01:27:59
| 2021-10-23T01:27:59
| 420,088,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 859
|
py
|
from django.contrib import admin
from .models import Customer, Service, Product
class CustomerList(admin.ModelAdmin):
    list_display = ('cust_name', 'organization', 'phone_number')
    list_filter = ('cust_name', 'organization')
    search_fields = ('cust_name',)
    ordering = ['cust_name']


class ServiceList(admin.ModelAdmin):
    list_display = ('cust_name', 'service_category', 'setup_time')
    list_filter = ('cust_name', 'setup_time')
    search_fields = ('cust_name',)
    ordering = ['cust_name']


class ProductList(admin.ModelAdmin):
    list_display = ('cust_name', 'product', 'pickup_time')
    list_filter = ('cust_name', 'pickup_time')
    search_fields = ('cust_name',)
    ordering = ['cust_name']
admin.site.register(Customer, CustomerList)
admin.site.register(Service, ServiceList)
admin.site.register(Product, ProductList)
|
[
"74085491+lgkiemde@users.noreply.github.com"
] |
74085491+lgkiemde@users.noreply.github.com
|
9ba8abccd4ecd06af19b8b0d1cb92d449e9cdbf9
|
c0b9b12e5a5dc3d143fe13a80d4fe52c3ac97355
|
/example_test/example_data_split.py
|
8401270e0a51b94e87a5b30ff93fbc45c455786d
|
[] |
no_license
|
liufei0820/anheng
|
afccbe7221dc292f110122e3181a3cf2fdb0cbfc
|
27c33dde4d5f44f56b23ddb472b80817487e78ff
|
refs/heads/main
| 2023-07-22T03:59:51.616987
| 2021-09-12T08:58:06
| 2021-09-12T08:58:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,613
|
py
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/9/12 3:02 PM
# @Author : Alioth
# @File : example_data_split.py
# @Email : thxthx1999@gmail.com
# @Software: PyCharm
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import json
import requests
if __name__ == '__main__':
    path_project = os.path.abspath('..')
    path_callbackurl = os.path.join(path_project, 'data', 'callbackurl' + '.json')
    print(path_callbackurl)
    with open(path_callbackurl, "r") as f:
        json_callbackurl = json.load(f)
    callBackUrl = json_callbackurl['callBackUrl']
    print(callBackUrl)
    path_data = os.path.join(path_project, 'data', 'loan_data' + '.csv')
    data_set = pd.read_csv(path_data, header=0, index_col=0)
    # Initiate a list for categoricals
    categ_list = ['purpose']
    # create new df with dummy variables
    data_set = pd.get_dummies(data_set, columns=categ_list, drop_first=True)
    # # print(data_set)
    percent = 0.8
    random = 1234
    test_data = data_set.sample(frac=(1 - percent), replace=False, random_state=random, axis=0)
    train_data = data_set[~data_set.index.isin(test_data.index)]
    print(test_data.head())
    path_train = os.path.join(path_project, 'data', 'train_data' + '.csv')
    path_test = os.path.join(path_project, 'data', 'test_data' + '.csv')
    train_data.to_csv(path_train)
    test_data.to_csv(path_test)
    dict_path = {
        "path_train": path_train,
        "path_test": path_test
    }
    r = requests.post(callBackUrl, json=dict_path)  # does json.dumps(your_json) automatically
|
[
"1094168447@qq.com"
] |
1094168447@qq.com
|
42d9a5f7b77cd5d5e697db8aab5835e9505444fc
|
ce7e7085b6bc07abf5eab5d4345e045a16ee0d56
|
/*backup/backup.py
|
63630e15cc53af9d5c5dbd5bc1213fe4f7079b78
|
[] |
no_license
|
WHjiangxiaolin/Python
|
bad4a5edc80f2dc96e6256ab2761437f93e666ab
|
2e6b4a02dba7d6016f846d6914eee9af61146860
|
refs/heads/master
| 2022-01-19T22:43:03.864235
| 2019-07-20T09:45:39
| 2019-07-20T09:45:39
| 197,872,898
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,322
|
py
|
#- Back up /tmp/demo/security into /tmp/demo/backup
#- Must support full and incremental backups
#- Run a full backup on Monday
#- Run an incremental backup on the other days
# Analysis:
#- A full backup archives the directory and computes the md5 of every file
#- An incremental backup computes each file's md5, compares it with the previous day's md5, and backs up the files that changed; files newly added to the directory are backed up as well
#- The backup file name should show which directory was backed up, whether the backup is incremental or full, and on which day it was made
import tarfile
from time import strftime
import os
import hashlib
import pickle


def check_md5(fname):  # compute a file's MD5 digest; used by the functions below
    m = hashlib.md5()
    with open(fname, 'rb') as fobj:
        while True:
            data = fobj.read(4096)
            if not data:
                break
            m.update(data)
    return m.hexdigest()


def full_backup(src, dst, md5file):  # full backup
    # Build the full-backup file name; os.path.basename(src) gives the last component of the directory path
    fname = '%s_full_%s.tar.gz' % (os.path.basename(src), strftime('%Y%m%d'))
    fname = os.path.join(dst, fname)  # build the absolute path of the full-backup file
    tar = tarfile.open(fname, 'w:gz')  # create the backup archive
    tar.add(src)
    tar.close()
    # compute the md5 of every file
    md5dict = {}
    for path, folders, files in os.walk(src):
        # os.walk yields tuples of three items: the path string, the list of subdirectories under that path, and the list of files in it. path, folders, files correspond to these three; path and file are joined into the file's absolute path
        for file in files:
            key = os.path.join(path, file)
            md5dict[key] = check_md5(key)  # compute the file's MD5 and store it as the dict value, keyed by file name
    # save the md5 dict to a file
    with open(md5file, 'wb') as fobj:
        pickle.dump(md5dict, fobj)


def incr_backup(src, dst, md5file):  # incremental backup
    # build the incremental-backup file name
    fname = '%s_incr_%s.tar.gz' % (os.path.basename(src), strftime('%Y%m%d'))
    fname = os.path.join(dst, fname)  # build the absolute path of the incremental-backup file
    # load the previous day's md5 values
    with open(md5file, 'rb') as fobj:
        old_md5 = pickle.load(fobj)
    # compute the md5 of the current files
    md5dict = {}
    for path, folders, files in os.walk(src):
        for file in files:
            key = os.path.join(path, file)
            md5dict[key] = check_md5(key)  # compute the file's MD5 and store it as the dict value, keyed by file name
    # find the changed files and the new files, and archive them
    tar = tarfile.open(fname, 'w:gz')
    for key in md5dict:
        # get returns None when the key is missing from the dict, so the comparison fails to match, meaning the file did not exist in the previous backup
        if old_md5.get(key) != md5dict[key]:
            tar.add(key)
    tar.close()
    # write the current md5 dict to the file for the next comparison
    with open(md5file, 'wb') as fobj:
        pickle.dump(md5dict, fobj)


if __name__ == '__main__':
    src = '/tmp/demo/security'
    dst = '/tmp/demo/backup'
    md5file = '/tmp/demo/backup/md5.data'
    if strftime('%a') == 'Mon':  # %a gives the abbreviated weekday name
        full_backup(src, dst, md5file)
    else:
        incr_backup(src, dst, md5file)
|
[
"jxl@163.com"
] |
jxl@163.com
|
c4fe3b1f9b8f103406a394b5030f956677734043
|
20ad94b7bc15dc76ad7a78133b52f75fd3381470
|
/C++ dasturlash asoslari/42. Tanlangan masalalar yechimi/f6.py
|
44a12fa412552d42633a6c1cc6aefce98442c657
|
[] |
no_license
|
ilmfan/MohirdevAlgoritm
|
8de810879b660561dd3582e486d58e1a342ad655
|
d58ded8bc6aa3d348c27d26b3856ca223829800e
|
refs/heads/main
| 2023-07-14T17:42:26.698190
| 2021-08-25T11:06:39
| 2021-08-25T11:06:39
| 395,877,110
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
"""
author: Shodmonov Zafar
date and time: 09:00 14.08.2021
information about the algorithm:
InPut: n
OutPut: prime numbers up to n
"""
def prime_numbers(n):
    output_list = [2]
    for num in range(3, n+1, 2):
        divided_into = []
        does_not_divide = []
        for i in range(2, num):
            if num % i == 0:
                divided_into.append(1)
            else:
                does_not_divide.append(1)
        if len(does_not_divide) == num - 2:
            output_list.append(num)
    return output_list
|
[
"dasturchi.uzbek@gmail.com"
] |
dasturchi.uzbek@gmail.com
|
466e2b548dafa31a902439b94559d4cce8d115ec
|
051a5b30752d60b2f40c28c8440c1d59ff8d6f53
|
/lab2/01_linear_regression.py
|
a78eff81a9fb58946fd4e6547b48db366f720184
|
[] |
no_license
|
yungbyun/Study_Tensorflow
|
e20c0de76e820898600c28fec2da3a88502f8403
|
8e2bcd191fd670068aaabe9845146df90da88182
|
refs/heads/master
| 2021-01-17T08:44:13.813361
| 2017-03-18T06:37:23
| 2017-03-18T06:37:23
| 83,952,928
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
from __future__ import print_function
import tensorflow as tf
import matplotlib.pyplot as plot
x_data = [1, 2, 3]
y_data = [1, 2, 3]
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
hypothesis = W * x_data + b
cost = tf.reduce_mean(tf.square(hypothesis - y_data))
a = tf.Variable(0.1)
optimizer = tf.train.GradientDescentOptimizer(a)
train = optimizer.minimize(cost)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
costs = []
weights = []
bs = []
for step in range(2001):
    sess.run(train)
    if step % 40 == 0:
        val_c = sess.run(cost)
        val_w = sess.run(W)
        val_b = sess.run(b)
        print(step, val_c, val_w, val_b)
        costs.append(val_c)
        weights.append(val_w)
        bs.append(val_b)
print("Learning finished!")
plot.plot(costs, 'o-')
plot.xlabel('Step')
plot.ylabel('Error')
plot.show()
plot.plot(weights, 'o-')
plot.xlabel('Step')
plot.ylabel('Weight')
plot.show()
plot.plot(bs, 'o-')
plot.xlabel('Step')
plot.ylabel('Bias')
plot.show()
|
[
"byclink@gmail.com"
] |
byclink@gmail.com
|
ee88edd0ac690cc450f39f6384e744c016c895de
|
92ca965a167316bb531671d8e28c58bc1decb7e8
|
/rbac/middlewares/rbac.py
|
bd4b4ab5038583dbb78a7d0266946e3dafcbafa7
|
[] |
no_license
|
yaozhengjie/crm-1
|
b879a095af54d720ae6ab4b73efa7758b6760093
|
89d72631b6065cfb390a0d4fa0331c5da01a080e
|
refs/heads/master
| 2020-04-08T16:25:31.169742
| 2018-11-28T11:16:34
| 2018-11-28T11:16:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,567
|
py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from django.utils.deprecation import MiddlewareMixin
from django.shortcuts import HttpResponse
import re
from django.conf import settings
class RbacMiddleware(MiddlewareMixin):
    '''
    1. Get the URL the current user is requesting
    2. Get the user's permitted URL list from the session
    3. Match the request against that permission info
    '''
    def process_request(self, request):
        '''
        Runs as soon as the user's request comes in
        :param request:
        :return:
        '''
        # get the URL the current user is requesting
        current_url = request.path_info
        # if the requested URL is on the whitelist, it may be accessed
        for valid in settings.VALID_URL_LIST:
            if re.match(valid, current_url):
                return None
        # print(current_url)
        # get the permitted URLs stored in the current user's session
        permission_list = request.session.get(settings.PERMISSION_SESSION_KEY)
        # print('permission', permission_list)
        # if the session holds no information for the current user, return an error
        if not permission_list:
            return HttpResponse('User information not found, please log in')
        flag = False
        # loop over the URLs in the session and check whether one matches the requested URL; allow access on a match, otherwise return an error message
        for url in permission_list:
            reg = '^%s$' % url
            if re.match(reg, current_url):
                flag = True
                break
        if not flag:
            return HttpResponse('Access denied')
|
[
"41354304+yjiu1990@users.noreply.github.com"
] |
41354304+yjiu1990@users.noreply.github.com
|
17eb256179da0f291fdd0e5d21d32169501672e1
|
e21ed71610f9d1004dfa21206300c0e9f3887e89
|
/modulo_2/Codewars/dev-junior/find_even_array.py
|
beb4a2bad5d9a8b39ec87d16249da6a0ba36113a
|
[] |
no_license
|
hpfn/wttd-2017-exerc
|
c0c79ee0cb3b5b331932787d280deee679357bc1
|
b1bf1394d2e2adc29257b7c4273af21b8509335f
|
refs/heads/master
| 2020-12-30T11:29:13.218980
| 2017-10-03T19:04:03
| 2017-10-03T19:04:03
| 91,572,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
# coding=utf-8
def find_even_index(arr):
    tam_arr = len(arr)
    for x in range(tam_arr):
        if sum(arr[:x]) == sum(arr[x+1:]):
            return x
    return -1
|
[
"hpfn@debian.org"
] |
hpfn@debian.org
|
7fb4ea8ca62ee742cb03add25202bb3018bba0d6
|
8562adfbeb7cf901aeeaf004dc1e53c286a24d48
|
/beg86.py
|
ba49d5c28d0528682212047ecc0dd3986de5a4fc
|
[] |
no_license
|
sarureddi/isogram
|
1d4f8a7566a1df0f4a7b42502be60a1fafaabc10
|
3aca7e1172977cd116c0902761d70ded84402310
|
refs/heads/master
| 2020-06-03T11:03:43.392152
| 2019-06-12T09:54:11
| 2019-06-12T09:54:11
| 191,544,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 93
|
py
|
si1=str(input())
l=len(si1)
s=set(si1)
if(l==len(s)):
    print("Yes")
else:
    print("No")
|
[
"noreply@github.com"
] |
noreply@github.com
|
be24fff7640880924ac1b8352d63c9ce128039bd
|
49beeee0d9aff3b776545cb553ef1bf15dd9f190
|
/example/example/views.py
|
6c06b12a01b8dad493049a74201b5a5b9af1ada9
|
[
"MIT"
] |
permissive
|
bluedisk/django-korean-fields
|
238364cf4f766db824adec832aaa2d83619cded1
|
b655e23d9a73e61cb217e34719ee6a2509f8f475
|
refs/heads/master
| 2020-03-19T09:55:10.974426
| 2018-11-10T15:02:02
| 2018-11-10T15:02:02
| 136,327,803
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
# -*- coding: utf-8 -*-
from django.forms import forms, CharField
from django.http import HttpResponse
from django.shortcuts import render
from korean.fields import JuminFormField
class TestForm(forms.Form):
    jumin1 = JuminFormField()
    jumin2 = JuminFormField()


def demo(request):
    if request.method == 'POST':
        form = TestForm(request.POST)
        if form.is_valid():
            return HttpResponse('success : ' + form.cleaned_data['jumin'])
    else:
        form = TestForm(initial={'jumin1': '010203-4567890'})
    return render(request, 'demo.html', {'form': form})
|
[
"bluedisk@gmail.com"
] |
bluedisk@gmail.com
|
07e2550e41d1f8ee6112f46da821e1ab0852682c
|
01ab6c9aa8f877cef36160b65b959019cece62df
|
/FullCopy/src/utils.py
|
9612ea294f0921f8d8d9e06e5e2a96f012f57db2
|
[] |
no_license
|
kiscsonti/DPwithTorches
|
40f693c77dd38860037d671a07f51c10ab9de185
|
3892c8fcf1436711691c65d23f63da5372349a92
|
refs/heads/master
| 2020-03-12T00:06:11.593266
| 2018-05-19T09:26:08
| 2018-05-19T09:26:08
| 130,341,301
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,953
|
py
|
import os
import json
import string
import wikiwords
import unicodedata
import numpy as np
from collections import Counter
from nltk.corpus import stopwords
words = frozenset(stopwords.words('english'))
punc = frozenset(string.punctuation)
def is_stopword(w):
return w.lower() in words
def is_punc(c):
return c in punc
baseline = wikiwords.freq('the')
def get_idf(w):
return np.log(baseline / (wikiwords.freq(w.lower()) + 1e-10))
def load_data(path):
from doc import Example
data = []
for line in open(path, 'r', encoding='utf-8'):
if path.find('race') < 0 or np.random.random() < 0.6:
data.append(Example(json.loads(line)))
print('Load %d examples from %s...' % (len(data), path))
return data
class Dictionary(object):
NULL = '<NULL>'
UNK = '<UNK>'
START = 2
@staticmethod
def normalize(token):
return unicodedata.normalize('NFD', token)
def __init__(self):
self.tok2ind = {self.NULL: 0, self.UNK: 1}
self.ind2tok = {0: self.NULL, 1: self.UNK}
def __len__(self):
return len(self.tok2ind)
def __iter__(self):
return iter(self.tok2ind)
def __contains__(self, key):
if type(key) == int:
return key in self.ind2tok
elif type(key) == str:
return self.normalize(key) in self.tok2ind
def __getitem__(self, key):
if type(key) == int:
return self.ind2tok.get(key, self.UNK)
if type(key) == str:
return self.tok2ind.get(self.normalize(key),
self.tok2ind.get(self.UNK))
def __setitem__(self, key, item):
if type(key) == int and type(item) == str:
self.ind2tok[key] = item
elif type(key) == str and type(item) == int:
self.tok2ind[key] = item
else:
raise RuntimeError('Invalid (key, item) types.')
def add(self, token):
token = self.normalize(token)
if token not in self.tok2ind:
index = len(self.tok2ind)
self.tok2ind[token] = index
self.ind2tok[index] = token
def tokens(self):
"""Get dictionary tokens.
Return all the words indexed by this dictionary, except for special
tokens.
"""
tokens = [k for k in self.tok2ind.keys()
if k not in {'<NULL>', '<UNK>'}]
return tokens
vocab, pos_vocab, ner_vocab, rel_vocab, char_vocab = Dictionary(), Dictionary(), Dictionary(), Dictionary(), Dictionary()
def gen_race_vocab(data):
race_vocab = Dictionary()
build_vocab()
cnt = Counter()
for ex in data:
cnt += Counter(ex.passage.split())
cnt += Counter(ex.question.split())
cnt += Counter(ex.choice.split())
for key, val in cnt.most_common(30000):
if key not in vocab:
race_vocab.add(key)
print('Vocabulary size: %d' % len(race_vocab))
writer = open('./data/race_vocab', 'w', encoding='utf-8')
writer.write('\n'.join(race_vocab.tokens()))
writer.close()
def build_vocab(data=None):
global vocab, pos_vocab, ner_vocab, rel_vocab, char_vocab
# build word vocabulary
if os.path.exists('./data/vocab'):
print('Load vocabulary from ../data/vocab...')
for w in open('./data/vocab', encoding='utf-8'):
vocab.add(w.strip())
print('Vocabulary size: %d' % len(vocab))
else:
cnt = Counter()
for ex in data:
cnt += Counter(ex.passage.split())
cnt += Counter(ex.question.split())
cnt += Counter(ex.choice.split())
for key, val in cnt.most_common():
vocab.add(key)
print('Vocabulary size: %d' % len(vocab))
writer = open('./data/vocab', 'w', encoding='utf-8')
writer.write('\n'.join(vocab.tokens()))
writer.close()
# build part-of-speech vocabulary
if os.path.exists('./data/pos_vocab'):
print('Load pos vocabulary from ../data/pos_vocab...')
for w in open('./data/pos_vocab', encoding='utf-8'):
pos_vocab.add(w.strip())
print('POS vocabulary size: %d' % len(pos_vocab))
else:
cnt = Counter()
for ex in data:
cnt += Counter(ex.d_pos)
cnt += Counter(ex.q_pos)
for key, val in cnt.most_common():
if key: pos_vocab.add(key)
print('POS vocabulary size: %d' % len(pos_vocab))
writer = open('./data/pos_vocab', 'w', encoding='utf-8')
writer.write('\n'.join(pos_vocab.tokens()))
writer.close()
# build named entity vocabulary
if os.path.exists('./data/ner_vocab'):
print('Load ner vocabulary from ../data/ner_vocab...')
for w in open('./data/ner_vocab', encoding='utf-8'):
ner_vocab.add(w.strip())
print('NER vocabulary size: %d' % len(ner_vocab))
else:
cnt = Counter()
for ex in data:
cnt += Counter(ex.d_ner)
for key, val in cnt.most_common():
if key: ner_vocab.add(key)
print('NER vocabulary size: %d' % len(ner_vocab))
writer = open('./data/ner_vocab', 'w', encoding='utf-8')
writer.write('\n'.join(ner_vocab.tokens()))
writer.close()
# Load conceptnet relation vocabulary
assert os.path.exists('./data/rel_vocab')
print('Load relation vocabulary from ../data/rel_vocab...')
for w in open('./data/rel_vocab', encoding='utf-8'):
rel_vocab.add(w.strip())
print('Rel vocabulary size: %d' % len(rel_vocab))
if os.path.exists('./data/char_vocab.txt'):
print('Load character vocabulary from ../data/char_vocab...')
with open("./data/char_vocab.txt", "r") as f:
for line in f.readlines():
char_vocab.add(line[:1])
print('Character vocabulary size: %d' % len(char_vocab))
else:
print("There is no character vocab file dudi, do something about it")
def gen_submission(data, prediction):
assert len(data) == len(prediction)
writer = open('out-%d.txt' % np.random.randint(10**18), 'w', encoding='utf-8')
for p, ex in zip(prediction, data):
p_id, q_id, c_id = ex.id.split('_')[-3:]
writer.write('%s,%s,%s,%f\n' % (p_id, q_id, c_id, p))
writer.close()
def gen_debug_file(data, prediction):
writer = open('./data/output.log', 'w', encoding='utf-8')
cur_pred, cur_choices = [], []
for i, ex in enumerate(data):
if i + 1 == len(data):
cur_pred.append(prediction[i])
cur_choices.append(ex.choice)
if (i > 0 and ex.id[:-1] != data[i - 1].id[:-1]) or (i + 1 == len(data)):
writer.write('Passage: %s\n' % data[i - 1].passage)
writer.write('Question: %s\n' % data[i - 1].question)
for idx, choice in enumerate(cur_choices):
writer.write('%s %f\n' % (choice, cur_pred[idx]))
writer.write('\n')
cur_pred, cur_choices = [], []
cur_pred.append(prediction[i])
cur_choices.append(ex.choice)
writer.close()
def gen_final_submission(data):
import glob
proba_list = []
for f in glob.glob('./out-*.txt'):
print('Process %s...' % f)
lines = open(f, 'r', encoding='utf-8').readlines()
lines = map(lambda s: s.strip(), lines)
lines = list(filter(lambda s: len(s) > 0, lines))
assert len(lines) == len(data)
proba_list.append(lines)
avg_proba, p_q_id = [], []
for i in range(len(data)):
cur_avg_p = np.average([float(p[i].split(',')[-1]) for p in proba_list])
cur_p_q_id = ','.join(data[i].id.split('_')[-3:-1])
if i == 0 or cur_p_q_id != p_q_id[-1]:
avg_proba.append([cur_avg_p])
p_q_id.append(cur_p_q_id)
else:
avg_proba[-1].append(cur_avg_p)
gen_debug_file(data, [p for sublist in avg_proba for p in sublist])
writer = open('answer.txt', 'w', encoding='utf-8')
assert len(avg_proba) == len(p_q_id)
cnt = 0
for probas, cur_p_q_id in zip(avg_proba, p_q_id):
cnt += 1
assert len(probas) > 1
pred_ans = np.argmax(probas)
writer.write('%s,%d' % (cur_p_q_id, pred_ans))
if cnt < len(p_q_id):
writer.write('\n')
writer.close()
os.system('zip final_output.zip answer.txt')
print('Please submit final_output.zip to codalab.')
def eval_based_on_outputs(path):
dev_data = load_data('../data/dev-data-processed.json')
label = [int(ex.label) for ex in dev_data]
gold, cur_gold = [], []
for i, ex in enumerate(dev_data):
if i + 1 == len(dev_data):
cur_gold.append(label[i])
if (i > 0 and ex.id[:-1] != dev_data[i - 1].id[:-1]) or (i + 1 == len(dev_data)):
gy = np.argmax(cur_gold)
gold.append(gy)
cur_gold = []
cur_gold.append(label[i])
prediction = [s.strip() for s in open(path, 'r', encoding='utf-8').readlines() if len(s.strip()) > 0]
prediction = [int(s.split(',')[-1]) for s in prediction]
assert len(prediction) == len(gold)
acc = sum([int(p == g) for p, g in zip(prediction, gold)]) / len(gold)
print('Accuracy on dev_data: %f' % acc)
def text_to_char_index(text):
indexed = []
for char in text:
indexed.append(char_vocab[char])
return indexed
def text_to_grams(text, length=5):
partials = []
if len(text) < length:
partials.append(text)
else:
        # note: the range ends at len(text) + 1 so the final window is included
        for i in range(length, len(text) + 1):
partials.append(text[i-length:i])
return partials
if __name__ == '__main__':
# build_vocab()
trial_data = load_data('./data/trial-data-processed.json')
train_data = load_data('./data/train-data-processed.json')
dev_data = load_data('./data/dev-data-processed.json')
test_data = load_data('./data/test-data-processed.json')
build_vocab(trial_data + train_data + dev_data + test_data)
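# Editor's note: a small demonstration added by the editor, not part of the original file; it
# shows how the Dictionary class above hands out indices. It is wrapped in a function so that
# importing the module prints nothing - call dictionary_demo() by hand to try it.
def dictionary_demo():
    demo_dict = Dictionary()
    for tok in ('the', 'quick', 'fox'):
        demo_dict.add(tok)
    print(demo_dict['the'], demo_dict['fox'])     # 2 4 - real tokens start at Dictionary.START
    print(demo_dict[0], demo_dict['never-seen'])  # <NULL> 1 - unknown tokens fall back to UNK's index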
|
[
"kiscsonti@vipmail.hu"
] |
kiscsonti@vipmail.hu
|
191fb84e33cb5a9226de5e021a42e6bc6fb12eb0
|
48700c7222de631fc2ea4463abbb1b0b9aaebeec
|
/nodered-api-client-basic/get-json.py
|
b213550ee405012eb1ab0e95e123e110b401ad71
|
[] |
no_license
|
kopikaki/python_examples
|
cf4daf6d1ccac53e5910872b93f994e0c99c3de4
|
3395e9fe176014d404aaa9797f75e5c445805e55
|
refs/heads/master
| 2021-01-05T05:36:06.682296
| 2020-03-28T04:16:35
| 2020-03-28T04:16:35
| 240,898,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
import json
import requests
# use the HTTPS End Point here instead
nodeUrl = 'https://nr02.d1.zetez.com/node'
apiUrl = nodeUrl + '/data'
resp = requests.get(
apiUrl
)
if resp.status_code != 200:
# This means something went wrong.
    print('HTTP Error: ' + str(resp.status_code))
else:
respJson = resp.json()
print('HTTP Response: '+json.dumps(respJson))
|
[
"jeffqu08@gmail.com"
] |
jeffqu08@gmail.com
|
d47c3724879680967f10765f503c820e7982fb3f
|
714d4d2796e9b5771a1850a62c9ef818239f5e77
|
/components/metrics/DEPS
|
2f4d413d44817a460d2dc1304dd4027f1f530765
|
[
"BSD-3-Clause"
] |
permissive
|
CapOM/ChromiumGStreamerBackend
|
6c772341f815d62d4b3c4802df3920ffa815d52a
|
1dde005bd5d807839b5d45271e9f2699df5c54c9
|
refs/heads/master
| 2020-12-28T19:34:06.165451
| 2015-10-21T15:42:34
| 2015-10-23T11:00:45
| 45,056,006
| 2
| 0
| null | 2015-10-27T16:58:16
| 2015-10-27T16:58:16
| null |
UTF-8
|
Python
| false
| false
| 243
|
# This component is shared with the Chrome OS build, so it's important to limit
# dependencies to a minimal set.
include_rules = [
"-components",
"+components/compression",
"+components/metrics",
"+components/variations",
"-net",
]
|
[
"j.isorce@samsung.com"
] |
j.isorce@samsung.com
|
|
8899018c3b57d2dc6e0f8fc1b71cb7428223e45c
|
b38abaa3b35f8c465be470d2240db515b460d469
|
/blog/admin.py
|
52f4623ff358530be5144a08ef1d4f2791309765
|
[] |
no_license
|
ninestep/mysite
|
fc44d12f0f2f69c802e83c829128f2a9420944cb
|
57c9a9ef3401f80aa1c07ae81dc7cd64185ec544
|
refs/heads/master
| 2022-07-18T06:09:33.870245
| 2022-06-26T00:44:36
| 2022-06-26T00:44:36
| 59,069,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
from django.contrib import admin
from . import models
from markdownx.admin import MarkdownxModelAdmin
# Register your models here.
admin.site.register(models.articles,MarkdownxModelAdmin)
admin.site.register(models.comments)
admin.site.register(models.system)
|
[
"859696354@qq.com"
] |
859696354@qq.com
|
d380fe52b1c521e8ecdac7ec5218fc2ce599e77d
|
34188f655a121b6db7c029c5da93779411ee92bc
|
/7a.Stos i kolejka/czynawiasowaniepoprawne.py
|
2314d35fece5bdf050ca66135a6502608160bb6a
|
[] |
no_license
|
aniagut/ASD-2020
|
3f0760f28888bdb0a6d689c357c8444bf09ff48b
|
f1a084d4f8175a76fd4274f270eab2ddc7a5e172
|
refs/heads/master
| 2023-03-08T07:51:05.351562
| 2021-02-28T17:30:24
| 2021-02-28T17:30:24
| 343,164,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,116
|
py
|
# if an opening bracket arrives, push it onto the stack
# if a closing bracket arrives, pop from the stack and check that the pair matches
class Stack:
def __init__(self):
self.s=[]
self.top=-1
self.size=0
def push(self,x):
self.top+=1
self.size+=1
if self.top==len(self.s):
self.s.append(x)
else:
self.s[self.top]=x
def pop(self):
self.size-=1
res=self.s[self.top]
self.top-=1
return res
def is_empty(self):
return self.size==0
def funkcja(nawiasy):
s=Stack()
n=len(nawiasy)
for i in range(n):
if nawiasy[i]=="(" or nawiasy[i]=="[":
s.push(nawiasy[i])
else:
if s.is_empty(): return False
res=s.pop()
if nawiasy[i]==")":
if res!="(":
return False
elif nawiasy[i]=="]":
if res!="[":
return False
if not s.is_empty(): return False
return True
nawiasy="((([][])))"
print(funkcja(nawiasy))
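# Editor's note: two extra checks added by the editor, not part of the original file; the first
# shows that interleaved brackets are rejected, the second that independent pairs are accepted.
print(funkcja("([)]"))   # False - ")" does not match the "[" on top of the stack
print(funkcja("()[]"))   # True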
|
[
"noreply@github.com"
] |
noreply@github.com
|
ef5014df5a01fb40ab483a448b2b532e3c791cd5
|
ca680d06086cef25a28bf7e5e7678b179bf99497
|
/14032020 1day 1commit.py
|
ed1a37ded5db15802c85235fb3da1fda7631d8d9
|
[] |
no_license
|
kierenmihaly/worldwebproject
|
660f09471c44c8db59bb49b16d41180026633df7
|
34578ffbac29a115bb731065c469f930831d28bd
|
refs/heads/master
| 2020-09-29T23:17:23.300891
| 2020-08-31T01:46:51
| 2020-08-31T01:46:51
| 227,146,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,153
|
py
|
#14032020
# Part 3: conditionals - Python if/else
SCISSOR = '가위'
ROCK = '바위'
PAPER = '보'
WIN = "win"
DRAW = 'draw'
LOSE = 'lose...'
mine = '가위'
yours = '바위'
if mine == yours:
result = DRAW
# a style that uses many short if and else blocks
else:
    if mine == SCISSOR: # I played scissors
if yours == ROCK:
result = LOSE
        else: # otherwise, I won
result = WIN
    else: # my hand was not scissors
if mine == ROCK:
if yours == PAPER:
result = LOSE
else:
result = WIN
else:
if mine == PAPER:
if yours == SCISSOR:
result = LOSE
else:
result = WIN
else:
print('weird')
print(result)
# elif
# in Python, an else followed by a nested if block can be merged into a single clause
# else vs. elif
# code that runs when the if condition is not met
# else always runs whenever the condition is not met
# elif checks another condition when the previous one is not met
|
[
"noreply@github.com"
] |
noreply@github.com
|
3b2e792a01d05f90502f8647222c52e55e4095ee
|
ffc5257d66a581ed18d3ed024e263c2430f27cf3
|
/noi/noi/settings.py
|
0e261bf7f8a22230dfc1cd1d843e349b23424edd
|
[] |
no_license
|
ShadowLore/wow
|
e7456ff4702d94e522ff435c5893a4fa7b299e9a
|
d3e1a3d52d4ef2ae492910c2313e54fbfc37e54f
|
refs/heads/master
| 2023-08-20T02:56:14.059858
| 2021-10-22T13:44:57
| 2021-10-22T13:44:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,242
|
py
|
"""
Django settings for noi project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-m_$wkt$)!=2ism%()r62@r_&*4+4c@v_moyw5kz2yce&(ab_(w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'main',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'noi.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'noi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"67012072+ShadowLore@users.noreply.github.com"
] |
67012072+ShadowLore@users.noreply.github.com
|
d3b6e9f0e660a6ab3559ab5e2029a46b8e10bf27
|
255efb54075eb8cc2412bf1d5c936a97a003337e
|
/xt/environment/__init__.py
|
69338935f833cbdd1def7455667f8075e68b8eed
|
[
"MIT"
] |
permissive
|
jinqiuzhao/xingtian
|
914a4d48c62fd8b3d4ddd0479e9bab54bbe5cba7
|
95953dc6109c96e68dcdeb9755b3679ff51742d4
|
refs/heads/master
| 2023-06-06T06:20:28.815549
| 2021-07-02T10:00:42
| 2021-07-02T10:00:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Build environment module.
Do encapsulation for different simulations.
Unify the single and multi-agents.
"""
from __future__ import division, print_function
from xt.framework import Registers
def env_builder(env_name, env_info, **kwargs):
"""
Build the interface func for creating environment.
:param env_name:the name of environment
:param env_info: the config info of environment
:return:environment instance
"""
return Registers.env[env_name](env_info, **kwargs)
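# Editor's note: the sketch below was added by the editor and is not part of the original module
# nor the real xt.framework API; DEMO_ENV_REGISTRY and DemoEnv are made-up names used only to
# illustrate the look-up-by-name-and-instantiate pattern that env_builder relies on.
class DemoEnv:
    def __init__(self, env_info, **kwargs):
        self.env_info = env_info
DEMO_ENV_REGISTRY = {"demo": DemoEnv}
def demo_env_builder(env_name, env_info, **kwargs):
    # same shape as env_builder above: look the class up by name, then instantiate it
    return DEMO_ENV_REGISTRY[env_name](env_info, **kwargs)
# usage: demo_env_builder("demo", {"size": 4}).env_info -> {'size': 4}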
|
[
"hustqj@126.com"
] |
hustqj@126.com
|
a62cffaf25c5e7ee992b973d0e3635e1296188ff
|
fbcb3c05e34e21573fc926282c9dbae1c0a36021
|
/Level 1/prison-labor-dodgers/solution.py
|
174f16bfac9f70e585ff1b24281b40dba58458ac
|
[] |
no_license
|
mattany/google-foobar
|
deb806f27505a98fed52c3eddf228dfa282ec0fa
|
33549bb6041fefcd0556de8583c5a7fca7d7508b
|
refs/heads/master
| 2023-01-03T19:57:46.159094
| 2020-11-01T00:03:22
| 2020-11-01T00:03:22
| 305,119,929
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
def solution(x, y):
sorted_x = sorted(x)
sorted_y = sorted(y)
for i in range(min(len(x), len(y))):
if sorted_x[i] != sorted_y[i]:
return min(sorted_x[i], sorted_y[i])
    # all compared elements matched, so the extra ID is the largest element of the longer sorted list
    if len(x) > len(y):
        return sorted_x[-1]
    else:
        return sorted_y[-1]
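# Editor's note: quick checks added by the editor, not part of the original submission; the
# inputs below are illustrative ID lists in the style of the Google foobar prompt.
print(solution([14, 27, 1, 4, 2, 50, 3, 1], [2, 4, -4, 3, 1, 1, 14, 27, 50]))  # -4
print(solution([13, 5, 6, 2, 5], [5, 2, 5, 13]))                               # 6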
|
[
"mattany@gmali.com"
] |
mattany@gmali.com
|
ab88b8234f344ef4469f84313c26e2edc8cec90b
|
d56a3ebea066bdd10e8f554be13be7260118ddad
|
/Server Code/server.py
|
d7e4a81da83d92f9398b9e34de9e6672797d1183
|
[
"MIT"
] |
permissive
|
Shanjiith-Pranov/AOGS-Code
|
20ce7d003f80521ff0d98c8c43a873539075a3c9
|
ed4c1b15a16fdb336da42eb838f83aaa16151b0d
|
refs/heads/main
| 2023-06-01T21:36:04.786653
| 2021-06-19T05:42:37
| 2021-06-19T05:42:37
| 378,325,337
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,497
|
py
|
import unittest
from datetime import datetime
from math import log, log10, sin, cos, atan2, asin, degrees, radians, sqrt
import numpy
earth_radius = 6371 # kilometers
def haversine(point1, point2):
"""
Calculates the distance between two points on the earth.
haversine((52.2296756, 21.0122287), (52.406374, 16.9251681))
278.4581750754194
"""
lat1, lat2 = radians(point1[0]), radians(point2[0])
lon1, lon2 = radians(point1[1]), radians(point2[1])
delta_lat = lat2 - lat1
delta_lon = lon2 - lon1
    a = (sin(delta_lat/2)**2) + (cos(lat1)*cos(lat2)*sin(delta_lon/2)**2)
c = 2*atan2(sqrt(a), sqrt(1-a))
distance = earth_radius * c
return distance
class SeismicStation:
"""
Class that creates the objects for a seismic station with a 'name', and
a set of gps coordinates, lat and lon (degrees)
"""
    def __init__(self, name, coords: tuple):
self.name = name
self.coords = coords
self.latitude = coords[0]
self.longitude = coords[1]
self.events = list()
def add_event(self, event):
"""
Adds a single event to the events list.
"""
self.events.append(event)
return None
    def __str__(self):
result = '{0.name} at {0.coords}'.format(self)
return result
    def __repr__(self):
result = '{0.name}'.format(self)
return result
class StationEvent:
"""
An object pertaining to a single seismic event at a single seismic recording
station.
"""
    def __init__(self, p_arrival_time, s_arrival_time, max_amplitude):
p_time, s_time = self.parse_station_time(p_arrival_time, s_arrival_time)
self.delta = s_time - p_time
self.delta_sec = self.delta.seconds
self.p_arrival_time = p_time
self.s_arrival_time = s_time
self.max_amplitude = max_amplitude
self.Vsp = self.wave_velocity()
self.dist_to_eq = self.calc_distance()
self.magnitude = self.calc_magnitude()
self.seismic_moment = self.calc_seismic_moment()
self.energy = self.calc_seismic_energy()
    def __str__(self):
message = "{} | Tsp(s): {}, Amp(mm): {}"
return message.format(self.p_arrival_time, self.delta_sec, self.max_amplitude)
    def __repr__(self):
message = "{} | Tsp(s): {}, Amp(mm): {}"
return message.format(self.p_arrival_time, self.delta_sec, self.max_amplitude)
def wave_velocity(self, VS=3.67, VP=6.34):
"""
Calculates the wave velocity based upon assumptions VS and VP.
VS = avg velocity of s-waves in CA crustal rocks (km/sec)
VP = avg velocity of p-waves in CA crustal rocks (km/sec)
"""
Vsp = (VS*VP) / (VP-VS)
return Vsp
def parse_station_time(self, p_time, s_time):
"""
parse_station_time("08:00:00", "08:00:49")
"""
p_time = datetime.strptime(p_time, "%H:%M:%S")
s_time = datetime.strptime(s_time, "%H:%M:%S")
return p_time, s_time
def calc_distance(self):
"""
Calculates the distance from the epicenter of the earthquake from
one seismic station. Using assumption of average velocity in California
crustal rocks for Vsp. (adaptable for location of stations or earthquake)
"""
self.dist_to_eq = float(self.delta_sec * self.Vsp)
return self.dist_to_eq
def calc_magnitude(self):
"""
Calculates the magnitude of the Earthquake on the Richter Scale.
source: http://crack.seismo.unr.edu/ftp/pub/louie/class/100/magnitude.html
"""
        # base-10 logarithms, matching the formula from the cited source
        result = log10(self.max_amplitude) + (3*log10(8*self.delta_sec)-2.92)
self.magnitude = result
return self.magnitude
def calc_seismic_moment(self):
"""
Calculates the seismic moment (dyne-cm) of the earthquake based upon relationship
with Magnitude. source: https://goo.gl/lLpS9x
"""
        result = 10 ** ((3/2)*(self.magnitude+16))
self.seismic_moment = result
return self.seismic_moment
def calc_seismic_energy(self, method='moment'):
"""
Calculates the amount of Energy (ergs) released by the earthquake, based on
either the magnitude or the seismic moment.
"""
if method == 'magnitude':
"""
E = 10 ^ (11.8 + (1.5 * Magnitude))
"""
result = 10 ** (11.8+(1.5*self.magnitude))
elif method == 'moment':
"""
E = Moment / 20,000
"""
result = self.seismic_moment / 20000
else:
print("Error, available methods are 'moment' or 'magnitude'.")
result = None
self.energy = result
return self.energy
def print_report(self):
"""
Prints out the results. :)
"""
message = 'The difference between p- and s-wave arrival times was: {} seconds.\
\nThe distance to the earthquake is {} kilometers.'
print(message.format(self.delta_sec, self.dist_to_eq))
class Earthquake:
"""
Compiles data from at least three seismic station events to determine
the epicenter of the earthquake.
"""
    def __init__(self, *args):
self.station1 = args[0]
self.station2 = args[1]
self.station3 = args[2]
self.epicenter = Earthquake.calc_epicenter(self)
def calc_epicenter(self):
'''
Calculates the epicenter of the Earthquake with the following steps:
1. Gets the latitude (radians), longitude (radians), and radius (km) of each of the 3 seismic station events given
2. Converts the geodetic latitude and longitude to ECEF xyz coordinates.
3. Apply each X, Y, Z set of coordinates for each of the 3 points to it's own numpy array.
4. Individually calculate the X, Y, and Z coordinates of the epicenter.
5. Convert the ECEF xyz coordinates of the epicenter back to Geodetic Latitude and Longitude.
returns the location of the epicenter as a tuple (latitude, longitude)
'''
lat1 = radians(self.station1.coords[0])
lon1 = radians(self.station1.coords[1])
r1 = self.station1.events[0].dist_to_eq
lat2 = radians(self.station2.coords[0])
lon2 = radians(self.station2.coords[1])
r2 = self.station2.events[0].dist_to_eq
lat3 = radians(self.station3.coords[0])
lon3 = radians(self.station3.coords[1])
r3 = self.station3.events[0].dist_to_eq
x1 = earth_radius * (cos(lat1) * cos(lon1))
y1 = earth_radius * (cos(lat1) * sin(lon1))
z1 = earth_radius * (sin(lat1))
x2 = earth_radius * (cos(lat2) * cos(lon2))
y2 = earth_radius * (cos(lat2) * sin(lon2))
z2 = earth_radius * (sin(lat2))
x3 = earth_radius * (cos(lat3) * cos(lon3))
y3 = earth_radius * (cos(lat3) * sin(lon3))
z3 = earth_radius * (sin(lat3))
P1 = numpy.array([x1, y1, z1])
P2 = numpy.array([x2, y2, z2])
P3 = numpy.array([x3, y3, z3])
ex = (P2 - P1)/(numpy.linalg.norm(P2 - P1))
i = numpy.dot(ex, P3 - P1)
ey = (P3 - P1 - i*ex)/(numpy.linalg.norm(P3 - P1 - i*ex))
ez = numpy.cross(ex, ey)
d = float(numpy.linalg.norm(P2 - P1))
j = numpy.dot(ey, P3 - P1)
        x = ((r1**2) - (r2**2) + (d**2)) / (2*d)
        y = (((r1**2) - (r3**2) + (i**2) + (j**2))/(2*j)) - ((i/j)*x)
        z = sqrt(abs((r1**2) - (x**2) - (y**2)))
tri_point = P1 + (x*ex) + (y*ey) + (z*ez)
lat = degrees(asin(tri_point[2] / earth_radius))
lon = degrees(atan2(tri_point[1], tri_point[0]))
epicenter = (lat, lon)
self.epicenter = epicenter
return self.epicenter
sensor1 = SeismicStation('sensor1', (40.8021, -124.1637))
sensor2 = SeismicStation('sensor2', (40.8324, -115.7631))
sensor3 = SeismicStation('sensor3', (36.1699, -115.1398))
event1 = StationEvent("00:00:00", "00:01:08", 250)
event2 = StationEvent("00:00:00", "00:01:14", 50)
event3 = StationEvent("00:00:00", "00:01:04", 100)
sensor1.add_event(event1)
sensor2.add_event(event2)
sensor3.add_event(event3)
eq=Earthquake(sensor1, sensor2, sensor3)
print("The epicenter of the earthquake is: " + str(eq.calc_epicenter()))
print("The magnitude of the eathquake is: " + str(eq.calc_magnitude()))
|
[
"62892238+Shanjiith-Pranov@users.noreply.github.com"
] |
62892238+Shanjiith-Pranov@users.noreply.github.com
|
92378b9d2b6ae21a09ab5425517a89f70af2e4f6
|
e8503af6e8c8b7c10b93a76dcf0cbb141074361e
|
/pswa_django/pswa_django/urls.py
|
2bcd250b9cffc4ca636ab62a350aadf613f498e5
|
[] |
no_license
|
jjbyrne1/Project-Scheduler-Web-App
|
ea5e15ebe6627c1f619b6182bddd359362d7f67f
|
ef15fbb5853bda83dd2d11efeb6ae8625f5ba103
|
refs/heads/main
| 2023-04-21T02:36:16.726708
| 2021-05-13T18:09:25
| 2021-05-13T18:09:25
| 340,113,438
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
"""pswa_django URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include("mainForm.urls")),
]
# Credit to https://stackoverflow.com/questions/5871730/how-to-upload-a-file-in-django
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
|
[
"jjbyrne@ksu.edu"
] |
jjbyrne@ksu.edu
|
2023b3467ceed0986d27fb4c617037077678dc8d
|
7d58be2bbd4fed35a604b3732eecd1013e255bb8
|
/modules/exit.py
|
2289d2e1231c089ac8eeae575e642a45e7e8261d
|
[] |
no_license
|
Tianchai/to-do-list
|
b02c645020a65a10e0b5d3716dd0fca32f8f6177
|
3485d1d7ce79226e78d78fc40f80b285db281640
|
refs/heads/master
| 2021-07-24T15:10:35.454499
| 2018-07-25T18:15:05
| 2018-07-25T18:15:05
| 142,311,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
from pyfiglet import Figlet
import sys
def exit(redis, style):
exit_msg = Figlet(font='slant')
print(exit_msg.renderText('Good Bye . . .'))
sys.exit()
|
[
"tianchai.riengviwat@gmail.com"
] |
tianchai.riengviwat@gmail.com
|
bdef4180111df6d6c82feab386dc5b173629453f
|
40ac650d3eeec0e4951dcc21d9da1f09a11de9ff
|
/test_leetcode05.py
|
0d2e19f28eaefa8e65f730685d048adc6ea12beb
|
[] |
no_license
|
liuyufei-pia/BR
|
499a65ecd398cd259f5cb17d405d0b17c89a94e4
|
1861716f5dfca78a3c69ba56a827e225a4d9b800
|
refs/heads/master
| 2020-07-23T07:27:18.668754
| 2019-11-27T09:06:03
| 2019-11-27T09:06:03
| 207,485,295
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,400
|
py
|
def longestPalindrome(s: str) -> str:
    # Manacher's algorithm
    # first insert separator characters so odd- and even-length palindromes take the same form
    # then use a KMP-like idea to optimize the plain center expansion
if len(s) == 0:
return ""
s_new = '#' + '#'.join(s) + '#'
print(s_new)
    # rightmost boundary reached so far
mx = 0
    # center of the palindrome that reaches that boundary
mid = 0
l = len(s_new)
    # expansion radius array; an initial value of 1 or 0 both work, it is only the starting radius
p = [1] * l
for i in range(l):
if i < mx:
            # previously computed values can be reused here
            # but we must not go past the rightmost boundary already reached
            # the mirror position of i is 2*mid - i
            # by the definition of mx, the radius can be at most mx - i
p[i] = min(mx - i, p[2 * mid - i])
        # the optimization above already saved the time; what follows is the ordinary expansion
while (i - p[i] >= 0 and i + p[i] < l and s_new[i - p[i]] == s_new[i + p[i]]):
p[i] += 1
        # update mx and mid
if i + p[i] > mx:
mx = i + p[i]
mid = i
maxr = max(p)
ans = p.index(maxr)
    # the loop overshoots by one, so the actual expansion radius is one less
maxr -= 1
return s_new[ans - maxr:ans + maxr + 1].replace('#', "")
if __name__ == '__main__':
s = 'abcba'
print(longestPalindrome(s))
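    # Editor's note: a naive O(n^2) cross-check added by the editor, not part of the original
    # file; it is handy for validating longestPalindrome on small inputs.
    def brute_force_longest_palindrome(text):
        best = ""
        for i in range(len(text)):
            for j in range(i, len(text)):
                sub = text[i:j + 1]
                if sub == sub[::-1] and len(sub) > len(best):
                    best = sub
        return best
    print(brute_force_longest_palindrome(s))  # 'abcba', matching the Manacher result above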
|
[
"q.we85273@163.com"
] |
q.we85273@163.com
|
881fdd4284165a6767a1d165b25cff1d89237f6f
|
469fc3043fc99969d16cee36d299f5944e21225d
|
/plugin.video.D17Replay/default.py
|
9d019f005f3994a8077d1205d57b10bc849a3f43
|
[] |
no_license
|
quatsch/JUL1EN094-xbmc-addons
|
313371d5a37569fa7d6db4bd866fc9d9779640c1
|
907671229ee018962d3a7c291cf8afe3dc0d959c
|
refs/heads/master
| 2021-01-18T11:38:27.451256
| 2014-04-14T17:23:50
| 2014-04-14T17:23:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,346
|
py
|
# -*- coding: utf-8 -*-
# xbmc modules
import xbmc
import xbmcplugin
import xbmcgui
import xbmcaddon
# os and lib modules
import os
import sys
import urllib
import urllib2
import re
# print_exc
from traceback import print_exc
# parseDOM
import CommonFunctions
common = CommonFunctions
common.plugin = "plugin.video.D17Replay"
__addonID__ = "plugin.video.D17Replay"
__author__ = "JUL1EN094"
__date__ = "01-02-2013"
__version__ = "1.0.6"
__credits__ = "Merci aux auteurs des autres addons replay du dépôt Passion-XBMC pour leur inspiration"
__addon__ = xbmcaddon.Addon( __addonID__ )
__settings__ = __addon__
__language__ = __addon__.getLocalizedString
__addonDir__ = __settings__.getAddonInfo( "path" )
# Global Variable
ROOTDIR = __settings__.getAddonInfo('path')
BASE_RESOURCE_PATH = os.path.join( ROOTDIR, "resources" )
MEDIA_PATH = os.path.join( BASE_RESOURCE_PATH, "media" )
ADDON_DATA = xbmc.translatePath( "special://profile/addon_data/%s/" % __addonID__ )
CACHEDIR = os.path.join( ADDON_DATA, "cache")
THUMB_CACHE_PATH = os.path.join( xbmc.translatePath( "special://profile/" ), "Thumbnails", "Video" )
WEBROOT = "http://www.d17.tv"
CANAL_VIDEOINFO_URL = "http://service.canal-plus.com/video/rest/getVideosLiees/"
FANART_PATH = os.path.join( ROOTDIR, "fanart.jpg" )
# List of directories to check at startup
dirCheckList = (CACHEDIR,)
class D17Replay:
"""
main plugin class
"""
debug_mode = False # Debug mode
def __init__( self, *args, **kwargs ):
print "==============================="
print " D17 Replay - Version: %s"%__version__
print "==============================="
print
self.set_debug_mode()
if self.debug_mode:
print "Python version:"
print sys.version_info
print "ROOTDIR: %s"%ROOTDIR
print "ADDON_DATA: %s"%ADDON_DATA
print "CACHEDIR: %s"%CACHEDIR
params = self.get_params()
url = None
name = None
mode = None
iconimage = None
try:
url=urllib.unquote_plus(params["url"])
except:
pass
try:
name=urllib.unquote_plus(params["name"])
except:
pass
try:
mode=int(params["mode"])
except:
pass
try:
iconimage=urllib.unquote_plus(params["iconimage"])
except:
pass
if self.debug_mode:
print "Mode: "+str(mode)
print "URL: "+str(url)
print "Name: "+str(name)
print "Iconimage: "+str(iconimage)
# Check if directories in user data exist
for i in range(len(dirCheckList)):
self.checkfolder(dirCheckList[i])
if mode==None or url==None or len(url)<1:
if self.debug_mode:
print "GET_CATEGORIES("+WEBROOT+")"
self.GET_CATEGORIES(WEBROOT)
self.clean_thumbnail(str(url))
xbmcplugin.setPluginCategory(handle=int(sys.argv[1]),category=__language__(30000))
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==1:
if self.debug_mode:
print "GET_EMISSIONS_DIR : "+url
self.GET_EMISSIONS_DIR(url)
self.clean_thumbnail(str(url))
xbmcplugin.setPluginCategory(handle=int(sys.argv[1]),category=__language__(30000))
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==2:
if self.debug_mode:
print "GET_EPISODES("+url+")"
self.GET_EPISODES(url,name)
self.clean_thumbnail(str(url))
xbmcplugin.setPluginCategory(handle=int(sys.argv[1]),category=__language__(30000))
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==3:
if self.debug_mode:
print "PLAY_VIDEO"
print "vid :"+str(url)
video_url = self.GET_VIDEO_CANAL(str(url),'d17/')
item = xbmcgui.ListItem(path=video_url)
xbmcplugin.setResolvedUrl(handle=int(sys.argv[1]), succeeded=True, listitem=item)
def GET_CATEGORIES(self,url):
soup = self.get_soup(url)
html = soup.decode("iso-8859-1")
main_menu_s = common.parseDOM(html,"ul",attrs={"class":"main-menu"})
if main_menu_s :
main_menu = main_menu_s[0]
li_s = common.parseDOM(main_menu,"li")
for li in li_s :
links = re.findall(u"""<a href="(.*)">(.*)</a>""",li)
if links:
for anchor in links :
if self.debug_mode:
print "categorie : "+anchor[1].encode("utf-8")
self.addDir(anchor[1].encode("utf-8"),WEBROOT+(anchor[0].encode("utf-8")),1,"")
    def GET_EMISSIONS_DIR(self,url,iconimage=''): # oh dear, what a headache!!
soup = self.get_soup(url)
html = soup.decode("iso-8859-1")
main_s = common.parseDOM(html,"div",attrs={"id":"main"})
if main_s :
main = main_s[0]
block_videos_s = common.parseDOM (main,"div",attrs={"class":"block-videos"})
for block in block_videos_s :
bvh_titles_s = common.parseDOM(block,"h3",attrs={"class":"bvh-title"})
for bvh in bvh_titles_s :
self.addDir(bvh.encode("utf-8"),url,2,"")
def GET_EPISODES(self,url,name):
xbmcplugin.setContent(int(sys.argv[1]), 'tvshows')
soup = self.get_soup(url)
html = soup.decode("iso-8859-1")
main_s = common.parseDOM(html,"div",attrs={"id":"main"})
if main_s :
main = main_s[0]
block_videos_s = common.parseDOM (main,"div",attrs={"class":"block-videos"})
for block in block_videos_s :
bvh_titles_s = common.parseDOM(block,"h3",attrs={"class":"bvh-title"})
for bvh in bvh_titles_s :
if bvh.encode("utf-8")==name :
Mylist = common.parseDOM(block,"ul",attrs={"class":"bv-list MYlist"})[0]
li_s = common.parseDOM(Mylist,"li")
for li in li_s :
episode_vid = common.parseDOM(li,"a",ret="href")[0]
episode_vid = str(re.findall("""\?vid=(.*)""",episode_vid)[0])
episode_name = common.parseDOM(li,"h4")[0].encode("utf-8")
episode_image = common.parseDOM(li,"img",ret="src")[0].encode("utf-8")
self.addLink(episode_name,episode_vid,3,episode_image)
def GET_VIDEO_CANAL(self,vid,canal):
soup = self.get_soup(CANAL_VIDEOINFO_URL+canal+vid)
xml = soup.decode("utf-8")
video_s = common.parseDOM(xml,"VIDEO")
for video in video_s :
id = common.parseDOM(video,'ID') [0]
if str(id) == str(vid) :
video_url = common.parseDOM(video,"HD")[0]
return video_url
def set_debug_mode(self):
debug =__settings__.getSetting('debug')
if debug == 'true':
self.debug_mode = True
else:
self.debug_mode = False
print "D17 Replay: debug Mode:"
print self.debug_mode
def addLink(self,name,url,mode,iconimage,info={},fanart=FANART_PATH):
u =sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)
ok =True
liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
liz.setProperty('IsPlayable', 'true')
liz.setProperty( "Fanart_Image", fanart)
ok =xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz)
return ok
def addDir(self,name,url,mode,iconimage,info={},fanart=FANART_PATH):
if info == {} :
info = {"Title":name}
u =sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)
ok =True
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels=info )
liz.setProperty( "Fanart_Image", fanart)
ok =xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
def get_params(self):
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
def get_soup(self,url):
req = urllib2.Request(url)
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 5.1; rv:15.0) Gecko/20100101 Firefox/15.0.1')
req.add_header('Referer',url)
soup = urllib2.urlopen(req).read()
if (self.debug_mode):
print str(soup)
return soup
def checkfolder(self,folder):
try:
if not os.path.exists(folder):
print "checkfolder Impossible to find the directory - trying to create the directory: "+folder
os.makedirs(folder)
except Exception, e:
print "Exception while creating folder "+folder
print str(e)
def clean_thumbnail(self,video_url):
try:
filename = xbmc.getCacheThumbName(video_url)
filepath = xbmc.translatePath(os.path.join(THUMB_CACHE_PATH,filename[0],filename))
if os.path.isfile(filepath):
os.remove(filepath)
if self.debug_mode:
print "Deleted %s thumb matching to %s video"%(filepath,video_url)
elif self.debug_mode:
print "No thumb found %s thumb matching to %s video"%(filepath,video_url)
return True
except:
print "Error: clean_thumbnail()"
print_exc()
return False
#######################################################################################################################
# BEGIN !
#######################################################################################################################
if ( __name__ == "__main__" ):
try:
D17Replay()
except:
print_exc()
|
[
"jujul1en094@gmail.com"
] |
jujul1en094@gmail.com
|
98da7301ee8877e6ff6c1b20ba1b0043c82e30e9
|
a2db2ed8f6e982b4d2d1a743e824964ffa386148
|
/accounts/migrations/0022_auto_20171029_1555.py
|
0956576e79cb70e5a12af42bc44a580d25b2ef54
|
[] |
no_license
|
defydef/forum_board
|
ffae964dc9c877963dc1984a29fff15a9f424e53
|
41a46cb58fdc1757ed9329782aefa105849e9c32
|
refs/heads/master
| 2022-12-24T08:27:23.707497
| 2017-11-19T06:48:45
| 2017-11-19T06:48:45
| 111,266,814
| 0
| 0
| null | 2022-12-08T00:34:52
| 2017-11-19T05:20:05
|
Python
|
UTF-8
|
Python
| false
| false
| 760
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-29 04:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0021_auto_20171028_2111'),
]
operations = [
migrations.RemoveField(
model_name='newskill',
name='category',
),
migrations.RemoveField(
model_name='profile',
name='skillcategory',
),
migrations.AddField(
model_name='profile',
name='skill',
field=models.ManyToManyField(to='accounts.NewSkill'),
),
migrations.DeleteModel(
name='SkillCategory',
),
]
|
[
"devy.f.sihaloho@gmail.com"
] |
devy.f.sihaloho@gmail.com
|
5913c16ac7eff4c10d1d7a3590760b8884e2bfc5
|
f857a029ca13d7bcfa957b75c9d73a39ef10703f
|
/Python Brasil/Estrutura sequencial/2.py
|
c10690064e6703b84eda9058318fc9cddd9c486a
|
[] |
no_license
|
Matheus-Morais/Atividades_treino
|
c011989de9cb1dd74bfae873f191e6af546a740f
|
6fceb1c39a23f992e0845e65e8a76eb53b6ff30d
|
refs/heads/master
| 2023-02-24T00:09:58.064600
| 2021-01-27T14:13:05
| 2021-01-27T14:13:05
| 333,433,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 55
|
py
|
numero = int(input('Digite um numero:'))
print(numero)
|
[
"matheus2992morais@gmail.com"
] |
matheus2992morais@gmail.com
|
d98e426c5ffa96200e49a63c91cbb1ac43216323
|
220e3fe31f00df908dc8d00c507400425f924cc3
|
/examples/multi_system/act6/unload.py
|
bf0fcc574b45c2f7fcf2d21c030c21e4aa89ff1f
|
[
"MIT"
] |
permissive
|
danielmitterdorfer/Thespian
|
3ed700d9fc6da35becfe801d3ab3bb68c86bddbc
|
f59439df8a6147b90ec31b44924d6a1b620f09d9
|
refs/heads/master
| 2021-01-19T05:06:33.005708
| 2017-07-31T04:44:03
| 2017-07-31T04:44:03
| 65,544,862
| 0
| 0
| null | 2016-08-12T10:22:29
| 2016-08-12T10:22:29
| null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from thespian.actors import ActorSystem, Actor, ValidateSource, ValidatedSource
import sys
portnum = int(sys.argv[1])
srchash = sys.argv[2]
asys = ActorSystem('multiprocTCPBase', {'Admin Port': portnum})
asys.unloadActorSource(srchash)
|
[
"kquick@godaddy.com"
] |
kquick@godaddy.com
|
ab9de07f610e712458e834dd574d3d92370c62d3
|
70b176a173825ba46a3688bb1f7a98046093f201
|
/SongGrapher.py
|
4261c18adf32c72e831e4c889b4fb8d22cbac5f7
|
[] |
no_license
|
ebovio/MusicMiner
|
b767871db4de47ff9e6411deecac1a5707ba68f5
|
8be7ceb9a31e24344b39b3c86ab03a84a4a9060d
|
refs/heads/master
| 2020-05-18T21:57:33.534620
| 2019-08-14T03:54:16
| 2019-08-14T03:54:16
| 184,677,947
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,006
|
py
|
import pymongo
import numpy as np
import matplotlib.pyplot as plt
dbPath = 'mongodb://localhost:27017/' # connection address
dbName = 'canciones' # database name
colName = 'lista_canciones' # collection name
myclient = pymongo.MongoClient(dbPath)
mydb = myclient[dbName]
mycol = mydb[colName]
year = 1957
year_list = np.array([])
average_valence = np.array([])
valenceStats = {
}
while(year<=2018):
for x in mycol.find( {'year': str(year)} ):
if year not in valenceStats:
valenceStats[year] = np.array([])
valenceStats[year] = np.append(valenceStats[year], x['valence'])
else:
valenceStats[year] = np.append(valenceStats[year], x['valence'])
year_list = np.append(year_list,year)
year +=1
for i in year_list:
average_valence = np.append(average_valence, np.average(valenceStats[i]))
print(average_valence)
plt.plot(year_list,average_valence,'ro')
plt.xlabel('Año')
plt.ylabel('Valencia Promedio')
plt.show()
|
[
"e.bovio08@gmail.com"
] |
e.bovio08@gmail.com
|
46e48392571cf7b50609181560a7a5b5cfd54d72
|
1d665f40197ba89f756e862c0e62a889c42cddfb
|
/commission/migrations/0007_auto_20150407_2034.py
|
2b1be1c3a9965aa2314ab05057b9179433f0c7eb
|
[
"MIT"
] |
permissive
|
Ourinternet/website
|
8d9f9ddfe7d17fb0bb11b978cf3a7cd34af456ed
|
648203c0d0620da2d11b3b0e398ee218b5bef5df
|
refs/heads/master
| 2021-01-21T21:49:06.834576
| 2016-03-16T20:43:58
| 2016-03-16T20:43:58
| 15,683,988
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('commission', '0006_auto_20150407_1825'),
]
operations = [
migrations.AlterField(
model_name='feature',
name='link',
field=models.CharField(max_length=1024, null=True, blank=True),
),
]
|
[
"csimpson@cigionline.org"
] |
csimpson@cigionline.org
|
bdc5fa0179d1b979bd63b233f5b2dcf76cf0b4a1
|
4676aae1f14170150782455b8c664a9fb462ba87
|
/lawbot/teledomain/util.py
|
3f5ba3e5d66c3fa8d541bb54717d1c8c7bd1c126
|
[] |
no_license
|
alages97/contract_translation
|
488fdae9bc237a205f7840229943c6bd08c622de
|
adcf2bf91667a9c77912b7695f986731f1b95957
|
refs/heads/master
| 2021-01-16T17:32:49.834527
| 2020-03-12T14:18:37
| 2020-03-12T14:18:37
| 243,198,277
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,204
|
py
|
import os
import sys
import time
import logging
import glob
#import win32api
import subprocess
import shutil
from pathlib import Path
import getpass
PATH_DIR = os.path.dirname(os.path.realpath(__file__))
PATH_DIR = r"%s" % PATH_DIR
OUTPUT_DIR = os.path.join(PATH_DIR, "./toTransfer/")
LOG_DIR = os.path.join(PATH_DIR, "./teleLogs/")
MOVE_DIR = os.path.join(PATH_DIR,"./testMoveDir/")
# Generate directories if not found
if not os.path.exists(MOVE_DIR):
os.mkdir(MOVE_DIR)
print("Made DIR %s" % MOVE_DIR)
logging.info('util: Made DIR %s' % MOVE_DIR)
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
print("Made DIR %s" % OUTPUT_DIR)
logging.info('util: Made DIR %s' % OUTPUT_DIR)
if not os.path.exists(LOG_DIR):
os.mkdir(LOG_DIR)
print("Made DIR %s" % LOG_DIR)
logging.info('util: Made DIR %s' % LOG_DIR)
def replaceMultiple(mainString, toBeReplaced, newString):
for elem in toBeReplaced:
if elem in mainString:
if elem in "<>-:":
newString =""
mainString = mainString.replace(elem,newString)
return mainString
def moveFolder(source,destination):
listsource = os.listdir(source)
print("Moving files: " + str(listsource))
for name in listsource:
if name == "System Volume Information":
continue
else :
logging.info('util: Moving file: %s' % name + ' to '+ destination)
#Use commandshell for windows, and moveFiles for linux
#CommandShell(OUTPUT_DIR + name,destination)
print(OUTPUT_DIR+name)
moveFiles(OUTPUT_DIR+name,destination+"/"+name)
def numOfDir(source):
d = os.listdir(source)
return len(d)
def removeFilesFromFolder():
folder = OUTPUT_DIR
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
logging.info('util: Removing file: %s' % file_path)
#elif os.path.isdir(file_path): shutil.rmtree(file_path)
except Exception as e:
print(e)
def removeFiles():
files = glob.glob(OUTPUT_DIR)
for f in files:
logging.info('util: Removing file: %s' % f)
os.remove(f)
def CommandShell(folder,destination):
folder = '"'+folder+'"'
destination = '"'+destination+'"'
subprocess.Popen(
[
r"C:\WINDOWS\system32\WindowsPowerShell\v1.0\powershell.exe",
"-ExecutionPolicy",
"Unrestricted",
("Move-Item -Path %s -Destination %s"% (folder,destination)),
]
)
def moveFiles(folder,destination):
#os.rename(folder,destination)
shutil.move(folder,destination)
#os.replace(folder,destination)
def SearchMasterDrive():
#following code for windows, comment out the below LINUX code when using windows
#WINDOWS
# drives = win32api.GetLogicalDriveStrings()
# drives = drives.split('\000')[:-1]
# for drive in drives:
# driveDetails = win32api.GetVolumeInformation(drive)
# driveName = driveDetails[0]
# if "MASTER" not in driveName:
# MOVE_DIR = os.path.join(PATH_DIR,"./testMoveDir/")
# if not os.path.exists(MOVE_DIR):
# os.makedirs(MOVE_DIR)
# logging.info('main: Could not find Master drive, moving files here instead: ' + MOVE_DIR)
# continue
# else:
# MOVE_DIR = drive
# print("Master drive found at %s " % (drive))
# break
# return MOVE_DIR
#LINUX
username = getpass.getuser()
masterPath = '/media/'+username+'/MASTER'
if not os.path.exists(masterPath):
MOVE_DIR = os.path.join(PATH_DIR,"./testMoveDir/")
if not os.path.exists(MOVE_DIR):
os.makedirs(MOVE_DIR)
logging.info('main: Could not find Master drive, moving files here instead: ' + MOVE_DIR)
else :
print("Master drive found at %s " % (masterPath))
MOVE_DIR = masterPath
return MOVE_DIR
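# Editor's note: a small usage example added by the editor, not part of the original module; it
# shows that replaceMultiple drops every character found in "<>-:" because newString is reset
# to "" for those characters.
if __name__ == '__main__':
    print(replaceMultiple("log<1>: final-run", "<>:-", "_"))  # prints 'log1 finalrun'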
|
[
"noreply@github.com"
] |
noreply@github.com
|
044985b9b265586f2b071cc1296c5845a039b17d
|
56b7e5ed6941fc4b83148e00bd51421dc3ac993a
|
/Indeed/Expire Map.py
|
2b1778212c66da456e0bb6bd3e0defd2bbc1db77
|
[] |
no_license
|
samir-0711/Leetcode-Python
|
f960e15015a3f2fd88f723d7f9237945a7133553
|
d75876ae96bcd85c67bbfbf91bbc0f0bc773e97c
|
refs/heads/master
| 2022-12-18T05:27:48.224001
| 2020-09-30T21:03:42
| 2020-09-30T21:03:42
| 300,061,318
| 0
| 0
| null | 2020-09-30T20:59:42
| 2020-09-30T20:59:42
| null |
UTF-8
|
Python
| false
| false
| 722
|
py
|
import time
class Data:
def __init__(self, value, duration):
self.value = value
self.duration = duration
self.startTime = int(round(time.time()))
class ExpireMap:
def __init__(self):
self.map = {}
def get(self, key):
        data = self.map.get(key)
        if data is None:
            return None
        currTime = int(round(time.time()))
        if currTime - data.startTime <= data.duration:
            return data.value
        else:
            # expired: drop the stale entry and report a miss
            del self.map[key]
            return None
def set(self, key, value, duration):
data = Data(value, duration)
self.map[key] = data
test1 = ExpireMap()
test1.set(1, 5, 3)
time.sleep(2)
print(test1.get(1))
time.sleep(2)
print(test1.get(1))
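# Editor's note: one more check added by the editor, not part of the original file; a key read
# back after its duration has elapsed should come back as None.
test2 = ExpireMap()
test2.set('session', 'abc123', 1)   # expires after roughly one second
time.sleep(2)
print(test2.get('session'))         # None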
|
[
"weng8916@gmail.com"
] |
weng8916@gmail.com
|
4868059941e7bf0fd7ddb81c0359474f6b1d0a89
|
4e522c82894fafbbd87997f39eff0e63b63df14c
|
/request.py
|
c2e4470b58b1a7f03f2ee0ebbb6e20513bc7bba2
|
[] |
no_license
|
tejas198606/wine-new
|
9a6be88190ce752394e970287682b0e83d15ccd7
|
aa79ed820ac59cc3fd374322a2d076a25b11cbd3
|
refs/heads/master
| 2022-12-04T10:21:44.235146
| 2020-08-30T06:39:49
| 2020-08-30T06:39:49
| 291,415,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
import requests
url = 'http://localhost:5000/predict_api'
r = requests.post(url,json={'fixedacidity':2.0000,'volatileacidity':6.0000,'citricacid':2.00000,'residualsugar':9.00000,'chlorides':6.00000,
'freesulfurdioxide':9.00000,'totalsulfurdioxide':6.00000,'density':20000,'pH':900000,
'sulphates':60000,'alcohol':60000})
print(r.json())
|
[
"noreply@github.com"
] |
noreply@github.com
|
4986a3100e08387b0cd05dec0ec98e374ed7f5c9
|
b3585d5d5379540a06b146c40dd50b424cc2aa6b
|
/leetCode/isSameTree.py
|
c899e335ee0dbf03fbfa752d2ad4d3ef741d4e58
|
[] |
no_license
|
shivamkaushik12007/practice
|
402e839512099a42bd94426a863f71f3e8d4156c
|
6689bc725d3bc58741b9bcb48cada4c276c4853f
|
refs/heads/master
| 2021-07-10T07:25:42.966500
| 2020-09-26T18:00:23
| 2020-09-26T18:00:23
| 200,655,807
| 1
| 2
| null | 2019-08-05T13:25:04
| 2019-08-05T12:58:24
| null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
if(p==None and q==None):
return True;
if(p==None or q==None or p.val!=q.val):
return False
return self.isSameTree(p.left,q.left) and self.isSameTree(p.right,q.right)
|
[
"noreply@github.com"
] |
noreply@github.com
|
bee96e8e20e7141cc0a9bfd1c6a79a254632d4a3
|
92a1114f93ec0140fd9c60e93ecb39748dc5ac54
|
/Sample Preparation/gray2rgb.py
|
885e42beab92bb852002f4fefbb9de3ab70636e3
|
[
"Apache-2.0"
] |
permissive
|
A-new/ResNet-Packed-Malware-Detection
|
527f92341591421e4cc103ac5157d80f00885b0e
|
b7f9760285246c2ba4e3e6ce8a3b39c3ffbda52f
|
refs/heads/main
| 2023-06-29T17:18:16.753863
| 2021-07-30T07:06:35
| 2021-07-30T07:06:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
import numpy as np
import cv2
src = cv2.imread("<target path and filename>", 0)
src_RGB = cv2.cvtColor(src, cv2.COLOR_GRAY2RGB)
cv2.imshow("2rgb", src_RGB)
cv2.imwrite("<output path and filename>", src_RGB)
cv2.waitKey(0)
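# Editor's note: an in-memory variant added by the editor, not part of the original script; it
# builds a synthetic grayscale image with NumPy so the conversion can be tried without any files.
synthetic_gray = np.zeros((64, 64), dtype=np.uint8)
synthetic_gray[16:48, 16:48] = 255                               # white square on black background
synthetic_rgb = cv2.cvtColor(synthetic_gray, cv2.COLOR_GRAY2RGB)
print(synthetic_rgb.shape)                                       # (64, 64, 3)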
|
[
"noreply@github.com"
] |
noreply@github.com
|
00bde05cade22c41d0a35433fd6cb5452820be66
|
c8cf17465cfaf9858fe79de7d56841564226b67b
|
/Block.py
|
e4178e03c1e4acc98c64e32caf7b2e4c82c9f5db
|
[
"MIT"
] |
permissive
|
bullgom/Network-Project
|
29ebeb6699486ecb3528a05f91592b947c3488bd
|
a5a0ffad006c67c9ddbb769bb1d3c557c15d344d
|
refs/heads/master
| 2020-03-07T21:30:40.895642
| 2018-07-08T09:15:05
| 2018-07-08T09:15:05
| 127,728,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
import pygame as pyg
from GUI.base import BaseWidget
from Locals import *
class Block(BaseWidget):
def __init__(self, pos, size, name, id, imageDirectory, level, anchor=CENTER):
super().__init__(pos,size,anchor=anchor)
self.name = name
self.id = id
self.image = pyg.image.load(imageDirectory).convert_alpha()
self.image = pyg.transform.scale(self.image,size)
self.level = level #If level < 0 then lower than character
def render(self, surface):
surface.blit(self.image, self.as_rect())
|
[
"noreply@github.com"
] |
noreply@github.com
|
c36195265104ac0d70f7475b9cbc3d7d62808045
|
8ed85fda69449832e6edc1ed44694eda8d953e98
|
/ml/GestureRecognizer.py
|
d977e678e5da6740d1f21955df1f58ccdee4c26a
|
[] |
no_license
|
rajeevku02/exp
|
4bad7bb69c3c8a45a11a5136a55d0895349d2d23
|
518e8ddea9a0e0eed37065ce8d4304bd83ca282c
|
refs/heads/main
| 2023-09-04T16:56:02.083630
| 2021-11-24T09:20:47
| 2021-11-24T09:20:47
| 410,766,694
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,324
|
py
|
import numpy as np
from tensorflow import keras
from Gestures import *
from geometry import dist
from Util import log, pt
from drag_2_gesture import check_drag_2, deactivate_drag2
from drag_1_gesture import check_drag_1, deactivate_drag1
gestures_names = {
0: 'drag1',
1: 'drag2',
2: 'thumb',
3: 'pinch',
4: 'thumb_index',
5: 'open',
6: 'other'
}
class GestureRecognizer:
def __init__(self):
self.model = keras.models.load_model('models/trained_model')
self.drag1_gesture = Drag1Gesture()
self.drag2_gesture = Drag2Gesture()
self.thumb_gesture = ThumGesture()
self.pinch_gesture = PinchGesture()
self.noop_gesture = Gesture('noop')
def predict(self, landmarks):
arr = []
for item in landmarks:
arr.append(item.x)
arr.append(item.y)
arr.append(item.z)
out = self.model.predict(np.array(arr).reshape([1, -1]))
mx = np.argmax(out, axis=-1)
idx = int(mx[0])
#print(gestures_names[idx])
return idx
def get(self, landmarks):
idx = self.predict(landmarks)
pts = [pt(p) for p in landmarks]
ges = self.check_drag2(idx, pts)
if ges is not None:
deactivate_drag1()
return ges
ges = self.check_drag1(idx, pts)
if ges is not None:
return ges
ges = self.check_thumb(idx, pts)
if ges is not None:
return ges
ges = self.check_pinch(idx, pts)
if ges is not None:
return ges
return self.noop_gesture
def check_pinch(self, idx, pts):
if idx == 3:
return self.pinch_gesture
return None
def check_thumb(self, idx, pts):
if idx == 2:
return self.thumb_gesture
return None
def check_drag1(self, idx, pts):
if not (idx == 0 or idx == 4 or idx == 5):
deactivate_drag1()
return None
if check_drag_1(pts):
return self.drag1_gesture
return None
def check_drag2(self, idx, pts):
if not (idx == 1 or idx == 2 or idx == 4):
deactivate_drag2()
return None
if check_drag_2(pts):
return self.drag2_gesture
return None
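# Editor's note: a model-free sketch added by the editor, not part of the original module; it
# mimics the flatten-and-argmax flow of GestureRecognizer.predict() with random numbers standing
# in for the Keras model, so it runs without the models/trained_model files.
if __name__ == '__main__':
    np.random.seed(0)
    fake_landmarks = np.random.rand(21, 3)                 # e.g. 21 landmark points, (x, y, z) each
    flat = fake_landmarks.reshape([1, -1])                 # shape (1, 63), as fed to the model
    fake_scores = np.random.rand(1, len(gestures_names))   # stand-in for model.predict(flat)
    idx = int(np.argmax(fake_scores, axis=-1)[0])
    print(gestures_names[idx])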
|
[
"rajeevku02@gmail.com"
] |
rajeevku02@gmail.com
|
447949c77b5e8715fdf2eafed6ecb92897e81cab
|
f75c0721ab885cec9d269bba798803197cc78787
|
/age_scraper.py
|
f6be723b7c0d9633c5a33100c38a1db7b697ddd3
|
[] |
no_license
|
shravan-shandilya/game-of-death
|
b635a51f327e5bb45d183262bb315eb61aa12418
|
59d45e053031ab9023d7da3d1538212aaace64df
|
refs/heads/master
| 2022-02-11T17:18:14.074438
| 2016-06-22T13:51:41
| 2016-06-22T13:51:41
| 53,967,559
| 1
| 0
| null | 2022-01-13T00:48:38
| 2016-03-15T18:09:21
|
CSS
|
UTF-8
|
Python
| false
| false
| 542
|
py
|
#!/usr/bin/python
from bs4 import BeautifulSoup
import requests
base_url = "http://gameofthrones.wikia.com/wiki/"
char_file = open("char_data.txt","r")
for char in char_file:
char = char.split(",")[0].replace(" ","_")
soup = BeautifulSoup(requests.get(base_url+char).content,"html.parser")
results = soup.find_all("div",{"class":"pi-item pi-data pi-item-spacing pi-border-color"})
for res in results:
try:
if res.h3.contents[0] == "Age":
print char,":",res.div.contents[0],"\n"
except AttributeError:
print char," missing"
|
[
"s.shravan95@gmail.com"
] |
s.shravan95@gmail.com
|
eaff76abf8820739330402fe77b0aff5538045b0
|
cde14b5c9ed4fec317abfee4611af59b4967dbef
|
/team-separation/src/rooms.py
|
218974a62f6c80ca12b291093abf41c2617ff79d
|
[] |
no_license
|
hackohio/judging
|
de949e582b500b0fb0c9989ad85cad4c80645a3a
|
2adba3e2c11daa356ba0f2b3caa73e06860950ea
|
refs/heads/master
| 2020-09-01T08:47:14.179356
| 2019-11-03T12:21:45
| 2019-11-03T12:21:45
| 218,923,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
BALLROOM = 'Grand Ballroom (2nd floor)'
SENATE = 'Senate Chamber (2nd floor)'
TRADITIONS = 'Traditions Room(2nd Floor)'
CARTOON = 'Cartoon Room(3rd Floor)'
|
[
"kelly.wu.98@gmail.com"
] |
kelly.wu.98@gmail.com
|
aa196ae79a573731a31b45d1c19f8134b2e2a7bc
|
effa594367e5760dd2800a0a9707c2f26c3d4bd4
|
/connection.py
|
b767752708bdf4a89b2cd7e67f3026ae08556210
|
[
"MIT"
] |
permissive
|
diogocanut/blockchain-sniffer
|
5e2f5595e7e2f5e283c44c2cbcf4049996d5049d
|
8d14844ee2a508e1d5e931c515a27171832ae5cc
|
refs/heads/master
| 2023-03-28T17:10:53.027427
| 2021-04-03T18:11:02
| 2021-04-03T18:11:02
| 147,882,805
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,694
|
py
|
# Bitcoin P2P network transactions analyser
#
# This file is based on https://github.com/sebicas/bitcoin-sniffer by @sebicas
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import asyncore
import socket
import struct
import time
from StringIO import StringIO
from serializers import *
from event import Event
class Connection(asyncore.dispatcher):
messagemap = {
"version": msg_version,
"verack": msg_verack,
"addr": msg_addr,
"alert": msg_alert,
"inv": msg_inv,
"getdata": msg_getdata,
"getblocks": msg_getblocks,
"tx": msg_tx,
"block": msg_block,
"getaddr": msg_getaddr,
"ping": msg_ping
}
def __init__(self, host, database):
asyncore.dispatcher.__init__(self)
self.dstaddr = host[0]
self.dstport = host[1]
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = ""
self.recvbuf = ""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.event = Event(database)
vt = msg_version()
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
print("\n Blockchain transactions analyzer")
print("Connection to peer: ", self.dstaddr)
try:
self.connect((self.dstaddr, self.dstport))
except:
self.handle_close()
def handle_connect(self):
print("Connection realized\n")
self.state = "connected"
def handle_close(self):
print("Ending connection")
self.state = "closed"
self.recvbuf = ""
self.sendbuf = ""
try:
self.close()
except:
pass
self.__init__
def handle_read(self):
try:
t = self.recv(8192)
except:
self.handle_close()
return
if len(t) == 0:
self.handle_close()
return
self.recvbuf += t
self.got_data()
def readable(self):
return True
def writable(self):
return (len(self.sendbuf) > 0)
def handle_write(self):
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != "\xf9\xbe\xb4\xd9":
raise ValueError("Got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 20:
return
command = self.recvbuf[4:16].split("\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[16:20])[0]
checksum = None
if len(self.recvbuf) < 20 + msglen:
return
msg = self.recvbuf[20:20 + msglen]
self.recvbuf = self.recvbuf[20 + msglen:]
else:
if len(self.recvbuf) < 24:
return
command = self.recvbuf[4:16].split("\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[16:20])[0]
checksum = self.recvbuf[20:24]
if len(self.recvbuf) < 24 + msglen:
return
msg = self.recvbuf[24:24 + msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("Bad checksum {}".format(repr(self.recvbuf)))
self.recvbuf = self.recvbuf[24 + msglen:]
if command in self.messagemap:
f = StringIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
print("Unknown command {}".format(command))
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
return
command = message.command
data = message.serialize()
tmsg = "\xf9\xbe\xb4\xd9"
tmsg += command
tmsg += "\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if self.last_sent + 30 * 60 < time.time():
self.send_message(msg_ping())
if message.command == "version":
if message.nVersion >= 209:
self.send_message(msg_verack())
self.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
self.ver_recv = self.ver_send
elif message.command == "verack":
self.ver_recv = self.ver_send
elif message.command == "inv":
want = msg_getdata()
for i in message.inv:
if i.type == 1:
want.inv.append(i)
elif i.type == 2:
want.inv.append(i)
if len(want.inv):
self.send_message(want)
elif message.command == "tx":
self.event.new_transaction(message.tx)
elif message.command == "block":
self.event.new_block(message.block)
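# --- Usage sketch (not part of the original file) ---
# A minimal, hypothetical driver for the Connection dispatcher defined above:
# it assumes a reachable peer address and whatever database handle event.Event
# expects, then pumps the asyncore loop so handle_read/handle_write fire.
# The host, port and the None database below are illustrative assumptions only.
#
# if __name__ == '__main__':
#     peer = ("127.0.0.1", 8333)           # assumed local bitcoind peer
#     conn = Connection(peer, database=None)
#     asyncore.loop()                      # dispatch network events until interrupted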
|
[
"diogocanut@hotmail.com"
] |
diogocanut@hotmail.com
|
05b6a07425082a6af963320ba3ad7ce4ae2bf435
|
c885e60f9a86dc636b43bfd28e86162bd6d68806
|
/Students/xml与json数据之间的转换.py
|
f040c1ae36c13cd0a5c7c87322ba7c132fb33c45
|
[] |
no_license
|
zhaopengtian/requesttest
|
4eaa235293447cac39964ab383e77436cd70f81c
|
df3ca7a4ad4bd8b5cf67efbc9aff4ee7ad8242f8
|
refs/heads/master
| 2023-09-05T12:30:39.077101
| 2021-11-08T10:27:55
| 2021-11-08T10:27:55
| 424,950,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,064
|
py
|
# First install xmltodict: python3 -m pip install xmltodict
import xmltodict
import json
# Define the XML-to-JSON conversion function
def xmltojson(xmlstr):
xmlparse = xmltodict.parse(xmlstr) # parse() is the XML parser
jsonstr = json.dumps(xmlparse,indent=2,sort_keys=True)
return jsonstr
# Define the JSON-to-XML conversion function
def jsontoxml(jsonstr):
xmlstr = xmltodict.unparse(jsonstr)
return xmlstr
if __name__ == '__main__':
xmlinfo = """
<student>
<bokeid>fighter006</bokeid>
<bokeinfo>
<cnbologsname>laolu</cnbologsname>
<page>120</page>
</bokeinfo>
<data>
<address>http://www.baidu.com</address>
<title>python+dt+requests</title>
</data>
</student>
"""
aa = {
"student": {
"bokeid": "fighter006",
"bokeinfo": {
"cnbologsname": "laolu",
"page": "120"
},
"data": {
"address": "http://www.baidu.com",
"title": "python+dt+requests"
}
}
}
xtoj = xmltojson(xmlinfo)
print('XML to JSON:',xtoj)
jtox = jsontoxml(aa)
print('JSON to XML:',jtox)
|
[
"chinaume@163.com"
] |
chinaume@163.com
|
a4df80ef0342700b0d72315bfaa9dafc12385988
|
87666a8920b134f2cd0815c9c127c4fa92e98b1b
|
/rover_project/test/test_reader_read_rover_starting_position.py
|
86d1b9dcfed5a8c3d37cd34b1327ffa5e5edf81b
|
[] |
no_license
|
gacrta/backend-rover-challenge
|
7f6d617eaa3528f3151af8ffdfedb33eb71162d7
|
1d3c690c908e485faeffde912aa73227b0490da4
|
refs/heads/master
| 2020-04-21T05:30:06.572240
| 2019-02-11T09:15:19
| 2019-02-11T09:15:19
| 169,342,584
| 0
| 0
| null | 2019-02-06T01:54:44
| 2019-02-06T01:54:44
| null |
UTF-8
|
Python
| false
| false
| 1,679
|
py
|
from rover_project import reader
import unittest
class TestReaderReadRoverStartingPosition(unittest.TestCase):
""" Test Class for Reader.read_rover_starting_position
method at reader module.
"""
file_path = 'rover_project/tests/'
def test_reader_read_x_coord(self):
"""
Test if reader gets correct x_coord from
a file that contains 3 1 N information.
"""
filename = TestReaderReadRoverStartingPosition.file_path + "test_reader_rover_pos.txt"
with reader.Reader(filename) as r:
x_coord, y_coord, direction = r.read_rover_starting_position()
self.assertEqual(x_coord, 3)
def test_reader_read_y_coord(self):
"""
Test if reader gets correct y_coord from
a file that contains 3 1 N information.
"""
filename = TestReaderReadRoverStartingPosition.file_path + "test_reader_rover_pos.txt"
with reader.Reader(filename) as r:
x_coord, y_coord, direction = r.read_rover_starting_position()
self.assertEqual(y_coord, 1)
def test_reader_read_direction(self):
"""
Test if reader gets correct direction from
a file that contains 3 1 N information.
"""
filename = TestReaderReadRoverStartingPosition.file_path + "test_reader_rover_pos.txt"
with reader.Reader(filename) as r:
x_coord, y_coord, direction = r.read_rover_starting_position()
self.assertEqual(direction, 'N')
def test_reader_wrong_input(self):
"""
Test that the reader handles wrong input without
crashing.
"""
filename = TestReaderReadRoverStartingPosition.file_path + "test_reader_wrong_input.txt"
with reader.Reader(filename) as r:
self.assertRaises(ValueError, r.read_upper_right_coordinates)
if __name__ == "__main__":
unittest.main(exit=False)
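# Fixture sketch (an assumption inferred from the docstrings above, not part of
# the original repository): the tests expect test_reader_rover_pos.txt to hold a
# rover starting position line "3 1 N", and test_reader_wrong_input.txt to hold
# malformed coordinates so that read_upper_right_coordinates raises ValueError.
#
# with open('rover_project/tests/test_reader_rover_pos.txt', 'w') as f:
#     f.write("5 5\n")    # hypothetical upper-right grid coordinates
#     f.write("3 1 N\n")  # starting position asserted by the tests above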
|
[
"gabrielcrta@gmail.com"
] |
gabrielcrta@gmail.com
|
21aaffec3ed8892eaf0a660128ffde4513149715
|
5ae5026dcbaddf976fa925338fb07d498bcc7f11
|
/ncvoter/wsgi.py
|
f2d906e89e65828a66c8c05f414c5340e5a1110e
|
[
"MIT"
] |
permissive
|
calebsmith/voters-ingestor
|
de947aa0aa6218b077f71fed8acfa2ccc68590ea
|
175a1195ec5b7402bf952ed28cff110e504982bb
|
refs/heads/master
| 2021-07-23T08:14:05.005640
| 2017-11-02T23:40:54
| 2017-11-02T23:40:54
| 109,330,315
| 0
| 0
| null | 2017-11-02T23:42:16
| 2017-11-02T23:42:16
| null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for ncvoter project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ncvoter.prod_settings")
application = get_wsgi_application()
|
[
"caleb.smithnc@gmail.com"
] |
caleb.smithnc@gmail.com
|
b891a21e50fd7f9a52706f2b802ad343cca4ea72
|
c1bd12405d244c5924a4b069286cd9baf2c63895
|
/azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/models/compute_management_client_enums.py
|
94796a92c7936618c37a51b7bf0ec2a9b37639ee
|
[
"MIT"
] |
permissive
|
lmazuel/azure-sdk-for-python
|
972708ad5902778004680b142874582a284a8a7c
|
b40e0e36cc00a82b7f8ca2fa599b1928240c98b5
|
refs/heads/master
| 2022-08-16T02:32:14.070707
| 2018-03-29T17:16:15
| 2018-03-29T17:16:15
| 21,287,134
| 1
| 3
|
MIT
| 2019-10-25T15:56:00
| 2014-06-27T19:40:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,085
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class StorageAccountTypes(str, Enum):
standard_lrs = "Standard_LRS"
premium_lrs = "Premium_LRS"
class OperatingSystemTypes(str, Enum):
windows = "Windows"
linux = "Linux"
class DiskCreateOption(str, Enum):
empty = "Empty"
attach = "Attach"
from_image = "FromImage"
import_enum = "Import"
copy = "Copy"
restore = "Restore"
class SnapshotStorageAccountTypes(str, Enum):
standard_lrs = "Standard_LRS"
premium_lrs = "Premium_LRS"
standard_zrs = "Standard_ZRS"
class AccessLevel(str, Enum):
none = "None"
read = "Read"
|
[
"noreply@github.com"
] |
noreply@github.com
|
f8f461746d356c6b4f3a0fdabc67b71a89a74e00
|
1fcd563548cc6c54f40a9a7a8b5629db480aef7d
|
/database_handler.py
|
9ae295caff7f1c18e87e783de23e9ab5ff4b7b4f
|
[] |
no_license
|
benno0810/finance_data_scrapy
|
b350954f6e38033eb3a1be7b2114818ac0fdcca8
|
767d6231a382f5df241eaf58a1e51e7c1b696f82
|
refs/heads/main
| 2023-02-09T04:06:11.599816
| 2020-12-29T05:34:36
| 2020-12-29T05:34:36
| 319,832,871
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,446
|
py
|
import pymongo
import time
class DB():
def __init__(self,db_type='MongoDB',db_address='mongodb://localhost:27017/',db_name='db_test',table_name='col_test'):
self.db_address=db_address
self.db_type=db_type
self.db_name=db_name
self.table_name=table_name
def connect(self):
pass
def insert_one(self):
pass
def delete_one(self):
pass
def test_connection(self):
pass
class ProxyPool_DB(DB):
def __init__(self,db_type='MongoDB',db_address='mongodb://localhost:27017/',db_name='proxy_pool',table_name='proxy_col'):
super().__init__(db_type,db_address,db_name,table_name)
self.client = pymongo.MongoClient(self.db_address)
self.db=self.client[self.db_name]
self.col=self.db[self.table_name]
self.collist=self.db.list_collection_names()
if self.table_name in self.collist:
print('Collection already exists: {}'.format(self.table_name))
else:
line={
'ip_address':'127.0.0.1:30300',
'expires_time': time.time()
}
x=self.col.insert_one(line)
print(x)
def test_connection(self):
return True
def insert_one(self,line:dict):
super().insert_one()
if self.test_connection() and line.get('ip_address'):
if not line.get('expires_time'):
# If no expiry timestamp is given, default it to 180 seconds from now
line['expires_time']=time.time()+180
x=self.col.insert_one(line)
print(x)
def delete_many(self,myquery:dict):
x = self.col.delete_many(myquery)
print(x.deleted_count, "documents deleted")
def delete_one(self,myquery:dict):
super().delete_one()
def find_many(self,myquery:dict):
x=self.col.find(myquery)
return x
def aggregate(self,myquery:list):
x=self.col.aggregate(myquery)
return x
if __name__=='__main__':
db_test = ProxyPool_DB()
line_test={
'ip_address':'127.0.0.1:30031',
'expires_time':time.time()-100
}
#db_test.insert_one(line_test)
myquery={
'ip_address':'127.0.0.1:30031'
}
myquery2={}
#=list(db_test.find_many(myquery2))
x=db_test.col.estimated_document_count()
print(x)
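# Usage sketch (not part of the original file): purging expired proxy records
# with the delete_many helper defined above. The query relies only on the
# standard pymongo $lt operator and assumes a local MongoDB instance is running.
#
# expired_query = {'expires_time': {'$lt': time.time()}}
# db_test.delete_many(expired_query)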
|
[
"benno0810@gmail.com"
] |
benno0810@gmail.com
|
495d0a5cabcec1b443839fa4e8201c4e9afae6dd
|
eefbfe5a3f0d655177fd3c17335ae1100e8398bd
|
/Server/structure/ShellInterface.py
|
1fc7c51d327e3d4260d3c8a63c94a1262f4780f0
|
[] |
no_license
|
mkmagic/BCI_API
|
bd3c92c6162a29f2bfd37322e35c60a9446e1551
|
aef94cc14d65e915dd97ce66d06542a4587d04f6
|
refs/heads/master
| 2020-06-21T22:24:13.059550
| 2020-03-12T11:21:46
| 2020-03-12T11:21:46
| 197,566,030
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,587
|
py
|
"""
The Shell Interface module utilizes Python's argparse module to create shell-like programs.
To create a shell-like program, copy the template Interface class provided in this file
and follow the instructions marked by # comments.
Author: Hayun, Yoav
E-mail: yoavhayun@gmail.com
"""
from __future__ import print_function, with_statement
from abc import ABCMeta, abstractmethod
import os, sys
import xml.etree.ElementTree as ET
import argparse
import platform, traceback , shlex
from datetime import datetime
import threading, traceback
import importlib
import time
import codecs
from collections import OrderedDict  # needed by XMLParser group parsing below
from .Completer import Completer
try:
import builtins
__builtin__ = builtins
except:
import __builtin__
# try:
# try:
# import readline
# except:
# import pyreadline as readline
# except:
# try:
# from pip import main as pipmain
# except:
# from pip._internal import main as pipmain
# error = pipmain(['install', "pyreadline"])
# if not error:
# try:
# import readline
# except:
# import pyreadline as readline
# else:
# sys.exit(error)
class ShellCompleter(Completer):
def __init__(self, controllerInterface):
self.controller = controllerInterface
super(ShellCompleter, self).__init__()
def delegateControl(self, subparsers, id, interface):
self.enterId = id
parser = subparsers.add_parser(id, add_help=False)
parser.set_defaults(delegate=interface)
interface.completer.head.id = id
interface.completer.head.keywords = [id]
self.cur.addCommand(interface.completer.head)
return self
from structure.colors import colors
from structure.keys import Keys
class ShellInterface():
__metaclass__ = ABCMeta
LAST_EXECUTER_NAME = ["Shell Interface"]
END_CMDS = set(["q", "quit", "exit"])
READ_CMDS = set([".read", ".r"])
FILE_COMMENT = '#'
FILE_COLORS = {code + '#': color for code, color in [i for i in vars(colors.fg).items() if not i[0].startswith('_')]}
@abstractmethod
def buildParser(self):
"""
Builds the Interface's argument parser
"""
pass
def preprocessArguments(self):
"""
Preprocesses the arguments that were passed to the Interface
@Return whether or not the preprocessing was successful
"""
return True
def manageUnparsed(self, unparsed):
"""
Handles the arguments that couldn't be parsed by the Interface's arguments parser
@unparsed List of unparsed arguments
@Return whether or not the parsing was successful
"""
return len(unparsed) == 0
def __init__(self, name, version=None, description=None, logFile="ShellInterface.log", xmlConfiguration=None):
"""
Interface Constructor
@name The name of the interface
@version The current version of the interface
@description A description of the interface
@logFile The default log file of the interface
@xmlConfiguration A path to an XML configuration file, content saved in self.CONF
"""
self.parent = []
self.FLAGS = argparse.Namespace()
self.input = ''
self.XMLParser = ShellInterface.XMLParser("value", "name", lambda v: ShellInterface.XMLParser.extractionCast(v, "type"))
self.CONF = self.loadXmlConfiguration(xmlConfiguration, section=name) if xmlConfiguration else argparse.Namespace()
self.isFile = False
self.success = True
self.logLocks = {}
self.logFile = logFile
self.initLog()
self.keys = None
self.name = name if name else os.path.basename(__file__).split(".")[0]
self.version = version
self.description = "{}{}{}".format(self.name,
' v' + self.version if self.version else '',
': ' + description if description else '')
self.parser = argparse.ArgumentParser(description=self.description, add_help=False, formatter_class=argparse.RawTextHelpFormatter)
self.parser.add_argument("-h", "--help", action='store_true')
self.completer = ShellCompleter(self)
self.buildParser()
with self.completer.branch_out(".read"):
self.completer.branch_out("path", type=self.completer.BranchType.PATH)
self.completer.branch_out("--help")
with self.completer.branch_out(self.FILE_COMMENT, complete=False):
self.completer.branch_out("Line to print" , [])
for colorCode in self.FILE_COLORS:
with self.completer.branch_out(self.FILE_COMMENT + colorCode, hidden=True):
self.completer.branch_out("Line to print", [])
@abstractmethod
def execute(self):
"""
The main method of the Interface.
It's called whenever a shell command is entered or Interface.run() is called with argv.
@Return whether or not the execution was successful
"""
return True
def _close(self):
"""
This method is called whenever the interface closes
"""
self.close()
@abstractmethod
def close(self):
"""
This method is called whenever the interface closes
"""
pass
def initLog(self, logFile=None):
"""
Create an empty log file.
If the file exists, this will overwrite it.
@logFile If given, will init the given log file and not the default
"""
logFile = logFile if logFile is not None else self.logFile
if logFile is not None:
if(os.path.isfile(logFile)):
os.remove(logFile)
open(logFile, "a").close()
def deleteLog(self, logFile=None):
"""
Deletes a logFile from the disk
@logFile If given, will delete the given log file and not the default
"""
logFile = logFile if logFile is not None else self.logFile
if(os.path.isfile(self.logFile)):
os.remove(self.logFile)
def showLog(self, logFiles=[], logLevel=0, lineNumber=0, online=False, inputHandler=None):
"""
Displays a log file on the screen
@logFiles List of files, If given, will show the given files instead of the default log file
@logLevel Show all log prints with (log level <= given log level)
@lineNumber Display log from a given line number instead of the beginning
@online Whether or not the keep displaying the log as it updates from an external source
until a KeyboardInterrupt event
@inputHandler a handler function to handle incoming input
@Return the last printed line number
"""
logFiles = logFiles if len(logFiles) > 0 else [self.logFile]
try:
if inputHandler is not None:
prompt = self.getPrompt()
if len(logFiles) == 1:
prompt = colors.bold
prompt += os.path.split(logFiles[0])[-1].split('.')[0]
prompt += "> " + colors.reset
inputHandler = ShellInterface.InputHandler(prompt, inputHandler, self.keys)
printers = {}
for logFile in logFiles:
if online:
printers[logFile] = ShellInterface.LogPrinter(logFile, lineNumber)
printers[logFile].start(logLevel)
else:
with open(logFile, 'r') as log:
[log.readline() for i in range(lineNumber)]  # skip already-displayed lines
ShellInterface.LogPrinter.printLog(log, logLevel)
while(True):
if inputHandler is not None and not inputHandler.isWorking:
break
time.sleep(0)
except KeyboardInterrupt:
pass
finally:
if inputHandler is not None:
inputHandler.stop()
for printer in printers:
printers[printer].stop()
@staticmethod
def tryExecution(task, maxTries, expecting=Exception):
tries = 0
while(tries < maxTries):
try:
task()
return True
except expecting:
tries += 1
return False
@staticmethod
def _logMsgTask(logFile, descriptor, message):
with open(logFile, 'a') as log:
log.write("{} {}\n".format(descriptor, message))
def log(self, message, logFile=None, logLevel=0, error=False, id=None, timestamp=None, maxTries=1):
"""
This method prints a message to the log file
@message The message to log
@logFile If given, will print to the given file instead of the default log file
@logLevel The minimal logLevel needed to display this message
@error Whether or not this message is an error message
@id An id of what produced this message
@timestamp Whether or not to include a timestamp in the log print
"""
logFile = logFile if logFile is not None else self.logFile
if logFile is not None:
if logFile not in self.logLocks:
self.logLocks[logFile] = threading.Lock()
message = "{}".format(message) if error else message
descriptor = "{}::".format(logLevel)
descriptor = "{}[{}]".format(descriptor, timestamp) if timestamp is not None else descriptor
descriptor = "{}[{}]".format(descriptor, id) if id is not None else descriptor
descriptor = "{} ERROR: ".format(descriptor) if error else descriptor
with self.logLocks[logFile]:
logTask = lambda : ShellInterface._logMsgTask(logFile, descriptor, message)
if not ShellInterface.tryExecution(logTask, maxTries, PermissionError):
self.log("Unable to log message in '{}': {}".format(logFile, message.strip()), error=True)
def __str__(self):
"""
@Return a description of the interface
"""
return self.description
def readCommands(self, file):
"""
Executes argument lines from a file
@file Path to file containing argument lines to be executed by the interface
@Return whether or not the execution was successful
"""
try:
if os.path.isfile(file):
lines = []
with open(file, mode='r') as f:
lines = f.readlines()
self.isFile = True
self.__shell(inputLines=lines)
self.isFile = False
else:
ShellInterface.printError("'{}' is not a file".format(file))
except:
ShellInterface.printError("Could not read file '{}'".format(file))
self.isFile = False
return False
return self.success
def __createFlags(self):
"""
Creates self.FLAGS for the Interface
@Return whether or not the creation of flags was successful
"""
self.__unparsed = []
try:
mem = {}
if hasattr(self.FLAGS, "MEMORY"):
for arg in self.FLAGS.MEMORY:
if hasattr(self.FLAGS, arg):
mem[arg] = getattr(self.FLAGS, arg)
self.FLAGS, self.__unparsed = self.parser.parse_known_args(args=self.input, namespace=self.FLAGS)
for arg in self.FLAGS.MEMORY:
if not arg in mem:
mem[arg] = self.FLAGS.MEMORY[arg]
if arg in mem:
if not hasattr(self.FLAGS, arg) or getattr(self.FLAGS, arg) is None:
setattr(self.FLAGS, arg, mem[arg])
except SystemExit:
if int(str(sys.exc_info()[1])) != 0:
self.success = False
return False
return True
def __processArgs(self):
if not self.manageUnparsed(self.__unparsed):
ShellInterface.printError("The arguments {} are unknown".format(self.__unparsed))
if self.isFile:
self.success = False
return False
if not self.preprocessArguments():
ShellInterface.printError("Failed in preprocessing of '{}'.".format(self.inputLine.strip()))
if self.isFile:
self.success = False
return False
return True
def __resetFlags(self):
"""
Resets self.FLAGS of the Interface
"""
for arg in self.FLAGS.__dict__:
if arg == 'MEMORY':
continue
if hasattr(self.FLAGS, 'MEMORY') and arg not in self.FLAGS.MEMORY:
setattr(self.FLAGS, arg, None)
def runLine(self, line):
"""
Parse and execute a single argument line
@line argument line to parse and execute
@Return whether or not the execution was successful
"""
ShellInterface.LAST_EXECUTER_NAME.append(self.name)
isLastLine = False
self.__resetFlags()
self.inputLine = line
self.input = shlex.split(line, posix=(platform.system()!='Windows'))
if self.inputLine.startswith(self.FILE_COMMENT):
toPrint = self.inputLine[1:].strip()
availableColors = [k for k in vars(colors.fg).items() if not k[0].startswith('_')]
for code in self.FILE_COLORS:
if toPrint.lower().startswith(code):
toPrint = self.FILE_COLORS[code] + toPrint[len(code):].strip() + colors.reset
break
print(toPrint)
elif len(self.input) > 0:
if self.input[0] in ShellInterface.END_CMDS and not self.isFile:
isLastLine = True
elif self.input[0] in ShellInterface.READ_CMDS:
expArgs = 2
if len(self.input) < expArgs:
ShellInterface.printError("Read command accepts a path as an argument.")
else:
self.readCommands(' '.join(self.input[1:]))
else:
if self.__createFlags():
if hasattr(self.FLAGS, "delegate") and self.FLAGS.delegate:
hasKeys = self.keys is not None
if hasKeys: self.keys.close()
self.callOtherInterface(self.FLAGS.delegate ,self.input[1:])
#if hasKeys: self.keys = Keys(self.name, intro=self.getUsage())
elif self.FLAGS.help:
self.parser.print_help()
else:
if self.__processArgs():
self.success = self.execute()
return isLastLine
def getUsage(self):
usage = ''
usage += colors.fg.yellow + '\n'
usage += self.description + '\n'
usage += colors.reset
usage += "\tTo exit, enter one of the following {}\n".format([cmd for cmd in ShellInterface.END_CMDS])
usage += "\tto read commands from a file, enter one of the following {}\n".format([cmd for cmd in ShellInterface.READ_CMDS])
usage += colors.bold + '\n'
usage += "\tTip: At any time, add '-h' flag to the command for help.\n"
usage += colors.reset
return usage
def printUsage(self):
"""
Prints the welcome usage information of the interface
"""
print(self.getUsage())
def setMarkerView(self):
sys.stdout.write("\033[2A")
sys.stdout.flush()
def unsetMarkerView(self):
sys.stdout.write("\033[2B")
sys.stdout.flush()
def getPrompt(self, parent=[]):
shellPromptMsg = "{}> ".format('\\'.join(parent + [self.name]))
return colors.bold + shellPromptMsg + colors.reset
def __shell(self, inputLines=None):
"""
Runs the Interface as a shell program
@parent the name of the parent Interface
@inputLines a pre set list of input lines
@Return whether or not the last input line was successful
"""
if not self.isFile:
self.keys = Keys(self.name, intro=self.getUsage())
self.printUsage()
try:
shellPromptMsg = self.getPrompt(self.parent)
while inputLines is None or len(inputLines) > 0:
if inputLines is None:
print()
try:
inputLine = inputLines.pop(0) if inputLines else self.keys.readInput(shellPromptMsg, self.completer)
except EOFError:
break
try:
lastLine = self.runLine(inputLine)
if lastLine:
break
if not self.success:
if self.isFile:
ShellInterface.printError("Command Failed, Aborting execution from file")
break
else:
ShellInterface.printError("Command Failed")
self.success = True
except SystemExit:
if int(str(sys.exc_info()[1])) != 0:
raise
except:
traceback.print_exc()
sys.exit(1)
finally:
if not self.isFile:
self.keys.close()
return self.success
def loadXmlConfiguration(self, xml, section=None):
"""
Loads an XML configuration file into the interface.
@xml A path to an XML file
@section Specify to load a specific section in the XML only
@Return an argparse Namespace containing the values extracted from XML
XML Structure:
section : Groups arguments together
name - name of the section
[Content] - 'import', 'value' and 'group' elements
import : Includes another section in the current section
section - section name to import
[Content] - None
value : Holds a value for the interface to use
name - Access name for the value
type - A casting method to apply on the given string value
[Content] - The value to store
group : groups several values together
name - Access name for the group
[Content] - 'value' elements
XML Example:
<root>
<section name="A">
<group name="A_Group1">
<value name="Arg1">value for A.A_Group1.Arg1</value>
<value name="Arg2">value for A.A_Group1.Arg2</value>
</group>
</section>
<section name="B">
<import section="A"/> <!--Access 'B.A.A_Group1.Arg1' and 'B.A.A_Group1.Arg2'-->
<value name="Arg1">value for B.Arg1</value>
</section>
</root>
"""
return self.XMLParser.loadXml(xml, section)
def run(self, argv=None, parent=[]):
"""
Runs the Interface
@argv include argv list to be executed by the given Interface
omit argv list to pass control to the given Interface
# First arg is expected to be the call command
@parent the name of the parent Interface
@Return whether or not the parsing was successful
"""
try:
self.parent = parent
if argv and len(argv) > 1:
self.runLine(' '.join(argv))
return self.success
else:
retValue = self.__shell()
self._close()
return retValue
except SystemExit:
self._close()
if int(str(sys.exc_info()[1])) != 0:
raise
def callOtherInterface(self, other, argv=None):
"""
Calls another Interface
@other An Interface instance
@argv argv list as expected by the Interface's run method
@Return whether or not the call returned success
"""
return other.run(argv, self.parent + [self.name])
@staticmethod
def printError(error):
"""
Prints an error
@argv error error message
"""
executer = ShellInterface.LAST_EXECUTER_NAME.pop() if len(ShellInterface.LAST_EXECUTER_NAME) > 0 else "Shell Interface"
print(colors.fg.lightred + "\n[{}] Error: {}".format(executer, error) + colors.reset)
class LogPrinter:
def __init__(self, log, lineNumber):
self.log = log
self.lineNumber = lineNumber
def start(self, logLevel=0):
self.isWorking = True
self.worker = threading.Thread(target=self.run, args=[logLevel])
self.worker.start()
def stop(self):
self.isWorking = False
self.worker.join()
def run(self, logLevel):
with open(self.log, 'r') as log:
[log.readline() for i in range(self.lineNumber)]
while(self.isWorking):
ShellInterface.LogPrinter.printLog(log, logLevel=logLevel)
@staticmethod
def printLog(logFile, logLevel=0):
content = logFile.readline()
if content:
content = content.split("::")
if len(content) == 2:
level, content = content[0], content[1]
if logLevel >= int(level):
print(content, end='')
class InputHandler:
def __init__(self, prompt, handlerFunction, keys):
self.prompt = prompt
self.handlerFunction = handlerFunction
self.keys = keys
self.isWorking = True
self.worker = threading.Thread(target=self.run, args=[])
self.worker.start()
def stop(self):
self.isWorking = False
self.worker.join()
def run(self):
print()
while(self.isWorking):
inputline = self.keys.readInput(self.prompt, hideInputLine=True)
if inputline.strip() in ShellInterface.END_CMDS:
self.isWorking = False
break
self.handlerFunction(inputline)
class XMLParser():
XML = argparse.Namespace(
section = argparse.Namespace(tag="section", id="name"),
include = argparse.Namespace(tag="import", id="section"),
group = argparse.Namespace(tag="group", id="name")
)
def __init__(self, valueTitle, valueId, valueExtractMethod=None):
if valueExtractMethod is None:
valueExtractMethod = lambda value: value.text
self.value = argparse.Namespace(title=valueTitle,
id=valueId,
extractMethod=valueExtractMethod)
@staticmethod
def castValue(value, castDescription):
module = __builtin__
if '.' in castDescription:
modulePath = '.'.join(castDescription.split('.')[0:-1])
try:
module = importlib.import_module(modulePath)
except:
modulePath = modulePath.split('.')
for i in range(0, len(modulePath)):
module = getattr(module, modulePath[i])
method = castDescription.split('.')[-1]
return getattr(module, method)(value)
@staticmethod
def extractionCast(valueElement, castId):
"""
Casts a value in a given XML element to it's specified type
@valueElement XML element that has a text value and a 'type' attribute
@Return the casting of the text value to it's specified type
"""
if castId in valueElement.attrib:
return ShellInterface.XMLParser.castValue(valueElement.text, valueElement.attrib[castId])
return valueElement.text
def _appendNamespace(self, namespace, id, value):
namespace._ORDER.append(id)
setattr(namespace, id, value)
return namespace
def _createNamespaceFromXmlRoot(self, xml, root, history):
"""
Creates a new namespace containing values specified under a given XML root elemment
@xml A path to an XML file
@root The XML element containing values to parse out
@history Holds already visited sections
@Return an argparse Namespace containing the values extracted from XML
"""
namespace = argparse.Namespace(_ORDER=[])
for section in root.findall(self.XML.include.tag):
id = section.attrib[self.XML.include.id]
namespace = self._appendNamespace(namespace, id, self._loadXml(xml, id, history))
for value in root.findall(self.value.title):
id = value.attrib[self.value.id]
namespace = self._appendNamespace(namespace, id, self.value.extractMethod(value))
for group in root.findall(self.XML.group.tag):
groupId = group.attrib[self.XML.group.id]
namespace = self._appendNamespace(namespace, groupId, OrderedDict())
for value in group.findall(self.value.title):
groupValues = getattr(namespace, groupId)
groupValues[value.attrib[self.value.id]] = self.value.extractMethod(value)
return namespace
def _loadXml(self, xml, section=None, history=[]):
"""
Loads an XML configuration file into the interface.
@xml A path to an XML file
@section Specify to load a specific section in the XML only
@history Holds already visited sections
@Return an argparse Namespace containing the values extracted from XML
"""
tree = ET.parse(xml)
root = tree.getroot()
if section:
if section not in history:
history.append(section)
for sec in root.findall(self.XML.section.tag):
if sec.attrib[self.XML.section.id].upper() == section.upper():
return self._createNamespaceFromXmlRoot(xml, sec, history[:])
else:
print("ERROR: Found a circular import in XML file: '{}'".format(xml))
return None
else:
return self._createNamespaceFromXmlRoot(xml, root, history)
# We got a non existing section to read
return argparse.Namespace()
def loadXml(self, xml, section):
"""
Loads an XML file as an argparse.Namespace
@xml A path to an XML file
@section Specify to load a specific section in the XML only
@Return an argparse Namespace containing the values extracted from XML
XML Structure:
section : Groups arguments together
name - name of the section
[Content] - 'import', 'value' and 'group' elements
import : Includes another section in the current section
section - section name to import
[Content] - None
value : Holds a value for the interface to use
name - Access name for the value
type - A casting method to apply on the given string value
[Content] - The value to store
group : groups several values together
name - Access name for the group
[Content] - 'value' elements
XML Example:
<root>
<section name="A">
<group name="A_Group1">
<value name="Arg1">value for A.A_Group1.Arg1</value>
<value name="Arg2">value for A.A_Group1.Arg2</value>
</group>
</section>
<section name="B">
<import section="A"/> <!--Access 'B.A.A_Group1.Arg1' and 'B.A.A_Group1.Arg2'-->
<value name="Arg1">value for B.Arg1</value>
</section>
</root>
"""
return self._loadXml(xml, section, history=[])
"""
Interface Template Class
"""
###############################################################################
### Copy the entire code found below to start a new Shell Interface program ###
###############################################################################
import os, sys
from structure.ShellInterface import ShellInterface
class Interface(ShellInterface):
NAME = os.path.basename(__file__).split(".")[0] # Default is current file's name
VERSION = "1.0.0.0"
DESCRIPTION = 'A template Interface class' # Interface Short Description
def buildParser(self):
"""
Builds the Interface's argument parser
"""
# Add the arguments to self.parser (argparse.ArgumentParser type)
# use to keep values of arguments saved between commands at runtime.
self.parser.set_defaults(MEMORY={}) # dict: {[argument dest name] : [default value]}.
def __init__(self):
"""
Interface Constructor
"""
super(Interface, self).__init__(self.NAME, self.VERSION, description=self.DESCRIPTION)
def preprocessArguments(self):
"""
Preprocesses the arguments that were passed to the Interface
@Return whether or not the preprocessing was successful
"""
# Preprocess received arguments, stored in self.FLAGS (argparse namespace)
return super(Interface, self).preprocessArguments() # Return preprocessing result (bool)
def manageUnparsed(self, unparsed):
"""
Handles the arguments that couldn't be parsed by the Interface's arguments parser
@unparsed list of unparsed arguments
@Return whether or not the parsing was successful
"""
# Handle unparsed arguments (str list)
return super(Interface, self).manageUnparsed(unparsed) # Return parsing result (bool)
# Main Method
def execute(self):
"""
The main method of the Interface.
It's called whenever a shell command is entered or Interface.run() is called with argv.
@Return whether or not the execution was successful
"""
# Use self.FLAGS to access the parsed arguments (argparse namespace)
# Use self.input to access the given arguments (str list)
return True # Return execution result (bool)
def close(self):
"""
This method is called whenever the interface closes
@Return whether or not the execution was successful
"""
if __name__ == "__main__":
Interface().run(sys.argv)
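# --- Example sketch (not part of the original file) ---
# A hypothetical filled-in version of the template above, showing how an
# argument registered in buildParser surfaces in self.FLAGS inside execute().
# The interface name, the --name flag and the greeting are assumptions made
# only for illustration.
#
# class GreeterInterface(ShellInterface):
#     def buildParser(self):
#         self.parser.add_argument("--name", default="world")
#         self.parser.set_defaults(MEMORY={})
#     def __init__(self):
#         super(GreeterInterface, self).__init__("Greeter", "1.0.0.0")
#     def execute(self):
#         print("Hello, {}".format(self.FLAGS.name))
#         return True
#     def close(self):
#         pass
#
# GreeterInterface().run(sys.argv)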
|
[
"michaelkanon1@gmail.com"
] |
michaelkanon1@gmail.com
|
992d0d89d6bb0e793cbf80caa45bb759cd343dba
|
58674e0ea4f0faa70892db30627fda006c3dc478
|
/Beginner/1060_positive_numbers.py
|
8e603faa7151ff993363d4fcdc22e248597d36bf
|
[] |
no_license
|
ShekhRaselMasrurAhmmadNissan/URI-Online-Judge
|
fe0f176987f63dc342d741de34c52b10edb3f6f6
|
1554d12a0338850ba1f07f401633390815e505b2
|
refs/heads/main
| 2023-02-23T10:36:42.935212
| 2021-01-25T14:21:26
| 2021-01-25T14:21:26
| 325,745,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
# Reading the Data...
numbers = list()
for serial_number in range(0, 6):
numbers.append(float(input()))
# Checking the conditions...
positive_number_count = 0
for number in numbers:
if (number >= 0):
positive_number_count += 1
print(f'{positive_number_count} valores positivos')
|
[
"shekhraselmasrurahmmadnissan@gmail.com"
] |
shekhraselmasrurahmmadnissan@gmail.com
|
6d9a899cc5415e40329693b80d3cc1bbf9759db2
|
a257bf65a2a1ba2c6841dd25c89d98c5672e4e57
|
/BackEnd/Semana22/DjangoRestFramework/DjangoRestFramework/wsgi.py
|
424593130b609b9f268eda5e5d98d2c974645dad
|
[] |
no_license
|
jorgegarba/CodiGo9
|
190cb67e3c7f9cbad271baf62657bda7ca03ec42
|
3b85c36a3ed8d2d5ee1d0fb6e8ca18599621fe47
|
refs/heads/master
| 2023-01-22T22:31:00.244982
| 2020-03-31T17:59:37
| 2020-03-31T17:59:37
| 211,982,487
| 6
| 5
| null | 2023-01-05T05:23:27
| 2019-10-01T00:21:25
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 415
|
py
|
"""
WSGI config for DjangoRestFramework project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoRestFramework.settings')
application = get_wsgi_application()
|
[
"ederiveroman@gmail.com"
] |
ederiveroman@gmail.com
|
5a00b73020577be86d6b7c9f68827501ec2be3eb
|
fafb5b817011892be9a824a4693bae58cebd5f04
|
/app/routes/auth/__init__.py
|
e3db8201ab12cfce7b750fd3cbb96e4e2952274a
|
[] |
no_license
|
vincentscode/Data-Studio
|
4c3f94a9d8bdf10bf08136637cb6c8ba162eeb0a
|
65d6b665a7b6ceef2ef388c96f6b6f6661fee2ce
|
refs/heads/master
| 2020-07-28T09:43:49.805922
| 2019-12-01T15:06:16
| 2019-12-01T15:06:16
| 209,384,419
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25
|
py
|
from . import GoogleAuth
|
[
"vincentscode@gmail.com"
] |
vincentscode@gmail.com
|
6f9219124cdf28edd912b1cbde65e7ea17aece30
|
7b315bbe8c85ce05e6c51112e985ae1b392d83f5
|
/desafio_calcipv4/__init__.py
|
52688559769bae42e626eb8e42a779ae27f16e24
|
[] |
no_license
|
Cica013/aprendendoPython
|
e9f993b1b144e294a338a53f2bc36673d3cd00a6
|
9c964f2322e3d52b39a811aceec64b169bab4e10
|
refs/heads/main
| 2023-08-10T20:12:47.640239
| 2021-10-06T21:01:19
| 2021-10-06T21:01:19
| 385,755,818
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 104
|
py
|
from classes.calcipv4 import CalcIpv4
calc_ipv4 = CalcIpv4(ip='192.168.0.1', mascara='255.255.255.0')
|
[
"61808853+Cica013@users.noreply.github.com"
] |
61808853+Cica013@users.noreply.github.com
|
0bebf2b16ff727c6ad6f1d7aca0f42970ec1dc48
|
bed559d18b0a9604e6d18879e1f3837d228d1440
|
/rx/backpressure/pausable.py
|
631ce64e952fd6f555f3e9866c6f605c96299a8e
|
[
"Apache-2.0"
] |
permissive
|
jesonjn/RxPY
|
a80b7a8f0a3a8a6ddcb7f3ed678d2f8411cad84e
|
9dfb62979f2c54b93bbb8c0ee5fa18cfae4d73d0
|
refs/heads/master
| 2020-12-29T00:25:17.866220
| 2014-11-15T10:24:05
| 2014-11-15T10:24:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,932
|
py
|
from six import add_metaclass
from rx import Observable
from rx.internal import ExtensionMethod
from rx.disposables import CompositeDisposable, Disposable
from rx.subjects import Subject
class PausableObservable(Observable):
def __init__(self, source, subject=None):
self.source = source
self.subject = subject or Subject()
self.is_paused = True
super(PausableObservable, self).__init__(self.subscribe)
def subscribe(self, observer):
conn = self.source.publish()
subscription = conn.subscribe(observer)
connection = [Disposable.empty()]
def on_next(b):
if b:
connection[0] = conn.connect()
else:
connection[0].dispose()
connection[0] = Disposable.empty()
pausable = self.subject.distinct_until_changed().subscribe(on_next)
return CompositeDisposable(subscription, connection[0], pausable)
def pause(self):
if self.is_paused:
return
self.is_paused = True
self.subject.on_next(False)
def resume(self):
if not self.is_paused:
return
self.is_paused = False
self.subject.on_next(True)
@add_metaclass(ExtensionMethod)
class ObservablePausable(Observable):
"""Uses a meta class to extend Observable with the methods in this class"""
def pausable(self, pauser):
"""Pauses the underlying observable sequence based upon the observable
sequence which yields True/False.
Example:
pauser = rx.Subject()
source = rx.Observable.interval(100).pausable(pauser)
Keyword parameters:
pauser -- {Observable} The observable sequence used to pause the
underlying sequence.
Returns the observable {Observable} sequence which is paused based upon
the pauser."""
return PausableObservable(self, pauser)
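# Usage sketch (an assumption that mirrors the docstring example above): driving
# the pauser subject directly to start and stop emission. It presumes the same
# RxPY version this module ships with, where Observable.interval exists.
#
# pauser = Subject()
# source = Observable.interval(100).pausable(pauser)
# subscription = source.subscribe(lambda x: print(x))
# pauser.on_next(True)    # resume emission
# pauser.on_next(False)   # pause emission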
|
[
"dag@brattli.net"
] |
dag@brattli.net
|
747a7cb3b08db83515a6456c6b9e5af1e2d0e703
|
c818b186c3e76f6d3c5edb8e2e30a04c2a1b99a9
|
/early-bird-impl/early_bird/wc_gc.py
|
48f2ab50c70b1d0126a8320643a5e33f7ebb09b6
|
[] |
no_license
|
imperialguy/algc
|
3048e31e16e19ea195797d4935111e8238244455
|
3d71210e6fd0e33249bfa461473da2fa79fff433
|
refs/heads/master
| 2021-03-24T09:32:59.492479
| 2017-05-05T21:52:25
| 2017-05-05T21:52:25
| 82,343,977
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29,901
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 17 14:12:31 2016
@author: mcontrac
"""
import ConfigParser
import math
import numpy
import pandas
from helpers import setup_logging
from helpers import get_discount_amount
from helpers import get_dnb_scores
from helpers import get_sic_major_group
from helpers import round_down
from model_builder import GLMModel
# User inputs
user_duns_number = 608201141
user_division = 21
user_is_uslh = False
user_sic_code = '0111'
user_effective_date = pandas.datetime(2016, 9, 1)
user_total_projected_payroll = 10000000
user_estimated_clerical_payroll_ratio = 0.3
user_estimated_clerical_payroll = user_estimated_clerical_payroll_ratio * user_total_projected_payroll
user_estimated_non_clerical_payroll = user_total_projected_payroll - user_estimated_clerical_payroll
user_experience_mod = 0.97
input_data = pandas.DataFrame({'state': ['AK', 'CT', 'MD', 'KY', 'CA', 'CA', 'DE', 'AK'],
'class_code': ['6504', '4720', '2039', '6504', '8810', '6504', '0953', '9139'],
'payroll': [4000000, 500000, 1000000, 100000, 1000000, 200000, 200000, 0]})
input_history = pandas.DataFrame({'years_before': [1, 2, 3], 'ind_claim_count': [2, 2, 2], 'med_claim_count': [26, 19, 14]})
def read_rate_lookup(filename, is_uslh):
"""Reads the data from the rate_lookup.csv file into a pandas DataFrame
The rate_lookup.csv file should contain the columns called ``state``,
``class_code``, ``final_rate``, ``final_rate_uslh`` and ``clerical_ind``.
If the input division is 58-USLH, the ``final_rate`` column is dropped and
the ``final_rate_uslh`` column is renamed to final_rate.
Otherwise the ``final_rate_uslh`` column is dropped.
Args:
**is_uslh**: Boolean indicator whether the division is 58-USLH or not
Return:
A pandas DataFrame object with the state, class_code and final_rate
columns
"""
rate_lookup = pandas.read_csv(filename, index_col='lookup_key')
if is_uslh:
rate_lookup.drop('final_rate', axis=1, inplace=True)
rate_lookup.rename(columns={'final_rate_uslh': 'final_rate'}, inplace=True)
else:
rate_lookup.drop('final_rate_uslh', axis=1, inplace=True)
return rate_lookup
def read_discount_lookup(filename):
"""Reads the discount lookup data for the specifiec NCCI table number
Args:
**filename**: csv file from which to read the NCCI data
Return:
A pandas DataFrame containing the bucket as the index and the discount
rates for each bucket
"""
return pandas.read_csv(filename)
def read_state_rate_need_lookup(filename, division, effective_date, is_uslh):
"""Reads the fixed and variable rate need data for the input division and
effective date
The is_uslh indicator is only applicable to division 58. For all other
divisions, the indicator is assumed to be False regardless of input.
Args:
**filename**: csv file containing the state rate need data\n
**division**: The user input division\n
**effective_date**: The user input effective date\n
**is_uslh**: Boolean indicator for which division 58 rates to lookup
Return:
A pandas DataFrame with columns state, variable_rate_need,
fix_rate_need and indicated_loss_ratio
"""
state_rate_need = pandas.read_csv(filename, parse_dates=['effective_date', 'expiration_date'], infer_datetime_format=True)
def keep_row(index):
return (state_rate_need['division'][index] == division
and state_rate_need['effective_date'][index] <= effective_date <= state_rate_need['expiration_date'][index]
and state_rate_need['uslh_ind'][index] == is_uslh)
return state_rate_need.select(keep_row).drop(['division', 'uslh_ind', 'effective_date', 'expiration_date'], axis=1)
def read_wcng_loss_ratio_lookup(filename, division, is_uslh):
"""Reads the WCNG average loss ratio for the division by state
The is_uslh indicator is only applicable to division 58. For all other
divisions, the indicator is assumed to be False regardless of input.
Args:
**filename**: csv file containing the WCNG loss ratio data\n
**division**: The user input division\n
**is_uslh**: Boolean indicator for which division 58 rates to lookup
Return:
A pandas DataFrame with columns state and avg_wcng_loss_ratio
"""
wcng_loss_ratio = pandas.read_csv(filename)
def keep_row(index):
return (wcng_loss_ratio['division'][index] == division) and (wcng_loss_ratio['uslh_ind'][index] == is_uslh)
return wcng_loss_ratio.select(keep_row).drop(['division', 'uslh_ind'], axis=1)
def read_cdf(filename, state):
"""Reads the CDFs for prior three years
Args:
**filename**: csv file containing the CDF data\n
**state**: The state for which CDFs are to be read
Return:
A pandas DataFrame with columns ``prior_year`` and ``cdf``. Prior
year refers to number of years prior to current year.
"""
cdf_data = pandas.read_csv(filename)
cdf_data['inverse_cdf'] = 1 / cdf_data['cdf']
if state in cdf_data['state'].unique():
return cdf_data[cdf_data['state'] == state].drop('state', axis=1)
else:
return cdf_data[cdf_data['state'].isnull()].drop('state', axis=1)
def get_monopolistic_states():
"""Returns a list of state codes that are monopolistic states"""
return ['ND', 'OH', 'WA', 'WY']
def get_t9_states():
"""Returns a list of state codes that require T9 discount rates"""
return ['AZ', 'FL', 'IA', 'ID', 'MA', 'NJ']
def merge_rate_lookup(input_data, rate_lookup_table):
"""Merges the ``clerical_ind`` and ``class_rate`` from the rate lookup to
the input
The function also calculates the class premium, non-clerical and clerical
payrolls for each input entry and also calculates the overall average
clerical and non-clerical rates for the input provided. The function also
adds the columns ``class_rate``, ``clerical_ind``, ``payroll_non_clerical``
and ``payroll_clerical`` columns to the input data.
Args:
**input_data**: The state, class code and payroll data input by the
user as a DataFrame\n
**rate_lookup_table**: The rates for calculating the class premium
percents from payroll
Return:
A dictionary containing the average clerical rate (``avg_clerical_rate``) and
the average non-clerical rate (``avg_non_clerical_rate``)
"""
input_data['class_rate'] = input_data.apply(lambda row: rate_lookup_table['final_rate'][row['lookup_key']], axis=1)
input_data['clerical_ind'] = input_data.apply(lambda row: rate_lookup_table['clerical_ind'][row['lookup_key']], axis=1)
input_data['class_premium'] = input_data['payroll'] * input_data['class_rate']
input_data['payroll_non_clerical'] = input_data['payroll'] * (1 - input_data['clerical_ind'])
input_data['payroll_clerical'] = input_data['payroll'] * input_data['clerical_ind']
avg_clerical_rate = sum(input_data['payroll_clerical'] * input_data['class_rate']) / input_data['payroll_clerical'].sum()
avg_non_clerical_rate = sum(input_data['payroll_non_clerical'] * input_data['class_rate']) / input_data['payroll_non_clerical'].sum()
return {'avg_clerical_rate': avg_clerical_rate, 'avg_non_clerical_rate': avg_non_clerical_rate}
def merge_wcng_lr_rate_need(payrolls, division, effective_date, is_uslh,
rate_need_file, wcng_lr_file):
"""Merges the payrolls data to the WCNG loss ratio and rate need data
Note that this function returns a separate DataFrame object instead of
merging inplace
Args:
**payrolls**: DataFrame containing the allocation ratio of each state\n
**division**: The user input division\n
**effective_date**: The user input effective date\n
**is_uslh**: Boolean indicator for which division 58 rates to lookup\n
**rate_need_file**: csv file containing the state rate need data\n
**wcng_lr_file**: csv file containing the WCNG loss ratio data
Return:
A pandas DataFrame with all columns from ``payrolls`` along with
``avg_wcng_loss_ratio``, ``variable_rate_need``, ``fix_rate_need`` and
``indicated_loss_ratio`` columns
"""
wcng_lr_data = read_wcng_loss_ratio_lookup(wcng_lr_file, division, is_uslh)
rate_need_data = read_state_rate_need_lookup(rate_need_file, division, effective_date, is_uslh)
return payrolls.merge(wcng_lr_data, how='left', on='state').merge(rate_need_data, how='left', on='state')
def calc_payroll_ratio(input_data):
"""Calculates the non-clerical and clerical payrolls for each state
The function modifies the input dataframe and calculates the non-clerical
payroll and clerical payroll columns for each row. It then calculates the
total non-clerical and clerical payroll for each state and returns that as
a DataFrame.
Args:
**input_data**: DataFrame containing the class premium, net, clerical
and non-clerical payrolls for each state and class code
Return:
A pandas DataFrame with total class premium, net, non-clerical and
clerical payrolls by state, and the ratio of non-clerical payroll for
each state where the clerical payroll is missing
"""
payrolls = input_data.groupby(by='state', as_index=False, sort=False).agg({'class_premium': 'sum',
'payroll': 'sum',
'payroll_non_clerical': 'sum',
'payroll_clerical': 'sum'})
payrolls['payroll_non_clerical_only'] = payrolls.apply(lambda row: row['payroll_non_clerical'] if row['payroll_clerical'] == 0 else 0,
axis=1)
total_non_clerical = payrolls['payroll_non_clerical_only'].sum()
payrolls['state_non_clerical_ratio'] = payrolls['payroll_non_clerical_only'] / total_non_clerical
payrolls.drop('payroll_non_clerical_only', axis=1, inplace=True)
return payrolls
def calc_allocate_clerical_payroll(payrolls, user_estimated_clerical_payroll):
"""Allocates the unentered clerical payroll to states based on non-clerical
payroll ratio
Uses the calculated non-clerical payroll ratio to allocate clerical payroll
that was not entered by the user based on the user entered total estimated
clerical payroll. The method modifies the payrolls DataFrame in place by
adding the ``allocated_clerical_payroll`` column
Args:
**payrolls**: DataFrame containing the allocation ratio of each state\n
**user_estimated_clerical_payroll**: User input total estimated
clerical payroll
"""
entered_clerical_payroll = payrolls['payroll_clerical'].sum()
clerical_payroll_to_be_allocated = max(0, user_estimated_clerical_payroll - entered_clerical_payroll)
payrolls['allocated_clerical_payroll'] = payrolls['state_non_clerical_ratio'] * clerical_payroll_to_be_allocated
def calc_clerical_class_premium(payrolls, rate_lookup_table):
"""Calculates the clerical class premium based on the allocated clerical
payroll
Determines the clerical rate to use from the rate table and calculates the
class premium for clerical payroll based on the allocated clerical payroll.
Modifies the payrolls DataFrame in place by adding the ``clerical_rate``
and ``allocated_clerical_class_premium`` columns
Args:
**payrolls**: DataFrame containing the allocated clerical payroll for
each state\n
**rate_lookup_table**: Table containing the rate for each state and
class code, with an boolean indicator for clerical vs non-clerical rate
"""
clerical_rates = rate_lookup_table.loc[rate_lookup_table['clerical_ind'] == 1].set_index('state')
payrolls['clerical_rate'] = payrolls['state'].map(clerical_rates['final_rate'])
payrolls['allocated_clerical_class_premium'] = payrolls['clerical_rate'] * payrolls['allocated_clerical_payroll']
def calc_standard_premium(payrolls, user_experience_mod):
"""Calculates the standard premium for each state
If a state is monopolistic, the experience mod is 1 else it is the user
input experience mod. Monopolistic states are determined by the
``get_monopolistic_states()`` function. Modifies the payrolls DataFrame
in place by adding the ``experience_mod``, ``standard_premium`` and
``standard_premium_ratio`` columns
Args:
**payrolls**: DataFrame containing the class premium by each state\n
**user_experience_mod**: User input experience mod factor
"""
monopolistic_states = get_monopolistic_states()
payrolls['experience_mod'] = payrolls.apply(lambda row: user_experience_mod if row['state'] not in monopolistic_states else 1, axis=1)
payrolls['standard_premium'] = payrolls['experience_mod'] * payrolls['class_premium']
total_standard_premium = payrolls['standard_premium'].sum()
payrolls['standard_premium_ratio'] = payrolls['standard_premium'] / total_standard_premium
def calc_missing_standard_premium(payrolls, avg_rates, user_experience_mod):
"""Returns the missing standard premiums to be allocated across the states
Args:
**payrolls**: DataFrame containing the clerical and non-clerical
payroll by state\n
**avg_rates**: Dictionary containing the average clerical and
non-clerical rates for input\n
**user_experience_mod**: User input experience mod factor
Return:
The total standard premium that is missing based on the inputs
"""
missing_clerical_payroll = max(0, user_estimated_clerical_payroll - payrolls['payroll_clerical'].sum())
missing_non_clerical_payroll = max(0, user_estimated_non_clerical_payroll - payrolls['payroll_non_clerical'].sum())
allocated_clerical_class_premium = payrolls['allocated_clerical_class_premium'].sum()
unknown_clerical_class_premium = (allocated_clerical_class_premium
if allocated_clerical_class_premium > 0
else avg_rates['avg_clerical_rate'] * missing_clerical_payroll)
unknown_non_clerical_class_premium = missing_non_clerical_payroll * avg_rates['avg_non_clerical_rate']
missing_clerical_standard_premium = unknown_clerical_class_premium * user_experience_mod
missing_non_clerical_standard_premium = unknown_non_clerical_class_premium * user_experience_mod
return missing_clerical_standard_premium + missing_non_clerical_standard_premium
def calc_allocated_standard_premium(payrolls, standard_premium_to_allocate):
"""Calcualtes the allocated the standard premiums for each state
Distributes the missing standard premium to each state based on the
standard premium ratio, and adds the calculated standard premium for the
state to get the final allocated standard premium for the state. The
function modifies the payrolls DataFrame in place by adding a
``allocated_standard_premium`` column
Args:
**payrolls**: DataFrame containing the standard premium value and ratio
for each state\n
**standard_premium_to_allocate**: The missing standard premium that
needs to be distributed among the states
"""
payrolls['allocated_standard_premium'] = (payrolls['standard_premium']
+ (payrolls['standard_premium_ratio'] * standard_premium_to_allocate))
def calc_premium_discount(payrolls, other_loadings, ncci_tier_files):
"""Calculates the premium discount to be applied to each state
Reads the discount tables for NCCI state groups (currently only 7 and 9)
and calculates the discount for each bucket within that group, totals it
and puts it as ``premium_discount`` column in the ``payrolls`` DataFrame.
The function also calculates the manual rate for each state as
``manual_rate`` column in the payrolls DataFrame.
Args:
**payrolls**: DataFrame containing the allocated standard premium for
each state\n
**other_loadings**: Other loadings factor for the rate calculations\n
**ncci_tier_files**: A dict containing the NCCI tier number as key, and
the filename as the value
"""
ncci_table7 = read_discount_lookup(ncci_tier_files[7])
ncci_table9 = read_discount_lookup(ncci_tier_files[9])
t9_states = get_t9_states()
def __discount_amount_helper(row):
if row['state'] in t9_states:
table = ncci_table9
else:
table = ncci_table7
return get_discount_amount(row['allocated_standard_premium'], table)
payrolls['premium_discount'] = payrolls.apply(__discount_amount_helper, axis=1)
payrolls['manual_rate_pre_model'] = (1 + other_loadings) * (payrolls['allocated_standard_premium'] - payrolls['premium_discount'])
payrolls['manual_rate'] = (1 + other_loadings) * (payrolls['standard_premium'] - payrolls['premium_discount'])
def calc_normalized_claim_counts(input_history, predom_state, aqi_data,
total_class_premium, cdf_file):
"""Calculates the normalized indemnity and medical claim counts and ratio
Uses the user input claim count history and the reference CDFs
to calculate the normalized claim counts for the last 3 years,
and calculates the indemnity to medical claim count ratio using
the credibility and global average from AQI profitability studies.
Claim counts are calculated as 2 * claim count in prior year + claim counts
in two years before that. CDF adjusted premium is also calculated similarly.
Normalized claim counts are calculated by dividing the claim counts by the
CDF adjusted premium in millions. The indemnity to medical claim ratio is
calculated by adding the average respective claim frequency times the
credibility (as obtained from AQI profitability study) to the claim counts,
and then taking the ratio.
Args:
**input_history**: User input claim count history DataFrame\n
**predom_state**: State whose CDFs are used\n
**aqi_data**: A dictionary containing the keys ``credibility``,
``avg_indemnity_frequency_3yrs`` and ``avg_medical_frequency_3yrs``\n
**total_class_premium**: Class premium value to use to calculate
CDF adjusted premium\n
**cdf_file**: csv file containing the CDF data
Return:
A pandas DataFrame containing the ``indemnity_claim_count``,
``medical_claim_count``,``cdf_adjusted_premium``,
``norm_indemnity_claim_count``, ``norm_medical_claim_count``
and ``indemnity_medical_ratio`` as keys, with their corresponding values
"""
__calc_claim_count = lambda column: input_history[column].sum() + input_history[input_history['years_before'] == 1][column]
__norm_claim_count = lambda value, premium: value / (premium / 1000000)
credibility = aqi_data['credibility']
avg_indemnity_frequency_3yrs = aqi_data['avg_indemnity_frequency_3yrs']
avg_medical_frequency_3yrs = aqi_data['avg_medical_frequency_3yrs']
cdfs = read_cdf(cdf_file, predom_state)
cdfs['cdf_premium'] = cdfs['inverse_cdf'] * total_class_premium
cdf_premium_3yrs = cdfs['cdf_premium'].sum() + cdfs.loc[cdfs['prior_year'] == 1]['cdf_premium'].sum()
indemnity_claim_count = __calc_claim_count('ind_claim_count')
medical_claim_count = __calc_claim_count('med_claim_count')
norm_indemnity_claim_count = __norm_claim_count(indemnity_claim_count, cdf_premium_3yrs)
norm_medical_claim_count = __norm_claim_count(medical_claim_count, cdf_premium_3yrs)
indemnity_medical_ratio = ((indemnity_claim_count + (credibility * avg_indemnity_frequency_3yrs)) /
(medical_claim_count + (credibility * avg_medical_frequency_3yrs)))
return pandas.DataFrame.from_dict(data={'indemnity_claim_count': indemnity_claim_count,
'medical_claim_count': medical_claim_count,
'cdf_adjusted_premium': cdf_premium_3yrs,
'norm_indemnity_claim_count': norm_indemnity_claim_count,
'norm_medical_claim_count': norm_medical_claim_count,
'indemnity_medical_ratio': indemnity_medical_ratio
}, orient='columns')
def calc_entered_payroll_ratios(input_data):
"""Calculates the entered clerical and non-clerical payroll ratios
Entered clerical payroll ratio is defined as the clerical payroll entered
divided by the total projected payroll. Max is 1.
Entered non-clerical payroll ratio is defined as the non-clerical payroll
    entered divided by the non-clerical payroll estimated. The estimated non-clerical
payroll ratio is
``1 - max(entered_clerical_payroll_ratio, user_estimated_clerical_payroll_ratio)``
If this is 0, the entered non-clerical payroll ratio is 0. Otherwise, max is
1.
Args:
**input_data**: User input state, class code and payroll data after
clerical and non-clerical payrolls have been calculated
Return:
A dictionary containing the entered ratios with keys as ``clerical`` and
``non_clerical``
"""
entered_clerical_payroll_ratio = min(1, input_data['payroll_clerical'].sum() / user_total_projected_payroll)
estimated_non_clerical_payroll_ratio = 1 - max(entered_clerical_payroll_ratio, user_estimated_clerical_payroll_ratio)
if estimated_non_clerical_payroll_ratio > 0:
estimated_total_non_clerical_payroll = estimated_non_clerical_payroll_ratio * user_total_projected_payroll
entered_non_clerical_payroll_ratio = min(1, input_data['payroll_non_clerical'].sum() / estimated_total_non_clerical_payroll)
else:
entered_non_clerical_payroll_ratio = 0
return {'clerical': entered_clerical_payroll_ratio,
'non_clerical': entered_non_clerical_payroll_ratio}
def calc_diamond_bound_ratios(entered_clerical_payroll_ratio, entered_non_clerical_payroll_ratio,
bound_ratios_filename):
"""Calculates the upper and lower bound ratios for the diamond
Args:
**entered_clerical_payroll_ratio**: The ratio of clerical payroll to the
total payroll entered\n
        **entered_non_clerical_payroll_ratio**: The ratio of non-clerical payroll
        entered to the non-clerical payroll estimated\n
**bound_ratios_filename**: csv file containing the bound ratios for each
division
Return:
A tuple whose 0th element is the lower bound ratio, and 1st element
is the upper bound ratio. If ratios cannot be calculated, both are
``numpy.NaN``
"""
if 0.5 < entered_non_clerical_payroll_ratio < 1:
base_ratio = entered_non_clerical_payroll_ratio
elif 0.5 < entered_clerical_payroll_ratio < 1 and user_estimated_clerical_payroll_ratio == 1:
base_ratio = entered_clerical_payroll_ratio
else:
return (numpy.NaN, numpy.NaN)
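    # Position of the base ratio inside its 0.1-wide lookup bucket, rescaled to [0, 1).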
bounds_base = (base_ratio - round_down(base_ratio, 1)) * 10
bound_ratios = pandas.read_csv(bound_ratios_filename)
    bounds = bound_ratios[(bound_ratios['ratio_lower_cap'] < base_ratio)
                          & (base_ratio <= bound_ratios['ratio_upper_cap'])].to_dict('records')[0]
return ((bounds_base * bounds['lower_bound_delta']) + bounds['lower_bound_ratio'],
(bounds_base * bounds['upper_bound_delta']) + bounds['upper_bound_ratio'])
def check_inputs(input_data, entered_ratios):
"""Checks whether inputs can be used by model for scoring
Args:
**input_data**: User input state, class code and payroll data after
clerical and non-clerical payrolls have been calculated\n
**entered_ratios**: The entered ratios dictionary returned by
``calc_entered_payroll_ratios(input_data)``
Return:
A tuple whose 0th element indicates whether inputs are usable or not,
and if not, the 1st element provides the reason
"""
if input_data['payroll'].sum() > (user_total_projected_payroll + 100):
return (False, 'Input payroll exceeds total projected payroll')
if input_data['payroll_clerical'].sum() > (user_total_projected_payroll * (user_estimated_clerical_payroll_ratio + 0.01)):
return (False, 'Clerical payroll entry exceeds total clerical payroll estimate')
estimated_non_clerical_payroll_ratio = 1 - max(entered_ratios['clerical'], user_estimated_clerical_payroll_ratio)
if input_data['payroll_non_clerical'].sum() > (user_total_projected_payroll * (estimated_non_clerical_payroll_ratio + 0.01)):
return (False, 'Non-clerical payroll entry exceeds total non-clerical payroll estimate')
if ((user_estimated_clerical_payroll_ratio == 1 and entered_ratios['clerical'] > 0.6) or
(user_estimated_clerical_payroll_ratio < 1 and entered_ratios['non_clerical'] > 0.6)):
return (True, '')
return (False, 'Not enough payroll data entered')
def run_model(model_inputs, model_coefficients_filename, rules_dict):
"""Runs the model based on the provided inputs
Builds a GLMModel object from the external coefficients, loads the rules
    to derive the model features from the inputs and then runs
the model based on the inputs provided.
Args:
**model_inputs**: A dictionary or DataFrame containing the variables
required by the model as keys\n
**model_coefficients_filename**: Path to file containing the model
coefficients for the Worker's Comp GC model\n
**rules_dict**: Dictionary with lambda functions to derive the features
used by the model from the input variables
Return:
The predicted loss ratio for the account
"""
wc_gc_model = GLMModel(pandas.read_csv(model_coefficients_filename))
wc_gc_model.load_rules(rules_dict)
return math.exp(wc_gc_model.prep_data_and_score(model_inputs.iloc[0])[0])
def main_wc_gc_model():
config = ConfigParser.ConfigParser()
config.read('config/model_config.config')
app_log = setup_logging('wc_gc_logger', config.get('logger', 'log_file_name'))
app_log.info('Scoring DUNS number: %d' % user_duns_number)
rate_lookup_table = read_rate_lookup(config.get('data_files', 'rate_lookup'), user_is_uslh)
input_data['lookup_key'] = input_data['state'] + input_data['class_code']
avg_rates = merge_rate_lookup(input_data, rate_lookup_table)
entered_ratios = calc_entered_payroll_ratios(input_data)
inputs_valid, reason = check_inputs(input_data, entered_ratios)
if not inputs_valid:
return (numpy.NaN, numpy.NaN, numpy.NaN, reason)
payrolls = calc_payroll_ratio(input_data)
calc_allocate_clerical_payroll(payrolls, user_estimated_clerical_payroll)
calc_clerical_class_premium(payrolls, rate_lookup_table)
calc_standard_premium(payrolls, user_experience_mod)
standard_premium_to_allocate = calc_missing_standard_premium(payrolls, avg_rates, user_experience_mod)
calc_allocated_standard_premium(payrolls, standard_premium_to_allocate)
calc_premium_discount(payrolls, config.getfloat('constants', 'other_loadings'),
eval(config.get('data_files', 'ncci_tier_files')))
state_rate_data = merge_wcng_lr_rate_need(payrolls, user_division, user_effective_date, user_is_uslh,
config.get('data_files', 'state_rate_need_lookup'),
config.get('data_files', 'wcng_lr'))
credit_scores = get_dnb_scores(user_duns_number,
default_credit_score_pct=config.get('constants', 'default_duns_cs_pct'),
default_financial_score_pct=config.get('constants', 'default_duns_fs_pct'))
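    # Assemble the model inputs: normalized claim counts for the predominant state,
    # D&B credit scores, total projected payroll and the SIC major group.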
total_class_premium = input_data['class_premium'].sum()
    predom_state = input_data.groupby(by='state')['class_premium'].sum().idxmax()
model_inputs = calc_normalized_claim_counts(input_history, predom_state, eval(config.get('aqi', 'aqi_data')),
total_class_premium, config.get('data_files', 'cdf_file'))
model_inputs['credit_score_pct'] = credit_scores['credit_score_pct']
model_inputs['financial_score_pct'] = credit_scores['financial_score_pct']
model_inputs['payroll'] = user_total_projected_payroll
model_inputs['major_group'] = get_sic_major_group(user_sic_code)
predicted_lr = run_model(model_inputs, config.get('data_files', 'model_coefficients_file'),
eval(config.get('model_rules', 'rules')))
state_rate_data['target_pricing_deviation_factor'] = (((predicted_lr / state_rate_data['avg_wcng_loss_ratio'])
* state_rate_data['variable_rate_need'])
+ state_rate_data['fix_rate_need'])
state_rate_data['estimated_premium'] = state_rate_data['target_pricing_deviation_factor'] * state_rate_data['manual_rate_pre_model']
output_midpoint = state_rate_data['estimated_premium'].sum()
lower_ratio, upper_ratio = calc_diamond_bound_ratios(entered_ratios['clerical'], entered_ratios['non_clerical'],
config.get('data_files', 'bound_ratios'))
return (output_midpoint * lower_ratio, output_midpoint, output_midpoint * upper_ratio, '')
|
[
"ven.karri@aig.com"
] |
ven.karri@aig.com
|
8707c6732a08d5007fe5f72f81bc5b3ae3317802
|
2a27d1c04b86fc32afea72cb4df12848f4a39078
|
/VNOI/pnumber.py
|
f8ec7a2caefa926ccafeffb8080595d84b7963b4
|
[] |
no_license
|
ngctnnnn/Competitive-Programming
|
9b68d3d30bdb8c0b258708b0c70005a037f2d01a
|
461d715720d4cdf88c0c79011c2aa873fb9e189c
|
refs/heads/main
| 2023-07-02T09:07:26.138500
| 2021-08-03T12:50:37
| 2021-08-03T12:50:37
| 321,263,350
| 4
| 0
| null | 2021-02-22T14:53:03
| 2020-12-14T07:11:07
|
C++
|
UTF-8
|
Python
| false
| false
| 394
|
py
|
def prime(x):
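    # 2, 3 and even numbers are handled up front; otherwise trial-divide by odd
    # numbers, stopping once i*i exceeds x.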
if x == 2 or x == 3:
return True
elif x % 2 == 0 or x < 2:
return False
else:
for i in range(3, x, 2):
if i*i > x:
break
if x % i == 0:
return False
return True
x, y = map(int, input().split())
for i in range(x, y + 1):
if prime(i) is True:
print(i)
|
[
"noreply@github.com"
] |
noreply@github.com
|
1cf78c07b6bdc205a1bca76933f89a3c6e6c2fd3
|
83e472f89c48a2793fa244f573a032bae80ba6bb
|
/Dragomir Robert-Simion/camera_app/blueprints/main/routes.py
|
965401d01a0abb990fe0e376f4b7a57201dcecfd
|
[] |
no_license
|
rodicadp/mobile-2020
|
992293d516a47cfe78a13b63fff7b1e9a4b475bd
|
9c3ceb82c1b4ec5b1e75af2a884c611990164e74
|
refs/heads/master
| 2020-12-27T19:47:27.515452
| 2020-02-03T19:13:21
| 2020-02-03T19:13:21
| 238,030,034
| 0
| 0
| null | 2020-02-03T18:12:56
| 2020-02-03T18:12:55
| null |
UTF-8
|
Python
| false
| false
| 2,700
|
py
|
import os
from flask_login import login_required
from flask import render_template, redirect, url_for, request, flash, Blueprint, session
from sqlalchemy import exc
from camera_app import db
from camera_app.blueprints.main.forms import Form_Photo
from camera_app.blueprints.main.models import Photo
main = Blueprint('main', __name__, template_folder='templates', static_folder='static', static_url_path='/static')
def iterate_pages(table):
return table.iter_pages(left_edge=2, right_edge=2, left_current=2, right_current=2)
def upload_photo(form_file):
if type(form_file) == str:
name = form_file
else:
name = form_file.filename
file_path = os.path.join(main.root_path, 'static', name)
form_file.save(file_path)
return name
@main.route("/edit_photo/<int:id>", methods=['GET', 'POST'])
@main.route('/add_photo', methods=['GET', 'POST'])
@login_required
def add_photo(id=None):
form = Form_Photo()
if id is not None: photo = Photo.query.get_or_404(id)
if request.method == 'GET':
form.process(request.args)
if id is not None:
form.description.data = photo.description
form.photo.data = photo.photo
if form.validate_on_submit():
try:
if id is not None:
photo.description = form.description.data
photo.photo = upload_photo(form.photo.data)
db.session.commit()
flash('Success!', 'success')
return redirect(url_for('main.photo', id=photo.id))
else:
row = Photo(description=form.description.data, photo=upload_photo(form.photo.data))
db.session.add(row)
db.session.commit()
return redirect(url_for('main.photos'))
flash('Success!', 'success')
except exc.IntegrityError as e:
flash(f'Error: {e}', 'danger')
return render_template('add_photo.html', title='Add a photo', form=form)
@main.route("/photo/<int:id>")
@login_required
def photo(id):
session['photo'] = id
return render_template('photo.html', photo=Photo.query.get_or_404(id))
@main.route("/delete_photo/<int:id>")
@login_required
def delete_photo(id):
db.session.delete(Photo.query.get_or_404(id))
db.session.commit()
return redirect(url_for('main.photos'))
@main.route('/', methods=['GET', 'POST'])
@main.route("/photos")
@login_required
def photos():
page = request.args.get('page', 1, type=int)
return render_template('photos.html', title='Photos', photos=Photo.query.paginate(per_page=5, page=page))
|
[
"noreply@github.com"
] |
noreply@github.com
|
e2053a52894b2dba4c8f3b3e5598d763d3246c1e
|
809b59be1161be7cf19a483ff1154fe2c8eda794
|
/loss_from_log.py
|
687abcf780272687c083b35408390555e7da9ff6
|
[] |
no_license
|
qzhao/train-CRF-RNN
|
d444bdc434424c20e98a68ca408a935cdb1b575c
|
9af2ce367e34f9d3d12df55701ad14757b908d58
|
refs/heads/master
| 2020-12-29T00:55:42.557635
| 2016-03-10T09:21:49
| 2016-03-10T09:21:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,642
|
py
|
#!/usr/bin/env python
# Martin Kersner, 2016/01/13
from __future__ import print_function
import sys
import re
import numpy as np
import matplotlib.pyplot as plt
from utils import strstr
def main():
log_files = process_arguments(sys.argv)
train_iteration = []
train_loss = []
test_iteration = []
test_loss = []
test_accuracy = []
pixel_accuracy = []
mean_accuracy = []
mean_IU = []
frequency_weighted_IU = []
base_test_iter = 0
base_train_iter = 0
for log_file in log_files:
with open(log_file, 'rb') as f:
if len(train_iteration) != 0:
base_train_iter = train_iteration[-1]
base_test_iter = test_iteration[-1]
for line in f:
# TRAIN NET
if strstr(line, 'Iteration') and strstr(line, 'lr'):
matched = match_iteration(line)
train_iteration.append(int(matched.group(1))+base_train_iter)
elif strstr(line, 'Train net output'):
matched = match_loss(line)
train_loss.append(float(matched.group(1)))
elif strstr(line, 'pixel_accuracy'):
matched = re.search(r'pixel_accuracy: (.*)', line)
pixel_accuracy.append(float(matched.group(1)))
elif strstr(line, 'mean_accuracy'):
matched = re.search(r'mean_accuracy: (.*)', line)
mean_accuracy.append(float(matched.group(1)))
elif strstr(line, 'mean_IU'):
matched = re.search(r'mean_IU: (.*)', line)
mean_IU.append(float(matched.group(1)))
elif strstr(line, 'frequency_weighted'):
matched = re.search(r'frequency_weighted: (.*)', line)
frequency_weighted_IU.append(float(matched.group(1)))
# TEST NET
elif strstr(line, 'Testing net'):
matched = match_iteration(line)
test_iteration.append(int(matched.group(1))+base_test_iter)
elif strstr(line, 'Test net output'):
matched = match_loss(line)
if matched:
test_loss.append(float(matched.group(1)))
else:
matched = match_accuracy(line)
test_accuracy.append(float(matched.group(1)))
print("TRAIN", train_iteration, train_loss)
print("TEST", test_iteration, test_loss)
print("ACCURACY", test_iteration, test_accuracy)
# loss
plt.plot(train_iteration, train_loss, 'k', label='Train loss')
plt.plot(test_iteration, test_loss, 'r', label='Test loss')
plt.legend()
plt.ylabel('Loss')
plt.xlabel('Number of iterations')
plt.savefig('loss.png')
# evaluation
plt.clf()
plt.plot(range(len(pixel_accuracy)), pixel_accuracy, 'k', label='pixel accuracy')
plt.plot(range(len(mean_accuracy)), mean_accuracy, 'r', label='mean accuracy')
plt.plot(range(len(mean_IU)), mean_IU, 'g', label='mean IU')
plt.plot(range(len(frequency_weighted_IU)), frequency_weighted_IU, 'b', label='frequency weighted IU')
plt.legend(loc=0)
plt.savefig('evaluation.png')
def match_iteration(line):
return re.search(r'Iteration (.*),', line)
def match_loss(line):
return re.search(r'loss-ft = (.*) \(', line)
def match_accuracy(line):
return re.search(r'seg-accuracy = (.*)', line)
def process_arguments(argv):
print(argv)
if len(argv) < 2:
help()
log_files = argv[1:]
return log_files
def help():
print('Usage: python loss_from_log.py [LOG_FILE]+\n'
          'LOG_FILE is a text file containing a log produced by caffe. '
          'At least one LOG_FILE has to be specified. '
          'Files have to be given in the correct order (the oldest logs as the first ones).'
, file=sys.stderr)
exit()
if __name__ == '__main__':
main()
|
[
"m.kersner@gmail.com"
] |
m.kersner@gmail.com
|
79b21ab5c4ba6fadd6e18c4bc14248a109112bf2
|
e008b7ec16cbcffb5368bb1685d44b75c4019a44
|
/DeepVO/deepvo_net.py
|
bfb623602caa9d49fad86c837efe32080b09047b
|
[] |
no_license
|
akhanian/VisualOdometry
|
df02d03c031901f3b746e8e77a574a0f319f9acd
|
03bd99fa2312687cd67b159a20afa72ae15ba4c4
|
refs/heads/master
| 2023-01-06T08:41:24.802547
| 2020-11-11T05:10:29
| 2020-11-11T05:10:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 820
|
py
|
# -*- coding: utf-8 -*-
"""
Created by etayupanta at 6/30/2020 - 21:10
__author__ = 'Eduardo Tayupanta'
__email__ = 'eduardotayupanta@outlook.com'
"""
# Import Libraries:
from tensorflow import keras
from tensorflow.keras import layers
class DeepVONet(keras.Model):
def __init__(self):
super(DeepVONet, self).__init__()
self.reshape = keras.layers.Reshape((-1, 10 * 3 * 1024))
self.lstm1 = layers.LSTM(1000, dropout=0.5, return_sequences=True)
self.lstm2 = layers.LSTM(1000, dropout=0.5)
self.dropout = layers.Dropout(0.5)
self.out = layers.Dense(6)
def call(self, inputs, is_training=False):
x = self.reshape(inputs)
x = self.lstm1(x)
x = self.lstm2(x)
x = self.dropout(x, is_training)
x = self.out(x)
return x
|
[
"wetayupanta@gmail.com"
] |
wetayupanta@gmail.com
|
770781cf8434a6484eb3418aafba1bd504f0315d
|
1a819b4d69a7c455199b638b1609d3284ecbf255
|
/alttprbot_srl/racebot.py
|
c760ffc28d30de0301fd73fb1bf3fb04a1d6a28b
|
[] |
no_license
|
Maxor14/sahasrahbot
|
5167355a23a4e9d91171b583fe8065acd0ab99a6
|
9183933869f87743d94867cf52c463179d0b687a
|
refs/heads/master
| 2021-05-22T21:30:54.015013
| 2020-04-01T01:01:47
| 2020-04-01T01:01:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,951
|
py
|
import asyncio
import math
import re
import ircmessage
from alttprbot.database import spoiler_races, srl_races
from alttprbot.tournament import league
from alttprbot.util.srl import srl_race_id
from alttprbot_srl import alt_hunter, discord_integration
from config import Config as c
starting = re.compile(
"\\x034\\x02The race will begin in 10 seconds!\\x03\\x02")
go = re.compile("\\x034\\x02GO!\\x03\\x02")
newroom = re.compile(
"Race initiated for (.*)\. Join\\x034 (#srl-[a-z0-9]{5}) \\x03to participate\.")
runnerdone = re.compile(
"(.*) (has forfeited from the race\.|has finished in .* place with a time of [0-9][0-9]:[0-9][0-9]:[0-9][0-9]\.)")
racedone = re.compile(
"^Status: Complete \| Game: .*$"
)
srl_game_whitelist = [
'The Legend of Zelda: A Link to the Past Hacks',
'A Link to the Past & Super Metroid Combo Randomizer'
]
async def topic_change_handler(target, source, message, client):
if not (source == 'RaceBot' or source == 'synack'):
return
if target.startswith('#srl-') and racedone.search(message):
await asyncio.sleep(5)
await league.process_league_race_finish(target, client)
async def handler(target, source, message, client):
if not (source == 'RaceBot' or source == 'synack'):
return
srl_id = srl_race_id(target)
if target == '#speedrunslive':
result = newroom.search(message)
if result and result.group(1) in srl_game_whitelist:
if not c.DEBUG:
await asyncio.sleep(1)
await client.join(result.group(2))
await asyncio.sleep(60)
await client.message(result.group(2), "Hi! I'm SahasrahBot, your friendly robotic elder and ALTTPR/SMZ3 seed roller. To see what I can do, visit https://sahasrahbot.synack.live")
else:
print(f'would have joined {result.group(2)}')
if target.startswith('#srl-'):
if starting.match(message) or message == 'test starting':
race = await srl_races.get_srl_race_by_id(srl_id)
if race:
if not client.in_channel(target):
await client.join(target)
await client.message(target, f".setgoal {race['goal']}")
if race['message'] is not None:
await asyncio.sleep(15)
await client.message(target, race['message'])
await srl_races.delete_srl_race(srl_id)
if go.match(message) or message == 'test go':
# spoilers
race = await spoiler_races.get_spoiler_race_by_id(srl_id)
if race:
await client.message(target, 'Sending spoiler log...')
await client.message(target, '---------------')
await client.message(target, f"This race\'s spoiler log: {race['spoiler_url']}")
await client.message(target, '---------------')
await client.message(target, 'GLHF! :mudora:')
await countdown_timer(
ircbot=client,
duration_in_seconds=race['studytime'],
srl_channel=target,
beginmessage=True,
)
await spoiler_races.delete_spoiler_race(srl_id)
await discord_integration.discord_race_start(srl_id)
await alt_hunter.check_race(srl_id)
if message == 'test complete':
await topic_change_handler(target, source, message, client)
result = runnerdone.search(message)
if result:
await discord_integration.discord_race_finish(result.group(1), srl_id)
async def countdown_timer(ircbot, duration_in_seconds, srl_channel, beginmessage=False):
loop = asyncio.get_running_loop()
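    # Announcement thresholds in seconds remaining: half-hour/minute marks down to
    # a per-second countdown over the final 10 seconds.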
reminders = [1800, 1500, 1200, 900, 600, 300,
120, 60, 30, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
start_time = loop.time()
end_time = loop.time() + duration_in_seconds
while True:
# print(datetime.datetime.now())
timeleft = math.ceil(start_time - loop.time() + duration_in_seconds)
# print(timeleft)
if timeleft in reminders:
minutes = math.floor(timeleft/60)
seconds = math.ceil(timeleft % 60)
if minutes == 0 and seconds > 10:
msg = f'{seconds} second(s) remain!'
elif minutes == 0 and seconds <= 10:
msg = ircmessage.style(
f"{seconds} second(s) remain!", fg='green', bold=True)
else:
msg = f'{minutes} minute(s), {seconds} seconds remain!'
await ircbot.message(srl_channel, msg)
reminders.remove(timeleft)
if loop.time() >= end_time:
if beginmessage:
await ircbot.message(srl_channel, ircmessage.style('Log study has finished. Begin racing!', fg='red', bold=True))
break
await asyncio.sleep(.5)
|
[
"tcprescott@gmail.com"
] |
tcprescott@gmail.com
|
79a7f455388690fa7a0287ab242b104b0be5b488
|
6233dfe18e53b55aef0c5eef9d6b59730f96dccb
|
/adminNotification/views.py
|
4583d1d1789130bfe01feecf28960ae530ccfaf0
|
[] |
no_license
|
omar74/HISIK_API2-master
|
6fe4f8380717f0a767409c62c1ffcfd060fddd4d
|
5d891bc9c7a31de8cdb0591a77d5fb1e0f759984
|
refs/heads/master
| 2020-12-15T15:38:33.103082
| 2020-01-20T18:48:18
| 2020-01-20T18:48:18
| 235,158,685
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 954
|
py
|
from django.shortcuts import render
from .models import NotificationAdmin
from .serializer import NotificationAdminSerializer
from rest_framework import generics
class AdminNotificationListView(generics.ListCreateAPIView):
permission_classes = []
authentication_classes = []
queryset = NotificationAdmin.objects.all()
serializer_class = NotificationAdminSerializer
def create(self, request, *args, **kwargs):
''' I wanted to do some stuff with serializer.data here '''
return super(AdminNotificationListView, self).create(request, *args, **kwargs)
class AdminNotficationDetailedView(generics.RetrieveUpdateDestroyAPIView):
permission_classes = []
authentication_classes = []
queryset = NotificationAdmin.objects.all()
serializer_class = NotificationAdminSerializer
lookup_field = 'type'
|
[
"omar.ashraf0597@gmail.com"
] |
omar.ashraf0597@gmail.com
|
5db76e654df91d80b843b73dd410c5b47ee56eeb
|
57b6db85bd35ffa9c5ab8f38cf5bca5821b42d73
|
/reader_file_csv.py
|
72ee5a3064914def4e49534021923ec1bcff2107
|
[] |
no_license
|
Suryana009/PythonTutorial
|
2adb880f20dbfed64e9f8e7b2f9aa18f18bac1ad
|
858a5fe146cf9c6b82d89c236ba6c4524f1782fd
|
refs/heads/master
| 2020-03-09T22:06:21.236164
| 2018-04-23T06:17:57
| 2018-04-23T06:17:57
| 129,026,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 110
|
py
|
import csv
f = open('karyawan.csv', 'r')
reader = csv.reader(f)
for row in reader:
    print(row)
f.close()
|
[
"suryana.ryan009@gmail.com"
] |
suryana.ryan009@gmail.com
|
d7147c0137ee9e0ad4bd9852404b8af452a36406
|
191e0df0aa9f2bb7e5a9da214e2ca73fd9f277e9
|
/src/apps/myapp/views.py
|
45899d4020acf1ee1d7e7f9ee2029db2d08c96db
|
[
"MIT"
] |
permissive
|
NewReStarter/Django_Form
|
720b76fd8bffacbf46ba96d31e0ea5f862658a7c
|
3a9c8b536f5750ed9490533cee64ca358020a265
|
refs/heads/master
| 2020-03-13T04:23:12.460130
| 2018-05-08T15:52:35
| 2018-05-08T15:52:35
| 130,961,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,222
|
py
|
from django.shortcuts import render
from django.views.generic.base import View
from .models import *
import json
from datetime import datetime
class FormView(View):
def get(self, request):
categories = Category.objects.filter(status=1)
return render(request, "index.html", {'categories': categories})
def post(self, request):
data = []
check_list = {}
q_check_list = {}
for k, v in request.POST.items():
category = Category.objects.get(id=k.split('_')[0])
question = Question.objects.get(id=k.split('_')[1])
            if category.id in check_list:
if len(k.split('_')) == 3:
c_index = check_list[category.id]['count']
q_index = check_list[category.id]['question'][question.id]
data[c_index]['questions'][q_index]['answer'].append(v)
else:
data[check_list[category.id]['count']]['questions'].append({
'answer': [v],
'id': question.id,
'text': question.title,
'addtion_info': question.describe,
})
check_list[category.id]['question'][question.id] = len(check_list[category.id]['question'])
else:
data.append({
'id': category.id,
'text': category.text,
'questions': [{
'answer': [v],
'id': question.id,
'text': question.title,
'addtion_info': question.describe,
}],
})
check_list[category.id] = {
'count': len(data) - 1,
'question': {
question.id: 0
}
}
form_data = Form_data()
form_data.data = json.dumps(data)
form_data.create_time = datetime.now()
form_data.modify_time = datetime.now()
form_data.save()
categories = Category.objects.filter(status=1)
return render(request, "index.html", {'categories': categories})
|
[
"ziliugao@gmail.com"
] |
ziliugao@gmail.com
|
f075ed8bbec5b8e9a2aa280e5a35872d3244c077
|
f170a491f323a63665ccf39291ae2ad3fe8d626b
|
/privat_bank_test/wsgi.py
|
a1f6538b7d18f522fa3ec9e8b05ae0ac9fb13d3c
|
[] |
no_license
|
DmitryFleur/PrivatBankTest
|
098f1829a3c031f619ae82b8e498b827640dde5b
|
0ac2d605966735575b3fe498b92d20c352fdf458
|
refs/heads/master
| 2020-03-27T00:49:30.499822
| 2018-08-22T05:33:43
| 2018-08-22T05:33:43
| 145,660,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
"""
WSGI config for privat_bank_test project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'privat_bank_test.settings')
application = get_wsgi_application()
|
[
"bDEFpm74!!"
] |
bDEFpm74!!
|
cddab9580d9af9da3a18d635c9717ed2acc1f201
|
4bc2d855558ccb962991f997e9779919031687dd
|
/capstone/causalmodel/migrations/0001_initial.py
|
d9fe267a7a9b8e4c5697913127b312847c7b2554
|
[] |
no_license
|
jmblontoc/Likha-Capstone
|
80081e44b7ad6457eb776432e623c6db8b7a17e2
|
e1c32911b58cd1419c8e1a554ac32210456d201d
|
refs/heads/master
| 2022-12-10T03:26:32.946638
| 2018-12-09T04:33:10
| 2018-12-09T04:33:10
| 134,726,142
| 0
| 1
| null | 2022-11-25T23:52:42
| 2018-05-24T14:21:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,187
|
py
|
# Generated by Django 2.0.5 on 2018-06-27 15:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DataMap',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('metric', models.CharField(max_length=255)),
('value', models.DecimalField(decimal_places=2, max_digits=10)),
('threshold', models.DecimalField(decimal_places=2, max_digits=10)),
],
),
migrations.CreateModel(
name='RootCause',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
],
),
migrations.AddField(
model_name='datamap',
name='root_cause',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='causalmodel.RootCause'),
),
]
|
[
"37819032+jmblontoc@users.noreply.github.com"
] |
37819032+jmblontoc@users.noreply.github.com
|
4ddc52309634f93275931f026fe9acd394cf88e0
|
04d1c898b4fdd1b55785c48260f0b7efcd8d0060
|
/int.py
|
76537a32fd9ae97927370dbb376a91ce8b0d25a7
|
[] |
no_license
|
protosscom/python-ch2.2
|
27799f8971839456333aa61ba249c2c67b04efa9
|
61e70008f4261068bb7c570b2f9eaa6a6940f87b
|
refs/heads/master
| 2020-04-10T16:03:52.606662
| 2018-12-10T07:04:40
| 2018-12-10T07:04:40
| 161,131,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
# Binary, octal, decimal and hexadecimal literals
a = 23
print(type(a))
b = 0b1101
o = 0o23
h = 0x23
print(b, o, h)
# In Python 3.x, int and long were merged; the representable range is unbounded
e = 2**1024
print(type(e))
print(e)
print(e.bit_length())
# Conversion functions
print(oct(38))
print(hex(38))
print(bin(38))
|
[
"protosscom@gmail.com"
] |
protosscom@gmail.com
|
3c9a07fa27647dc38716eb782a3a4838a70b2d17
|
1e182038f280285fa6a833b5aaf49591c707ad53
|
/ycombinator/encodings.py
|
deb67e51fe3528a3081ec9107a5ac4be87b9b944
|
[] |
no_license
|
turing-complet/samples
|
87e13e75ea1cb52503d0937cc32d02ad380909b9
|
87e1042cdf2d427def822a56a9701817b2f3fae8
|
refs/heads/master
| 2021-12-15T08:46:48.493475
| 2021-12-13T02:25:18
| 2021-12-13T02:25:18
| 235,974,764
| 0
| 0
| null | 2021-12-13T02:36:02
| 2020-01-24T09:36:55
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 109
|
py
|
class Numeral:
def __init__(self, n):
pass
class Bool:
def __init__(self, b):
pass
|
[
"jhagg314@gmail.com"
] |
jhagg314@gmail.com
|
404cc31ac2c1214b9ebd5d4d1ef590b04436a905
|
0e130ed05664c02888ed2f7305ddacc34192519f
|
/changecsv.py
|
590aea1330fe38d20bbd249578a1c18e515dd5a0
|
[] |
no_license
|
victormm88/Click_Through_Rate_Prediction
|
86acd70784fc11d56bb113a9738ce1b549b6abd1
|
cc2df8724dc95776f3ec6974f13e61a34408ba8c
|
refs/heads/master
| 2021-01-21T07:38:57.697293
| 2015-02-13T05:16:16
| 2015-02-13T05:16:16
| 30,741,451
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 622
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
''' '''
__author__ = 'Wang Junq'
import csv
pre = 33563901. / 6865066
pre = 1 - pre / (pre + 1)
f_init = open('l1005.csv', 'rb')
f_result = open('l1005-change.csv', 'wb')
csv_init = csv.reader(f_init)
csv_result = csv.writer(f_result)
title = csv_init.next()
csv_result.writerow(title)
for row in csv_init:
    # pre = float(row[1])
    # if pre < 0.25 and pre > 0.11:
    #     pre = 0.1698
    # elif pre > 0.6:
    #     pre = 0.99
    # elif pre > 0.4:
    #     pre = 0.6
    # elif pre > 0.35:
    #     pre = 0.5
    temp_list = [row[0], pre]
    csv_result.writerow(temp_list)
f_init.close()
f_result.close()
|
[
"351868656@qq.com"
] |
351868656@qq.com
|
7557f544a64fd0f4ff99c1cbdade203205fdfb81
|
279967844e5b35f5d926f75f34d2a3e926819a52
|
/covid-19-timelapse/dashapps/term_frequency/utils.py
|
9e1c38043f6edbf626ced82cf315979851293bb5
|
[
"Apache-2.0"
] |
permissive
|
thehonduranjazzman/developer-platform
|
e22e62c27714e531fb87c77087aafb286221a797
|
ba3d8be69c78dc3ec189d0e1df045f5e7272341c
|
refs/heads/master
| 2022-05-23T18:27:27.935734
| 2020-04-22T08:54:13
| 2020-04-22T08:54:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,727
|
py
|
import collections
import json
import random
import re
from datetime import datetime
import fastavro
import nltk
import pandas as pd
import requests
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.util import ngrams
from .config import TERMS_TO_REMOVE
# nltk.download('stopwords')
# nltk.download('punkt')
def ngram_frequencies(n, articles, verbose=True, start_date=None, end_date=None):
"""
Generate NGram frequencies from an article dataframe
Args:
n (int): The size of the ngram
articles (pandas.DataFrame): Articles to process
        verbose (bool): Whether or not to print some useful information while the process is running.
        start_date: Optional lower bound on ``publication_datetime``; earlier articles are excluded.
        end_date: Optional exclusive upper bound on ``publication_datetime``.
Returns:
Frequencies (dict): Dict containing ngram counts by day.
"""
if start_date:
articles = articles[articles['publication_datetime'] >= start_date]
if end_date:
articles = articles[articles['publication_datetime'] < end_date]
articles['publication_datetime'] = articles['publication_datetime'].dt.floor(
'D')
grouped_by_pub_date = articles.sort_values(
by='publication_datetime').groupby(['publication_datetime'])
if verbose:
print('Number of groups (days): {}'.format(
len(grouped_by_pub_date.groups)))
sw = set(stopwords.words('english'))
frequencies = {}
for i, group in enumerate(grouped_by_pub_date.groups):
articles = grouped_by_pub_date.get_group(group)
article_tokens = [word.lower() for text in articles['full_articles']
for word in word_tokenize(text)
if (not word in sw) and word.isalnum()]
ngrams_ = ngrams(article_tokens, n)
counted = collections.Counter(ngrams_)
most_common = {' '.join(list(k)): v for (
k, v) in counted.most_common(100)}
pub_date_str = datetime.strftime(group, '%Y-%m-%d')
#pub_date_str = datetime.strftime(group, '%#m/%d/%Y')
if group in frequencies.keys():
frequencies[pub_date_str].update(most_common)
else:
frequencies[pub_date_str] = {}
frequencies[pub_date_str].update(most_common)
if verbose:
if i > 0 and i % 5 == 0:
print('Processed {} groups.'.format(i))
return frequencies
def strip_split(value):
return value.strip(',').split(',')
def strip_commas(value):
return value.strip(',')
def clean_up_text(string):
if string:
return re.sub(r'[^A-Za-z0-9!?.,:;\' ]', ' ', string)
return ''
def process_datetimes(value):
return datetime.utcfromtimestamp(value / 1000)
def snapshot_files_to_dataframe(user_key, snapshot_id):
'''
Retrieve the files from a completed extraction
Args:
user_key: Snapshots API user key.
        snapshot_id: The ID of the completed snapshot job whose files should be downloaded.
'''
headers = {
'content-type': 'application/json',
'user-key': user_key
}
article_dataframes = []
job_url = 'https://api.dowjones.com/alpha/extractions/documents/{}'.format(
snapshot_id)
files = requests.get(job_url, headers=headers).json()[
'data']['attributes']['files']
for f in files:
uri = f['uri']
file_name = uri.split('/')[-1]
if len(file_name) > 0:
file_response = requests.get(
uri, headers=headers, allow_redirects=True, stream=True)
file_response.raw.decode_content = True
records = fastavro.reader(file_response.raw)
records_df = pd.DataFrame(records)
article_dataframes.append(records_df)
data = pd.concat(article_dataframes, ignore_index=True)
return data
def reformat_dataframe(source_df):
"""
Reformat dataframe to use in the graph.
Args:
source_df: DataFrame to reformat
Returns:
New dataframe: reformatted dataframe
"""
    rows = []
    for i in range(len(source_df)):
        for j in source_df.iloc[i].index:
            rows.append({
                'day': source_df.iloc[i].name,
                'term': str(j),
                'count': source_df.iloc[i][j]
            })
    return pd.DataFrame(rows, columns=['day', 'term', 'count'])
def generate_figure(source_df):
"""
Generate figure with a slider
Args:
source_df: Dataframe with data to use for the figure
Returns:
Figure dict containing necessary parameters to pass to go.Figure()
"""
# Define the figure
fig_dict = {
'data': [],
'layout': {},
'frames': []
}
days = []
for day in source_df['day']:
if day not in days:
days.append(day)
terms = []
for term in source_df['term']:
if term not in terms:
terms.append(term)
fig_dict['layout']['xaxis'] = {
'range': [source_df['day'].min(), source_df['day'].max()],
'title': 'Publication Date'
}
fig_dict['layout']['yaxis'] = {
'range': [0, 4000],
'title': 'Term Frequency'
}
fig_dict['layout']['title'] = 'COVID-19 - Term Evolution'
fig_dict['layout']['hovermode'] = 'x'
fig_dict['layout']['sliders'] = {
'args': [
'transition', {
'duration': 0,
'easing': 'linear'
}
],
'initialValue': days[0],
'plotlycommand': 'animate',
'values': days,
'visible': True
}
sliders_dict = {
'active': 0,
'yanchor': 'top',
'xanchor': 'left',
'currentvalue': {
'font': {
'size': 12
},
'visible': True,
'xanchor': 'right'
},
'transition': {
'duration': 0,
'easing': 'linear'
},
'pad': {
'b': 10,
't': 50
},
'len': 1.0,
'steps': []
}
# Generate the first point in the display
day_1 = days[0]
for term in terms:
dataset_by_day = source_df[source_df['day'] == day_1]
dataset_by_day_and_term = dataset_by_day[dataset_by_day['term'] == term]
data_dict = {
'x': list(dataset_by_day_and_term['day']),
'y': list(dataset_by_day_and_term['count']),
'mode': 'lines',
'text': list(dataset_by_day_and_term['term']),
'name': term,
'line': {
'width': 3
},
'showlegend': True
}
fig_dict['data'].append(data_dict)
all_x = []
# Create frames
for i, day in enumerate(days):
all_x.append(day)
frame = {'data': [], 'name': str(day)}
for term in terms:
dataset_by_day = source_df[source_df['day'] == day]
dataset_by_day_and_term = dataset_by_day[dataset_by_day['term'] == term]
all_counts = list(source_df[source_df['term'] == term]['count'])
if i == 0:
all_y = [all_counts[i]]
else:
all_y = all_counts[:i+1]
data_dict = {
'x': all_x,
'y': all_y,
'mode': 'lines',
'text': list(dataset_by_day_and_term['term']),
'name': term,
'line': {
# 'color': term_color_dict[term]
'width': 3
},
'showlegend': True
}
frame['data'].append(data_dict)
fig_dict['frames'].append(frame)
slider_step = {
'args': [
[day],
{
'frame': {
'duration': 0,
'redraw': False
},
'mode': 'immediate',
'transition': {
'duration': 0
}
}
],
'label': day,
'method': 'animate'
}
sliders_dict['steps'].append(slider_step)
fig_dict['layout']['sliders'] = [sliders_dict]
return fig_dict
def update_terms_figure(date, terms_df):
"""
Generate a figure frame using the date.
Args:
date: The date until to generate the frame.
terms_df: Dataframe to use.
"""
filtered_df = terms_df[terms_df['day'] <= date]
days = [day for day in filtered_df['day'].unique()]
terms = [term for term in filtered_df['term'].unique()]
traces = []
for term in terms:
counts = list(filtered_df[filtered_df['term'] == term]['count'])
data_dict = {
'x': days,
'y': counts,
'mode': 'lines',
'text': [term],
'name': term,
'line': {
'width': 3
}
}
traces.append(data_dict)
return {
'data': traces,
'layout': dict(
xaxis = {
'range': [terms_df['day'].min(), terms_df['day'].max()],
'title': 'Publication Date',
'showgrid': False
},
yaxis = {
'range': [0, 3500],
'title': 'Term Frequency',
'showgrid': False
},
hovermode = 'x',
title = 'Bi-grams in the news',
paper_bgcolor = '#39485A',
plot_bgcolor = '#39485A',
font = dict(color = 'white', family='SimplonRegular')
)
}
def ngram_dataframe_from_file(bigrams_or_path, read_from_file=False, start_date=None):
"""
Generate the ngram dataframe to use in charts from a file.
Args:
bigrams_or_path (str): Either the bigrams to use for dataframe, or file path to read bigrams from.
read_from_file (bool): Whether or not to read bigrams from file.
Returns:
Dataframe containing dates, bigrams, counts to use in the charts.
"""
if read_from_file:
bigrams = json.load(open(bigrams_or_path, 'rt', encoding='utf-8'))
else:
bigrams = bigrams_or_path
bigram_df = pd.DataFrame.from_dict(bigrams).fillna(0)
date_ind = bigram_df.swapaxes('index', 'columns', copy=True)
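    # Keep only the fixed analysis window (2020-03-06 through 2020-04-01).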
date_ind = date_ind[date_ind.index >= '2020-03-06']
date_ind = date_ind[date_ind.index <= '2020-04-01']
to_remove = TERMS_TO_REMOVE
top_ngrams = date_ind.sum().sort_values(ascending=False).head(100)
top_ngrams = top_ngrams.keys().tolist()
relevant_terms = set(top_ngrams) - set(to_remove)
df_for_chart = date_ind[relevant_terms]
return reformat_dataframe(df_for_chart)
|
[
"miballegh@outlook.com"
] |
miballegh@outlook.com
|
cec8243c693159b82311f03b0f97f689b0252e68
|
8ed2700f29e669a05e324c23fc3cced361c25dd1
|
/cli/ceph/osd/crush.py
|
e2876d2eb9566dbd481491c580912e453ca9c57d
|
[
"MIT"
] |
permissive
|
red-hat-storage/cephci
|
179cdc8cc01f20bb80cb171800f04123ae8d6651
|
0691fbaf8fca2a9cd051c5049c83758c65301654
|
refs/heads/master
| 2023-08-31T15:19:00.375389
| 2023-08-31T14:43:30
| 2023-08-31T14:43:30
| 171,728,354
| 28
| 87
|
MIT
| 2023-09-14T18:59:33
| 2019-02-20T18:36:22
|
Python
|
UTF-8
|
Python
| false
| false
| 4,811
|
py
|
from cli import Cli
class Crush(Cli):
"""This module provides CLI interface to manage the Crush service."""
def __init__(self, nodes, base_cmd):
super(Crush, self).__init__(nodes)
self.base_cmd = f"{base_cmd} crush"
def rule(self, *Kargs):
"""
To create rules
Kargs:
Supported args
rule_type (str): create-simple | create-replicated |create-erasure
rule_name (str): name of the rule
root (str): root of the CRUSH hierarchy
failure_domain_type (str): failure domain (host/rack)
device_class (str): storage device class (hdd/sdd)
replicated (bool): if the rule is replicated or not
"""
cmd = f"{self.base_cmd} rule"
for arg in Kargs:
cmd += f" {arg}"
out = self.execute(sudo=True, cmd=cmd)
if isinstance(out, tuple):
return out[0].strip()
return out
def set_device_class(self, device_class, osd_id):
"""
To set device class to osd
Args:
device_class (str): device class (hdd/ssd)
osd_id (list): list of osd's
"""
cmd = f"{self.base_cmd} set-device-class {device_class}"
for _osd in osd_id:
cmd += f" {_osd}"
out = self.execute(sudo=True, cmd=cmd)
if isinstance(out, tuple):
return out[0].strip()
return out
def rm_device_class(self, device_class, osd_id):
"""
To remove device class to osd
Args:
device_class (str): device class (hdd/ssd)
osd_id (list): list of osd's
"""
cmd = f"{self.base_cmd} rm-device-class {device_class}"
for _osd in osd_id:
cmd += f" {_osd}"
out = self.execute(sudo=True, cmd=cmd)
if isinstance(out, tuple):
return out[0].strip()
return out
def rename_device_class(self, old_name, new_name):
"""
To rename device class
Args:
old_name (str): old class name
new_name (str): new class name
"""
cmd = f"{self.base_cmd} class rename {old_name} {new_name}"
out = self.execute(sudo=True, cmd=cmd)
if isinstance(out, tuple):
return out[0].strip()
return out
def ls_osd(self, device_class):
"""
To list all OSDs that belong to a particular class
Args:
device_class (str): device class (hdd/ssd)
"""
cmd = f"{self.base_cmd} class ls-osd {device_class}"
out = self.execute(sudo=True, cmd=cmd)
if isinstance(out, tuple):
return out[0].strip()
return out
def add_bucket(self, name, type):
"""
To add a bucket instance to CRUSH hierarchy
Args:
name (str): bucket name
type (str): type of bucket
"""
cmd = f"{self.base_cmd} add-bucket {name} {type}"
out = self.execute(sudo=True, cmd=cmd)
if isinstance(out, tuple):
return out[0].strip()
return out
def move(self, name, type):
"""
To move a bucket instance to a particular location in CRUSH hierarchy
Args:
name (str): bucket name
type (str): type of bucket
"""
cmd = f"{self.base_cmd} move {name} {type}"
out = self.execute(sudo=True, cmd=cmd)
if isinstance(out, tuple):
return out[0].strip()
return out
def add(self, osd, weight, bucket_details):
"""
To add an OSD to a CRUSH hierarchy
Args:
osd (str): osd id or name
weight (str): weight to be assigned
bucket_details (list): details of format {bucket-type}={bucket-name}
"""
cmd = f"{self.base_cmd} add {osd} {weight} "
cmd += " ".join(bucket_details)
out = self.execute(sudo=True, cmd=cmd)
if isinstance(out, tuple):
return out[0].strip()
return out
def remove(self, item):
"""
To remove an OSD from the CRUSH map of a running cluster
Args:
item (str): osd id or bucket name to be removed
"""
cmd = f"{self.base_cmd} remove {item}"
out = self.execute(sudo=True, cmd=cmd)
if isinstance(out, tuple):
return out[0].strip()
return out
def set(self, key, value):
"""
Set value to give key
Args:
key (str): Key to be updated
value (str): Value to be set to the key
"""
cmd = f"{self.base_cmd} set {key} {value}"
out = self.execute(sudo=True, cmd=cmd)
if isinstance(out, tuple):
return out[0].strip()
return out
|
[
"pranavprakash20@gmail.com"
] |
pranavprakash20@gmail.com
|
bee21a100ddcbd04daa619398ab9c09790be2d86
|
106536a7448d4414fac079cb657044f1dc92a588
|
/framework/machine.py
|
6cb012ab17185fe4a33168086a06f249a3002025
|
[] |
no_license
|
ChrisQiqiang/drlScheduler
|
0b9a10c8de4883cea2ada7565cdfb65185608dc4
|
2cd8b984bfed16687a7852baccb79742d1a35773
|
refs/heads/main
| 2023-08-03T17:55:17.654560
| 2021-09-14T15:17:56
| 2021-09-14T15:17:56
| 405,674,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,161
|
py
|
from framework.instance import Instance
class MachineConfig(object):
# def __init__(self, machine_id, cpu_capacity, memory_capacity, disk_capacity):#, cpu=None, memory=None, disk=None):
# self.id = machine_id
# self.cpu_capacity = cpu_capacity
# self.memory_capacity = memory_capacity
# self.disk_capacity = disk_capacity
# """self.cpu = cpu_capacity if cpu is None else cpu
# self.memory = memory_capacity if memory is None else memory
# self.disk = disk_capacity if disk is None else disk"""
# self.to_schedule = False
def __init__(self, machine_id, cpu_capacity):
self.id = machine_id
self.cpu_capacity = cpu_capacity
self.to_schedule = False
class Machine(object):
# def __init__(self, machine_config):
# self.id = machine_config.id
# self.cpu_capacity = machine_config.cpu_capacity
# self.memory_capacity = machine_config.memory_capacity
# self.disk_capacity = machine_config.disk_capacity
# """self.cpu = machine_config.cpu
# self.memory = machine_config.memory
# self.disk = machine_config.disk"""
# self.cluster = None
# self.instances = {}
def __init__(self, machine_config):
self.id = machine_config.id
self.cpu_capacity = machine_config.cpu_capacity
self.cluster = None
self.instances = {}
def attach(self, cluster):
self.cluster = cluster
def add_instance(self, instance_config):
# assert instance_config.cpu <= self.cpu and instance_config.memory <= self.memory and instance_config.disk <= self.disk
# print('instance_config.cpu = ', instance_config.cpu, ', self.cpu = ', self.cpu)
# assert instance_config.cpu <= self.cpu
instance = Instance(instance_config)
self.instances[instance.id] = instance
"""self.cpu -= instance.cpu
self.memory -= instance.memory
self.disk -= instance.disk"""
instance.attach(self)
# def accommodate_w(self, instance, cpu_threshold=0.75, memory_threshold=0.75, disk_threshold=0.75):
# return self.cpu - instance.cpu >= self.cpu_capacity * (1 - cpu_threshold) \
# and self.memory - instance.memory >= self.memory_capacity * (1 - memory_threshold) \
# and self.disk - instance.disk >= self.disk_capacity * (1 - disk_threshold)
def accommodate_w(self, instance, cpu_threshold=0.75):
return self.cpu - instance.cpu >= self.cpu_capacity * (1 - cpu_threshold)
# def accommodate_wo(self, instance, cpu_threshold=0.75, memory_threshold=0.75, disk_threshold=0.75):
# return self.cpu + instance.cpu >= self.cpu_capacity * (1 - cpu_threshold) \
# and self.memory + instance.memory >= self.memory_capacity * (1 - memory_threshold) \
# and self.disk + instance.disk >= self.disk_capacity * (1 - disk_threshold)
def accommodate_wo(self, instance, cpu_threshold=0.75):
return self.cpu + instance.cpu >= self.cpu_capacity * (1 - cpu_threshold)
def pop(self, instance_id):
instance = self.instances.pop(instance_id)
"""self.cpu += instance.cpu
self.memory += instance.memory
self.disk += instance.disk"""
instance.machine = None
return instance
def push(self, instance):
self.instances[instance.id] = instance
"""self.cpu -= instance.cpu
self.memory -= instance.memory
self.disk -= instance.disk"""
instance.attach(self)
@property
def cpu(self):
occupied = 0
for instance in self.instances.values():
occupied += instance.cpu
return self.cpu_capacity - occupied
# @property
# def memory(self):
# occupied = 0
# for instance in self.instances.values():
# occupied += instance.memory
# return self.memory_capacity - occupied
# @property
# def disk(self):
# occupied = 0
# for instance in self.instances.values():
# occupied += instance.disk
# return self.disk_capacity - occupied
|
[
"2290142073@qq.com"
] |
2290142073@qq.com
|
3ad629c37259ce486878f28cf6844c6bc01b524f
|
bdb781b295f2c4fe570ff2db39b9bfe38cab6476
|
/example/auth0login/urls.py
|
68805a4c05ba7da12313edc66b0c5a93f436d96a
|
[] |
no_license
|
jangita/learn-django-auth0
|
c8386dc138e9706c9507c5472402b60cb119bc17
|
3cdf25a066409dd7acecf0308ed901fbc136fddb
|
refs/heads/master
| 2023-01-02T01:34:53.665904
| 2020-10-28T03:17:53
| 2020-10-28T03:17:53
| 308,088,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
from django.urls import include, path
from . import views
urlpatterns = [
path('', views.index),
path('dashboard', views.dashboard),
path('logout', views.logout),
path('', include('django.contrib.auth.urls')),
path('', include('social_django.urls')),
]
|
[
"jangita.nyagudi@gmail.com"
] |
jangita.nyagudi@gmail.com
|
0dc52145873acef997045ced74eebb0ce1aa6d7f
|
19b0fd18df23da2999d298ee9aa426451b4e5c12
|
/src/sonic_ax_impl/mibs/vendor/__init__.py
|
5514a7346795691dbb1528f20f694081290f58e4
|
[
"Apache-2.0"
] |
permissive
|
qiluo-msft/sonic-snmpagent
|
ced0e2fd053bbed60ee5f22c1794040105ab5a4f
|
a5b2983be06fa51a711cded92cbc4f089a147233
|
refs/heads/master
| 2023-02-19T15:17:49.463707
| 2022-03-28T18:15:00
| 2022-03-28T18:15:00
| 79,850,509
| 0
| 0
|
NOASSERTION
| 2023-02-14T21:49:13
| 2017-01-23T21:33:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,146
|
py
|
import collections
import time
import psutil
from ax_interface import MIBUpdater
from sonic_ax_impl import logger
class SystemUtilizationHandler(MIBUpdater):
def __init__(self):
super().__init__()
# From the psutil documentation https://pythonhosted.org/psutil/#psutil.cpu_percent:
#
# Warning the first time this function is called
# with interval = 0.0 or None it will return a
# meaningless 0.0 value which you are supposed
# to ignore.
psutil.cpu_percent()
# '...is recommended for accuracy that this function be called with at least 0.1 seconds between calls.'
time.sleep(0.1)
# a sliding window of 60 contiguous 5 sec utilization (up to five minutes)
self.cpuutils = collections.deque([psutil.cpu_percent()], maxlen=60)
self.system_virtual_memory = psutil.virtual_memory()
logger.debug('System Utilization handler initialized.')
def get_cpuutil_5sec(self):
"""
:return: Last polled CPU utilization.
"""
return int(self.cpuutils[-1])
def get_cpuutil_1min(self):
"""
:return: Up to one minute's worth of average CPU utilization.
"""
past_utilization = list(self.cpuutils)[-12:]
return int(sum(past_utilization) / len(past_utilization))
def get_cpuutil_5min(self):
"""
:return: Up to five minute's worth of average CPU utilization.
"""
return int(sum(self.cpuutils) / len(self.cpuutils))
def get_memutil(self):
"""
:return: The current memory utilization (as a percent integer)
"""
return int(self.system_virtual_memory.percent)
def update_data(self):
"""
Background task to add CPU Utilization sample / refresh memory utilization.
"""
cpu_util = psutil.cpu_percent()
self.cpuutils.append(cpu_util)
self.system_virtual_memory = psutil.virtual_memory()
logger.debug('Updating CPU/Mem Utilization with: {}% / {}%'.format(cpu_util, self.get_memutil()))
sys_util_h = SystemUtilizationHandler()
|
[
"noreply@github.com"
] |
noreply@github.com
|
49af44e9d1dc28c1ec60101728e6a68fa331e058
|
9788bf7929da8a87d7dfab8b633601122df88bf2
|
/accounts/urls.py
|
920c688f52fbd6db80c3959580af4dc27ff733f8
|
[] |
no_license
|
praneshsaminathan/dukaan
|
d0eab83d28625857a84c6f6ab1f44619326985b3
|
f4986966892fb7b3cede083b142bccf35174e068
|
refs/heads/main
| 2023-03-02T02:38:15.003309
| 2021-02-10T17:20:43
| 2021-02-10T17:20:43
| 337,749,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 556
|
py
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from accounts.views import LoginAPIView, GenerateOTPAPIView, StoreViewSet
from dukaan.utils.apps import get_api_url
router = DefaultRouter(trailing_slash=True)
router.register(r'stores', StoreViewSet, 'api-stores')
urlpatterns = [
path(get_api_url(), include(router.urls)),
path(get_api_url(url_name='generate-otp'), GenerateOTPAPIView.as_view(), name='ap-generate-otp'),
path(get_api_url(url_name='login'), LoginAPIView.as_view(), name='api-login')
]
|
[
"pranesh"
] |
pranesh
|
0a034e44b177bb293899d150df0f040bea24495c
|
8e35bffd191e2eec8b50370828ca954b5e249ae8
|
/flaskps/resources/api/ciclos_lectivos.py
|
ab6b587733294ca3a1e1d6c424845cb928fd9b7a
|
[] |
no_license
|
jmsolar/proySoft2019
|
6a0e42af239f13f3a7e314f5cf740c2a6b6d7a51
|
bc607c3e0c9830d5a0b48d88e299df46b5b20c6f
|
refs/heads/master
| 2023-05-30T02:44:02.410680
| 2020-01-21T17:23:06
| 2020-01-21T17:23:06
| 235,398,209
| 0
| 0
| null | 2023-05-22T22:38:36
| 2020-01-21T17:16:12
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,903
|
py
|
from flask_restful import Resource
from flask_restful import request
from flaskps.models.ciclo_lectivo import CicloLectivoModel
from flaskps.models.configuracion import Configuracion
class CicloLectivo(Resource):
def get(self):
datatables = False
page = None
        if len(request.args) == 0:
ciclos = CicloLectivoModel.all()
else:
datatables = True
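            # DataTables sends a zero-based row offset ('start'); convert it to a 1-based page number.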
start = int(request.args['start'])
page = 1
if start != 0:
                page = start // Configuracion.get_config().registros_por_pagina + 1
order = {'column': request.args['columns[' + request.args['order[0][column]'] + '][data]'],
'dir': request.args['order[0][dir]']}
page = CicloLectivoModel.all_by_page(page, order)
ciclos = page.items
ciclos_lectivos = []
for ciclo in ciclos:
semestre = "Primero" if (ciclo.semestre == 0) else "Segundo"
c = {
"id": ciclo.id,
"fecha_ini": ciclo.fecha_ini.strftime("%d/%m/%Y"),
"fecha_fin": ciclo.fecha_fin.strftime("%d/%m/%Y"),
"semestre": semestre
}
ciclos_lectivos.append(c)
if datatables:
return {
"draw": request.args['draw'],
"recordsTotal": page.total,
"recordsFiltered": page.total,
"data": ciclos_lectivos
}
else:
return ciclos_lectivos
class CicloLectivoTalleres(Resource):
def get(self, id):
ciclo = CicloLectivoModel.find_by_id(id)
talleres = []
for taller in ciclo.talleres:
t = {
"id": taller.id,
"nombre": taller.nombre
}
talleres.append(t)
return {
"talleres": talleres
}
|
[
"matias.solar@outlook.com"
] |
matias.solar@outlook.com
|
1c07148d7ab0dac268d97289f85bcfd5323f3892
|
4c7ccea26d2a6f7197fcdd7b8413652cea199485
|
/IPython/SdA/StackeddAutoencoder.py
|
88ef597fa78dc3337214ffa36a0bb97d7a894564
|
[] |
no_license
|
cgallego/Section3
|
77fc1c8e5f6dfa273775f165cfb54f28c05e0f52
|
1745cb018811541b6ece603f2762ef05cc263b3b
|
refs/heads/master
| 2021-01-19T06:41:31.153702
| 2016-08-08T16:45:43
| 2016-08-08T16:45:43
| 60,637,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,074
|
py
|
"""
Stacked denoising auto-encoders (SdA) using Theano.
Denoising autoencoders are the building blocks for SdA.
They are based on auto-encoders as the ones used in Bengio et al. 2007.
An autoencoder takes an input x and first maps it to a hidden representation
y = f_{\theta}(x) = s(Wx+b), parameterized by \theta={W,b}. The resulting
latent representation y is then mapped back to a "reconstructed" vector
z \in [0,1]^d in input space z = g_{\theta'}(y) = s(W'y + b'). The weight
matrix W' can optionally be constrained such that W' = W^T, in which case
the autoencoder is said to have tied weights. The network is trained such
that to minimize the reconstruction error (the error between x and z).
For the denosing autoencoder, during training, first x is corrupted into
\tilde{x}, where \tilde{x} is a partially destroyed version of x by means
of a stochastic mapping. Afterwards y is computed as before (using
\tilde{x}), y = s(W\tilde{x} + b) and z as s(W'y + b'). The reconstruction
error is now measured between z and the uncorrupted input x, which is
computed as the cross-entropy :
- \sum_{k=1}^d[ x_k \log z_k + (1-x_k) \log( 1-z_k)]
References :
- P. Vincent, H. Larochelle, Y. Bengio, P.A. Manzagol: Extracting and
Composing Robust Features with Denoising Autoencoders, ICML'08, 1096-1103,
2008
- Y. Bengio, P. Lamblin, D. Popovici, H. Larochelle: Greedy Layer-Wise
Training of Deep Networks, Advances in Neural Information Processing
Systems 19, 2007
"""
import os
import sys
import timeit
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from MultilayerPerceptron import HiddenLayer
from dAutoencoder import dA
from LogisticRegression import LogisticRegression
# start-snippet-1
class SdA(object):
"""Stacked denoising auto-encoder class (SdA)
A stacked denoising autoencoder model is obtained by stacking several
dAs. The hidden layer of the dA at layer `i` becomes the input of
the dA at layer `i+1`. The first layer dA gets as input the input of
the SdA, and the hidden layer of the last dA represents the output.
Note that after pretraining, the SdA is dealt with as a normal MLP,
the dAs are only used to initialize the weights.
"""
def __init__(
self,
numpy_rng,
theano_rng=None,
n_ins=None,
hidden_layers_sizes=None,
corruption_levels=None,
n_outs=None
):
""" This class is made to support a variable number of layers.
:type numpy_rng: numpy.random.RandomState
:param numpy_rng: numpy random number generator used to draw initial
weights
:type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
:param theano_rng: Theano random generator; if None is given one is
generated based on a seed drawn from `rng`
:type n_ins: int
:param n_ins: dimension of the input to the sdA
:type hidden_layers_sizes: list of ints
:param hidden_layers_sizes: sizes of the intermediate layers, must contain
at least one value
:type n_outs: int
:param n_outs: dimension of the output of the network
:type corruption_levels: list of float
:param corruption_levels: amount of corruption to use for each
layer
"""
self.sigmoid_layers = []
self.dA_layers = []
self.params = []
self.n_layers = len(hidden_layers_sizes)
assert self.n_layers > 0
if not theano_rng:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# allocate symbolic variables for the data
self.x = T.matrix('x') # the data is presented as rasterized images
self.y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
# The SdA is an MLP, for which all weights of intermediate layers
# are shared with a different denoising autoencoders
# We will first construct the SdA as a deep multilayer perceptron,
# and when constructing each sigmoidal layer we also construct a
# denoising autoencoder that shares weights with that layer
# During pretraining we will train these autoencoders (which will
# lead to changing the weights of the MLP as well)
# During finetuning we will finish training the SdA by doing
# stochastic gradient descent on the MLP
for i in range(self.n_layers):
# construct the sigmoidal layer
# the size of the input is either the number of hidden units of
# the layer below or the input size if we are on the first layer
if i == 0:
input_size = n_ins
else:
input_size = hidden_layers_sizes[i - 1]
# the input to this layer is either the activation of the hidden
# layer below or the input of the SdA if you are on the first
# layer
if i == 0:
layer_input = self.x
else:
layer_input = self.sigmoid_layers[-1].output
sigmoid_layer = HiddenLayer(rng=numpy_rng,
input=layer_input,
n_in=input_size,
n_out=hidden_layers_sizes[i],
activation=T.nnet.sigmoid)
# add the layer to our list of layers
self.sigmoid_layers.append(sigmoid_layer)
# it's arguably a philosophical question...
# but we are going to only declare that the parameters of the
# sigmoid_layers are parameters of the StackedDAA
# the visible biases in the dA are parameters of those
# dA, but not the SdA
self.params.extend(sigmoid_layer.params)
# Construct a denoising autoencoder that shared weights with this
# layer
dA_layer = dA(numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=input_size,
n_hidden=hidden_layers_sizes[i],
W=sigmoid_layer.W,
bhid=sigmoid_layer.b)
self.dA_layers.append(dA_layer)
# We now need to add a logistic layer on top of the MLP
self.logLayer = LogisticRegression(
input=self.sigmoid_layers[-1].output,
n_in=hidden_layers_sizes[-1],
n_out=n_outs
)
self.params.extend(self.logLayer.params)
# construct a function that implements one step of finetuning
# compute the cost for second phase of training,
# defined as the negative log likelihood
self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)
# compute the gradients with respect to the model parameters
# symbolic variable that points to the number of errors made on the
# minibatch given by self.x and self.y
self.errors = self.logLayer.errors(self.y)
def pretraining_functions(self, train_set_x, np_train_y, batch_size):
''' Generates a list of functions, each of them implementing one
step in training the dA corresponding to the layer with the same index.
The function will require as input the minibatch index, and to train
a dA you just need to iterate, calling the corresponding function on
all minibatch indexes.
:type train_set_x: theano.tensor.TensorType
:param train_set_x: Shared variable that contains all datapoints used
for training the dA
:type batch_size: int
:param batch_size: size of a [mini]batch
:type learning_rate: float
:param learning_rate: learning rate used during training for any of
the dA layers
'''
# index to a [mini]batch
index = T.lscalar('index') # index to a minibatch
corruption_level = T.scalar('corruption') # % of corruption to use
learning_rate = T.scalar('lr') # learning rate to use
# beginning of a batch, given `index`
batch_begin = index * batch_size
# ending of a batch given `index`
batch_end = batch_begin + batch_size
pretrain_fns = []
for dAuto,kdA in zip(self.dA_layers, range(len(self.dA_layers))):
print(kdA,dAuto)
# get the cost and the updates list
cost, updates = dAuto.get_cost_updates(corruption_level,
learning_rate)
# compile the theano function
fn = theano.function(
inputs=[
index,
theano.In(corruption_level, value=0.2),
theano.In(learning_rate, value=0.1)
],
outputs=cost,
updates=updates,
givens={
self.x: train_set_x[batch_begin: batch_end]
}
)
# append `fn` to the list of functions
pretrain_fns.append(fn)
return pretrain_fns
def build_finetune_functions(self, datasets, batch_size, learning_rate):
'''Generates a function `train` that implements one step of
finetuning, a function `validate` that computes the error on
a batch from the validation set, and a function `test` that
computes the error on a batch from the testing set
:type datasets: list of pairs of theano.tensor.TensorType
:param datasets: It is a list that contain all the datasets;
the has to contain three pairs, `train`,
`valid`, `test` in this order, where each pair
is formed of two Theano variables, one for the
datapoints, the other for the labels
:type batch_size: int
:param batch_size: size of a minibatch
:type learning_rate: float
:param learning_rate: learning rate used during finetune stage
'''
(train_set_x, train_set_y) = datasets[0]
(valid_set_x, valid_set_y) = datasets[1]
(test_set_x, test_set_y) = datasets[2]
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_valid_batches //= batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_test_batches //= batch_size
# compute number of minibatches for training, validation and testing
index = T.lscalar('index') # index to a [mini]batch
# compute the gradients with respect to the model parameters
gparams = T.grad(self.finetune_cost, self.params)
# compute list of fine-tuning updates
updates = [
(param, param - gparam * learning_rate)
for param, gparam in zip(self.params, gparams)
]
train_fn = theano.function(
inputs=[index],
outputs=self.finetune_cost,
updates=updates,
givens={
self.x: train_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: train_set_y[
index * batch_size: (index + 1) * batch_size
]
},
name='train'
)
test_score_i = theano.function(
[index],
self.errors,
givens={
self.x: test_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: test_set_y[
index * batch_size: (index + 1) * batch_size
]
},
name='test'
)
valid_score_i = theano.function(
[index],
self.errors,
givens={
self.x: valid_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: valid_set_y[
index * batch_size: (index + 1) * batch_size
]
},
name='valid'
)
# Create a function that scans the entire validation set
def valid_score():
return [valid_score_i(i) for i in range(n_valid_batches)]
# Create a function that scans the entire test set
def test_score():
return [test_score_i(i) for i in range(n_test_batches)]
return train_fn, valid_score, test_score
def sigmoid_activate(self, Xtest, W, b):
    # NumPy forward pass through one hidden layer, using the logistic sigmoid to match
    # the T.nnet.sigmoid activation the layers were trained with (np.tanh here would
    # disagree with the trained weights)
    pre_activation = np.dot(Xtest, W.get_value(borrow=True)) + b.get_value(borrow=True)
    return 1.0 / (1.0 + np.exp(-pre_activation))
def softmax_activate(self, Xtest, logLayer):
    # NumPy softmax over the log-layer pre-activations
    softmax_input = Xtest
    v = np.exp(np.dot(softmax_input, logLayer.W.get_value(borrow=True)) + logLayer.b.get_value(borrow=True))
    # normalize per sample (row-wise) so each row forms a probability distribution
    softmax_output = v / np.sum(v, axis=-1, keepdims=True)
return softmax_output
def predict_functions(self, Xtest):
''' Given a set_x of examples produce a vector y' of predictions by the sDA.
'''
tmp = Xtest
for L in self.sigmoid_layers:
tmp = self.sigmoid_activate( tmp, L.W, L.b )
# finalize with log layer
tmp = self.softmax_activate( tmp, self.logLayer )
return tmp
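# Illustrative usage sketch (hypothetical toy data, layer sizes and epoch counts; the
# original repository drives this class from a separate training script): layer-wise
# pretraining of each denoising autoencoder, then supervised fine-tuning of the stack.
def _sda_usage_sketch():
    numpy_rng = np.random.RandomState(123)
    x = np.asarray(numpy_rng.rand(200, 64), dtype=theano.config.floatX)
    y = np.asarray(numpy_rng.randint(0, 2, size=200), dtype='int32')
    train_set_x = theano.shared(x, borrow=True)
    train_set_y = theano.shared(y, borrow=True)
    sda = SdA(numpy_rng=numpy_rng, n_ins=64,
              hidden_layers_sizes=[32, 16],
              corruption_levels=[0.1, 0.2],
              n_outs=2)
    batch_size = 20
    n_train_batches = x.shape[0] // batch_size
    # layer-wise pretraining: call each compiled function over all minibatch indexes
    pretrain_fns = sda.pretraining_functions(train_set_x, y, batch_size)
    for layer_idx, fn in enumerate(pretrain_fns):
        for epoch in range(5):
            costs = [fn(index=i, corruption=0.2, lr=0.1) for i in range(n_train_batches)]
            print('pretrain layer %d, epoch %d, cost %.4f' % (layer_idx, epoch, np.mean(costs)))
    # supervised fine-tuning of the whole stack (toy example reuses the train split)
    datasets = [(train_set_x, train_set_y)] * 3
    train_fn, valid_score, test_score = sda.build_finetune_functions(datasets, batch_size, 0.1)
    for epoch in range(5):
        for i in range(n_train_batches):
            train_fn(i)
        print('finetune epoch %d, valid error %.4f' % (epoch, np.mean(valid_score())))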
|
[
"admin@webdsdesign.com"
] |
admin@webdsdesign.com
|
3eef37096a1b8dfa04f2d43d8a80e433d5771e3c
|
0d4966bb125abc0def9a48309e8353b05c242c4c
|
/Test1/diseases/migrations/0001_initial.py
|
5d68d0d3942a4853924bb3d73981ea9df6115ece
|
[] |
no_license
|
ChanBong/Viral-De-cease
|
8b7b30c698883f45f26d2f9f2be7ab787399a484
|
b44c95cdbf87af76039ae32bbe3ac4502fe9045e
|
refs/heads/master
| 2023-02-12T20:49:11.879306
| 2021-01-11T16:37:20
| 2021-01-11T16:37:20
| 327,962,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
# Generated by Django 3.1.4 on 2021-01-09 09:05
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Diseas',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('about_s', models.TextField()),
('site', models.URLField()),
('symptoms', models.TextField()),
('about_l', models.TextField()),
],
),
]
|
[
"harsh_k@ch.iitr.ac.in"
] |
harsh_k@ch.iitr.ac.in
|
07c821b253d8b2176af47cd42bb65e0f706db38a
|
3109e3a7f2f2dccc5a806695f0adbe0fed879112
|
/ecommerce/Loma/migrations/0022_auto_20190204_1200.py
|
4724c3c1c3f80c03fa75c1a13fc32a1f6bb13401
|
[] |
no_license
|
Maheshwari2604/ecommercee
|
9ebbf18b4fbf933a0d9641009f7f17ce836de587
|
4411e7e10eccda907711200d2c0d873db3d7f803
|
refs/heads/master
| 2020-04-20T18:03:49.575124
| 2019-02-12T16:02:05
| 2019-02-12T16:02:05
| 169,007,411
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-04 06:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Loma', '0021_auto_20190203_1829'),
]
operations = [
migrations.AlterField(
model_name='promocode_model',
name='promocode_name',
field=models.CharField(max_length=11),
),
]
|
[
"maheshwarishivam2604@gmail.com"
] |
maheshwarishivam2604@gmail.com
|
1e92f6030603376b040c99b7ed7806971393cfca
|
a500d0a13e025a7e25376592188663f26c13385e
|
/lpthw/ex24.py
|
088c897cf7916cf73da44aba4071ebebfe6f2a79
|
[] |
no_license
|
sraywall/GitTutorial
|
c6096cfa9dc5c89ebaedee10ee93fed69118f296
|
cd0de5db58e42fb4a5094504147ba804b0424247
|
refs/heads/master
| 2021-04-27T20:36:30.290444
| 2020-05-07T19:27:06
| 2020-05-07T19:27:06
| 122,381,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,127
|
py
|
print("Let's practice everything.")
print('You\'d need to know \'bout excapes with \\ that do:')
print('\n newlines and \t tabs.')
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""
print("--------------")
print(poem)
print("--------------")
five = 10 - 2 + 3 - 6
print(f"This should be five: {five}")
def secret_formula(started):
jelly_beans = started * 500
jars = jelly_beans / 1000
crates = jars / 100
return jelly_beans, jars, crates
start_point = 10000
beans, jars, crates = secret_formula(start_point)
#remember that this is another way to format a string
print("With a starting point of: {}".format(start_point))
# it's just like with an f"" string
print(f"We'd have {beans} beans, {jars} jars, and {crates} crates.")
start_point = start_point / 10
print("We can also do that this way:")
formula = secret_formula(start_point)
# this is an easy way to apply a list to a format string
print("We'd have {} beans, {} jars, and {} crates.".format(*formula))
|
[
"sraywall@gmail.com"
] |
sraywall@gmail.com
|
b5c1fff82ac0901d1ae985cd1826ca4b47c6f5af
|
5b93930ce8280b3cbc7d6b955df0bfc5504ee99c
|
/nodes/Bisong19Building/I_PartVIII/C_Chapter47/index.py
|
cce9e2225cec24eabc5302e3a2817b1a5b9cd72f
|
[] |
no_license
|
nimra/module_gen
|
8749c8d29beb700cac57132232861eba4eb82331
|
2e0a4452548af4fefd4cb30ab9d08d7662122cf4
|
refs/heads/master
| 2022-03-04T09:35:12.443651
| 2019-10-26T04:40:49
| 2019-10-26T04:40:49
| 213,980,247
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,830
|
py
|
# Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
from .A_Overviewof.index import Overviewof as A_Overviewof
from .B_Createa.index import Createa as B_Createa
from .C_BuildContainers.index import BuildContainers as C_BuildContainers
from .D_Compilethe.index import Compilethe as D_Compilethe
from .E_Uploadand.index import Uploadand as E_Uploadand
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# CHAPTER 47
#
#
#
# Deploying
# an End-to-End Machine
# Learning Solution
# on Kubeflow Pipelines
# A Kubeflow pipeline component is an implementation of a pipeline task. A component
# is a step in the workflow. Each task takes one or more artifacts as input and may produce
# one or more artifacts as output.
# Each component usually includes two parts:
#
# • Client code: The code that talks to endpoints to submit jobs, for
# example, code to connect with the Google Cloud Machine Learning
# Engine.
#
# • Runtime code: The code that does the actual job and usually runs in
# the cluster, for example, the code that prepares the model for training
# on Cloud MLE.
# A component consists of an interface (inputs/outputs), the implementation
# (a Docker container image and command-line arguments), and metadata (name,
# description).
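#
# [A hedged illustration of this structure, assuming the Kubeflow Pipelines v1 SDK
# (kfp.dsl.ContainerOp); the names, image URI, arguments and output path below are
# hypothetical and not taken from the book's repository.]
#
def _kfp_component_sketch():
    from kfp import dsl

    @dsl.pipeline(name='crypto-regressor-sketch',
                  description='hypothetical pipeline showing one container component')
    def pipeline():
        dsl.ContainerOp(
            name='dataflow-transform',                                  # metadata
            image='gcr.io/hypothetical-project/dataflow-transform:v1',  # implementation: image
            arguments=['--input', 'gs://hypothetical-bucket/raw.csv',   # implementation: CLI args
                       '--output', 'gs://hypothetical-bucket/transformed/'],
            file_outputs={'output_path': '/output.txt'},                # interface: outputs
        )
    return pipeline
#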
#
#
#
#
#
# Overview of a Simple End-to-End Solution Pipeline
# In this simple example, we will implement a deep neural regressor network to predict the
# closing prices of Bitcoin crypto-currency. The machine learning code itself is pretty basic
# as it is not the focus of this article. The goal here is to orchestrate a machine learning
# engineering solution using microservice architectures on Kubernetes with Kubeflow
# Pipelines. The code for this chapter is in the book code repository. Clone the repository
# from the GCP Cloud Shell.
# The pipeline consists of the following components:
#
# 1. Move raw data hosted on GitHub to a storage bucket.
#
# 2. Transform the dataset using Google Dataflow.
#
# 3. Carry out hyper-parameter training on Cloud Machine
# Learning Engine.
#
# 4. Train the model with the optimized hyper-parameters.
#
# 5. Deploy the model for serving on Cloud MLE.
#
#
#
# Create a Container Image for Each Component
# First, we’ll package the client and runtime code into a Docker image. This image
# also contains the secure service account key to authenticate against GCP. For example,
# the component to transform the dataset using Dataflow has the following files built into
# its image:
# • __ Dockerfile: Dockerfile to build the Docker image.
#
# • __ build.sh: Script to initiate the container build and upload to
# Google Container Registry.
#
# • __ dataflow_transform.py: Code to run the beam pipeline on
# Cloud Dataflow.
#
# • __ service_account.json: Secure key to authenticate container
# on GCP.
#
# • __ local_test.sh: Script to run the image pipeline component
# locally.
#
#
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
def __init__(self):
super().__init__(
"Chapter 47: Deploying an End-to-End Machine Learning Solution on Kubeflow Pipelines",
# Stage.REMOVE_EXTRANEOUS,
# Stage.ORIG_BLOCKS,
# Stage.CUSTOM_BLOCKS,
# Stage.ORIG_FIGURES,
# Stage.CUSTOM_FIGURES,
# Stage.CUSTOM_EXERCISES,
)
self.add(mbk("# Chapter 47: Deploying an End-to-End Machine Learning Solution on Kubeflow Pipelines"))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Chapter47(HierNode):
def __init__(self):
super().__init__("Chapter 47: Deploying an End-to-End Machine Learning Solution on Kubeflow Pipelines")
self.add(Content())
self.add(A_Overviewof())
self.add(B_Createa())
self.add(C_BuildContainers())
self.add(D_Compilethe())
self.add(E_Uploadand())
# eof
|
[
"lawrence.mcafee@gmail.com"
] |
lawrence.mcafee@gmail.com
|
74a40fde608fbfe9129efe89a8eff85127fc7b21
|
2d8898337f9b16a084bec9c447af9a59d4a8c69c
|
/server_less/fargate/container-app/main.py
|
0b4ea91b8e7c73b62847457e7511aa1accc70e6b
|
[] |
no_license
|
hayaosato/advent-calendar-2019
|
de22e780ea2a5131da5da5943b93a354dd2e21e9
|
eba09cf3abfbde2e05f7b0e9eb5ca47fab54cdc1
|
refs/heads/master
| 2022-12-28T23:04:32.755496
| 2019-12-20T03:20:18
| 2019-12-20T03:20:18
| 225,546,645
| 0
| 1
| null | 2022-12-08T04:03:50
| 2019-12-03T06:24:04
|
HCL
|
UTF-8
|
Python
| false
| false
| 132
|
py
|
"""
hoge
"""
import sys
def main(arg):
"""
hoge
"""
print(arg)
if __name__ == "__main__":
main(sys.argv[1])
|
[
"jake.bibikyary.880@gmail.com"
] |
jake.bibikyary.880@gmail.com
|
d380946226e4e0189d165d2631379f3c9a24cb80
|
06e0cf403b744009c817e5cfa7f5f898020109ab
|
/Assignments/Assignment2.py
|
519fdd5c2c88a90eca552e8a532e787a0ce56c64
|
[] |
no_license
|
giurgiumatei/Fundamentals-of-Programming
|
6338ebb616b219ae927af2dd9b42145911efe138
|
92d33def4ed49b86145caf2d28a6340d89709133
|
refs/heads/main
| 2023-03-12T07:44:25.041872
| 2021-03-01T23:38:01
| 2021-03-01T23:38:01
| 343,584,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,778
|
py
|
numere = [ ]#this list contains the complex numbers
def citire(): #this function reads the complex numbers and stores them as a list of lists
n=int(input("Give the number of complex elements: "))
for i in range(0,n):
print("Give the values for element number "+str(i+1)+" : ")
ele=[int(input("Give the real part: ")),int(input("Give the imaginary part: "))]
numere.append(ele)
return
def afisare():#this function prints the complex numbers to the console
n=len(numere)
for i in range(0,n):
print(str(numere[i][0]) + "+" + str(numere[i][1]) + "i")
def cer1():#this function solves req. 1
l=int(0)
maxl=int(0)
n=len(numere)
b=int(-1)
e=int(-1)
for i in range(0,n-1):
l=0
if numere[i][0]>numere[i-1][0]:
l+=1
for j in range(i+1,n):
if numere[j][0]>numere[j-1][0]:
l+=1
else: break
if l > maxl:
maxl=l
b=i-1
e=j
if e==n-1:
for i in range(b,e+1):
print(str(numere[i][0]) + "+" + str(numere[i][1]) + "i")
else:
for i in range(b,e):
print(str(numere[i][0]) + "+" + str(numere[i][1]) + "i")
def cer5():#this function solves req 5
l=int(0)
maxl=int(0)
n=len(numere)
b=int(-1)
e=int(-1)
for i in range(0,n-1):
l=0
if numere[i][1]==0:
l+=1
for j in range(i+1,n):
if numere[j][1] == 0:
l+=1
else: break
if l > maxl:
maxl=l
b=i
e=j
if e==n-1:
for i in range(b,e+1):
print(str(numere[i][0]) + "+" + str(numere[i][1]) + "i")
else:
for i in range(b,e):
print(str(numere[i][0]) + "+" + str(numere[i][1]) + "i")
def cer10():#this function solves req.10
l=int(0)
maxl=int(0)
n=len(numere)
b=int(-1)
e=int(-1)
s1=int(0)
s2=int(0)
inceput=int(0)
for i in range(0,n):
s1+=numere[i][0]
s2+=numere[i][1]
if s1>10 or s2>10:
s1=0
s2=0
l=0
inceput+=1
i=inceput-1
elif s1==10 and s2==10:
if l > maxl:
maxl=l
b=inceput
e=i
else: l+=1
if e==n-1:
for i in range(b,e+1):
print(str(numere[i][0]) + "+" + str(numere[i][1]) + "i")
else:
for i in range(b,e):
print(str(numere[i][0]) + "+" + str(numere[i][1]) + "i")
def optiunea3():#when this function is called it asks the user which req. should be solved
n=int(input("Which requirement do you want to solve: "))
if n==5:
cer5()
elif n==10:
cer10()
elif n==1:
cer1()
else: print("Invalid number")
def menu():#this function acts like a main function which calls the others
citire()
while(True):
option=int(input("Give a number corresponding to the option:"))
if option==4:
return
elif option==1:
citire()
elif option==2:
afisare()
elif option==3:
optiunea3()
elif option==0:
break
menu()#program starts here
|
[
"noreply@github.com"
] |
noreply@github.com
|
9c68ae44c857794289d718b86b9cf28781944546
|
d49f38323dc30a3cb4a581b451f7db7eec220324
|
/app.py
|
c50f59488d7cad0a63272dce103f97c62cf594dd
|
[] |
no_license
|
bbiyongel/NaverAPI-telegram
|
0e67259ed2faa86860014f0a5ff1ee0528175b67
|
bfcffdb03c6c2cb2387aee461490c520542227bf
|
refs/heads/master
| 2022-01-15T19:50:28.409431
| 2019-07-12T09:00:15
| 2019-07-12T09:00:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,740
|
py
|
from pprint import pprint
from flask import Flask, request
import requests
from decouple import config
import random
app = Flask(__name__)
token = config('TELEGRAM_TOKEN')
base_url = f"https://api.telegram.org/bot{token}"
naver_client_id = config('NAVER_CLIENT_ID')
naver_client_secret = config('NAVER_CLIENT_SECRET')
@app.route(f'/{token}', methods=['POST']) #
def telegram():
response = request.get_json()
chat_id = response.get('message').get('chat').get('id')
# if a photo file arrives,
if response.get('message').get('photo'):
# grab the file id of the photo
file_id = response.get('message').get('photo')[-1].get('file_id')
# ask the Telegram server for the file path.
file_response = requests.get(
f'{base_url}/getFile?file_id={file_id}').json()
# build the download URL from the file path.
file_path = file_response.get('result').get('file_path')
file_url = f'https://api.telegram.org/file/bot{token}/{file_path}'
# print(file_url)
response = requests.get(file_url, stream=True)
image = response.raw.read()
# 2. set the Naver celebrity-recognition API URL
naver_url = 'https://openapi.naver.com/v1/vision/celebrity'
# 3. send the request via POST
headers = {'X-Naver-Client-Id': naver_client_id,
'X-Naver-Client-Secret': naver_client_secret
}
response = requests.post(naver_url, headers=headers, files={'image': image}).json()
if response.get('faces'):
best = response.get('faces')[0].get('celebrity')
if best.get('confidence') > 0.2:
text = f"{best.get('confidence')*100}%만큼 {best.get('value')}를 닮으셨네요"
else:
text = "연예인을 닮지 않음..."
else:
text = "사람 아닌듯"
# print(text)
api_url = f'{base_url}/sendMessage?chat_id={chat_id}&text={text}'
requests.get(api_url)
# if a text message arrives
elif response.get('message').get('text'):
# store the user's message in text and the chat info in chat_id
text = response.get('message').get('text')
chat_id = response.get('message').get('chat').get('id')
if '/번역 ' == text[0:4]:
    # Papago NMT endpoint for the translation request below
    # (without this, naver_url is undefined on this branch and the post raises a NameError)
    naver_url = 'https://openapi.naver.com/v1/papago/n2mt'
headers = {'X-Naver-Client-Id': naver_client_id,
'X-Naver-Client-Secret': naver_client_secret
}
data = {
'source': 'ko',
'target': 'en',
'text': text[4:]
}
# data = {
# 'source': 'en',
# 'target': 'ko',
# 'text': 'War never again! Never again war!'
# }
response = requests.post(naver_url, headers=headers, data=data).json()
text = response.get('message').get('result').get('translatedText')
# if it is a greeting, reply with a custom greeting
elif '안녕' in text or 'hi' in text:
text = '간디'
elif '로또' in text:
text = sorted(random.sample(range(1,46), 6))
# finally build the url and send the message
if text=='호우':
text = '장마임'
if text=='패드립':
text = '패드립 머신 가동'
api_url = f'{base_url}/sendMessage?chat_id={chat_id}&text={text}'
requests.get(api_url)
return 'OK', 200 # 200 : 응답 상태 코드
if __name__ == '__main__':
import os
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
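# Note: Telegram only POSTs updates to the /{token} route above once the webhook has been
# registered with the Bot API's setWebhook method; a hedged one-liner (the public HTTPS
# URL is hypothetical):
#
#     requests.get(f'{base_url}/setWebhook?url=https://your-app.example.com/{token}')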
|
[
"jjgk91@naver.com"
] |
jjgk91@naver.com
|
1b1488f2e5ebd9c410f0123465c4c7e05c7126e8
|
c097eb64ab0305fb653bba74c616161a64b42a29
|
/carspy/convol_fcn.py
|
884fb80ada4dd43c536d8a786c54b3e392fe101e
|
[
"BSD-3-Clause"
] |
permissive
|
zhangxin19981016/carspy
|
f832a7a58dc1683506eefb6c4341c09cb5b95300
|
4c91138018b288635e1e608e7f8b0edd8950085b
|
refs/heads/main
| 2023-04-12T05:44:38.123167
| 2021-05-09T19:12:31
| 2021-05-09T19:12:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,439
|
py
|
"""Functions used in the convolution of CARS spectrum.
- Laser lineshape
- Impulse spectral response function (ISRF) for the spectrometer slit
"""
import numpy as np
def gaussian_line(w, w0, sigma):
"""Generate a normalized Gaussian lineshape (integral equals to 1).
Parameters
----------
w : sorted 1d array of floats
Spectral positions in wavenumber cm^(-1).
w0 : float
Center of the Gaussian lineshape in wavenumber cm^(-1).
sigma : float
FWHM of the Gaussian lineshape in wavenumber cm^(-1).
Returns
-------
1d array of floats
Intensities of the normalized Gaussian lineshape over w.
"""
_lineshape = 2/sigma*(np.log(2)/np.pi)**0.5*np.exp(
-4*np.log(2)*((w-w0)/sigma)**2)
return _lineshape
def lorentz_line(w, w0, sigma):
"""Generate a normalized Lorentzian lineshape (integral equals to 1).
Parameters
----------
w : sorted 1d array of floats
Spectral positions in wavenumber cm^(-1).
w0 : float
Center of the Lorentzian lineshape in wavenumber cm^(-1).
sigma : float
FWHM of the Lorentzian lineshape in wavenumber cm^(-1).
Returns
-------
1d array of floats
Intensities of the normalized Lorentzian lineshape over w.
"""
_lineshape = 1/np.pi*(sigma/2)/((w-w0)**2+sigma**2/4)
return _lineshape
def voigt_line(w, w0, sigma_V, sigma_L):
"""Generate an approximated Voigt lineshape following :cite:`Whiting:68`.
Parameters
----------
w : 1d array of floats
Spectral positions in wavenumber cm^(-1).
w0 : float
Center of the Lorentzian lineshape in wavenumber cm^(-1).
sigma_V : float
FWHM of the Voigt lineshape in wavenumber cm^(-1).
sigma_L : float
FWHM of the Lorentzian lineshape in wavenumber cm^(-1).
Returns
-------
1d array
Intensities of the Voigt lineshape over w.
"""
# Preparations
_ratio = sigma_L/sigma_V
I_g = 1/(sigma_V*(1.065 + 0.447*_ratio + 0.058*_ratio**2))
_w = abs(w-w0)/sigma_V
# Building up the function
_term_1 = I_g*(1-_ratio)*np.exp(-2.772*_w**2) + _ratio/(1 + 4*_w**2)
_term_2 = 0.016*(1-_ratio)*_ratio*(np.exp(-0.4*_w**2.25)
- 10/(10 + _w**2.25))
return _term_1 + _term_2
def asym_Gaussian(w, w0, sigma, k, a_sigma, a_k, offset):
"""Asymmetric super-Gaussian following :cite:`Beirle:17`.
Parameters
----------
w : sorted 1d array of floats
Spectral positions in wavenumber cm^(-1).
w0 : float
Center of the asymmetric Gaussian function in wavenumber cm^(-1).
sigma : float
FWHM of the Gaussian function in wavenumber cm^(-1).
k : float
Controls the skewing of the asymmetry.
a_sigma, a_k : float
Tuning factors for sigma and k.
offset : float
Background offset (from experimental spectrum).
Returns
-------
1d array of floats
Intensities of the peak-normalized asymmetric super-Gaussian over w.
"""
response_low = np.exp(-abs((w[w <= w0]-w0)/(sigma-a_sigma))**(k-a_k))
response_high = np.exp(-abs((w[w > w0]-w0)/(sigma+a_sigma))**(k+a_k))
response = np.append(response_low, response_high) + offset
return response/response.max()
def asym_Voigt(w, w0, sigma, k, a_sigma, a_k, sigma_L_l, sigma_L_h, offset):
"""Asymmetric super-Voigt.
.. note::
This is based on the super-Gaussian from :cite:`Beirle:17`, with
additional convolution with two Lorentzian profiles to better capture
slow-decaying wings in some experimental slit function
Parameters
----------
w : sorted 1d array of floats
Spectral positions in wavenumber cm^(-1).
w0 : float
Center of the asymmetric Gaussian function in wavenumber cm^(-1).
sigma : float
FWHM of the Gaussian function in wavenumber cm^(-1).
k : float
Controls the skewing of the asymmetry.
a_sigma, a_k : float
Tuning factors for sigma and k.
sigma_L_l : float
FWHM of the Lorentzian function in wavenumber cm^(-1) for the
lower half.
sigma_L_h : float
FWHM of the Lorentzian function in wavenumber cm^(-1) for the
higher half.
offset : float
Background offset.
Returns
-------
1d array of floats
Intensities of the peak-normalized asymmetric super-Gaussian over w.
"""
response_low = np.exp(-abs((w-w0)/(sigma-a_sigma))**(k-a_k))
response_high = np.exp(-abs((w-w0)/(sigma+a_sigma))**(k+a_k))
response_low = np.convolve(response_low, lorentz_line(w, w0, sigma_L_l),
'same')
response_high = np.convolve(response_high, lorentz_line(w, w0, sigma_L_h),
'same')
response = np.append(response_low[np.where(w <= w0)],
response_high[np.where(w > w0)]) + offset
return response/response.max()
def asym_Voigt_deprecated(w, w0, sigma_V_l, sigma_V_h, sigma_L_l, sigma_L_h,
offset):
"""Asymmetric Voigt profile following NRC.
.. admonition:: Deprecated
:class: attention
This profile cannot capture certain slit functions with broadened
Gaussian profile.
Parameters
----------
w : sorted 1d array of floats
Spectral positions in wavenumber cm^(-1).
w0 : float
Center of the asymmetric Gaussian function in wavenumber cm^(-1).
sigma_V_l : float
FWHM of the Voigt function in wavenumber cm^(-1) for the lower half.
sigma_V_h : float
FWHM of the Voigt function in wavenumber cm^(-1) for the higher half.
sigma_L_l : float
FWHM of the Lorentzian function in wavenumber cm^(-1) for the
lower half.
sigma_L_h : float
FWHM of the Lorentzian function in wavenumber cm^(-1) for the
higher half.
offset : float
Background offset.
Returns
-------
1d array of floats
Intensities of the peak-normalized asymmetric super-Gaussian over w.
"""
response_low = voigt_line(w[w <= w0], w0, sigma_V_l, sigma_L_l)
response_high = voigt_line(w[w > w0], w0, sigma_V_h, sigma_L_h)
response = (np.append(response_low/response_low.max(),
response_high/response_high.max()) +
offset)
return response/response.max()
def slit_ISRF(w, w0, param_1, param_2, param_3, param_4, param_5, param_6,
offset, mode='sGaussian'):
"""Impulse spectral response function (ISRF) as the slit function.
Parameters
----------
w : sorted 1d array of floats
Spectral positions in wavenumber cm^(-1).
w0 : float
Center of the asymmetric Gaussian function in wavenumber cm^(-1).
param_1, param_2, param_3, param_4 : float
Parameters needed for the asymmetric ISRF depending on the mode.
- 'sGaussian':
sigma : float
FWHM of the Gaussian function in wavenumber cm^(-1).
k : float
Controls the skewing of the asymmetry.
a_sigma, a_k : float
Tuning factors for sigma and k.
- 'Voigt':
sigma_V_l : float
FWHM of the Voigt function in wavenumber cm^(-1) for
the lower half.
sigma_L_l : float
FWHM of the Lorentzian function in wavenumber cm^(-1) for
the lower half.
sigma_V_h : float
FWHM of the Voigt function in wavenumber cm^(-1) for
the higher half.
sigma_L_h : float
FWHM of the Lorentzian function in wavenumber cm^(-1) for
the higher half.
offset : float
Background offset.
mode : 'sGaussian', str, optional
Two options for the ISRF:
- Asymmetric super Gaussian: 'sGaussian'.
- Asymmetric Voigt: 'Voigt'.
Returns
-------
1d array of floats
Intensities of the peak-normalized asymmetric ISRF.
"""
slit_fc = []
if mode == 'sGaussian':
slit_fc = asym_Gaussian(w, w0, param_1, param_2, param_3,
param_4, offset)
elif mode == 'Voigt':
slit_fc = asym_Voigt(w, w0, param_1, param_2, param_3, param_4,
param_5, param_6, offset)
return slit_fc
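# Illustrative usage sketch (hypothetical parameter values): evaluate the normalized line
# shapes and the 'sGaussian' slit function, and check the unit-integral claim numerically.
if __name__ == "__main__":
    w = np.linspace(-50.0, 50.0, 4001)            # wavenumber axis in cm^(-1)
    gauss = gaussian_line(w, w0=0.0, sigma=2.0)   # FWHM = 2 cm^(-1)
    lorentz = lorentz_line(w, w0=0.0, sigma=2.0)
    print("Gaussian integral (expected ~1):", np.trapz(gauss, w))
    print("Lorentzian integral (expected ~1):", np.trapz(lorentz, w))
    slit = slit_ISRF(w, 0.0, 2.0, 2.5, 0.1, 0.05, None, None, 0.0, mode='sGaussian')
    print("Peak of normalized slit function:", slit.max())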
|
[
"43315257+chuckedfromspace@users.noreply.github.com"
] |
43315257+chuckedfromspace@users.noreply.github.com
|
9ca3d949f4eba7c4f5c4434c364d62be9b136a99
|
aa4024b6a846d2f6032a9b79a89d2e29b67d0e49
|
/UMLRT2Kiltera_MM/graph_MT_post__Model.py
|
3f264f3c35aea6264d6efa85f991b713f54237a9
|
[
"MIT"
] |
permissive
|
levilucio/SyVOLT
|
41311743d23fdb0b569300df464709c4954b8300
|
0f88827a653f2e9d3bb7b839a5253e74d48379dc
|
refs/heads/master
| 2023-08-11T22:14:01.998341
| 2023-07-21T13:33:36
| 2023-07-21T13:33:36
| 36,246,850
| 3
| 2
|
MIT
| 2023-07-21T13:33:39
| 2015-05-25T18:15:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,610
|
py
|
"""
__graph_MT_post__Model.py___________________________________________________________
Automatically generated graphical appearance ---> MODIFY DIRECTLY WITH CAUTION
___________________________________________________________________________
"""
import tkFont
from graphEntity import *
from GraphicalForm import *
from ATOM3Constraint import *
class graph_MT_post__Model(graphEntity):
def __init__(self, x, y, semObject = None):
self.semanticObject = semObject
self.sizeX, self.sizeY = 172, 82
graphEntity.__init__(self, x, y)
self.ChangesAtRunTime = 0
self.constraintList = []
if self.semanticObject: atribs = self.semanticObject.attributesToDraw()
else: atribs = None
self.graphForms = []
self.imageDict = self.getImageDict()
def DrawObject(self, drawing, showGG = 0):
self.dc = drawing
if showGG and self.semanticObject: self.drawGGLabel(drawing)
h = drawing.create_oval(self.translate([189.0, 62.0, 189.0, 62.0]), tags = (self.tag, 'connector'), outline = '', fill = '' )
self.connectors.append( h )
h = drawing.create_rectangle(self.translate([20.0, 20.0, 190.0, 100.0]), tags = self.tag, stipple = '', width = 1, outline = 'black', fill = 'moccasin')
self.gf4 = GraphicalForm(drawing, h, "gf4")
self.graphForms.append(self.gf4)
font = tkFont.Font( family='Arial', size=12, weight='normal', slant='roman', underline=0)
h = drawing.create_text(self.translate([110.0, 41.0, 110.0, 12.0])[:2], tags = self.tag, font=font, fill = 'black', anchor = 'center', text = 'MT_post__Model_S', width = '0', justify= 'left', stipple='' )
self.gf66 = GraphicalForm(drawing, h, 'gf66', fontObject=font)
self.graphForms.append(self.gf66)
helv12 = tkFont.Font ( family="Helvetica", size=12, weight="bold" )
h = drawing.create_text(self.translate([-3, -3]), font=helv12,
tags = (self.tag, self.semanticObject.getClass()),
fill = "black",
text=self.semanticObject.MT_label__.toString())
self.attr_display["MT_label__"] = h
self.gf_label = GraphicalForm(drawing, h, 'gf_label', fontObject=helv12)
self.graphForms.append(self.gf_label)
def postCondition( self, actionID, * params):
return None
def preCondition( self, actionID, * params):
return None
def getImageDict( self ):
imageDict = dict()
return imageDict
new_class = graph_MT_post__Model
|
[
"levi"
] |
levi
|
e6a2a28a5d17ffa3424d45048710a8687df2c863
|
9256eeff108787245a1d9a8e27f80c04377ba10f
|
/src/datasets/mnist.py
|
49071693a70659a10514560cc67cff58309b79cf
|
[
"MIT"
] |
permissive
|
martinhavlicek/meta-inference-public
|
99a22daef937921deb9f677f68aa1c954e456e55
|
3cad0b84acd407f3d790f3d75d3045f62bdbf250
|
refs/heads/master
| 2022-04-12T14:15:42.514426
| 2020-03-31T21:39:50
| 2020-03-31T21:39:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,748
|
py
|
import math
import numpy as np
from PIL import Image
from torchvision import datasets
from torchvision import transforms
# ----- ROTATED MNIST -----
ROTATIONS = np.arange(-180, 180, 20)
DEFAULT_ROTATIONS = ROTATIONS[0::2]
UNSEEN_ROTATIONS = ROTATIONS[1::2]
DEFAULT_ROTATIONS_SPARSE = np.array([-160, -80, 0, 80, 160])
UNSEEN_ROTATIONS_SPARSE = np.array([-180, -140, -120, -100, -60, -40, -20, 20, 40, 60, 100, 120, 140])
DEFAULT_ROTATIONS_DISJOINT = ROTATIONS[:len(ROTATIONS) // 2 + 1]
UNSEEN_ROTATIONS_DISJOINT = ROTATIONS[len(ROTATIONS) // 2 + 1:]
ALL_ROTATIONS = ROTATIONS
DEFAULT_ROTATIONS_DICT = {
'standard': DEFAULT_ROTATIONS,
'sparse': DEFAULT_ROTATIONS_SPARSE,
'disjoint': DEFAULT_ROTATIONS_DISJOINT
}
UNSEEN_ROTATIONS_DICT = {
'standard': UNSEEN_ROTATIONS,
'sparse': UNSEEN_ROTATIONS_SPARSE,
'disjoint': UNSEEN_ROTATIONS_DISJOINT
}
def load_many_rotated_mnist(data_dir, image_size=32, train=True,
rotations=DEFAULT_ROTATIONS):
"""
Load one MNIST dataset per rotation in `rotations`, where every image
in a dataset is rotated by that fixed angle.
"""
return [
load_rotated_mnist( data_dir, image_size=image_size,
train=train, rotation=rotation)
for rotation in rotations
]
def load_rotated_mnist(data_dir, image_size=32, train=True, rotation=0):
"""
Load a MNIST dataset where each image has a rotation.
"""
rotate_image = rotate_transform(rotation)
image_transforms = [
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
rotate_image,
transforms.ToTensor(),
]
image_transforms = transforms.Compose(image_transforms)
dset = datasets.MNIST(data_dir, train=train, download=True,
transform=image_transforms)
return dset
def rotate_transform(angle):
def f(img):
return transforms.functional.rotate(img, angle)
return f
# ----- SCALED MNIST -----
SCALES = np.arange(0.5, 2.0, 0.1)
DEFAULT_SCALES = SCALES[0::2]
UNSEEN_SCALES = SCALES[1::2]
DEFAULT_SCALES_SPARSE = np.array([0.6, 1.0 ,1.4, 1.8])
UNSEEN_SCALES_SPARSE = np.array([0.5, 0.7, 0.8, 0.9, 1.1, 1.2, 1.3, 1.5, 1.6, 1.7, 1.9])
DEFAULT_SCALES_DISJOINT = SCALES[:len(SCALES) // 2 + 1]
UNSEEN_SCALES_DISJOINT = SCALES[len(SCALES) // 2 + 1:]
ALL_SCALES = SCALES
DEFAULT_SCALES_DICT = {
'standard': DEFAULT_SCALES,
'sparse': DEFAULT_SCALES_SPARSE,
'disjoint': DEFAULT_SCALES_DISJOINT
}
UNSEEN_SCALES_DICT = {
'standard': UNSEEN_SCALES,
'sparse': UNSEEN_SCALES_SPARSE,
'disjoint': UNSEEN_SCALES_DISJOINT
}
def load_many_scaled_mnist( data_dir, image_size=32, train=True,
scales=DEFAULT_SCALES):
"""
Load one MNIST dataset per scale in `scales`, where every image
in a dataset is rescaled by that fixed factor.
"""
return [
load_scaled_mnist( data_dir, image_size=image_size,
train=train, scale=scale)
for scale in scales
]
def load_scaled_mnist(data_dir, image_size=32, train=True, scale=1):
"""
Load an MNIST dataset where each image is rescaled by `scale`.
"""
scale_image = scale_transform(scale)
image_transforms = [
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
scale_image,
transforms.ToTensor(),
]
image_transforms = transforms.Compose(image_transforms)
dset = datasets.MNIST(data_dir, train=train, download=True,
transform=image_transforms)
return dset
def scale_transform(scale):
def f(img):
size = img.size
i, j, h, w = get_crop_params(img, scale, ratio=1)
return transforms.functional.resized_crop(
img, i, j, h, w, size, Image.BILINEAR)
return f
def get_crop_params(img, scale, ratio=1):
w = img.size[0] * scale
h = img.size[1] * scale
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
# ----- SHEARED MNIST -----
SHEARS = np.arange(-180, 180, 20)
DEFAULT_SHEARS = SHEARS[0::2]
UNSEEN_SHEARS = SHEARS[1::2]
DEFAULT_SHEARS_SPARSE = np.array([-160, -80, 0, 80, 160])
UNSEEN_SHEARS_SPARSE = np.array([-180, -140, -120, -100, -60, -40, -20, 20, 40, 60, 100, 120, 140])
DEFAULT_SHEARS_DISJOINT = SHEARS[:len(SHEARS) // 2 + 1]
UNSEEN_SHEARS_DISJOINT = SHEARS[len(SHEARS) // 2 + 1:]
ALL_SHEARS = SHEARS
DEFAULT_SHEARS_DICT = {
'standard': DEFAULT_SHEARS,
'sparse': DEFAULT_SHEARS_SPARSE,
'disjoint': DEFAULT_SHEARS_DISJOINT
}
UNSEEN_SHEARS_DICT = {
'standard': UNSEEN_SHEARS,
'sparse': UNSEEN_SHEARS_SPARSE,
'disjoint': UNSEEN_SHEARS_DISJOINT
}
def load_many_sheared_mnist(data_dir, image_size=32, train=True,
shears=DEFAULT_SHEARS):
"""
Load one MNIST dataset per shear in `shears`, where every image
in a dataset is sheared by that fixed angle.
"""
return [
load_sheared_mnist( data_dir, image_size=image_size,
train=train, shear=shear)
for shear in shears
]
def load_sheared_mnist(data_dir, image_size=32, train=True, shear=0):
"""
Load an MNIST dataset where each image is sheared by `shear`.
"""
shear_image = shear_transform(shear)
image_transforms = [
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
shear_image,
transforms.ToTensor(),
]
image_transforms = transforms.Compose(image_transforms)
dset = datasets.MNIST(data_dir, train=train, download=True,
transform=image_transforms)
return dset
def shear_transform(shear):
def f(img):
return transforms.functional.affine(img, 0, (0, 0), 1, shear)
return f
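# Illustrative usage sketch (hypothetical data directory; download=True fetches MNIST on
# first use): one dataset per default rotation, with the first wrapped in a DataLoader.
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    dsets = load_many_rotated_mnist('./data', image_size=32, train=True,
                                    rotations=DEFAULT_ROTATIONS)
    loader = DataLoader(dsets[0], batch_size=64, shuffle=True)
    images, labels = next(iter(loader))
    print(len(dsets), 'rotated datasets;', images.shape, labels.shape)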
|
[
"me@mikewuis.me"
] |
me@mikewuis.me
|
f0338b1f24a90d5fbc5b99ebe5f32f64d18dd26f
|
34f1693e4bd6b85abc289725d535656b36fb5e72
|
/.file/hash/6.py
|
64c612f22b34aebed2e2831867886eeed92feae8
|
[] |
no_license
|
mels595/termux-toolkit
|
f15aeeb8f673082e2ee6cde50f72f6d40481eb61
|
872b9220e9fe857b65502ff775073e26fedbc0b9
|
refs/heads/master
| 2022-11-17T18:35:09.892480
| 2020-07-11T18:46:29
| 2020-07-11T18:46:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
import hashlib
text = raw_input("\033[00m[\033[1;31m+\033[00m] Text\033[1;31m: \033[0;36m")
m = hashlib.new('sha384')
m.update(text)
digest = m.hexdigest()
print "\033[00m[\033[1;32m+\033[00m] SHA384 \033[1;31m: \033[0;33m"+digest
|
[
"bangaslanz@yahoo.com"
] |
bangaslanz@yahoo.com
|
3a7cf247650cce99e5be3bda5ca00bcccf38972f
|
35d0c90beda35b277474a4800415ccbe63a1d04a
|
/inquisitor/management/commands/count_agencies_with_active_address.py
|
cbd3bfd95b24cb4d1d40701098453a53bee2d35f
|
[] |
no_license
|
ArtieCode/Projekt_Koncowy
|
6cec4b346b361293f28ad5682a0f92bda90b83a5
|
fe06cfa09af0762919260b25f0052a6d5d2f5456
|
refs/heads/master
| 2020-05-20T23:02:42.818788
| 2019-05-17T09:56:51
| 2019-05-17T09:56:51
| 185,794,106
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 850
|
py
|
from django.core.management.base import BaseCommand, CommandError
from inquisitor.models import AgencyAddress, DetectiveAgency
from tqdm import tqdm
import re
class Command(BaseCommand):
help = 'Count agencies that have an active address'
def handle(self, *args, **options):
all_agencies = DetectiveAgency.objects.all()
active_count = 0
def has_active_address(db_object):
addresses = db_object.agencyaddress_set.all()
for address in addresses:
if address.address_type == 2:
return True
return False
for agency in tqdm(all_agencies):
has_active = has_active_address(agency)
if has_active:
active_count += 1
print(f'{active_count}/{len(all_agencies)} agencies with an active address')
|
[
"artur.placha@gmail.com"
] |
artur.placha@gmail.com
|
7f19a3de1a2177407921827f9a30e9f957520c64
|
ace2dc6096eb0b7a540f28e57df8459adafad6ed
|
/Algorithmic Toolbox/week3_greedy_algorithms/MaxValueofLoot.py
|
d0556e46829682f74ac9c48922ded067c88a5f6e
|
[] |
no_license
|
tdslivensky/AlgorithmsAndDataStructures
|
6ad2c28204600b1f8f72228c13d29d2c3c9437c9
|
e8b1011ab5210bc52854f911e2a7e41a83b36740
|
refs/heads/master
| 2023-01-11T16:32:49.399654
| 2020-11-13T13:49:18
| 2020-11-13T13:49:18
| 289,050,279
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,156
|
py
|
def get_optimal_value(capacity, weights, values):
TotalWeight = capacity
value = 0
weightValueIndex = 0
arr = [0] * len(weights)
# greedy fractional knapsack: compute the value-per-weight ratio of each item,
# then take items in decreasing ratio order
for i in range(len(weights)):
WeightPerValue = values[i]/weights[i]
arr[i] = [weights[i],values[i],WeightPerValue]
a = sorted(arr, key=lambda x:float(x[2]), reverse=True)
while(TotalWeight != 0):
if(len(weights)==1):
if(TotalWeight > a[weightValueIndex][0]):
value = a[weightValueIndex][1]
return value
else:
value += (TotalWeight * a[weightValueIndex][2])
return value
elif(TotalWeight > a[weightValueIndex][0]):
TotalWeight -= a[weightValueIndex][0]
value += a[weightValueIndex][1]
weightValueIndex += 1
else:
value += (TotalWeight * a[weightValueIndex][2])
TotalWeight = 0
return value
if __name__ == "__main__":
capacity = 10
values = [500]
weights = [30]
opt_value = get_optimal_value(capacity, weights, values)
print("{:.10f}".format(opt_value))
|
[
"tslivensky@emailatg.com"
] |
tslivensky@emailatg.com
|
8c1b2c443b10f64ad81dbb48b78341c22ec527dc
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/discount_info_v3.py
|
3eeec1c5d49a77c443407f9193187e6c6e93816a
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,663
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DiscountInfoV3:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'discount_id': 'str',
'discount_value': 'str',
'discount_type': 'int',
'orders': 'list[OrderV3]'
}
attribute_map = {
'discount_id': 'discount_id',
'discount_value': 'discount_value',
'discount_type': 'discount_type',
'orders': 'orders'
}
def __init__(self, discount_id=None, discount_value=None, discount_type=None, orders=None):
"""DiscountInfoV3 - a model defined in huaweicloud sdk"""
self._discount_id = None
self._discount_value = None
self._discount_type = None
self._orders = None
self.discriminator = None
self.discount_id = discount_id
self.discount_value = discount_value
self.discount_type = discount_type
self.orders = orders
@property
def discount_id(self):
"""Gets the discount_id of this DiscountInfoV3.
Available discount ID of the order. Pass this value when paying the order to apply the discount.
:return: The discount_id of this DiscountInfoV3.
:rtype: str
"""
return self._discount_id
@discount_id.setter
def discount_id(self, discount_id):
"""Sets the discount_id of this DiscountInfoV3.
Available discount ID of the order. Pass this value when paying the order to apply the discount.
:param discount_id: The discount_id of this DiscountInfoV3.
:type: str
"""
self._discount_id = discount_id
@property
def discount_value(self):
"""Gets the discount_value of this DiscountInfoV3.
Discount rate or spend-threshold reduction amount; empty if the discount mode is a fixed price.
:return: The discount_value of this DiscountInfoV3.
:rtype: str
"""
return self._discount_value
@discount_value.setter
def discount_value(self, discount_value):
"""Sets the discount_value of this DiscountInfoV3.
Discount rate or spend-threshold reduction amount; empty if the discount mode is a fixed price.
:param discount_value: The discount_value of this DiscountInfoV3.
:type: str
"""
self._discount_value = discount_value
@property
def discount_type(self):
"""Gets the discount_type of this DiscountInfoV3.
Discount type. Values: 0: promotional discount; 1: contract discount; 2: commercial discount; 3: partner-granted discount; 609: order price-adjustment discount.
:return: The discount_type of this DiscountInfoV3.
:rtype: int
"""
return self._discount_type
@discount_type.setter
def discount_type(self, discount_type):
"""Sets the discount_type of this DiscountInfoV3.
Discount type. Values: 0: promotional discount; 1: contract discount; 2: commercial discount; 3: partner-granted discount; 609: order price-adjustment discount.
:param discount_type: The discount_type of this DiscountInfoV3.
:type: int
"""
self._discount_type = discount_type
@property
def orders(self):
"""Gets the orders of this DiscountInfoV3.
List of orders to which the discount can be applied. See Table 3 for details.
:return: The orders of this DiscountInfoV3.
:rtype: list[OrderV3]
"""
return self._orders
@orders.setter
def orders(self, orders):
"""Sets the orders of this DiscountInfoV3.
List of orders to which the discount can be applied. See Table 3 for details.
:param orders: The orders of this DiscountInfoV3.
:type: list[OrderV3]
"""
self._orders = orders
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DiscountInfoV3):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
1109161a39f73fe01e4a6f4099ad4dad4a0939bc
|
abdb582b9ab76eaf6df1fdb5843c24fa6fa1ede0
|
/flendz_test/urls.py
|
80bc3d35b33735c54f511c2ea63a1065e235799b
|
[] |
no_license
|
jabykuniyil/flendz
|
1375341ee97986842d962702e0f1ac7f6d48cae7
|
ef952f9e14320b9c512b4047c6726ab9ff776120
|
refs/heads/main
| 2023-05-27T20:12:36.774259
| 2021-06-05T04:38:47
| 2021-06-05T04:38:47
| 372,798,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('test_app.url')),
]
|
[
"mohdjabiran112@gmail.com"
] |
mohdjabiran112@gmail.com
|
74265b4401c3e94e0487b4755dd8a0b7a8dd4660
|
bf3b729b635c2f0505e1adeed88cf583d8923367
|
/devscripts/createSampleLinks.py
|
af5e719511b1e15bc906f8a16f86458ce2edb257
|
[] |
no_license
|
LucBerge/GitHub-map
|
0e9e3c4d96530e376e531640af2f950cf0c6c69a
|
66c73079bf8e4a27a0e057d94cdb9e7ac8aa341f
|
refs/heads/master
| 2020-04-28T04:56:03.346772
| 2020-01-06T18:45:04
| 2020-01-06T18:45:04
| 175,000,271
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
#!/usr/bin/python
import sys
try:
input = open(sys.argv[1],"rU")
output = open(sys.argv[2],"w")
limit = int(sys.argv[3])
for line in input:
line_split = line.split('\t')
if(len(line_split) == 3):
if(int(line_split[2]) >= limit):
output.write(line)
input.close()
output.close()
except:
print("Usage <input> <output> <limit>")
|
[
"lucas.bergeron@outlook.fr"
] |
lucas.bergeron@outlook.fr
|
ef4232d6318f6c09b7ce5f6cb4de67654392c61e
|
5de91e63d99ba96db2aa69bc7efaf93dbe7fcbe3
|
/compute_dp_sgd_privacy.py
|
aa0d1ba586dcd1545019cc3192c5d75dfea3d85b
|
[] |
no_license
|
cuongtran-syr/DP_Fair
|
a44168eef05e06de427f5b09dbe5c5c9516a1864
|
d5f7d59a2163013c1c119a956e9e87bd1127e0f4
|
refs/heads/master
| 2022-11-16T03:01:15.326027
| 2020-07-10T18:39:31
| 2020-07-10T18:39:31
| 260,422,813
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,576
|
py
|
# clone from https://github.com/ebagdasa/differential-privacy-vs-fairness/blob/master/compute_dp_sgd_privacy.py
r"""Command-line script for computing privacy of a model trained with DP-SGD.
The script applies the RDP accountant to estimate privacy budget of an iterated
Sampled Gaussian Mechanism. The mechanism's parameters are controlled by flags.
Example:
compute_dp_sgd_privacy
--N=60000 \
--batch_size=256 \
--noise_multiplier=1.12 \
--epochs=60 \
--delta=1e-5
The output states that DP-SGD with these parameters satisfies (2.92, 1e-5)-DP.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import app
from absl import flags
from tfcode.rdp_accountant import compute_rdp
from tfcode.rdp_accountant import get_privacy_spent
FLAGS = flags.FLAGS
flags.DEFINE_integer('N', None, 'Total number of examples')
flags.DEFINE_integer('batch_size', None, 'Batch size')
flags.DEFINE_float('noise_multiplier', None, 'Noise multiplier for DP-SGD')
flags.DEFINE_float('epochs', None, 'Number of epochs (may be fractional)')
flags.DEFINE_float('delta', 1e-6, 'Target delta')
flags.mark_flag_as_required('N')
flags.mark_flag_as_required('batch_size')
flags.mark_flag_as_required('noise_multiplier')
flags.mark_flag_as_required('epochs')
def apply_dp_sgd_analysis(q, sigma, steps, orders, delta):
"""Compute and print results of DP-SGD analysis."""
rdp = compute_rdp(q, sigma, steps, orders)
eps, _, opt_order = get_privacy_spent(orders, rdp, target_delta=delta)
# print('DP-SGD with sampling rate = {:.3g}% and noise_multiplier = {} iterated'
# ' over {} steps satisfies'.format(100 * q, sigma, steps), end=' ')
# print('differential privacy with eps = {:.3g} and delta = {}.'.format(
# eps, delta))
# print('The optimal RDP order is {}.'.format(opt_order))
#
# if opt_order == max(orders) or opt_order == min(orders):
# print('The privacy estimate is likely to be improved by expanding '
# 'the set of orders.')
return eps
def main(argv):
del argv # argv is not used.
q = FLAGS.batch_size / FLAGS.N # q - the sampling ratio.
if q > 1:
raise app.UsageError('N must be larger than the batch size.')
orders = ([1.25, 1.5, 1.75, 2., 2.25, 2.5, 3., 3.5, 4., 4.5] +
list(range(5, 64)) + [128, 256, 512])
steps = int(math.ceil(FLAGS.epochs * FLAGS.N / FLAGS.batch_size))
apply_dp_sgd_analysis(q, FLAGS.noise_multiplier, steps, orders, FLAGS.delta)
if __name__ == '__main__':
app.run(main)
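# Illustrative sketch of calling the analysis programmatically (no absl flags), reusing
# the parameter values from the docstring's example command line.
def _example_dp_sgd_analysis():
    N, batch_size, noise_multiplier, epochs, delta = 60000, 256, 1.12, 60, 1e-5
    q = batch_size / N  # per-step sampling ratio
    orders = ([1.25, 1.5, 1.75, 2., 2.25, 2.5, 3., 3.5, 4., 4.5] +
              list(range(5, 64)) + [128, 256, 512])
    steps = int(math.ceil(epochs * N / batch_size))
    eps = apply_dp_sgd_analysis(q, noise_multiplier, steps, orders, delta)
    print('DP-SGD with these parameters satisfies ({:.3g}, {})-DP'.format(eps, delta))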
|
[
"noreply@github.com"
] |
noreply@github.com
|