Schema of the records below (column, dtype, observed range / classes):

| column | dtype | observed range / classes |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 2–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–69 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–118 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | lengths 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | lengths 2–10.3M |
| authors | list | lengths 1–1 |
| author_id | string | lengths 0–212 |

Each record that follows lists these fields in order, delimited by `|`, with the `content` column holding the raw source file.
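As a minimal loading sketch (assuming this dump corresponds to a Hugging Face dataset; `"org/dataset-name"` below is a placeholder path, not taken from this document):

```python
# Hypothetical sketch -- "org/dataset-name" is a placeholder, not a real path.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train", streaming=True)
for row in ds:
    # each record pairs repo/file metadata with the file's raw text
    print(row["repo_name"], row["path"], row["length_bytes"])
    print(row["content"][:200])
    break
```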
2bd87a7aa56b344c55ef25bb7d11c215473055d2
|
b3d8a02bdcb563f9f8f819ca278e548cbbb6a719
|
/weekday.py
|
5b0c41b175d75e561bcf314b105ecd97f5ba1244
|
[] |
no_license
|
shadab4150/data-mining-call-records-to-get-location-of-cellphone
|
f1b05b6ccbbbf9140b485fb7314eff665ef2bf6b
|
4e0712806c12956eff6db84bb76bdf68df14fc53
|
refs/heads/master
| 2020-06-12T22:21:15.634498
| 2019-09-08T12:35:38
| 2019-09-08T12:35:38
| 194,445,668
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,460
|
py
|
import pandas as pd
from datetime import timedelta
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot')
def clusterInfo(model):
print("Cluster Analysis Inertia: ", model.inertia_)
print('------------------------------------------')
for i in range(len(model.cluster_centers_)):
print("\n Cluster ", i)
print(" Centroid ", model.cluster_centers_[i])
print(" #Samples ", (model.labels_==i).sum()) # NumPy Power
# Find the cluster with the least # attached nodes
def clusterWithFewestSamples(model):
# Ensure there's at least one cluster...
minSamples = len(model.labels_)
minCluster = 0
for i in range(len(model.cluster_centers_)):
if minSamples > (model.labels_==i).sum():
minCluster = i
minSamples = (model.labels_==i).sum()
print("\n Cluster With Fewest Samples: ", minCluster)
return (model.labels_==minCluster)
def doKMeans(data, clusters=0):
df1 = pd.concat([data.TowerLon, data.TowerLat], axis = 1)
kmeans = KMeans(n_clusters = clusters)
labels = kmeans.fit_predict(df1)
centroids = kmeans.cluster_centers_
    # NOTE: relies on the module-level 'ax' created below, before doKMeans is called
    ax.scatter(x=centroids[:, 0], y=centroids[:, 1], c='r', marker='x', s=100)
model = kmeans
return model
df = pd.read_csv('F:\\CSV files\\CDR.csv')
print(df.head())
df.CallDate = pd.to_datetime(df.CallDate)
df.Duration = pd.to_timedelta(df.Duration)
df.CallTime = pd.to_timedelta(df.CallTime)
print(df.dtypes)
print(df[(df.TowerLat == 32.721986) & (df.TowerLon == -96.890587)])  # data for the second question (post office lat/lon)
users = df.In.unique()
print(users)
print("\n\nExamining person: ",6)
user1 = df[(df.In == users[ 6])]
user1 = user1[(user1.DOW == 'Mon') | (user1.DOW == 'Tue') | (user1.DOW == 'Wed') | (user1.DOW == 'Thu') | (user1.DOW == 'Fri')]
user1 = user1[(user1.CallTime < "17:00:00")]
fig=plt.figure()
ax = fig.add_subplot(111)
ax.scatter(user1.TowerLon,user1.TowerLat, c='g', marker='o', alpha=0.2)
ax.set_title('Weekday Calls before 5PM')
model = doKMeans(user1, 3)
midWayClusterIndices = clusterWithFewestSamples(model)
midWaySamples = user1[midWayClusterIndices]
print(" Its Waypoint Time: ", midWaySamples.CallTime.mean())
# visualize the results!
# First draw the X's for the clusters:
ax.scatter(model.cluster_centers_[:,0], model.cluster_centers_[:,1], s=169, c='r', marker='x', alpha=0.8, linewidths=2)
ax.set_title('Weekday Calls Centroids')
plt.show()
|
[
"noreply@github.com"
] |
shadab4150.noreply@github.com
|
138f7f252d73e83411d17c2c349d424fa7ffc5fb
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03326/s918359184.py
|
35df2f464cf01c216168e48da719752b335b7739
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
N,M = map(int,input().split())
P = []
for _ in range(N):
x,y,z = map(int,input().split())
P.append([x,y,z])
ans = 0
for X in(1,-1):
for Y in (1,-1):
for Z in (1,-1):
A = []
for i in range(len(P)):
val = X*P[i][0] + Y*P[i][1] + Z*P[i][2]
A.append(val)
A.sort(reverse = True)
ans = max(ans,sum(A[:M]))
print(ans)
# O(N log N) per sign pattern: enumerate all 8 sign combinations of (x, y, z),
# sort the signed sums in descending order, and keep the best M.
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
8cb8a267d2c5699a88f8202a1ecf829665a6f44e
|
a48727b9dbbf05df1409039eeb19101d6f8e019e
|
/killprocess.py
|
97f8c0b288382d1a425925db00bdddd7afef74c4
|
[] |
no_license
|
uthpalaherath/dotfiles
|
6295f14e9879014ecc5d9092bc9af9d83b1d801e
|
a40f7aacb5f0270929de9690cddec3176007ad52
|
refs/heads/master
| 2023-07-24T09:27:48.863480
| 2023-07-20T03:49:18
| 2023-07-20T03:49:18
| 251,228,637
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 875
|
py
|
#!/usr/bin/env python
# Author: Pedram Tavazohi
import psutil
import sys
import time
import datetime
import os
user = os.getenv("USER")
def check_process(name):
    """Kill the first process owned by the current user whose name contains `name`.

    Returns True if a matching process was killed, False otherwise.
    """
for proc in psutil.process_iter():
try:
# Check if process name contains the given name string.
if name.lower() in proc.name().lower() and proc.username() == user:
now = datetime.datetime.now()
print(now.strftime("%Y-%m-%d %H:%M:%S"), proc.pid, proc.name())
proc.kill()
return True
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
return False
if len(sys.argv) < 2:
print("Please input a list of process names")
exit()
while True:
for arg in sys.argv[1:]:
print(arg)
        check_process(arg)
time.sleep(30)
|
[
"ukh0001@mix.wvu.edu"
] |
ukh0001@mix.wvu.edu
|
61c862ac3821e764fea417da189d6089f06a7489
|
a81884be41488f92725b6dae1bb3b6d9eae3380d
|
/build/micros_swarm_framework/swarm_library/olfati_saber_flocking/catkin_generated/pkg.develspace.context.pc.py
|
10acf535eca20f93b4da38546351948c5fc62f87
|
[] |
no_license
|
conniemzhang/RoboSquad
|
a22b76c4b551990926c374390983501658bdf24d
|
e4fd7ca51678fe914316d80488f11ce4b322f001
|
refs/heads/master
| 2021-09-13T18:22:29.336941
| 2018-05-02T21:53:21
| 2018-05-02T21:53:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/connie/robo_squad/src/micros_swarm_framework/swarm_library/olfati_saber_flocking/include".split(';') if "/home/connie/robo_squad/src/micros_swarm_framework/swarm_library/olfati_saber_flocking/include" != "" else []
PROJECT_CATKIN_DEPENDS = "micros_swarm;roscpp;rospy;std_msgs;nav_msgs;geometry_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "olfati_saber_flocking"
PROJECT_SPACE_DIR = "/home/connie/robo_squad/devel"
PROJECT_VERSION = "0.0.17"
|
[
"cmzhang96@gmail.com"
] |
cmzhang96@gmail.com
|
01e353a3b4ade220580e478a7c1f4d7c0b2b10a7
|
a50c5d933bd361271518817a6735907e77f66340
|
/Map.py
|
895c9dd6ee55e24c8b41c650034a709fc4540ec8
|
[] |
no_license
|
amansanghvi/Thesis
|
ffb0c727f9415b8291763658a39a59697d4b1991
|
ea14efc28e9b1de338fd8cb48687fed907c447ee
|
refs/heads/main
| 2023-06-19T20:36:09.281009
| 2021-07-23T07:17:13
| 2021-07-23T07:17:13
| 307,730,159
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,693
|
py
|
from abc import abstractclassmethod
from typing import Any, List, Optional, Tuple, Union, cast
import matplotlib.pyplot as plt
import numpy as np
from lidar import Scan
from models import Pose, Position
MAP_LENGTH = 10 # metres
CELLS_PER_ROW = 100
CELL_SIZE = MAP_LENGTH / CELLS_PER_ROW
RELEVANT_POINT_DIST = 10.0
OCCUPIED_POINT_THRESHOLD = 1.0
class Map:
@abstractclassmethod
def __getitem__(self, idx: int) -> list: # Hacky way to allow double indexing
pass
@abstractclassmethod
def __len__(self) -> int:
pass
@abstractclassmethod
def __str__(self) -> str:
pass
@abstractclassmethod
def get_pr_at(self, pos: Position) -> Optional[float]:
pass
@abstractclassmethod
def update(self, robot_pose: Pose, scan: Scan) -> Any:
pass
# Input is GLOBAL x and y in metres
@abstractclassmethod
def get_cell(self, x: float, y: float) -> Optional[Position]:
pass
@abstractclassmethod
def get_nearby_occ_points(self, curr_cell: Position) -> np.ndarray:
pass
@abstractclassmethod
def get_scan_match(self, rel_scan: Scan, prev_scan: Scan, guess: Pose, pose_range: np.ndarray) -> Tuple[List[float], List[List[float]], float]:
pass
@abstractclassmethod
def is_occ_at(self, x, y) -> bool:
pass
@staticmethod
def get_affected_points(x0: int, y0: int, x1: int, y1: int) -> List[Tuple[int, int]]:
dx = abs(x1 - x0)
dy = abs(y1 - y0)
if (dx == 0):
return [(x0, y) for y in range(y0, y1+1)]
if (dy == 0):
return [(x, y0) for x in range(x0, x1+1)]
xsign = 1 if x1 - x0 > 0 else -1
ysign = 1 if y1 - y0 > 0 else -1
steep = dy > dx
if steep:
dx, dy = dy, dx
D = 2*dy - dx
y = 0
result = []
for x in range(dx + 1):
if (steep):
result.append((x0 + xsign*y, y0 + ysign*x))
else:
result.append((x0 + xsign*x, y0 + ysign*y))
if D >= 0:
y += 1
D -= 2*dx
D += 2*dy
return result
    # Does not give an accurate position;
    # uses an arbitrary unit of distance.
@abstractclassmethod
def get_occupied_points(self):
pass
# index to m from origin.
@abstractclassmethod
def index_to_distance(self, i: int) -> float:
pass
@abstractclassmethod
def copy(self) -> Any:
pass
def show(self):
x, y = self.get_occupied_points()
plt.figure()
plt.scatter(x, y, s=2)
plt.show(block=False)
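
# Illustrative sketch (not part of the original file): get_affected_points is a
# plain static method implementing a Bresenham-style line traversal, so it can
# be exercised directly. Expected output is the chain of grid cells from
# (0, 0) to (5, 2): [(0, 0), (1, 0), (2, 1), (3, 1), (4, 2), (5, 2)].
if __name__ == "__main__":
    print(Map.get_affected_points(0, 0, 5, 2))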
|
[
"aman302@hotmail.co.uk"
] |
aman302@hotmail.co.uk
|
d40b5547fccf65fbb18f85c0e34c54e4c17caa25
|
1b3c967ffa3496b9a4244307672f9e1582882b83
|
/refactor/tilde_essentials/example.py
|
bb28750fd506917847f9fcdeeb200a384f859bb0
|
[
"Apache-2.0"
] |
permissive
|
joschout/tilde
|
ae4a28edd69425583dee26622fc3e475315e6917
|
1403b50842b83f2edd6b16b1fbe24b9bec2d0048
|
refs/heads/master
| 2021-06-30T00:43:02.948058
| 2020-09-21T08:30:38
| 2020-09-21T08:30:38
| 158,675,050
| 21
| 5
|
Apache-2.0
| 2020-03-26T00:31:58
| 2018-11-22T09:34:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,037
|
py
|
from typing import Iterable
from refactor.tilde_essentials.destuctable import Destructible
class Example(Destructible):
"""
Container class for an example, storing its data and label (types undefined)
"""
def __init__(self, data, label):
self.data = data
self.label = label
def destruct(self):
destruct_method = getattr(self.data, 'destruct', None)
if callable(destruct_method):
self.data.destruct()
def get_labels(examples: Iterable):
labels = set()
for current_example in examples:
# for label in current_example.labels:
labels.add(current_example.label)
return labels
def calculate_majority_class(examples):
"""Calculate the majority class label in the given set of examples.
"""
label_counts = calculate_label_counts(examples)
label_with_max_count = max(label_counts, key=(lambda key: label_counts[key]))
count = label_counts[label_with_max_count] # type: int
return label_with_max_count, count
def calculate_label_counts(examples):
"""Assumes that the examples each have ONE label, and not a distribution over labels"""
label_counts = {}
for example in examples:
label = example.label
label_counts[label] = label_counts.get(label, 0) + 1
return label_counts
def calculate_label_frequencies(examples):
"""Assumes that the examples each have ONE label, and not a distribution over labels"""
label_counts = calculate_label_counts(examples)
for label in label_counts.keys():
label_counts[label] = label_counts[label] / len(examples)
return label_counts
def calculate_label_frequencies_and_absolute_counts(examples):
"""Assumes that the examples each have ONE label, and not a distribution over labels"""
label_counts = calculate_label_counts(examples)
label_frequencies = {}
for label in label_counts.keys():
label_frequencies[label] = label_counts[label] / len(examples)
return label_frequencies, label_counts
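
# Illustrative sketch (not part of the original module): exercising the label
# helpers on a few toy examples.
if __name__ == "__main__":
    toy = [Example(data=None, label="pos"),
           Example(data=None, label="pos"),
           Example(data=None, label="neg")]
    print(get_labels(toy))                # {'pos', 'neg'}
    print(calculate_label_counts(toy))    # {'pos': 2, 'neg': 1}
    print(calculate_majority_class(toy))  # ('pos', 2)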
|
[
"jonas.schouterden@student.kuleuven.be"
] |
jonas.schouterden@student.kuleuven.be
|
10b9f0f7db2392f39950951bfb506cb3ad55492c
|
c0c6d3e792c3b9a7bcc1db7b6937f6ff0b2ccb60
|
/training_scripts/not_in_paper/affine_pretrain_triangles_unet.py
|
48d084cb6c0508a34d074f2bac1ad84b2b9859cf
|
[
"Apache-2.0"
] |
permissive
|
tbirdso/ICON
|
4ff8ae69b4e54cd264f1b3b2621bce94f4e9244e
|
c87495d1f479297cea456f752dc477c16f3587aa
|
refs/heads/master
| 2023-08-29T16:46:01.704033
| 2021-10-15T23:27:03
| 2021-10-15T23:27:03
| 418,936,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,304
|
py
|
import parent
import torch
import numpy as np
import networks
import visualize
import inverseConsistentNet
import data
import describe
import os
import matplotlib.pyplot as plt
import random
import pickle
batch_size = 128
data_size = 50
d1, d2 = data.get_dataset_triangles(
"train", data_size=data_size, hollow=True, batch_size=batch_size
)
d1_t, d2_t = data.get_dataset_triangles(
"test", data_size=data_size, hollow=True, batch_size=batch_size
)
image_A, image_B = (x[0].cuda() for x in next(zip(d1, d2)))
net_tmp = inverseConsistentNet.InverseConsistentAffineNet(
networks.ConvolutionalMatrixNet(), 100, next(iter(d1))[0].size()
)
net = inverseConsistentNet.InverseConsistentAffineNet(
networks.AffineFromUNet(networks.tallUNet2(), net_tmp.identityMap),
100,
next(iter(d1))[0].size(),
)
net.cuda()
import train
optim = torch.optim.Adam(net.parameters(), lr=0.00001)
net.train().cuda()
xs = []
for _ in range(240):
y = np.array(train.train2d(net, optim, d1, d2, epochs=50))
xs.append(y)
x = np.concatenate(xs)
plt.title("Loss curve for " + type(net.regis_net).__name__)
plt.plot(x[:, :3])
plt.savefig(describe.run_dir + f"loss.png")
plt.clf()
plt.title("Log # pixels with negative Jacobian per epoch")
plt.plot(x[:, 3])
# random.seed(1)
plt.savefig(describe.run_dir + f"lossj.png")
plt.clf()
with open(describe.run_dir + "loss.pickle", "wb") as f:
pickle.dump(x, f)
# torch.manual_seed(1)
# torch.cuda.manual_seed(1)
# np.random.seed(1)
image_A, image_B = (x[0].cuda() for x in next(zip(d1_t, d2_t)))
for N in range(3):
visualize.visualizeRegistration(
net,
image_A,
image_B,
N,
describe.run_dir + f"epoch{_:03}" + "case" + str(N) + ".png",
)
random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
np.random.seed(1)
image_A, image_B = (x[0].cuda() for x in next(zip(d1_t, d2_t)))
os.mkdir(describe.run_dir + "final/")
for N in range(30):
visualize.visualizeRegistrationCompact(net, image_A, image_B, N)
plt.savefig(describe.run_dir + f"final/{N}.png")
plt.clf()
torch.save(net.state_dict(), describe.run_dir + "network.trch")
torch.save(optim.state_dict(), describe.run_dir + "opt.trch")  # 'optim' is the Adam optimizer created above ('optimizer' was undefined)
|
[
"tgreer@biag-gpu1.cs.unc.edu"
] |
tgreer@biag-gpu1.cs.unc.edu
|
e61b90ba4e178454779746270d09b9427093070a
|
e65e7b9157b80d4f1d0d37fecd5869416083cad2
|
/silverlink.py
|
a883d83fe4fc81a31bae666745ccfea731882be2
|
[
"MIT"
] |
permissive
|
jherning/link68
|
33964064861c62d917120853172a71ae38389198
|
20f1dc0ca9e90818b06770979ecf6c4cb6dd829f
|
refs/heads/master
| 2023-07-12T00:55:09.716305
| 2021-07-27T20:49:41
| 2021-07-27T20:49:41
| 390,125,600
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,728
|
py
|
#### SilverLink ####
# Notes:
# idVendor=0x0451, idProduct=0xe001
# IN endpoint: 0x81
# OUT endpoint: 0x02
# Always should read 32 bytes at a time, so a read buffer is used because
# we may get some of the next packet.
import usb.core # For Silver Links
import usb.util # For Silver Links
class glink:
def __init__(self):
print('Initializing SilverLink ..')
self.usbdev = usb.core.find(idVendor=0x0451, idProduct=0xe001)
if self.usbdev is None:
print('SilverLink not found.')
quit()
usb.util.dispose_resources(self.usbdev) # Seems to help things. [Had used .reset()]
self.usbdev.set_configuration() # Should only be one configuration, so this should work..
self.readbuf = bytearray() # The read buffer is only used for the SilverLink
def read(self, numbytes):
while len(self.readbuf) < numbytes: # Not enough data is in the buffer, read link:
try:
indata = self.usbdev.read(0x81, 32, 25000) # 25s max packet allowance OK?
except:
print('!! USB link READ error. Quitting.')
quit()
self.readbuf.extend(indata)
data = self.readbuf[0:numbytes]
self.readbuf = self.readbuf[numbytes:]
return data
def write(self, data): # data should be a bytearray
try:
self.usbdev.write(0x02, data, 25000)
except:
print('!! USB link WRITE error. Quitting.')
quit()
def __del__(self):
usb.util.dispose_resources(self.usbdev)
def softreset(self):
self.readbuf = bytearray()
usb.util.dispose_resources(self.usbdev)
self.usbdev.set_configuration()
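
# Usage sketch (not part of the original file; requires a TI SilverLink cable
# on USB, otherwise glink() prints an error and quits). The 4-byte payload
# below is a made-up example, not a real link-protocol command.
if __name__ == "__main__":
    link = glink()
    link.write(bytearray([0x00, 0x00, 0x00, 0x00]))
    print(link.read(4))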
|
[
"83619895+jherning@users.noreply.github.com"
] |
83619895+jherning@users.noreply.github.com
|
d66aef0bd44d235f1c4c4f1f9e60660a2065f012
|
b5499572b71a7f5d2d3fed2142fbd2be4befa3f3
|
/app/settings.py
|
974ea197af5d4e9166c4111b414eb4fcfbcdfdc6
|
[] |
no_license
|
A-you/goods-give
|
74d78378d6e2cc613f7b2ea70f1b06b0c9b19a15
|
c254908ee46afb044fc3e0f4aa0e72b36f48daec
|
refs/heads/master
| 2022-12-17T00:33:25.210994
| 2019-04-15T12:59:56
| 2019-04-15T12:59:56
| 179,241,999
| 0
| 0
| null | 2022-12-08T05:00:08
| 2019-04-03T08:11:27
|
CSS
|
UTF-8
|
Python
| false
| false
| 340
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/4/2 10:18
# @Author : Ymy
RECENT_BOOK_COUNT = 30
BEANS_UPLOAD_ONE_BOOK = 0.5
MAIL_SERVER= 'smtp.qq.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USE_TLS = False
MAIL_USERNAME = '582838918@qq.com'
MAIL_PASSWORD = 'kwidjayghflcbbbj'
#MAIL_SUBJECT_PREFIX = '[小尤]' # subject prefix
#MAIL_SENDER = '' # suffix
|
[
"youyi.ren@foxmail.com"
] |
youyi.ren@foxmail.com
|
c30ffe6c3473b88893cc787d5641d485f00ff058
|
19a8cdf9639235cbb502ebc8e8458ee9630d5a8c
|
/NeuralModel/GRU_Dynamic.py
|
c3e1730a7a78a77c70d026742d8ee34d77d33127
|
[] |
no_license
|
yangliuy/bAbi
|
a9cbd0f66f120bab76977cc27ce5d31139fc3c85
|
0236d7c15cae7320abd8346381061bb301c4d7c1
|
refs/heads/master
| 2021-04-29T07:46:31.390681
| 2016-06-11T15:59:45
| 2016-06-11T15:59:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'benywon'
import theano
import theano.tensor as T
sigmoid = lambda x: 1 / (1 + T.exp(-x))
def GRU_dynamic(embedding_in, attention_resource, ignore_zero=False):
    # one_step was evidently lifted from a class method: the stray `self`
    # parameter would have swallowed the sequence input under theano.scan, so
    # it is dropped here and `ignore_zero` is taken as a plain argument.
    def one_step(x_t, h_tm1, W_iz, W_hz, b_z, W_ir, W_hr, b_r, W_ih, W_hh, W_ho, b_o, b_h):
        # GRU gates: update gate z_t and reset gate r_t
        zt = sigmoid(theano.dot(x_t, W_iz) + theano.dot(h_tm1, W_hz) + b_z)
        rt = sigmoid(theano.dot(x_t, W_ir) + theano.dot(h_tm1, W_hr) + b_r)
        rtht_1 = rt * h_tm1
        # candidate state, then the gated blend with the previous hidden state
        ht_hat = T.tanh(theano.dot(x_t, W_ih) + theano.dot(rtht_1, W_hh) + b_h)
        h_t = (1 - zt) * h_tm1 + zt * ht_hat
        y_t = sigmoid(theano.dot(h_t, W_ho) + b_o)
        if ignore_zero:
            # stop scanning at the first all-zero (padded) timestep
            return [h_t, y_t], theano.scan_module.until(T.eq(T.sum(abs(x_t)), 0))
        return [h_t, y_t]
    # NOTE: `outputs_info` (initial values of h and y) and `non_sequence`
    # (the weight matrices, in one_step's parameter order) are never defined
    # in this file; the caller must supply them for the scan to compile.
    outputs_list, _ = theano.scan(fn=one_step,
                                  sequences=[embedding_in],
                                  outputs_info=outputs_info,
                                  non_sequences=non_sequence)
    return outputs_list
|
[
"bingning.wang@nlpr.ia.ac.cn"
] |
bingning.wang@nlpr.ia.ac.cn
|
dc865beb6b00a6c637df63a0eec9b5bcbab57be8
|
27141174f9349a76a0541a69f5627a83e3118d17
|
/src/area_none_radio_survey.py
|
d76575c9ea104432a9887ad5b6f11fb2f1b237b8
|
[] |
no_license
|
sdweston/LikelihoodRatio
|
7714b22f2c380f0ab839ba89ed99ff9487dd80cd
|
3277af96ef38fbd8388ba793afe0059c801a2be7
|
refs/heads/master
| 2021-01-17T04:02:22.165101
| 2017-05-08T02:42:34
| 2017-05-08T02:42:34
| 41,524,007
| 1
| 3
| null | 2018-05-16T00:32:00
| 2015-08-28T03:05:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,149
|
py
|
#===========================================================================
#
# area_none_radio_survey.py
#
# Python script to query SWIRE_ES1 mysql database to determine the
# area of the survey for a sub-set of the survey.
#
#===========================================================================
#
# S. Weston
# AUT University
# March 2013
#===========================================================================
import math  # needed for radians() and cos() below; missing in the original

def area_none_radio_survey():
ra2=8.0
ra1=9.5
dec2=-44.5
dec1=-43.0
rad_ra1=math.radians(ra1)
rad_ra2=math.radians(ra2)
dec1_dec2=(dec1+dec2)/2
print "(dec1 + dec2)/2 : %f" % dec1_dec2
term1=ra1-ra2
print "(ra1-ra2) = %f" % term1
term2=dec1-dec2
print "(dec1-dec2) = %f" % term2
term3=math.cos(math.radians(dec1_dec2))
print "math.cos((rad_dec1+rad_dec2)/2) = %f" % term3
    # area ≈ (ra1 - ra2) × (dec1 - dec2) × cos(mean dec), in square degrees
    area_sqdeg = term1 * term3 * term2
print "Area square degrees : %f" % area_sqdeg
area_arcsec=area_sqdeg*(3600**2)
print "Area square arcsec : %f" % area_arcsec
return area_arcsec
|
[
"weston.s.d@gmail.com"
] |
weston.s.d@gmail.com
|
9c43b1243714b0ae111247f46d626cbc4f8f00e7
|
62ab0fc1c028073b6eaac8c4bf4651d4a876e15d
|
/DjangoVf/mysite/settings.py
|
fafd0972be447cf3c9ee9136e09055f7e8ce2d00
|
[] |
no_license
|
jeanlucca/CampoMinado-Sd2019
|
aa50646545535cb368af1bb15509bbfe762d3748
|
efbc39d10c9bdb70fa5057031e17a5903cdde852
|
refs/heads/master
| 2020-11-24T08:22:57.651381
| 2019-12-14T16:04:31
| 2019-12-14T16:04:31
| 228,049,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,169
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'oav)yievbo8zbe@a*qqul9y%!$!f$l^ey1bl2-p+)y@2^hm0v7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Fortaleza'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"noreply@github.com"
] |
jeanlucca.noreply@github.com
|
07a96ba0e7ef7e527f3ff4c1455d92773758a154
|
32fdc94d1b8d98085db5d1e8caae4161d3e70667
|
/3rd_party/python3.7/lib/python3.7/site-packages/markdown/serializers.py
|
3cfa6bb9ea50e917f7bea65716a2a9a9bc05b39e
|
[
"Python-2.0"
] |
permissive
|
czfdlut/ticket_proxy
|
fa0f1924a86babfa7ce96cf97e929f7bf78643b7
|
0d7c19448741bc9030484a97c1b8f118098213ad
|
refs/heads/master
| 2022-12-23T05:25:58.207123
| 2019-11-20T03:58:31
| 2019-11-20T03:58:31
| 174,579,562
| 1
| 3
| null | 2022-12-18T01:18:07
| 2019-03-08T17:22:48
|
Python
|
UTF-8
|
Python
| false
| false
| 6,770
|
py
|
# markdown/serializers.py
#
# Add x/html serialization to ElementTree
# Taken from ElementTree 1.3 preview with slight modifications
#
# Copyright (c) 1999-2007 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2007 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import unicode_literals
from xml.etree.ElementTree import ProcessingInstruction
from . import util
import re
ElementTree = util.etree.ElementTree
QName = util.etree.QName
if hasattr(util.etree, 'test_comment'): # pragma: no cover
Comment = util.etree.test_comment
else: # pragma: no cover
Comment = util.etree.Comment
__all__ = ['to_html_string', 'to_xhtml_string']
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
"img", "input", "isindex", "link", "meta", "param")
RE_AMP = re.compile(r'&(?!(?:\#[0-9]+|[0-9a-z]+);)', re.I)
try:
HTML_EMPTY = set(HTML_EMPTY)
except NameError: # pragma: no cover
pass
def _raise_serialization_error(text): # pragma: no cover
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _escape_cdata(text):
# escape character data
try:
        # it's worth avoiding do-nothing calls for strings that are
        # shorter than 500 characters, or so. assume that's, by far,
        # the most common case in most applications.
        if "&" in text:
            # Only replace & when not part of an entity
            text = RE_AMP.sub('&amp;', text)
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
return text
except (TypeError, AttributeError): # pragma: no cover
_raise_serialization_error(text)
def _escape_attrib(text):
# escape attribute value
try:
if "&" in text:
# Only replace & when not part of an entity
text = RE_AMP.sub('&', text)
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
if "\n" in text:
text = text.replace("\n", " ")
return text
except (TypeError, AttributeError): # pragma: no cover
_raise_serialization_error(text)
def _escape_attrib_html(text):
# escape attribute value
try:
if "&" in text:
# Only replace & when not part of an entity
text = RE_AMP.sub('&', text)
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text
except (TypeError, AttributeError): # pragma: no cover
_raise_serialization_error(text)
def _serialize_html(write, elem, format):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % _escape_cdata(text))
elif tag is ProcessingInstruction:
write("<?%s?>" % _escape_cdata(text))
elif tag is None:
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, format)
else:
namespace_uri = None
if isinstance(tag, QName):
# QNAME objects store their data as a string: `{uri}tag`
if tag.text[:1] == "{":
namespace_uri, tag = tag.text[1:].split("}", 1)
else:
raise ValueError('QName objects must define a tag.')
write("<" + tag)
items = elem.items()
if items:
items = sorted(items) # lexical order
for k, v in items:
if isinstance(k, QName):
# Assume a text only QName
k = k.text
if isinstance(v, QName):
# Assume a text only QName
v = v.text
else:
v = _escape_attrib_html(v)
if k == v and format == 'html':
# handle boolean attributes
write(" %s" % v)
else:
write(' %s="%s"' % (k, v))
if namespace_uri:
write(' xmlns="%s"' % (_escape_attrib(namespace_uri)))
if format == "xhtml" and tag.lower() in HTML_EMPTY:
write(" />")
else:
write(">")
if text:
if tag.lower() in ["script", "style"]:
write(text)
else:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, format)
if tag.lower() not in HTML_EMPTY:
write("</" + tag + ">")
if elem.tail:
write(_escape_cdata(elem.tail))
def _write_html(root, format="html"):
assert root is not None
data = []
write = data.append
_serialize_html(write, root, format)
return "".join(data)
# --------------------------------------------------------------------
# public functions
def to_html_string(element):
return _write_html(ElementTree(element).getroot(), format="html")
def to_xhtml_string(element):
return _write_html(ElementTree(element).getroot(), format="xhtml")
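
# Illustrative sketch (not part of the original module): both serializers on a
# small void element; the attribute value is escaped via _escape_attrib_html.
if __name__ == "__main__":
    el = util.etree.Element("img")
    el.set("src", "a&b.png")
    print(to_html_string(el))   # <img src="a&amp;b.png">
    print(to_xhtml_string(el))  # <img src="a&amp;b.png" />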
|
[
"czfdlut@163.com"
] |
czfdlut@163.com
|
67316cfd4cfb23523ecc72c02a5deff397b79374
|
b13463d9394250c63312c1d2316be91176dd9ebd
|
/riwayattujuantf.py
|
2b802f93000a632bb689a6c6162bda7ed7db94ab
|
[] |
no_license
|
ameriyulina/tubesDAPpython
|
aed044245e915b85f4d1600259b00c616f59c5ac
|
035ecc22116515eb88b0b830f84dd483b90c1b7a
|
refs/heads/master
| 2021-10-27T20:12:51.321405
| 2019-04-19T12:25:49
| 2019-04-19T12:25:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 89
|
py
|
import json
def tujuan(b):
    # serialize b to usersesama.txt; 'with' guarantees the file handle is closed
    with open("usersesama.txt", "w") as a:
        json.dump(b, a)
|
[
"noreply@github.com"
] |
ameriyulina.noreply@github.com
|
da6de61d9dbc577ebaa94684bcb934edecf913cd
|
26d5a0cfab958aacbaff1b723bb6316cbd9c8f99
|
/rough.py
|
56a219c7883ee6bde6af2cf144c4ea6151a6d3f7
|
[
"MIT"
] |
permissive
|
Praveenstein/bigGanMicro
|
75f586694e009be373c7ebb293f39f7b83b3ecb6
|
d669874c0226907fa41b2140cdc8c46bdef2a283
|
refs/heads/main
| 2023-05-31T10:48:05.145167
| 2021-06-13T16:46:18
| 2021-06-13T16:46:18
| 376,592,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
import pandas as pd
df = pd.read_excel("meta/micro_metadata_5.xlsx", index_col=0, engine='openpyxl', header=0)
print(df.shape)
df.drop(df.loc[df['primary_microconstituent'] == "figure"].index, inplace=True)
print(df.shape)
df.to_excel("meta/micro_metadata_6.xlsx")
|
[
"praveenstein@outlook.com"
] |
praveenstein@outlook.com
|
b931cff1409ef09ae10709f5a39db9edd9385497
|
5a25f4f5f9c7cba03f9b5848eafc01a760c88768
|
/reduction/pipeline_scripts/member.uid___A001_X1296_X1cb.hifa_calimage.casa_pipescript.py
|
df6acfc8555cb46bfa66612b457ab544e469c511
|
[] |
no_license
|
ALMA-IMF/reduction
|
b3579a548fe20193b807a7415a040f351c879beb
|
de606cc6bc542f088223ce84082ff333739c9007
|
refs/heads/master
| 2023-06-22T13:21:13.841999
| 2023-06-12T09:17:50
| 2023-06-12T09:17:50
| 115,018,799
| 9
| 29
| null | 2023-06-12T09:17:51
| 2017-12-21T15:13:55
|
Python
|
UTF-8
|
Python
| false
| false
| 3,071
|
py
|
from recipes.almahelpers import fixsyscaltimes # SACM/JAO - Fixes
__rethrow_casa_exceptions = True
context = h_init()
context.set_state('ProjectSummary', 'proposal_code', '2017.1.01355.L')
context.set_state('ProjectSummary', 'piname', 'unknown')
context.set_state('ProjectSummary', 'proposal_title', 'unknown')
context.set_state('ProjectStructure', 'ous_part_id', 'X1947779902')
context.set_state('ProjectStructure', 'ous_title', 'Undefined')
context.set_state('ProjectStructure', 'ppr_file', '/opt/dared/opt/c5r1/mnt/dataproc/2017.1.01355.L_2018_03_26T10_21_31.408/SOUS_uid___A001_X1296_X1c7/GOUS_uid___A001_X1296_X1c8/MOUS_uid___A001_X1296_X1cb/working/PPR_uid___A001_X1296_X1cc.xml')
context.set_state('ProjectStructure', 'ps_entity_id', 'uid://A001/X1220/Xddd')
context.set_state('ProjectStructure', 'recipe_name', 'hifa_calimage')
context.set_state('ProjectStructure', 'ous_entity_id', 'uid://A001/X1220/Xdd9')
context.set_state('ProjectStructure', 'ousstatus_entity_id', 'uid://A001/X1296/X1cb')
try:
hifa_importdata(vis=['uid___A002_Xc92fe3_Xe062', 'uid___A002_Xcaf094_X3198'], session=['session_1', 'session_2'])
fixsyscaltimes(vis = 'uid___A002_Xc92fe3_Xe062.ms')# SACM/JAO - Fixes
fixsyscaltimes(vis = 'uid___A002_Xcaf094_X3198.ms')# SACM/JAO - Fixes
h_save() # SACM/JAO - Finish weblog after fixes
h_init() # SACM/JAO - Restart weblog after fixes
hifa_importdata(vis=['uid___A002_Xc92fe3_Xe062', 'uid___A002_Xcaf094_X3198'], session=['session_1', 'session_2'])
hifa_flagdata(pipelinemode="automatic")
hifa_fluxcalflag(pipelinemode="automatic")
hif_rawflagchans(pipelinemode="automatic")
hif_refant(pipelinemode="automatic")
h_tsyscal(pipelinemode="automatic")
hifa_tsysflag(pipelinemode="automatic")
hifa_antpos(pipelinemode="automatic")
hifa_wvrgcalflag(pipelinemode="automatic")
hif_lowgainflag(pipelinemode="automatic")
hif_setmodels(pipelinemode="automatic")
hifa_bandpassflag(pipelinemode="automatic")
hifa_spwphaseup(pipelinemode="automatic")
hifa_gfluxscaleflag(pipelinemode="automatic")
hifa_gfluxscale(pipelinemode="automatic")
hifa_timegaincal(pipelinemode="automatic")
hif_applycal(pipelinemode="automatic")
hifa_imageprecheck(pipelinemode="automatic")
hif_makeimlist(intent='PHASE,BANDPASS,CHECK')
hif_makeimages(pipelinemode="automatic")
hif_checkproductsize(maxcubelimit=40.0, maxproductsize=400.0, maxcubesize=30.0)
hifa_exportdata(pipelinemode="automatic")
hif_mstransform(pipelinemode="automatic")
hifa_flagtargets(pipelinemode="automatic")
hif_makeimlist(specmode='mfs')
hif_findcont(pipelinemode="automatic")
hif_uvcontfit(pipelinemode="automatic")
hif_uvcontsub(pipelinemode="automatic")
hif_makeimages(pipelinemode="automatic")
hif_makeimlist(specmode='cont')
hif_makeimages(pipelinemode="automatic")
hif_makeimlist(pipelinemode="automatic")
hif_makeimages(pipelinemode="automatic")
hif_makeimlist(specmode='repBW')
hif_makeimages(pipelinemode="automatic")
finally:
h_save()
|
[
"keflavich@gmail.com"
] |
keflavich@gmail.com
|
fff35f266b7a945ce23d1b0459b2050cd40e8393
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_mustered.py
|
3c16f077432205ec0353f9eb04415f43f24174b1
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
from xai.brain.wordbase.verbs._muster import _MUSTER
# class header
class _MUSTERED(_MUSTER, ):
def __init__(self,):
_MUSTER.__init__(self)
self.name = "MUSTERED"
self.specie = 'verbs'
self.basic = "muster"
self.jsondata = {}
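
# Illustrative sketch (not part of the original file): the generated class only
# carries metadata inherited from its base verb entry (assuming _MUSTER's
# __init__ takes no extra arguments, as the call above suggests).
if __name__ == "__main__":
    w = _MUSTERED()
    print(w.name, w.specie, w.basic)  # MUSTERED verbs muster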
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
26d5648ee91fce4d36363b6590dcc75e80f1434a
|
58916bca9a54b2d35dd2c867d7d54c2653cfdb49
|
/DownloadBlobs.py
|
51486086cd07ec857a9312e87ed258576fef04ac
|
[] |
no_license
|
DashboardAnalytics/modelPipeline
|
b6ca968f04fafe123ce431a8033541630adeae41
|
2eb35a4f1aa31548695cd5cd0fcb8568a4f4c9f3
|
refs/heads/master
| 2020-09-29T04:04:20.088951
| 2019-12-17T01:07:27
| 2019-12-17T01:07:27
| 226,945,821
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
from google.cloud import storage
bucketName = "streamed-videos"
# Download every blob in the bucket into the local Results/ directory
def download(bucketName):
storageClient = storage.Client()
bucket = storageClient.get_bucket(bucketName)
blobs = storageClient.list_blobs(bucketName)
for blob in blobs:
print("Downloading blob:", blob.name)
        # NOTE: the local 'Results/' directory must already exist
blob.download_to_filename("Results/"+blob.name)
return True
if(download(bucketName)):
print("Downloads complete!")
|
[
"sebastian.garay.p@usach.cl"
] |
sebastian.garay.p@usach.cl
|
6649e2913412a7d351bf1c8939a64e26bd4fa7a4
|
2759be4ce88912798687cde10b6cda436eb02742
|
/dnppy_install/core/list_files.py
|
cf308f1375a7fdced4d9da3bac83136058e8ba42
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-us-govt-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
jordanbudi/dnppy
|
427033235aba0bdc2b9bca13e3f80c6abb191c20
|
9383f19296b30ae806d2a0563aa8c7b07e89c6ae
|
refs/heads/master
| 2020-02-26T17:29:41.554760
| 2015-07-06T23:51:19
| 2015-07-06T23:51:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,803
|
py
|
__author__ = 'jwely'
import os
from exists import exists
from enf_list import enf_list
def list_files(recursive, Dir, Contains = False, DoesNotContain = False):
"""
Simple file listing function with more versatility than python builtins or arcpy.List
This function sifts through a directory and returns a list of filepaths for all files
meeting the input criteria. Useful for discriminatory iteration or recursive searches.
Could be used to find all tiles with a given datestring such as 'MOD11A1.A2012', or
perhaps all Band 4 tiles from a directory containing landsat 8 data.
Inputs:
recursive 'True' if search should search subfolders within the directory
'False' if search should ignore files in subfolders.
Dir The directory in which to search for files meeting the criteria
    Contains          search criteria to limit returned file list. File names must
                      contain parameters listed here. If no criteria exist, use 'False'
    DoesNotContain    search criteria to limit returned file list. File names must not
                      contain parameters listed here. If no criteria exist, use 'False'
Outputs:
filelist An array of full filepaths meeting the criteria.
Example Usage:
from dnppy import core
filelist = core.list_files(True,r'E:\Landsat7','B1',['gz','xml','ovr'])
The above statement will find all the Band 1 tifs in a landsat data directory
without including the associated metadata and uncompressed gz files.
"filelist" variable will contain full filepaths to all files found.
"""
    # set up empty lists
filelist = []
templist = []
# ensure input directory actually exists
if not exists(Dir):
raise Exception("{0} is not a valid file or folder!".format(Dir))
# Ensure single strings are in list format for the loops below
if Contains:
Contains = enf_list(Contains)
if DoesNotContain:
DoesNotContain = enf_list(DoesNotContain)
DoesNotContain.append('sr.lock') # make sure lock files don't get counted
else:
DoesNotContain=['sr.lock'] # make sure lock files don't get counted
# use os.walk commands to search through whole directory if recursive
if recursive:
for root,dirs,files in os.walk(Dir):
for basename in files:
filename = os.path.join(root,basename)
# if both conditions exist, add items which meet Contains criteria
if Contains and DoesNotContain:
for i in Contains:
if i in basename:
templist.append(filename)
# if the entire array of 'Contains' terms were found, add to list
if len(templist)==len(Contains):
filelist.append(filename)
templist=[]
                    # remove items which do not meet the DoesNotContain criteria
for j in DoesNotContain:
if j in basename:
try: filelist.remove(filename)
except: pass
# If both conditions do not exist (one is false)
else:
# determine if a file is good. if it is, add it to the list.
if Contains:
for i in Contains:
if i in basename:
templist.append(filename)
# if the entire array of 'Contains' terms were found, add to list
if len(templist)==len(Contains):
filelist.append(filename)
templist=[]
# add all files to the list, then remove the bad ones.
elif DoesNotContain:
filelist.append(filename)
for j in DoesNotContain:
if j in basename:
try: filelist.remove(filename)
except: pass
else:
filelist.append(filename)
                    # if neither condition exists
if not Contains and not DoesNotContain:
filelist.append(filename)
# use a simple listdir if recursive is False
else:
# list only files in current directory, not subdir and check criteria
try:
for basename in os.listdir(Dir):
filename = os.path.join(Dir,basename)
if os.path.isfile(filename):
if Contains:
for i in Contains:
if i in basename:
templist.append(filename)
# if the entire array of 'Contains' terms were found, add to list
if len(templist)==len(Contains):
filelist.append(filename)
templist=[]
else:
filelist.append(filename)
# Remove any files from the filelist that fail DoesNotContain criteria
if DoesNotContain:
for j in DoesNotContain:
if j in basename:
try: filelist.remove(filename)
except: pass
except: pass
    # Print a quick status summary before finishing up
print('Files found which meet all input criteria: {0}'.format(len(filelist)))
return filelist
|
[
"jeff.ely.08@gmail.com"
] |
jeff.ely.08@gmail.com
|
f423f079d57cc99517810661096e85715d679271
|
a1657a0c5c8f3f8b51b98074293e2f2e9b16e6f4
|
/eks/demo/.cache/kubeflow/kubeflow-9804feb9fc23fc30075632a857087f4b529294e2/testing/kfctl/conftest.py
|
fbc70d557163bf70aef9ccc4fcb87e8378ccae42
|
[
"Apache-2.0"
] |
permissive
|
PipelineAI/pipeline
|
e8067636f5844dea0653aef84bd894ca2e700fc6
|
0f26e3eaad727c1d10950f592fe1949ece8153aa
|
refs/heads/master
| 2023-01-07T15:27:33.741088
| 2022-10-25T23:01:51
| 2022-10-25T23:01:51
| 38,730,494
| 2,596
| 512
|
Apache-2.0
| 2020-01-30T23:00:08
| 2015-07-08T03:49:23
|
Jsonnet
|
UTF-8
|
Python
| false
| false
| 1,852
|
py
|
import pytest
def pytest_addoption(parser):
parser.addoption(
"--app_path", action="store", default="",
help="Path where the KF application should be stored")
parser.addoption(
"--app_name", action="store", default="",
help="Name of the KF application")
parser.addoption(
"--kfctl_path", action="store", default="",
help="Path to kfctl.")
parser.addoption(
"--namespace", action="store", default="kubeflow",
help="Namespace to use.")
parser.addoption(
"--project", action="store", default="kubeflow-ci-deployment",
help="GCP project to deploy Kubeflow to")
parser.addoption(
"--config_path", action="store", default="",
help="The config to use for kfctl init")
parser.addoption(
"--use_basic_auth", action="store", default="False",
help="Use basic auth.")
parser.addoption(
"--use_istio", action="store", default="False",
help="Use istio.")
@pytest.fixture
def app_path(request):
return request.config.getoption("--app_path")
@pytest.fixture
def app_name(request):
return request.config.getoption("--app_name")
@pytest.fixture
def kfctl_path(request):
return request.config.getoption("--kfctl_path")
@pytest.fixture
def namespace(request):
return request.config.getoption("--namespace")
@pytest.fixture
def project(request):
return request.config.getoption("--project")
@pytest.fixture
def config_path(request):
return request.config.getoption("--config_path")
@pytest.fixture
def use_basic_auth(request):
value = request.config.getoption("--use_basic_auth").lower()
if value in ["t", "true"]:
return True
else:
return False
@pytest.fixture
def use_istio(request):
value = request.config.getoption("--use_istio").lower()
if value in ["t", "true"]:
return True
else:
return False
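
# Illustrative sketch (not part of the original conftest): pytest injects these
# fixtures into tests by parameter name, e.g. in a hypothetical test module:
#
#     def test_kfctl_path_provided(kfctl_path, namespace):
#         assert kfctl_path, "pass --kfctl_path on the pytest command line"
#         assert namespace  # defaults to "kubeflow"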
|
[
"chris@fregly.com"
] |
chris@fregly.com
|
23e945cb7d99b04e8f267992715e80682770df1c
|
28c0bcb13917a277cc6c8f0a34e3bb40e992d9d4
|
/koku/reporting/migrations/0001_initial.py
|
4508d4ab77013aa3f383bf70da436edc07f0ac51
|
[
"Apache-2.0"
] |
permissive
|
luisfdez/koku
|
43a765f6ba96c2d3b2deda345573e1d97992e22f
|
2979f03fbdd1c20c3abc365a963a1282b426f321
|
refs/heads/main
| 2023-06-22T13:19:34.119984
| 2021-07-20T12:01:35
| 2021-07-20T12:01:35
| 387,807,027
| 0
| 1
|
Apache-2.0
| 2021-07-20T13:50:15
| 2021-07-20T13:50:14
| null |
UTF-8
|
Python
| false
| false
| 182,140
|
py
|
# Generated by Django 3.1.2 on 2020-10-05 19:29
import os
import pkgutil
import uuid
from decimal import Decimal
import django.contrib.postgres.fields.jsonb
import django.contrib.postgres.indexes
import django.db.models.deletion
from django.db import connection
from django.db import migrations
from django.db import models
import reporting.partition.models
from koku import migration_sql_helpers as msh
from koku import pg_partition as ppart
from reporting.provider.all.openshift.models import VIEWS as OCP_ALL_VIEWS
from reporting.provider.aws.models import VIEWS as AWS_VIEWS
from reporting.provider.aws.openshift.models import VIEWS as OCP_AWS_VIEWS
from reporting.provider.azure.models import VIEWS as AZURE_VIEWS
from reporting.provider.azure.openshift.models import VIEWS as OCP_AZURE_VIEWS
from reporting.provider.ocp.models import VIEWS as OCP_VIEWS
# Functions from the following migrations need manual copying.
# Move them and any dependencies into this file, then update the
# RunPython operations to refer to the local versions:
def add_views(apps, schema_editor):
"""Create database VIEWS from files."""
for view in AWS_VIEWS:
view_sql = pkgutil.get_data("reporting.provider.aws", f"sql/views/{view}.sql")
view_sql = view_sql.decode("utf-8")
with connection.cursor() as cursor:
cursor.execute(view_sql)
for view in AZURE_VIEWS:
view_sql = pkgutil.get_data("reporting.provider.azure", f"sql/views/{view}.sql")
view_sql = view_sql.decode("utf-8")
with connection.cursor() as cursor:
cursor.execute(view_sql)
for view in OCP_VIEWS:
view_sql = pkgutil.get_data("reporting.provider.ocp", f"sql/views/{view}.sql")
view_sql = view_sql.decode("utf-8")
with connection.cursor() as cursor:
cursor.execute(view_sql)
for view in OCP_AWS_VIEWS:
view_sql = pkgutil.get_data("reporting.provider.aws.openshift", f"sql/views/{view}.sql")
view_sql = view_sql.decode("utf-8")
with connection.cursor() as cursor:
cursor.execute(view_sql)
for view in OCP_AZURE_VIEWS:
view_sql = pkgutil.get_data("reporting.provider.azure.openshift", f"sql/views/{view}.sql")
view_sql = view_sql.decode("utf-8")
with connection.cursor() as cursor:
cursor.execute(view_sql)
for view in OCP_ALL_VIEWS:
view_sql = pkgutil.get_data("reporting.provider.all.openshift", f"sql/views/{view}.sql")
view_sql = view_sql.decode("utf-8")
with connection.cursor() as cursor:
cursor.execute(view_sql)
# =====================================================
# Change reporting_ocpusagelineitem_daily_summary
# to a partitioned table with the same definition
# =====================================================
def convert_ocpusage_lids_to_partitioned(apps, schema_editor):
# Resolve the current schema name
target_schema = ppart.resolve_schema(ppart.CURRENT_SCHEMA)
# This is the table we will model from
source_table = "reporting_ocpusagelineitem_daily_summary"
# This is the target table's name (it will be renamed during the conversion to the source table name)
target_table = f"p_{source_table}"
# We'll want a new sequence copied from the original sequence
new_seq = ppart.SequenceDefinition(
target_schema,
f"{target_table}_id_seq",
copy_sequence={"schema_name": target_schema, "table_name": source_table, "column_name": "id"},
)
# We want to change the target tables's 'id' column default
target_identity_col = ppart.ColumnDefinition(target_schema, target_table, "id", default=ppart.Default(new_seq))
# We also need to include the identity col as part of the primary key definition
new_pk = ppart.PKDefinition(f"{target_table}_pkey", ["usage_start", "id"])
# Init the converter
p_converter = ppart.ConvertToPartition(
source_table,
"usage_start",
target_table_name=target_table,
partition_type=ppart.PARTITION_RANGE,
pk_def=new_pk,
col_def=[target_identity_col],
target_schema=target_schema,
source_schema=target_schema,
)
# Push the button, Frank.
p_converter.convert_to_partition()
# =====================================================
# Change reporting_awscostentrylineitem_daily_summary
# to a partitioned table with the same definition
# =====================================================
def convert_awscostentry_lids_to_partitioned(apps, schema_editor):
# Resolve the current schema name
target_schema = ppart.resolve_schema(ppart.CURRENT_SCHEMA)
# This is the table we will model from
source_table = "reporting_awscostentrylineitem_daily_summary"
# This is the target table's name (it will be renamed during the conversion to the source table name)
target_table = f"p_{source_table}"
# We'll want a new sequence copied from the original sequence
new_seq = ppart.SequenceDefinition(
target_schema,
f"{target_table}_id_seq",
copy_sequence={"schema_name": target_schema, "table_name": source_table, "column_name": "id"},
)
# We want to change the target tables's 'id' column default
target_identity_col = ppart.ColumnDefinition(target_schema, target_table, "id", default=ppart.Default(new_seq))
# We also need to include the identity col as part of the primary key definition
new_pk = ppart.PKDefinition(f"{target_table}_pkey", ["usage_start", "id"])
# Init the converter
p_converter = ppart.ConvertToPartition(
source_table,
"usage_start",
target_table_name=target_table,
partition_type=ppart.PARTITION_RANGE,
pk_def=new_pk,
col_def=[target_identity_col],
target_schema=target_schema,
source_schema=target_schema,
)
# Push the button, Frank.
p_converter.convert_to_partition()
# =====================================================
# Change reporting_azurecostentrylineitem_daily_summary
# to a partitioned table with the same definition
# =====================================================
def convert_azurecostentry_lids_to_partitioned(apps, schema_editor):
# Resolve the current schema name
target_schema = ppart.resolve_schema(ppart.CURRENT_SCHEMA)
# This is the table we will model from
source_table = "reporting_azurecostentrylineitem_daily_summary"
# This is the target table's name (it will be renamed during the conversion to the source table name)
target_table = f"p_{source_table}"
# We'll want a new sequence copied from the original sequence
new_seq = ppart.SequenceDefinition(
target_schema,
f"{target_table}_id_seq",
copy_sequence={"schema_name": target_schema, "table_name": source_table, "column_name": "id"},
)
# We want to change the target tables's 'id' column default
target_identity_col = ppart.ColumnDefinition(target_schema, target_table, "id", default=ppart.Default(new_seq))
# We also need to include the identity col as part of the primary key definition
new_pk = ppart.PKDefinition(f"{target_table}_pkey", ["usage_start", "id"])
# Init the converter
p_converter = ppart.ConvertToPartition(
source_table,
"usage_start",
target_table_name=target_table,
partition_type=ppart.PARTITION_RANGE,
pk_def=new_pk,
col_def=[target_identity_col],
target_schema=target_schema,
source_schema=target_schema,
)
# Push the button, Frank.
p_converter.convert_to_partition()
def apply_partitioned_table_triggers(apps, schema_editor):
path = msh.find_db_functions_dir()
for funcfile in ("partitioned_tables_manage_trigger.sql", "partitioned_tables_active_trigger.sql"):
msh.apply_sql_file(schema_editor, os.path.join(path, funcfile))
class Migration(migrations.Migration):
initial = True
replaces = [
("reporting", "0001_squashed_0090_ocpallcostlineitemdailysummary_ocpallcostlineitemprojectdailysummary"),
("reporting", "0091_aws_compute_cost_correction"),
("reporting", "0092_auto_20200203_1758"),
("reporting", "0093_auto_20200210_1920"),
("reporting", "0094_auto_20200211_1449"),
("reporting", "0095_auto_20200212_1606"),
("reporting", "0096_auto_20200218_2227"),
("reporting", "0097_auto_20200221_1331"),
("reporting", "0098_auto_20200221_2034"),
("reporting", "0099_ocp_performance"),
("reporting", "0100_aws_azure_query_perforance"),
("reporting", "0101_ocpenabledtagkeys"),
("reporting", "0102_auto_20200228_1812"),
(
"reporting",
"0103_azurecomputesummary_azurecostsummary_azurecostsummarybyaccount_azurecostsummarybylocation_azurecosts",
),
(
"reporting",
"0104_ocpallcomputesummary_ocpallcostsummary_ocpallcostsummarybyaccount_ocpallcostsummarybyregion_ocpallco",
),
(
"reporting",
"0105_ocpcostsummary_ocpcostsummarybynode_ocpcostsummarybyproject_ocppodsummary_ocppodsummarybyproject_ocp",
),
("reporting", "0106_ocpawscostsummary"),
(
"reporting",
"0107_ocpazurecomputesummary_ocpazurecostsummary_ocpazurecostsummarybyaccount_ocpazurecostsummarybylocatio",
),
("reporting", "0108_auto_20200405_1316"),
("reporting", "0109_remove_ocpusagelineitemdailysummary_pod"),
("reporting", "0110_summary_indexes"),
("reporting", "0111_drop_azure_service_not_null"),
("reporting", "0112_auto_20200416_1733"),
("reporting", "0113_aws_organizational_units"),
("reporting", "0114_adding_source_uuid"),
("reporting", "0115_populate_source_uuid"),
("reporting", "0116_ocpall_unique_index"),
("reporting", "0117_auto_20200617_1452"),
("reporting", "0118_auto_20200630_1819"),
("reporting", "0119_auto_20200707_1934"),
("reporting", "0120_auto_20200724_1354"),
("reporting", "0121_auto_20200728_2258"),
("reporting", "0122_auto_20200803_2307"),
("reporting", "0123_auto_20200727_2302"),
("reporting", "0124_auto_20200806_1943"),
("reporting", "0125_azure_unit_normalization"),
("reporting", "0126_clear_org_units"),
("reporting", "0127_ocpazure_unit_normalization"),
("reporting", "0128_auto_20200820_1540"),
("reporting", "0129_partitioned_daily_summary"),
("reporting", "0130_auto_20200826_1819"),
("reporting", "0131_auto_20200827_1253"),
("reporting", "0132_auto_20200901_1811"),
("reporting", "0133_auto_20200901_2245"),
("reporting", "0134_auto_20200902_1602"),
("reporting", "0135_auto_20200902_1808"),
("reporting", "0136_auto_20200909_1400"),
("reporting", "0137_partitioned_tables_triggers"),
("reporting", "0138_auto_20200918_1724"),
("reporting", "0139_auto_20200925_1432"),
("reporting", "0140_auto_20200925_1825"),
("reporting", "0141_auto_20201002_1925"),
("reporting", "0142_auto_20201002_1925"),
]
dependencies = [("api", "0001_initial")]
operations = [
###### begin customization; preserve this if you squash migrations ######
migrations.RunSQL(sql="\ncreate extension if not exists pg_trgm schema public;\n"),
###### end customization ######
migrations.CreateModel(
name="AWSAccountAlias",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("account_id", models.CharField(max_length=50, unique=True)),
("account_alias", models.CharField(max_length=63, null=True)),
],
),
migrations.CreateModel(
name="AWSComputeSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("instance_type", models.CharField(max_length=50, null=True)),
(
"resource_ids",
django.contrib.postgres.fields.ArrayField(
base_field=models.CharField(max_length=256), null=True, size=None
),
),
("resource_count", models.IntegerField(null=True)),
("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_aws_compute_summary", "managed": False},
),
migrations.CreateModel(
name="AWSComputeSummaryByAccount",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("usage_account_id", models.CharField(max_length=50)),
("instance_type", models.CharField(max_length=50, null=True)),
(
"resource_ids",
django.contrib.postgres.fields.ArrayField(
base_field=models.CharField(max_length=256), null=True, size=None
),
),
("resource_count", models.IntegerField(null=True)),
("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_aws_compute_summary_by_account", "managed": False},
),
migrations.CreateModel(
name="AWSComputeSummaryByRegion",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("usage_account_id", models.CharField(max_length=50)),
("region", models.CharField(max_length=50, null=True)),
("availability_zone", models.CharField(max_length=50, null=True)),
("instance_type", models.CharField(max_length=50, null=True)),
(
"resource_ids",
django.contrib.postgres.fields.ArrayField(
base_field=models.CharField(max_length=256), null=True, size=None
),
),
("resource_count", models.IntegerField(null=True)),
("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_aws_compute_summary_by_region", "managed": False},
),
migrations.CreateModel(
name="AWSComputeSummaryByService",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("usage_account_id", models.CharField(max_length=50)),
("product_code", models.CharField(max_length=50)),
("product_family", models.CharField(max_length=150, null=True)),
("instance_type", models.CharField(max_length=50, null=True)),
(
"resource_ids",
django.contrib.postgres.fields.ArrayField(
base_field=models.CharField(max_length=256), null=True, size=None
),
),
("resource_count", models.IntegerField(null=True)),
("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_aws_compute_summary_by_service", "managed": False},
),
migrations.CreateModel(
name="AWSCostEntry",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("interval_start", models.DateTimeField()),
("interval_end", models.DateTimeField()),
],
),
migrations.CreateModel(
name="AWSCostEntryBill",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("billing_resource", models.CharField(default="aws", max_length=50)),
("bill_type", models.CharField(max_length=50)),
("payer_account_id", models.CharField(max_length=50)),
("billing_period_start", models.DateTimeField()),
("billing_period_end", models.DateTimeField()),
("summary_data_creation_datetime", models.DateTimeField(null=True)),
("summary_data_updated_datetime", models.DateTimeField(null=True)),
("finalized_datetime", models.DateTimeField(null=True)),
("derived_cost_datetime", models.DateTimeField(null=True)),
],
),
migrations.CreateModel(
name="AWSCostEntryLineItem",
fields=[
("id", models.BigAutoField(primary_key=True, serialize=False)),
("tags", models.JSONField(null=True)),
("invoice_id", models.CharField(max_length=63, null=True)),
("line_item_type", models.CharField(max_length=50)),
("usage_account_id", models.CharField(max_length=50)),
("usage_start", models.DateTimeField()),
("usage_end", models.DateTimeField()),
("product_code", models.CharField(max_length=50)),
("usage_type", models.CharField(max_length=50, null=True)),
("operation", models.CharField(max_length=50, null=True)),
("availability_zone", models.CharField(max_length=50, null=True)),
("resource_id", models.CharField(max_length=256, null=True)),
("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("normalization_factor", models.FloatField(null=True)),
("normalized_usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("unblended_rate", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("blended_rate", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("blended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("public_on_demand_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("public_on_demand_rate", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("reservation_amortized_upfront_fee", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
(
"reservation_amortized_upfront_cost_for_usage",
models.DecimalField(decimal_places=9, max_digits=24, null=True),
),
(
"reservation_recurring_fee_for_usage",
models.DecimalField(decimal_places=9, max_digits=24, null=True),
),
("reservation_unused_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("reservation_unused_recurring_fee", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("tax_type", models.TextField(null=True)),
],
),
migrations.CreateModel(
name="AWSCostEntryLineItemDaily",
fields=[
("id", models.BigAutoField(primary_key=True, serialize=False)),
("line_item_type", models.CharField(max_length=50)),
("usage_account_id", models.CharField(max_length=50)),
("usage_start", models.DateField()),
("usage_end", models.DateField(null=True)),
("product_code", models.CharField(max_length=50)),
("usage_type", models.CharField(max_length=50, null=True)),
("operation", models.CharField(max_length=50, null=True)),
("availability_zone", models.CharField(max_length=50, null=True)),
("resource_id", models.CharField(max_length=256, null=True)),
("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("normalization_factor", models.FloatField(null=True)),
("normalized_usage_amount", models.FloatField(null=True)),
("currency_code", models.CharField(max_length=10)),
("unblended_rate", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("blended_rate", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("blended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("public_on_demand_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("public_on_demand_rate", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("tax_type", models.TextField(null=True)),
("tags", models.JSONField(null=True)),
],
options={"db_table": "reporting_awscostentrylineitem_daily"},
),
migrations.CreateModel(
name="AWSCostEntryLineItemDailySummary",
fields=[
("id", models.BigAutoField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField(null=True)),
("usage_account_id", models.CharField(max_length=50)),
("product_code", models.CharField(max_length=50)),
("product_family", models.CharField(max_length=150, null=True)),
("availability_zone", models.CharField(max_length=50, null=True)),
("region", models.CharField(max_length=50, null=True)),
("instance_type", models.CharField(max_length=50, null=True)),
("unit", models.CharField(max_length=63, null=True)),
(
"resource_ids",
django.contrib.postgres.fields.ArrayField(
base_field=models.CharField(max_length=256), null=True, size=None
),
),
("resource_count", models.IntegerField(null=True)),
("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("normalization_factor", models.FloatField(null=True)),
("normalized_usage_amount", models.FloatField(null=True)),
("currency_code", models.CharField(max_length=10)),
("unblended_rate", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("blended_rate", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("blended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("public_on_demand_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("public_on_demand_rate", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("tax_type", models.TextField(null=True)),
("tags", models.JSONField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_awscostentrylineitem_daily_summary"},
),
migrations.CreateModel(
name="AWSCostEntryPricing",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("term", models.CharField(max_length=63, null=True)),
("unit", models.CharField(max_length=63, null=True)),
],
),
migrations.CreateModel(
name="AWSCostEntryProduct",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("sku", models.CharField(max_length=128, null=True)),
("product_name", models.TextField(null=True)),
("product_family", models.CharField(max_length=150, null=True)),
("service_code", models.CharField(max_length=50, null=True)),
("region", models.CharField(max_length=50, null=True)),
("instance_type", models.CharField(max_length=50, null=True)),
("memory", models.FloatField(null=True)),
("memory_unit", models.CharField(max_length=24, null=True)),
("vcpu", models.PositiveIntegerField(null=True)),
],
),
migrations.CreateModel(
name="AWSCostEntryReservation",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("reservation_arn", models.TextField(unique=True)),
("number_of_reservations", models.PositiveIntegerField(null=True)),
("units_per_reservation", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("start_time", models.DateTimeField(null=True)),
("end_time", models.DateTimeField(null=True)),
],
),
migrations.CreateModel(
name="AWSCostSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_aws_cost_summary", "managed": False},
),
migrations.CreateModel(
name="AWSCostSummaryByAccount",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("usage_account_id", models.CharField(max_length=50)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_aws_cost_summary_by_account", "managed": False},
),
migrations.CreateModel(
name="AWSCostSummaryByRegion",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("usage_account_id", models.CharField(max_length=50)),
("region", models.CharField(max_length=50, null=True)),
("availability_zone", models.CharField(max_length=50, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_aws_cost_summary_by_region", "managed": False},
),
migrations.CreateModel(
name="AWSCostSummaryByService",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("usage_account_id", models.CharField(max_length=50)),
("product_code", models.CharField(max_length=50)),
("product_family", models.CharField(max_length=150, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_aws_cost_summary_by_service", "managed": False},
),
migrations.CreateModel(
name="AWSDatabaseSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("usage_account_id", models.CharField(max_length=50)),
("product_code", models.CharField(max_length=50)),
("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_aws_database_summary", "managed": False},
),
migrations.CreateModel(
name="AWSNetworkSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("usage_account_id", models.CharField(max_length=50)),
("product_code", models.CharField(max_length=50)),
("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_aws_network_summary", "managed": False},
),
migrations.CreateModel(
name="AWSOrganizationalUnit",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("org_unit_name", models.CharField(max_length=250)),
("org_unit_id", models.CharField(max_length=50)),
("org_unit_path", models.TextField()),
("level", models.PositiveSmallIntegerField()),
("created_timestamp", models.DateField(auto_now_add=True)),
("deleted_timestamp", models.DateField(null=True)),
],
),
migrations.CreateModel(
name="AWSStorageSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("product_family", models.CharField(max_length=150, null=True)),
("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_aws_storage_summary", "managed": False},
),
migrations.CreateModel(
name="AWSStorageSummaryByAccount",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("usage_account_id", models.CharField(max_length=50)),
("product_family", models.CharField(max_length=150, null=True)),
("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_aws_storage_summary_by_account", "managed": False},
),
migrations.CreateModel(
name="AWSStorageSummaryByRegion",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("usage_account_id", models.CharField(max_length=50)),
("region", models.CharField(max_length=50, null=True)),
("availability_zone", models.CharField(max_length=50, null=True)),
("product_family", models.CharField(max_length=150, null=True)),
("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_aws_storage_summary_by_region", "managed": False},
),
migrations.CreateModel(
name="AWSStorageSummaryByService",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("usage_account_id", models.CharField(max_length=50)),
("product_code", models.CharField(max_length=50)),
("product_family", models.CharField(max_length=150, null=True)),
("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_aws_storage_summary_by_service", "managed": False},
),
migrations.CreateModel(
name="AWSTagsSummary",
fields=[
("uuid", models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
("key", models.TextField()),
("values", django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), size=None)),
("usage_account_id", models.TextField(null=True)),
],
options={"db_table": "reporting_awstags_summary"},
),
migrations.CreateModel(
name="AWSTagsValues",
fields=[
("uuid", models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
("key", models.TextField()),
("value", models.TextField()),
(
"usage_account_ids",
django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), size=None),
),
(
"account_aliases",
django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), size=None),
),
],
options={"db_table": "reporting_awstags_values"},
),
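        # Azure cost entry, daily/summary, and tag models.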
migrations.CreateModel(
name="AzureComputeSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("instance_type", models.TextField(null=True)),
(
"instance_ids",
django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), null=True, size=None),
),
("instance_count", models.IntegerField(null=True)),
("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit_of_measure", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_azure_compute_summary", "managed": False},
),
migrations.CreateModel(
name="AzureCostEntryBill",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("billing_period_start", models.DateTimeField()),
("billing_period_end", models.DateTimeField()),
("summary_data_creation_datetime", models.DateTimeField(null=True)),
("summary_data_updated_datetime", models.DateTimeField(null=True)),
("finalized_datetime", models.DateTimeField(null=True)),
("derived_cost_datetime", models.DateTimeField(null=True)),
],
),
migrations.CreateModel(
name="AzureCostEntryLineItemDaily",
fields=[
("id", models.BigAutoField(primary_key=True, serialize=False)),
("subscription_guid", models.TextField()),
("tags", models.JSONField(null=True)),
("usage_date", models.DateField()),
("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
],
options={"db_table": "reporting_azurecostentrylineitem_daily"},
),
migrations.CreateModel(
name="AzureCostEntryLineItemDailySummary",
fields=[
("id", models.BigAutoField(primary_key=True, serialize=False)),
("subscription_guid", models.TextField()),
("instance_type", models.TextField(null=True)),
("service_name", models.TextField(null=True)),
("resource_location", models.TextField(null=True)),
("tags", models.JSONField(null=True)),
("usage_start", models.DateField()),
("usage_end", models.DateField(null=True)),
("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
(
"instance_ids",
django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), null=True, size=None),
),
("instance_count", models.IntegerField(null=True)),
("unit_of_measure", models.TextField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_azurecostentrylineitem_daily_summary"},
),
migrations.CreateModel(
name="AzureCostEntryProductService",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("instance_id", models.TextField(max_length=512)),
("resource_location", models.TextField(null=True)),
("consumed_service", models.TextField(null=True)),
("resource_type", models.TextField(null=True)),
("resource_group", models.TextField(null=True)),
("additional_info", models.JSONField(null=True)),
("service_tier", models.TextField(null=True)),
("service_name", models.TextField(null=True)),
("service_info1", models.TextField(null=True)),
("service_info2", models.TextField(null=True)),
("instance_type", models.TextField(null=True)),
],
),
migrations.CreateModel(
name="AzureCostSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_azure_cost_summary", "managed": False},
),
migrations.CreateModel(
name="AzureCostSummaryByAccount",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_azure_cost_summary_by_account", "managed": False},
),
migrations.CreateModel(
name="AzureCostSummaryByLocation",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("resource_location", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_azure_cost_summary_by_location", "managed": False},
),
migrations.CreateModel(
name="AzureCostSummaryByService",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("service_name", models.TextField()),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_azure_cost_summary_by_service", "managed": False},
),
migrations.CreateModel(
name="AzureDatabaseSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("service_name", models.TextField()),
("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit_of_measure", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_azure_database_summary", "managed": False},
),
migrations.CreateModel(
name="AzureMeter",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("meter_id", models.UUIDField(editable=False, unique=True)),
("meter_name", models.TextField()),
("meter_category", models.TextField(null=True)),
("meter_subcategory", models.TextField(null=True)),
("meter_region", models.TextField(null=True)),
("resource_rate", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
("unit_of_measure", models.TextField(null=True)),
],
),
migrations.CreateModel(
name="AzureNetworkSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("service_name", models.TextField()),
("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit_of_measure", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_azure_network_summary", "managed": False},
),
migrations.CreateModel(
name="AzureStorageSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("service_name", models.TextField()),
("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit_of_measure", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_azure_storage_summary", "managed": False},
),
migrations.CreateModel(
name="AzureTagsSummary",
fields=[
("uuid", models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
("key", models.TextField()),
("values", django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), size=None)),
("subscription_guid", models.TextField(null=True)),
],
options={"db_table": "reporting_azuretags_summary"},
),
migrations.CreateModel(
name="AzureTagsValues",
fields=[
("uuid", models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
("key", models.TextField()),
("value", models.TextField()),
(
"subscription_guids",
django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), size=None),
),
],
options={"db_table": "reporting_azuretags_values"},
),
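        # OCP cost summary model (reporting_ocpcosts_summary).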
migrations.CreateModel(
name="CostSummary",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("namespace", models.CharField(max_length=253, null=True)),
("pod", models.CharField(max_length=253, null=True)),
("node", models.CharField(max_length=253, null=True)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("pod_charge_cpu_core_hours", models.DecimalField(decimal_places=9, max_digits=27, null=True)),
("pod_charge_memory_gigabyte_hours", models.DecimalField(decimal_places=9, max_digits=27, null=True)),
(
"persistentvolumeclaim_charge_gb_month",
models.DecimalField(decimal_places=9, max_digits=27, null=True),
),
("infra_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("project_infra_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=27, null=True)),
("project_markup_cost", models.DecimalField(decimal_places=9, max_digits=27, null=True)),
("pod_labels", models.JSONField(null=True)),
("monthly_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
],
options={"db_table": "reporting_ocpcosts_summary"},
),
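        # GCP bill, line item, and project models.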
migrations.CreateModel(
name="GCPCostEntryBill",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("billing_period_start", models.DateTimeField()),
("billing_period_end", models.DateTimeField()),
("summary_data_creation_datetime", models.DateTimeField(blank=True, null=True)),
("summary_data_updated_datetime", models.DateTimeField(blank=True, null=True)),
("finalized_datetime", models.DateTimeField(blank=True, null=True)),
("derived_cost_datetime", models.DateTimeField(blank=True, null=True)),
],
),
migrations.CreateModel(
name="GCPCostEntryLineItemDaily",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("line_item_type", models.CharField(max_length=256)),
("measurement_type", models.CharField(max_length=512)),
("consumption", models.BigIntegerField()),
("unit", models.CharField(blank=True, max_length=63, null=True)),
("cost", models.DecimalField(blank=True, decimal_places=9, max_digits=24, null=True)),
("currency", models.CharField(max_length=10)),
("description", models.CharField(blank=True, max_length=256, null=True)),
("start_time", models.DateTimeField()),
("end_time", models.DateTimeField()),
],
),
migrations.CreateModel(
name="GCPProject",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("account_id", models.CharField(max_length=20)),
("project_number", models.BigIntegerField()),
("project_id", models.CharField(max_length=256, unique=True)),
("project_name", models.CharField(max_length=256)),
("project_labels", models.CharField(blank=True, max_length=256, null=True)),
],
),
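        # OCP-on-All models combining OCP usage with AWS and Azure costs.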
migrations.CreateModel(
name="OCPAllComputeSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("product_code", models.CharField(max_length=50)),
("instance_type", models.CharField(max_length=50)),
("resource_id", models.CharField(max_length=253)),
("usage_amount", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("markup_cost", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("currency_code", models.CharField(max_length=10, null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpall_compute_summary", "managed": False},
),
migrations.CreateModel(
name="OCPAllCostLineItemDailySummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("source_type", models.TextField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
(
"namespace",
django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=253), size=None),
),
("node", models.CharField(max_length=253, null=True)),
("resource_id", models.CharField(max_length=253, null=True)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("usage_account_id", models.CharField(max_length=50)),
("product_code", models.CharField(max_length=50)),
("product_family", models.CharField(max_length=150, null=True)),
("instance_type", models.CharField(max_length=50, null=True)),
("region", models.CharField(max_length=50, null=True)),
("availability_zone", models.CharField(max_length=50, null=True)),
("tags", models.JSONField(null=True)),
("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("markup_cost", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("currency_code", models.CharField(max_length=10, null=True)),
("shared_projects", models.IntegerField(default=1)),
("project_costs", models.JSONField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpallcostlineitem_daily_summary", "managed": False},
),
migrations.CreateModel(
name="OCPAllCostLineItemProjectDailySummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("source_type", models.TextField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("data_source", models.CharField(max_length=64, null=True)),
("namespace", models.CharField(max_length=253)),
("node", models.CharField(max_length=253, null=True)),
("pod_labels", models.JSONField(null=True)),
("resource_id", models.CharField(max_length=253, null=True)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("usage_account_id", models.CharField(max_length=50)),
("product_code", models.CharField(max_length=50)),
("product_family", models.CharField(max_length=150, null=True)),
("instance_type", models.CharField(max_length=50, null=True)),
("region", models.CharField(max_length=50, null=True)),
("availability_zone", models.CharField(max_length=50, null=True)),
("usage_amount", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("project_markup_cost", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("pod_cost", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("currency_code", models.CharField(max_length=10, null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpallcostlineitem_project_daily_summary", "managed": False},
),
migrations.CreateModel(
name="OCPAllCostSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpall_cost_summary", "managed": False},
),
migrations.CreateModel(
name="OCPAllCostSummaryByAccount",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpall_cost_summary_by_account", "managed": False},
),
migrations.CreateModel(
name="OCPAllCostSummaryByRegion",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("region", models.CharField(max_length=50, null=True)),
("availability_zone", models.CharField(max_length=50, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpall_cost_summary_by_region", "managed": False},
),
migrations.CreateModel(
name="OCPAllCostSummaryByService",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("product_code", models.CharField(max_length=50)),
("product_family", models.CharField(max_length=150, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpall_cost_summary_by_service", "managed": False},
),
migrations.CreateModel(
name="OCPAllDatabaseSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("product_code", models.CharField(max_length=50)),
("usage_amount", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("markup_cost", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("currency_code", models.CharField(max_length=10, null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpall_database_summary", "managed": False},
),
migrations.CreateModel(
name="OCPAllNetworkSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("product_code", models.CharField(max_length=50)),
("usage_amount", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("markup_cost", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("currency_code", models.CharField(max_length=10, null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpall_network_summary", "managed": False},
),
migrations.CreateModel(
name="OCPAllStorageSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("product_family", models.CharField(max_length=150, null=True)),
("product_code", models.CharField(max_length=50)),
("usage_amount", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("markup_cost", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("currency_code", models.CharField(max_length=10, null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpall_storage_summary", "managed": False},
),
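        # OCP-on-AWS models correlating OCP usage with AWS costs.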
migrations.CreateModel(
name="OCPAWSComputeSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("instance_type", models.CharField(max_length=50, null=True)),
("resource_id", models.CharField(max_length=253, null=True)),
("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpaws_compute_summary", "managed": False},
),
migrations.CreateModel(
name="OCPAWSCostLineItemDailySummary",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
(
"namespace",
django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=253), size=None),
),
(
"pod",
django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=253), size=None),
),
("node", models.CharField(max_length=253, null=True)),
("resource_id", models.CharField(max_length=253, null=True)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("product_code", models.CharField(max_length=50)),
("product_family", models.CharField(max_length=150, null=True)),
("instance_type", models.CharField(max_length=50, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("availability_zone", models.CharField(max_length=50, null=True)),
("region", models.CharField(max_length=50, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("tags", models.JSONField(null=True)),
("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("normalized_usage_amount", models.FloatField(null=True)),
("currency_code", models.CharField(max_length=10, null=True)),
("unblended_cost", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("markup_cost", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("shared_projects", models.IntegerField(default=1)),
("project_costs", models.JSONField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpawscostlineitem_daily_summary"},
),
migrations.CreateModel(
name="OCPAWSCostLineItemProjectDailySummary",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("data_source", models.CharField(max_length=64, null=True)),
("namespace", models.CharField(max_length=253)),
("pod", models.CharField(max_length=253, null=True)),
("node", models.CharField(max_length=253, null=True)),
("pod_labels", models.JSONField(null=True)),
("resource_id", models.CharField(max_length=253, null=True)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("product_code", models.CharField(max_length=50)),
("product_family", models.CharField(max_length=150, null=True)),
("instance_type", models.CharField(max_length=50, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("availability_zone", models.CharField(max_length=50, null=True)),
("region", models.CharField(max_length=50, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("usage_amount", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("normalized_usage_amount", models.FloatField(null=True)),
("currency_code", models.CharField(max_length=10, null=True)),
("unblended_cost", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("markup_cost", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("project_markup_cost", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("pod_cost", models.DecimalField(decimal_places=15, max_digits=30, null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpawscostlineitem_project_daily_summary"},
),
migrations.CreateModel(
name="OCPAWSCostSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpaws_cost_summary", "managed": False},
),
migrations.CreateModel(
name="OCPAWSCostSummaryByAccount",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpaws_cost_summary_by_account", "managed": False},
),
migrations.CreateModel(
name="OCPAWSCostSummaryByRegion",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("region", models.CharField(max_length=50, null=True)),
("availability_zone", models.CharField(max_length=50, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpaws_cost_summary_by_region", "managed": False},
),
migrations.CreateModel(
name="OCPAWSCostSummaryByService",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("product_code", models.CharField(max_length=50)),
("product_family", models.CharField(max_length=150, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpaws_cost_summary_by_service", "managed": False},
),
migrations.CreateModel(
name="OCPAWSDatabaseSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("product_code", models.CharField(max_length=50)),
("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpaws_database_summary", "managed": False},
),
migrations.CreateModel(
name="OCPAWSNetworkSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("product_code", models.CharField(max_length=50)),
("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpaws_network_summary", "managed": False},
),
migrations.CreateModel(
name="OCPAWSStorageSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("product_family", models.CharField(max_length=150, null=True)),
("usage_amount", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency_code", models.CharField(max_length=10)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpaws_storage_summary", "managed": False},
),
migrations.CreateModel(
name="OCPAWSTagsSummary",
fields=[
("uuid", models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
("key", models.CharField(max_length=253)),
(
"values",
django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=253), size=None),
),
("usage_account_id", models.CharField(max_length=50, null=True)),
("namespace", models.TextField()),
("node", models.TextField(null=True)),
],
options={"db_table": "reporting_ocpawstags_summary"},
),
migrations.CreateModel(
name="OCPAWSTagsValues",
fields=[
("uuid", models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
("key", models.TextField()),
("value", models.TextField()),
(
"usage_account_ids",
django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), size=None),
),
(
"account_aliases",
django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), size=None),
),
("cluster_ids", django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), size=None)),
(
"cluster_aliases",
django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), size=None),
),
("namespaces", django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), size=None)),
(
"nodes",
django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), null=True, size=None),
),
],
options={"db_table": "reporting_ocpawstags_values"},
),
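        # OCP-on-Azure models correlating OCP usage with Azure costs.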
migrations.CreateModel(
name="OCPAzureComputeSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("subscription_guid", models.TextField()),
("instance_type", models.TextField(null=True)),
("resource_id", models.CharField(max_length=253, null=True)),
("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit_of_measure", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpazure_compute_summary", "managed": False},
),
migrations.CreateModel(
name="OCPAzureCostLineItemDailySummary",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
(
"namespace",
django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=253), size=None),
),
(
"pod",
django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=253), size=None),
),
("node", models.CharField(max_length=253, null=True)),
("resource_id", models.CharField(max_length=253, null=True)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("instance_type", models.TextField(null=True)),
("service_name", models.TextField(null=True)),
("resource_location", models.TextField(null=True)),
("tags", models.JSONField(null=True)),
("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=17, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=17, null=True)),
("currency", models.TextField(null=True)),
("unit_of_measure", models.TextField(null=True)),
("shared_projects", models.IntegerField(default=1)),
("project_costs", models.JSONField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpazurecostlineitem_daily_summary"},
),
migrations.CreateModel(
name="OCPAzureCostLineItemProjectDailySummary",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("data_source", models.CharField(max_length=64, null=True)),
("namespace", models.CharField(max_length=253)),
("pod", models.CharField(max_length=253, null=True)),
("node", models.CharField(max_length=253, null=True)),
("pod_labels", models.JSONField(null=True)),
("resource_id", models.CharField(max_length=253, null=True)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("instance_type", models.TextField(null=True)),
("service_name", models.TextField(null=True)),
("resource_location", models.TextField(null=True)),
("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit_of_measure", models.TextField(null=True)),
("currency", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=17, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=17, null=True)),
("project_markup_cost", models.DecimalField(decimal_places=9, max_digits=17, null=True)),
("pod_cost", models.DecimalField(decimal_places=6, max_digits=24, null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpazurecostlineitem_project_daily_summary"},
),
migrations.CreateModel(
name="OCPAzureCostSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpazure_cost_summary", "managed": False},
),
migrations.CreateModel(
name="OCPAzureCostSummaryByAccount",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("subscription_guid", models.TextField()),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpazure_cost_summary_by_account", "managed": False},
),
migrations.CreateModel(
name="OCPAzureCostSummaryByLocation",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("subscription_guid", models.TextField()),
("resource_location", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpazure_cost_summary_by_location", "managed": False},
),
migrations.CreateModel(
name="OCPAzureCostSummaryByService",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("subscription_guid", models.TextField()),
("service_name", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpazure_cost_summary_by_service", "managed": False},
),
migrations.CreateModel(
name="OCPAzureDatabaseSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("subscription_guid", models.TextField()),
("service_name", models.TextField(null=True)),
("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit_of_measure", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpazure_database_summary", "managed": False},
),
migrations.CreateModel(
name="OCPAzureNetworkSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("subscription_guid", models.TextField()),
("service_name", models.TextField(null=True)),
("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit_of_measure", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpazure_network_summary", "managed": False},
),
migrations.CreateModel(
name="OCPAzureStorageSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("subscription_guid", models.TextField()),
("service_name", models.TextField(null=True)),
("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit_of_measure", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpazure_storage_summary", "managed": False},
),
migrations.CreateModel(
name="OCPAzureTagsSummary",
fields=[
("uuid", models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
("key", models.CharField(max_length=253)),
(
"values",
django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=253), size=None),
),
("subscription_guid", models.TextField(null=True)),
("namespace", models.TextField()),
("node", models.TextField(null=True)),
],
options={"db_table": "reporting_ocpazuretags_summary"},
),
migrations.CreateModel(
name="OCPAzureTagsValues",
fields=[
("uuid", models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
("key", models.TextField()),
("value", models.TextField()),
(
"subscription_guids",
django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), size=None),
),
("cluster_ids", django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), size=None)),
(
"cluster_aliases",
django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), size=None),
),
("namespaces", django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), size=None)),
(
"nodes",
django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), null=True, size=None),
),
],
options={"db_table": "reporting_ocpazuretags_values"},
),
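        # OCP (OpenShift) cost, usage, storage, node-label, and tag reporting
        # models follow, including further managed=False summary models.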
migrations.CreateModel(
name="OCPCostSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("cluster_id", models.TextField()),
("cluster_alias", models.TextField(null=True)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("infrastructure_raw_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("infrastructure_usage_cost", models.JSONField(null=True)),
("infrastructure_markup_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("infrastructure_monthly_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("supplementary_usage_cost", models.JSONField(null=True)),
("supplementary_monthly_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocp_cost_summary", "managed": False},
),
migrations.CreateModel(
name="OCPCostSummaryByNode",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("cluster_id", models.TextField()),
("cluster_alias", models.TextField(null=True)),
("node", models.CharField(max_length=253)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("infrastructure_raw_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("infrastructure_usage_cost", models.JSONField(null=True)),
("infrastructure_markup_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("infrastructure_monthly_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("supplementary_usage_cost", models.JSONField(null=True)),
("supplementary_monthly_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocp_cost_summary_by_node", "managed": False},
),
migrations.CreateModel(
name="OCPCostSummaryByProject",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("cluster_id", models.TextField()),
("cluster_alias", models.TextField(null=True)),
("namespace", models.CharField(max_length=253)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("infrastructure_project_raw_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("infrastructure_usage_cost", models.JSONField(null=True)),
(
"infrastructure_project_markup_cost",
models.DecimalField(decimal_places=15, max_digits=33, null=True),
),
("infrastructure_monthly_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("supplementary_usage_cost", models.JSONField(null=True)),
("supplementary_monthly_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocp_cost_summary_by_project", "managed": False},
),
migrations.CreateModel(
name="OCPEnabledTagKeys",
fields=[
("id", models.BigAutoField(primary_key=True, serialize=False)),
("key", models.CharField(max_length=253, unique=True)),
],
options={"db_table": "reporting_ocpenabledtagkeys"},
),
migrations.CreateModel(
name="OCPNodeLabelLineItem",
fields=[
("id", models.BigAutoField(primary_key=True, serialize=False)),
("node", models.CharField(max_length=253, null=True)),
("node_labels", models.JSONField(null=True)),
],
options={"db_table": "reporting_ocpnodelabellineitem"},
),
migrations.CreateModel(
name="OCPNodeLabelLineItemDaily",
fields=[
("id", models.BigAutoField(primary_key=True, serialize=False)),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("node", models.CharField(max_length=253, null=True)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("node_labels", models.JSONField(null=True)),
("total_seconds", models.IntegerField()),
],
options={"db_table": "reporting_ocpnodelabellineitem_daily"},
),
migrations.CreateModel(
name="OCPPodSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("cluster_id", models.TextField()),
("cluster_alias", models.TextField(null=True)),
(
"resource_ids",
django.contrib.postgres.fields.ArrayField(
base_field=models.CharField(max_length=256), null=True, size=None
),
),
("resource_count", models.IntegerField(null=True)),
("data_source", models.CharField(max_length=64, null=True)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("infrastructure_raw_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("infrastructure_usage_cost", models.JSONField(null=True)),
("infrastructure_markup_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("supplementary_usage_cost", models.JSONField(null=True)),
("pod_usage_cpu_core_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_request_cpu_core_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_limit_cpu_core_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_usage_memory_gigabyte_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_request_memory_gigabyte_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_limit_memory_gigabyte_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("cluster_capacity_cpu_core_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
(
"cluster_capacity_memory_gigabyte_hours",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocp_pod_summary", "managed": False},
),
migrations.CreateModel(
name="OCPPodSummaryByProject",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("cluster_id", models.TextField()),
("cluster_alias", models.TextField(null=True)),
("namespace", models.CharField(max_length=253, null=True)),
(
"resource_ids",
django.contrib.postgres.fields.ArrayField(
base_field=models.CharField(max_length=256), null=True, size=None
),
),
("resource_count", models.IntegerField(null=True)),
("data_source", models.CharField(max_length=64, null=True)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("supplementary_usage_cost", models.JSONField(null=True)),
("infrastructure_raw_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("infrastructure_usage_cost", models.JSONField(null=True)),
("infrastructure_markup_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("pod_usage_cpu_core_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_request_cpu_core_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_limit_cpu_core_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_usage_memory_gigabyte_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_request_memory_gigabyte_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_limit_memory_gigabyte_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("cluster_capacity_cpu_core_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
(
"cluster_capacity_memory_gigabyte_hours",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocp_pod_summary_by_project", "managed": False},
),
migrations.CreateModel(
name="OCPStorageLineItem",
fields=[
("id", models.BigAutoField(primary_key=True, serialize=False)),
("namespace", models.CharField(max_length=253)),
("pod", models.CharField(max_length=253, null=True)),
("persistentvolumeclaim", models.CharField(max_length=253)),
("persistentvolume", models.CharField(max_length=253)),
("storageclass", models.CharField(max_length=50, null=True)),
(
"persistentvolumeclaim_capacity_bytes",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
(
"persistentvolumeclaim_capacity_byte_seconds",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
(
"volume_request_storage_byte_seconds",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
(
"persistentvolumeclaim_usage_byte_seconds",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
("persistentvolume_labels", models.JSONField(null=True)),
("persistentvolumeclaim_labels", models.JSONField(null=True)),
],
),
migrations.CreateModel(
name="OCPStorageLineItemDaily",
fields=[
("id", models.BigAutoField(primary_key=True, serialize=False)),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("namespace", models.CharField(max_length=253)),
("pod", models.CharField(max_length=253, null=True)),
("node", models.CharField(max_length=253, null=True)),
("persistentvolumeclaim", models.CharField(max_length=253)),
("persistentvolume", models.CharField(max_length=253)),
("storageclass", models.CharField(max_length=50, null=True)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
(
"persistentvolumeclaim_capacity_bytes",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
(
"persistentvolumeclaim_capacity_byte_seconds",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
(
"volume_request_storage_byte_seconds",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
(
"persistentvolumeclaim_usage_byte_seconds",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
("total_seconds", models.IntegerField()),
("persistentvolume_labels", models.JSONField(null=True)),
("persistentvolumeclaim_labels", models.JSONField(null=True)),
],
options={"db_table": "reporting_ocpstoragelineitem_daily"},
),
migrations.CreateModel(
name="OCPStorageVolumeLabelSummary",
fields=[
("uuid", models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
("key", models.TextField()),
("values", django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), size=None)),
("namespace", models.TextField()),
("node", models.TextField(null=True)),
],
options={"db_table": "reporting_ocpstoragevolumelabel_summary"},
),
migrations.CreateModel(
name="OCPTagsValues",
fields=[
("uuid", models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
("key", models.TextField()),
("value", models.TextField()),
("cluster_ids", django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), size=None)),
(
"cluster_aliases",
django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), size=None),
),
("namespaces", django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), size=None)),
(
"nodes",
django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), null=True, size=None),
),
],
options={"db_table": "reporting_ocptags_values"},
),
migrations.CreateModel(
name="OCPUsageLineItem",
fields=[
("id", models.BigAutoField(primary_key=True, serialize=False)),
("namespace", models.CharField(max_length=253)),
("pod", models.CharField(max_length=253)),
("node", models.CharField(max_length=253)),
("resource_id", models.CharField(max_length=253, null=True)),
("pod_usage_cpu_core_seconds", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_request_cpu_core_seconds", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_limit_cpu_core_seconds", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_usage_memory_byte_seconds", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_request_memory_byte_seconds", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_limit_memory_byte_seconds", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("node_capacity_cpu_cores", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("node_capacity_cpu_core_seconds", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("node_capacity_memory_bytes", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("node_capacity_memory_byte_seconds", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_labels", models.JSONField(null=True)),
],
),
migrations.CreateModel(
name="OCPUsageLineItemDaily",
fields=[
("id", models.BigAutoField(primary_key=True, serialize=False)),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("namespace", models.CharField(max_length=253)),
("pod", models.CharField(max_length=253)),
("node", models.CharField(max_length=253)),
("resource_id", models.CharField(max_length=253, null=True)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("pod_usage_cpu_core_seconds", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_request_cpu_core_seconds", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_limit_cpu_core_seconds", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_usage_memory_byte_seconds", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_request_memory_byte_seconds", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_limit_memory_byte_seconds", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("node_capacity_cpu_cores", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("node_capacity_cpu_core_seconds", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("node_capacity_memory_bytes", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("node_capacity_memory_byte_seconds", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("cluster_capacity_cpu_core_seconds", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
(
"cluster_capacity_memory_byte_seconds",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
("total_seconds", models.IntegerField()),
("pod_labels", models.JSONField(null=True)),
],
options={"db_table": "reporting_ocpusagelineitem_daily"},
),
migrations.CreateModel(
name="OCPUsageLineItemDailySummary",
fields=[
("id", models.BigAutoField(primary_key=True, serialize=False)),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("data_source", models.CharField(max_length=64, null=True)),
("namespace", models.CharField(max_length=253, null=True)),
("node", models.CharField(max_length=253, null=True)),
("resource_id", models.CharField(max_length=253, null=True)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("pod_labels", models.JSONField(null=True)),
("pod_usage_cpu_core_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_request_cpu_core_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_limit_cpu_core_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_usage_memory_gigabyte_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_request_memory_gigabyte_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("pod_limit_memory_gigabyte_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("node_capacity_cpu_cores", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("node_capacity_cpu_core_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
("node_capacity_memory_gigabytes", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
(
"node_capacity_memory_gigabyte_hours",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
("cluster_capacity_cpu_core_hours", models.DecimalField(decimal_places=9, max_digits=73, null=True)),
(
"cluster_capacity_memory_gigabyte_hours",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
("persistentvolumeclaim", models.CharField(max_length=253, null=True)),
("persistentvolume", models.CharField(max_length=253, null=True)),
("storageclass", models.CharField(max_length=50, null=True)),
("volume_labels", models.JSONField(null=True)),
(
"persistentvolumeclaim_capacity_gigabyte",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
(
"persistentvolumeclaim_capacity_gigabyte_months",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
(
"volume_request_storage_gigabyte_months",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
(
"persistentvolumeclaim_usage_gigabyte_months",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
(
"infrastructure_raw_cost",
models.DecimalField(decimal_places=15, default=Decimal("0"), max_digits=33, null=True),
),
(
"infrastructure_project_raw_cost",
models.DecimalField(decimal_places=15, default=Decimal("0"), max_digits=33, null=True),
),
("infrastructure_usage_cost", models.JSONField(null=True)),
("infrastructure_markup_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
(
"infrastructure_project_markup_cost",
models.DecimalField(decimal_places=15, max_digits=33, null=True),
),
("infrastructure_monthly_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("supplementary_usage_cost", models.JSONField(null=True)),
("supplementary_monthly_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("monthly_cost_type", models.TextField(choices=[("Node", "Node"), ("Cluster", "Cluster")], null=True)),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocpusagelineitem_daily_summary"},
),
migrations.CreateModel(
name="OCPUsagePodLabelSummary",
fields=[
("uuid", models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
("key", models.TextField()),
("values", django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), size=None)),
("namespace", models.TextField()),
("node", models.TextField(null=True)),
],
options={"db_table": "reporting_ocpusagepodlabel_summary"},
),
migrations.CreateModel(
name="OCPUsageReport",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("interval_start", models.DateTimeField()),
("interval_end", models.DateTimeField()),
],
),
migrations.CreateModel(
name="OCPUsageReportPeriod",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("cluster_id", models.CharField(max_length=50)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("report_period_start", models.DateTimeField()),
("report_period_end", models.DateTimeField()),
("summary_data_creation_datetime", models.DateTimeField(null=True)),
("summary_data_updated_datetime", models.DateTimeField(null=True)),
("derived_cost_datetime", models.DateTimeField(null=True)),
],
),
migrations.CreateModel(
name="OCPVolumeSummary",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("cluster_id", models.TextField()),
("cluster_alias", models.TextField(null=True)),
(
"resource_ids",
django.contrib.postgres.fields.ArrayField(
base_field=models.CharField(max_length=256), null=True, size=None
),
),
("resource_count", models.IntegerField(null=True)),
("data_source", models.CharField(max_length=64, null=True)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("supplementary_usage_cost", models.JSONField(null=True)),
("infrastructure_raw_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("infrastructure_usage_cost", models.JSONField(null=True)),
("infrastructure_markup_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
(
"persistentvolumeclaim_usage_gigabyte_months",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
(
"volume_request_storage_gigabyte_months",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
(
"persistentvolumeclaim_capacity_gigabyte_months",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocp_volume_summary", "managed": False},
),
migrations.CreateModel(
name="OCPVolumeSummaryByProject",
fields=[
("id", models.IntegerField(primary_key=True, serialize=False)),
("cluster_id", models.TextField()),
("cluster_alias", models.TextField(null=True)),
("namespace", models.CharField(max_length=253, null=True)),
(
"resource_ids",
django.contrib.postgres.fields.ArrayField(
base_field=models.CharField(max_length=256), null=True, size=None
),
),
("resource_count", models.IntegerField(null=True)),
("data_source", models.CharField(max_length=64, null=True)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("supplementary_usage_cost", models.JSONField(null=True)),
("infrastructure_raw_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("infrastructure_usage_cost", models.JSONField(null=True)),
("infrastructure_markup_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
(
"persistentvolumeclaim_usage_gigabyte_months",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
(
"volume_request_storage_gigabyte_months",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
(
"persistentvolumeclaim_capacity_gigabyte_months",
models.DecimalField(decimal_places=9, max_digits=73, null=True),
),
("source_uuid", models.UUIDField(null=True)),
],
options={"db_table": "reporting_ocp_volume_summary_by_project", "managed": False},
),
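        # Bookkeeping model recording which table partitions exist for the
        # partitioned reporting tables. Note that partition_parameters uses
        # the legacy django.contrib.postgres.fields.jsonb.JSONField, while the
        # rest of this migration uses models.JSONField.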
migrations.CreateModel(
name="PartitionedTable",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("schema_name", models.TextField(validators=[reporting.partition.models.validate_not_empty])),
("table_name", models.TextField(validators=[reporting.partition.models.validate_not_empty])),
(
"partition_of_table_name",
models.TextField(validators=[reporting.partition.models.validate_not_empty]),
),
("partition_type", models.TextField(validators=[reporting.partition.models.validate_not_empty])),
("partition_col", models.TextField(validators=[reporting.partition.models.validate_not_empty])),
(
"partition_parameters",
django.contrib.postgres.fields.jsonb.JSONField(
validators=[reporting.partition.models.validate_not_empty]
),
),
("active", models.BooleanField(default=True)),
],
options={"db_table": "partitioned_tables"},
),
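        # From here on, the migration only adds indexes, unique constraints,
        # and foreign keys. Relational fields are added as separate AddField
        # operations after all CreateModel operations, so every referenced
        # model already exists when its foreign key is wired up.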
migrations.AddIndex(
model_name="partitionedtable",
index=models.Index(fields=["schema_name", "table_name"], name="partable_table"),
),
migrations.AddIndex(
model_name="partitionedtable",
index=models.Index(fields=["partition_type"], name="partable_partition_type"),
),
migrations.AddIndex(
model_name="partitionedtable",
index=django.contrib.postgres.indexes.GinIndex(
fields=["partition_parameters"], name="partable_partition_parameters"
),
),
migrations.AlterUniqueTogether(name="partitionedtable", unique_together={("schema_name", "table_name")}),
migrations.AddField(
model_name="ocpusagereportperiod",
name="provider",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="api.provider"),
),
migrations.AddField(
model_name="ocpusagereport",
name="report_period",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.ocpusagereportperiod"),
),
migrations.AddField(
model_name="ocpusagepodlabelsummary",
name="report_period",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.ocpusagereportperiod"),
),
migrations.AddField(
model_name="ocpusagelineitemdailysummary",
name="report_period",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="reporting.ocpusagereportperiod"
),
),
migrations.AddField(
model_name="ocpusagelineitemdaily",
name="report_period",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="reporting.ocpusagereportperiod"
),
),
migrations.AddField(
model_name="ocpusagelineitem",
name="report",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.ocpusagereport"),
),
migrations.AddField(
model_name="ocpusagelineitem",
name="report_period",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.ocpusagereportperiod"),
),
migrations.AddIndex(
model_name="ocptagsvalues", index=models.Index(fields=["key"], name="openshift_tags_value_key_idx")
),
migrations.AlterUniqueTogether(name="ocptagsvalues", unique_together={("key", "value")}),
migrations.AddField(
model_name="ocpstoragevolumelabelsummary",
name="report_period",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.ocpusagereportperiod"),
),
migrations.AddField(
model_name="ocpstoragelineitemdaily",
name="report_period",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="reporting.ocpusagereportperiod"
),
),
migrations.AddField(
model_name="ocpstoragelineitem",
name="report",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.ocpusagereport"),
),
migrations.AddField(
model_name="ocpstoragelineitem",
name="report_period",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.ocpusagereportperiod"),
),
migrations.AddField(
model_name="ocpnodelabellineitemdaily",
name="report_period",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="reporting.ocpusagereportperiod"
),
),
migrations.AddField(
model_name="ocpnodelabellineitem",
name="report",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.ocpusagereport"),
),
migrations.AddField(
model_name="ocpnodelabellineitem",
name="report_period",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.ocpusagereportperiod"),
),
migrations.AddIndex(
model_name="ocpazuretagsvalues", index=models.Index(fields=["key"], name="ocp_azure_tags_value_key_idx")
),
migrations.AlterUniqueTogether(name="ocpazuretagsvalues", unique_together={("key", "value")}),
migrations.AddField(
model_name="ocpazuretagssummary",
name="cost_entry_bill",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.azurecostentrybill"),
),
migrations.AddField(
model_name="ocpazuretagssummary",
name="report_period",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.ocpusagereportperiod"),
),
migrations.AddField(
model_name="ocpazurecostlineitemprojectdailysummary",
name="cost_entry_bill",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.azurecostentrybill"),
),
migrations.AddField(
model_name="ocpazurecostlineitemprojectdailysummary",
name="report_period",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="reporting.ocpusagereportperiod"
),
),
migrations.AddField(
model_name="ocpazurecostlineitemdailysummary",
name="cost_entry_bill",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.azurecostentrybill"),
),
migrations.AddField(
model_name="ocpazurecostlineitemdailysummary",
name="report_period",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="reporting.ocpusagereportperiod"
),
),
migrations.AddIndex(
model_name="ocpawstagsvalues", index=models.Index(fields=["key"], name="ocp_aws_tags_value_key_idx")
),
migrations.AlterUniqueTogether(name="ocpawstagsvalues", unique_together={("key", "value")}),
migrations.AddField(
model_name="ocpawstagssummary",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.SET_NULL, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="ocpawstagssummary",
name="cost_entry_bill",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.awscostentrybill"),
),
migrations.AddField(
model_name="ocpawstagssummary",
name="report_period",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.ocpusagereportperiod"),
),
migrations.AddField(
model_name="ocpawsstoragesummary",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="ocpawsnetworksummary",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="ocpawsdatabasesummary",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="ocpawscostsummarybyservice",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="ocpawscostsummarybyregion",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="ocpawscostsummarybyaccount",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="ocpawscostlineitemprojectdailysummary",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.SET_NULL, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="ocpawscostlineitemprojectdailysummary",
name="cost_entry_bill",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="reporting.awscostentrybill"
),
),
migrations.AddField(
model_name="ocpawscostlineitemprojectdailysummary",
name="report_period",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="reporting.ocpusagereportperiod"
),
),
migrations.AddField(
model_name="ocpawscostlineitemdailysummary",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.SET_NULL, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="ocpawscostlineitemdailysummary",
name="cost_entry_bill",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="reporting.awscostentrybill"
),
),
migrations.AddField(
model_name="ocpawscostlineitemdailysummary",
name="report_period",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="reporting.ocpusagereportperiod"
),
),
migrations.AddField(
model_name="ocpawscomputesummary",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="ocpallstoragesummary",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="ocpallnetworksummary",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="ocpalldatabasesummary",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="ocpallcostsummarybyservice",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="ocpallcostsummarybyregion",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="ocpallcostsummarybyaccount",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="ocpallcostlineitemprojectdailysummary",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.SET_NULL, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="ocpallcostlineitemdailysummary",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.SET_NULL, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="ocpallcomputesummary",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
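        # Foreign keys, indexes, and unique constraints for the GCP,
        # OCP CostSummary, Azure, and AWS cost-entry models.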
migrations.AddField(
model_name="gcpcostentrylineitemdaily",
name="cost_entry_bill",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.gcpcostentrybill"),
),
migrations.AddField(
model_name="gcpcostentrylineitemdaily",
name="project",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.gcpproject"),
),
migrations.AddField(
model_name="gcpcostentrybill",
name="provider",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="api.provider"),
),
migrations.AddField(
model_name="costsummary",
name="report_period",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="reporting.ocpusagereportperiod"
),
),
migrations.AddIndex(
model_name="azuretagsvalues", index=models.Index(fields=["key"], name="azure_tags_value_key_idx")
),
migrations.AlterUniqueTogether(name="azuretagsvalues", unique_together={("key", "value")}),
migrations.AddField(
model_name="azuretagssummary",
name="cost_entry_bill",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.azurecostentrybill"),
),
migrations.AddField(
model_name="azuremeter",
name="provider",
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to="api.provider"),
),
migrations.AddField(
model_name="azurecostentryproductservice",
name="provider",
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to="api.provider"),
),
migrations.AddField(
model_name="azurecostentrylineitemdailysummary",
name="cost_entry_bill",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.azurecostentrybill"),
),
migrations.AddField(
model_name="azurecostentrylineitemdailysummary",
name="meter",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.SET_NULL, to="reporting.azuremeter"
),
),
migrations.AddField(
model_name="azurecostentrylineitemdaily",
name="cost_entry_bill",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.azurecostentrybill"),
),
migrations.AddField(
model_name="azurecostentrylineitemdaily",
name="cost_entry_product",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.SET_NULL, to="reporting.azurecostentryproductservice"
),
),
migrations.AddField(
model_name="azurecostentrylineitemdaily",
name="meter",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.SET_NULL, to="reporting.azuremeter"
),
),
migrations.AddField(
model_name="azurecostentrybill",
name="provider",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="api.provider"),
),
migrations.AddIndex(
model_name="awstagsvalues", index=models.Index(fields=["key"], name="aws_tags_value_key_idx")
),
migrations.AlterUniqueTogether(name="awstagsvalues", unique_together={("key", "value")}),
migrations.AddField(
model_name="awstagssummary",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.SET_NULL, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="awstagssummary",
name="cost_entry_bill",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.awscostentrybill"),
),
migrations.AddField(
model_name="awsstoragesummarybyservice",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="awsstoragesummarybyservice",
name="organizational_unit",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsorganizationalunit"
),
),
migrations.AddField(
model_name="awsstoragesummarybyregion",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="awsstoragesummarybyregion",
name="organizational_unit",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsorganizationalunit"
),
),
migrations.AddField(
model_name="awsstoragesummarybyaccount",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="awsstoragesummarybyaccount",
name="organizational_unit",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsorganizationalunit"
),
),
migrations.AddField(
model_name="awsorganizationalunit",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.PROTECT, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="awsnetworksummary",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="awsnetworksummary",
name="organizational_unit",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsorganizationalunit"
),
),
migrations.AddField(
model_name="awsdatabasesummary",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="awsdatabasesummary",
name="organizational_unit",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsorganizationalunit"
),
),
migrations.AddField(
model_name="awscostsummarybyservice",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="awscostsummarybyservice",
name="organizational_unit",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsorganizationalunit"
),
),
migrations.AddField(
model_name="awscostsummarybyregion",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="awscostsummarybyregion",
name="organizational_unit",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsorganizationalunit"
),
),
migrations.AddField(
model_name="awscostsummarybyaccount",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="awscostsummarybyaccount",
name="organizational_unit",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsorganizationalunit"
),
),
migrations.AddIndex(
model_name="awscostentryproduct", index=models.Index(fields=["region"], name="region_idx")
),
migrations.AlterUniqueTogether(
name="awscostentryproduct", unique_together={("sku", "product_name", "region")}
),
migrations.AlterUniqueTogether(name="awscostentrypricing", unique_together={("term", "unit")}),
migrations.AddField(
model_name="awscostentrylineitemdailysummary",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.PROTECT, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="awscostentrylineitemdailysummary",
name="cost_entry_bill",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="reporting.awscostentrybill"
),
),
migrations.AddField(
model_name="awscostentrylineitemdailysummary",
name="organizational_unit",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.SET_NULL, to="reporting.awsorganizationalunit"
),
),
migrations.AddField(
model_name="awscostentrylineitemdaily",
name="cost_entry_bill",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="reporting.awscostentrybill"
),
),
migrations.AddField(
model_name="awscostentrylineitemdaily",
name="cost_entry_pricing",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.SET_NULL, to="reporting.awscostentrypricing"
),
),
migrations.AddField(
model_name="awscostentrylineitemdaily",
name="cost_entry_product",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.SET_NULL, to="reporting.awscostentryproduct"
),
),
migrations.AddField(
model_name="awscostentrylineitemdaily",
name="cost_entry_reservation",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.SET_NULL, to="reporting.awscostentryreservation"
),
),
migrations.AddField(
model_name="awscostentrylineitem",
name="cost_entry",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.awscostentry"),
),
migrations.AddField(
model_name="awscostentrylineitem",
name="cost_entry_bill",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.awscostentrybill"),
),
migrations.AddField(
model_name="awscostentrylineitem",
name="cost_entry_pricing",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.SET_NULL, to="reporting.awscostentrypricing"
),
),
migrations.AddField(
model_name="awscostentrylineitem",
name="cost_entry_product",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.SET_NULL, to="reporting.awscostentryproduct"
),
),
migrations.AddField(
model_name="awscostentrylineitem",
name="cost_entry_reservation",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.SET_NULL, to="reporting.awscostentryreservation"
),
),
migrations.AddField(
model_name="awscostentrybill",
name="provider",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="api.provider"),
),
migrations.AddField(
model_name="awscostentry",
name="bill",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="reporting.awscostentrybill"),
),
migrations.AddField(
model_name="awscomputesummarybyservice",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="awscomputesummarybyservice",
name="organizational_unit",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsorganizationalunit"
),
),
migrations.AddField(
model_name="awscomputesummarybyregion",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="awscomputesummarybyregion",
name="organizational_unit",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsorganizationalunit"
),
),
migrations.AddField(
model_name="awscomputesummarybyaccount",
name="account_alias",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
migrations.AddField(
model_name="awscomputesummarybyaccount",
name="organizational_unit",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsorganizationalunit"
),
),
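        # Final constraints: unique_together and indexes for the OCP,
        # OCP-on-Azure, OCP-on-AWS, and OCP-on-all tables. The GIN indexes on
        # JSON label/tag columns support containment lookups, e.g.
        # (hypothetical usage):
        #   OCPUsageLineItemDailySummary.objects.filter(
        #       pod_labels__contains={"app": "web"}
        #   )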
migrations.AlterUniqueTogether(
name="ocpusagereportperiod", unique_together={("cluster_id", "report_period_start", "provider")}
),
migrations.AddIndex(
model_name="ocpusagereport", index=models.Index(fields=["interval_start"], name="ocp_interval_start_idx")
),
migrations.AlterUniqueTogether(name="ocpusagereport", unique_together={("report_period", "interval_start")}),
migrations.AlterUniqueTogether(
name="ocpusagepodlabelsummary", unique_together={("key", "report_period", "namespace", "node")}
),
migrations.AddIndex(
model_name="ocpusagelineitemdailysummary",
index=models.Index(fields=["usage_start"], name="summary_ocp_usage_idx"),
),
migrations.AddIndex(
model_name="ocpusagelineitemdailysummary",
index=models.Index(fields=["namespace"], name="summary_namespace_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpusagelineitemdailysummary",
index=models.Index(fields=["node"], name="summary_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpusagelineitemdailysummary",
index=models.Index(fields=["data_source"], name="summary_data_source_idx"),
),
migrations.AddIndex(
model_name="ocpusagelineitemdailysummary",
index=django.contrib.postgres.indexes.GinIndex(fields=["pod_labels"], name="pod_labels_idx"),
),
migrations.AddIndex(
model_name="ocpusagelineitemdaily", index=models.Index(fields=["usage_start"], name="ocp_usage_idx")
),
migrations.AddIndex(
model_name="ocpusagelineitemdaily",
index=models.Index(fields=["namespace"], name="namespace_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(model_name="ocpusagelineitemdaily", index=models.Index(fields=["pod"], name="pod_idx")),
migrations.AddIndex(
model_name="ocpusagelineitemdaily",
index=models.Index(fields=["node"], name="node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AlterUniqueTogether(
name="ocpusagelineitem", unique_together={("report", "namespace", "pod", "node")}
),
migrations.AlterUniqueTogether(
name="ocpstoragevolumelabelsummary", unique_together={("key", "report_period", "namespace", "node")}
),
migrations.AddIndex(
model_name="ocpstoragelineitemdaily",
index=models.Index(
fields=["namespace"], name="ocp_storage_li_namespace_idx", opclasses=["varchar_pattern_ops"]
),
),
migrations.AddIndex(
model_name="ocpstoragelineitemdaily",
index=models.Index(fields=["node"], name="ocp_storage_li_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AlterUniqueTogether(
name="ocpstoragelineitem", unique_together={("report", "namespace", "persistentvolumeclaim")}
),
migrations.AddIndex(
model_name="ocpnodelabellineitemdaily",
index=models.Index(fields=["usage_start"], name="ocplblnitdly_usage_start"),
),
migrations.AddIndex(
model_name="ocpnodelabellineitemdaily",
index=django.contrib.postgres.indexes.GinIndex(fields=["node_labels"], name="ocplblnitdly_node_labels"),
),
migrations.AlterUniqueTogether(name="ocpnodelabellineitem", unique_together={("report", "node")}),
migrations.AlterUniqueTogether(
name="ocpazuretagssummary",
unique_together={("key", "cost_entry_bill", "report_period", "subscription_guid", "namespace", "node")},
),
migrations.AddIndex(
model_name="ocpazurecostlineitemprojectdailysummary",
index=models.Index(fields=["usage_start"], name="ocpazure_proj_usage_start_idx"),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemprojectdailysummary",
index=models.Index(
fields=["namespace"], name="ocpazure_proj_namespace_idx", opclasses=["varchar_pattern_ops"]
),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemprojectdailysummary",
index=models.Index(fields=["node"], name="ocpazure_proj_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemprojectdailysummary",
index=models.Index(fields=["resource_id"], name="ocpazure_proj_resource_id_idx"),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemprojectdailysummary",
index=django.contrib.postgres.indexes.GinIndex(fields=["pod_labels"], name="ocpazure_proj_pod_labels_idx"),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemprojectdailysummary",
index=models.Index(fields=["service_name"], name="ocpazure_proj_service_name_idx"),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemprojectdailysummary",
index=models.Index(fields=["instance_type"], name="ocpazure_proj_inst_type_idx"),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemdailysummary",
index=models.Index(fields=["usage_start"], name="ocpazure_usage_start_idx"),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemdailysummary",
index=models.Index(fields=["namespace"], name="ocpazure_namespace_idx"),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemdailysummary",
index=models.Index(fields=["node"], name="ocpazure_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemdailysummary",
index=models.Index(fields=["resource_id"], name="ocpazure_resource_idx"),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemdailysummary",
index=django.contrib.postgres.indexes.GinIndex(fields=["tags"], name="ocpazure_tags_idx"),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemdailysummary",
index=models.Index(fields=["service_name"], name="ocpazure_service_name_idx"),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemdailysummary",
index=models.Index(fields=["instance_type"], name="ocpazure_instance_type_idx"),
),
migrations.AlterUniqueTogether(
name="ocpawstagssummary",
unique_together={("key", "cost_entry_bill", "report_period", "usage_account_id", "namespace", "node")},
),
migrations.AddIndex(
model_name="ocpawscostlineitemprojectdailysummary",
index=models.Index(fields=["usage_start"], name="cost_proj_sum_ocp_usage_idx"),
),
migrations.AddIndex(
model_name="ocpawscostlineitemprojectdailysummary",
index=models.Index(
fields=["namespace"], name="cost__proj_sum_namespace_idx", opclasses=["varchar_pattern_ops"]
),
),
migrations.AddIndex(
model_name="ocpawscostlineitemprojectdailysummary",
index=models.Index(fields=["node"], name="cost_proj_sum_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpawscostlineitemprojectdailysummary",
index=models.Index(fields=["resource_id"], name="cost_proj_sum_resource_idx"),
),
migrations.AddIndex(
model_name="ocpawscostlineitemprojectdailysummary",
index=django.contrib.postgres.indexes.GinIndex(fields=["pod_labels"], name="cost_proj_pod_labels_idx"),
),
migrations.AddIndex(
model_name="ocpawscostlineitemprojectdailysummary",
index=models.Index(fields=["product_family"], name="ocp_aws_proj_prod_fam_idx"),
),
migrations.AddIndex(
model_name="ocpawscostlineitemprojectdailysummary",
index=models.Index(fields=["instance_type"], name="ocp_aws_proj_inst_type_idx"),
),
migrations.AddIndex(
model_name="ocpawscostlineitemdailysummary",
index=models.Index(fields=["usage_start"], name="cost_summary_ocp_usage_idx"),
),
migrations.AddIndex(
model_name="ocpawscostlineitemdailysummary",
index=models.Index(fields=["namespace"], name="cost_summary_namespace_idx"),
),
migrations.AddIndex(
model_name="ocpawscostlineitemdailysummary",
index=models.Index(fields=["node"], name="cost_summary_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpawscostlineitemdailysummary",
index=models.Index(fields=["resource_id"], name="cost_summary_resource_idx"),
),
migrations.AddIndex(
model_name="ocpawscostlineitemdailysummary",
index=django.contrib.postgres.indexes.GinIndex(fields=["tags"], name="cost_tags_idx"),
),
migrations.AddIndex(
model_name="ocpawscostlineitemdailysummary",
index=models.Index(fields=["product_family"], name="ocp_aws_product_family_idx"),
),
migrations.AddIndex(
model_name="ocpawscostlineitemdailysummary",
index=models.Index(fields=["instance_type"], name="ocp_aws_instance_type_idx"),
),
migrations.AddIndex(
model_name="ocpallcostlineitemprojectdailysummary",
index=models.Index(fields=["usage_start"], name="ocpall_proj_usage_idx"),
),
migrations.AddIndex(
model_name="ocpallcostlineitemprojectdailysummary",
index=models.Index(fields=["namespace"], name="ocpall_proj_namespace_idx"),
),
migrations.AddIndex(
model_name="ocpallcostlineitemprojectdailysummary",
index=models.Index(fields=["node"], name="ocpall_proj_node_idx"),
),
migrations.AddIndex(
model_name="ocpallcostlineitemprojectdailysummary",
index=models.Index(fields=["resource_id"], name="ocpall_proj_resource_idx"),
),
migrations.AddIndex(
model_name="ocpallcostlineitemprojectdailysummary",
index=django.contrib.postgres.indexes.GinIndex(fields=["pod_labels"], name="ocpall_proj_pod_labels_idx"),
),
migrations.AddIndex(
model_name="ocpallcostlineitemprojectdailysummary",
index=models.Index(fields=["product_family"], name="ocpall_proj_prod_fam_idx"),
),
migrations.AddIndex(
model_name="ocpallcostlineitemprojectdailysummary",
index=models.Index(fields=["instance_type"], name="ocpall_proj_inst_type_idx"),
),
migrations.AddIndex(
model_name="ocpallcostlineitemdailysummary",
index=models.Index(fields=["usage_start"], name="ocpall_usage_idx"),
),
migrations.AddIndex(
model_name="ocpallcostlineitemdailysummary",
index=models.Index(fields=["namespace"], name="ocpall_namespace_idx"),
),
migrations.AddIndex(
model_name="ocpallcostlineitemdailysummary",
index=models.Index(fields=["node"], name="ocpall_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpallcostlineitemdailysummary",
index=models.Index(fields=["resource_id"], name="ocpall_resource_idx"),
),
migrations.AddIndex(
model_name="ocpallcostlineitemdailysummary",
index=django.contrib.postgres.indexes.GinIndex(fields=["tags"], name="ocpall_tags_idx"),
),
migrations.AddIndex(
model_name="ocpallcostlineitemdailysummary",
index=models.Index(fields=["product_family"], name="ocpall_product_family_idx"),
),
migrations.AddIndex(
model_name="ocpallcostlineitemdailysummary",
index=models.Index(fields=["instance_type"], name="ocpall_instance_type_idx"),
),
migrations.AlterUniqueTogether(
name="gcpcostentrylineitemdaily", unique_together={("start_time", "line_item_type", "project")}
),
migrations.AlterUniqueTogether(
name="gcpcostentrybill", unique_together={("billing_period_start", "provider")}
),
migrations.AddIndex(
model_name="costsummary", index=models.Index(fields=["usage_start"], name="ocpcostsum_usage_start_idx")
),
migrations.AddIndex(
model_name="costsummary",
index=models.Index(
fields=["namespace"], name="ocpcostsum_namespace_idx", opclasses=["varchar_pattern_ops"]
),
),
migrations.AddIndex(
model_name="costsummary",
index=models.Index(fields=["node"], name="ocpcostsum_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="costsummary",
index=django.contrib.postgres.indexes.GinIndex(fields=["pod_labels"], name="ocpcostsum_pod_labels_idx"),
),
migrations.AlterUniqueTogether(
name="azuretagssummary", unique_together={("key", "cost_entry_bill", "subscription_guid")}
),
migrations.AlterUniqueTogether(
name="azurecostentryproductservice",
unique_together={("instance_id", "instance_type", "service_tier", "service_name")},
),
migrations.AddIndex(
model_name="azurecostentrylineitemdailysummary",
index=models.Index(fields=["usage_start"], name="ix_azurecstentrydlysumm_start"),
),
migrations.AlterUniqueTogether(
name="azurecostentrybill", unique_together={("billing_period_start", "provider")}
),
migrations.AlterUniqueTogether(
name="awstagssummary", unique_together={("key", "cost_entry_bill", "usage_account_id")}
),
migrations.AddIndex(
model_name="awscostentrylineitemdailysummary",
index=models.Index(fields=["usage_start"], name="summary_usage_start_idx"),
),
migrations.AddIndex(
model_name="awscostentrylineitemdailysummary",
index=models.Index(fields=["product_code"], name="summary_product_code_idx"),
),
migrations.AddIndex(
model_name="awscostentrylineitemdailysummary",
index=models.Index(fields=["usage_account_id"], name="summary_usage_account_id_idx"),
),
migrations.AddIndex(
model_name="awscostentrylineitemdailysummary",
index=django.contrib.postgres.indexes.GinIndex(fields=["tags"], name="tags_idx"),
),
migrations.AddIndex(
model_name="awscostentrylineitemdailysummary",
index=models.Index(fields=["account_alias"], name="summary_account_alias_idx"),
),
migrations.AddIndex(
model_name="awscostentrylineitemdailysummary",
index=models.Index(fields=["product_family"], name="summary_product_family_idx"),
),
migrations.AddIndex(
model_name="awscostentrylineitemdailysummary",
index=models.Index(fields=["instance_type"], name="summary_instance_type_idx"),
),
migrations.AddIndex(
model_name="awscostentrylineitemdaily", index=models.Index(fields=["usage_start"], name="usage_start_idx")
),
migrations.AddIndex(
model_name="awscostentrylineitemdaily",
index=models.Index(fields=["product_code"], name="product_code_idx"),
),
migrations.AddIndex(
model_name="awscostentrylineitemdaily",
index=models.Index(fields=["usage_account_id"], name="usage_account_id_idx"),
),
migrations.AddIndex(
model_name="awscostentrylineitemdaily", index=models.Index(fields=["resource_id"], name="resource_id_idx")
),
migrations.AddIndex(
model_name="awscostentrylineitemdaily",
index=django.contrib.postgres.indexes.GinIndex(fields=["tags"], name="aws_cost_entry"),
),
migrations.AddIndex(
model_name="awscostentrylineitemdaily",
index=django.contrib.postgres.indexes.GinIndex(
fields=["product_code"], name="aws_cost_pcode_like", opclasses=["gin_trgm_ops"]
),
),
migrations.AlterUniqueTogether(
name="awscostentrybill",
unique_together={("bill_type", "payer_account_id", "billing_period_start", "provider")},
),
migrations.AddIndex(
model_name="awscostentry", index=models.Index(fields=["interval_start"], name="interval_start_idx")
),
###### begin customization; preserve this if you squash migrations ######
migrations.RunSQL(
sql="\nALTER TABLE partitioned_tables\n ALTER COLUMN active SET DEFAULT true;\n "
),
migrations.RunPython(code=apply_partitioned_table_triggers),
# =====================================================
# Partition ocpusagelineitemdailysummary
migrations.AlterModelOptions(name="ocpusagelineitemdailysummary", options={"managed": False}),
migrations.RunPython(code=convert_ocpusage_lids_to_partitioned),
# =====================================================
# Partition awscostentrylineitemdailysummary
migrations.AlterModelOptions(name="awscostentrylineitemdailysummary", options={"managed": False}),
migrations.RunPython(code=convert_awscostentry_lids_to_partitioned),
# =====================================================
# Partition azurecostentrylineitemdailysummary
migrations.AlterModelOptions(name="azurecostentrylineitemdailysummary", options={"managed": False}),
migrations.RunPython(code=convert_azurecostentry_lids_to_partitioned),
migrations.RunPython(code=add_views),
migrations.RunSQL(
sql="\n/* add namespace index for like trigram ops */\ncreate index if not exists ocp_namespace_idx\n on reporting_ocpusagelineitem_daily using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocp_node_idx\n on reporting_ocpusagelineitem_daily using gin (UPPER(node) gin_trgm_ops);\n "
),
migrations.RunSQL(
sql="\n/* add namespace index for like trigram ops */\ncreate index if not exists ocp_summary_namespace_like_idx\n on reporting_ocpusagelineitem_daily_summary using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocp_summary_node_like_idx\n on reporting_ocpusagelineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);\n "
),
migrations.RunSQL(
sql="\n/* add namespace index for like trigram ops */\ncreate index if not exists ocp_storage_li_namespace_like_idx\n on reporting_ocpstoragelineitem_daily using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocp_storage_li_node_like_idx\n on reporting_ocpstoragelineitem_daily using gin (UPPER(node) gin_trgm_ops);\n "
),
migrations.RunSQL(
sql="\n/* add node index for like trigram ops */\ncreate index if not exists ocpazure_node_like_idx\n on reporting_ocpazurecostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);\n "
),
migrations.RunSQL(
sql="\n/* add namespace index for like trigram ops */\ncreate index if not exists ocpazure_proj_namespace_like_idx\n on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocpazure_proj_node_like_idx\n on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops);\n "
),
migrations.RunSQL(
sql="\n/* add node index for like trigram ops */\ncreate index if not exists cost_summary_node_like_idx\n on reporting_ocpawscostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);\n "
),
migrations.RunSQL(
sql="\n/* add namespace index for like trigram ops */\ncreate index if not exists cost__proj_sum_namespace_like_idx\n on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists cost__proj_sum_node_like_idx\n on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops);\n "
),
migrations.RunSQL(
sql="\n/* add namespace index for like trigram ops */\ncreate index if not exists ocpcostsum_namespace_like_idx\n on reporting_ocpcosts_summary using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocpcostsum_node_like_idx\n on reporting_ocpcosts_summary using gin (UPPER(node) gin_trgm_ops);\n "
),
migrations.RunSQL(
sql="\nDROP INDEX IF EXISTS ocpall_product_code_ilike;\nCREATE INDEX ocpall_product_code_ilike ON reporting_ocpallcostlineitem_daily_summary USING GIN (upper(product_code) gin_trgm_ops);\n\nDROP INDEX IF EXISTS ocpall_product_family_ilike;\nCREATE INDEX ocpall_product_family_ilike ON reporting_ocpallcostlineitem_daily_summary USING GIN (upper(product_family) gin_trgm_ops);\n "
),
migrations.RunSQL(
sql="\nDROP INDEX IF EXISTS aws_summ_usage_pfam_ilike;\nCREATE INDEX aws_summ_usage_pfam_ilike ON reporting_awscostentrylineitem_daily_summary USING GIN (upper(product_family) gin_trgm_ops);\n\nDROP INDEX IF EXISTS aws_summ_usage_pcode_ilike;\nCREATE INDEX aws_summ_usage_pcode_ilike ON reporting_awscostentrylineitem_daily_summary USING GIN (upper(product_family) gin_trgm_ops);\n "
),
migrations.RunSQL(
sql="\nDROP INDEX IF EXISTS ix_azure_costentrydlysumm_service_name;\nCREATE INDEX ix_azure_costentrydlysumm_service_name ON reporting_azurecostentrylineitem_daily_summary USING GIN (upper(service_name) gin_trgm_ops);\n "
),
migrations.RunSQL(
sql="\nDROP INDEX IF EXISTS ix_ocp_aws_product_family_ilike;\nCREATE INDEX ix_ocp_aws_product_family_ilike ON reporting_ocpawscostlineitem_daily_summary USING GIN (upper(product_family) gin_trgm_ops);\n\nDROP INDEX IF EXISTS ix_ocp_aws_product_code_ilike;\nCREATE INDEX ix_ocp_aws_product_code_ilike ON reporting_ocpawscostlineitem_daily_summary USING GIN (upper(product_code) gin_trgm_ops);\n "
),
migrations.RunSQL(
sql="\nDROP INDEX IF EXISTS ix_ocpazure_service_name_ilike;\nCREATE INDEX ix_ocpazure_service_name_ilike ON reporting_ocpazurecostlineitem_daily_summary USING GIN (upper(service_name) gin_trgm_ops);\n "
),
###### end customization ######
]
|
[
"noreply@github.com"
] |
luisfdez.noreply@github.com
|
1cb5177d2ff89e1ef10c9b35fbf847f4179da0e5
|
4b579888f460ec89ebab6a6b19c1a49b9d76bb40
|
/read_EPIC_output/read_EPIC_output.py
|
38efcea9afc55339b6bb7295d22c621cdc1da3f0
|
[] |
no_license
|
xuesongzhang2004/EPIC-1
|
f706b6ea0baf7e04ed7af65676c9ce5e293082f2
|
e4d8d190289521efacf36576ec24508d83e2901c
|
refs/heads/master
| 2021-01-22T04:14:21.134360
| 2015-04-24T22:00:22
| 2015-04-24T22:00:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
import pdb
import pandas
from datetime import datetime
# Read an EPIC .DGN output file: skip the 10 header rows, split on whitespace,
# and build a datetime index from the year/month/day columns.
df = pandas.read_csv('1.DGN', skiprows=10, delim_whitespace=True,
                     parse_dates={"datetime": [0, 1, 2]}, index_col="datetime",
                     date_parser=lambda x: datetime.strptime(x, '%Y %m %d'))
print(df.index)
print(df.head())
pdb.set_trace()
|
[
"ritvik@umd.edu"
] |
ritvik@umd.edu
|
fbccd6964f8fa99865a89acd3914b963964ac0d9
|
555cfc7588c9bfb6193652be2e047eae5fc1fd4b
|
/constructor_listas_1.py
|
4de75248a6460e6aabf35690506805a295ffc0a5
|
[] |
no_license
|
MiguelAAguilarG/practicador_in
|
15e203981f5cd86c4fb63743526c1945eb0afc7e
|
2b6763017fbdf267cf66f5099cd221a572cce9a5
|
refs/heads/master
| 2020-04-07T14:07:29.045129
| 2019-07-02T06:12:39
| 2019-07-02T06:12:39
| 158,434,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,444
|
py
|
def impresor_lista_final(lista_0, lista_1, titulo, indice, lon, longitud):
lista = []
n = 0
x = 0
while x < len(lista_0):
y = 1
lista.append([])
lista[n].append([f'{titulo} {lon}'])
lista[n].append([f'{indice}{lon}'])
lista[n].append([])
lista[n].append([])
while y <= longitud:
if x < len(lista_0):
lista[n][2].append(lista_0[x])
lista[n][3].append(lista_1[x])
y = y+1
x = x+1
n = n+1
lon = lon + 1
for n in range(len(lista)):
print(f'''
n = n+1
lista.append([])
lista[n].append({lista[n][0]})
lista[n].append({lista[n][1]})
lista[n].append({lista[n][2]})
lista[n].append({lista[n][3]})''')
from io import open
titulo = ['partes de la ciudad', 'tiendas y comercios', 'problemas de salud y enfermedades', 'medicinas y remedios', 'el hospital', 'el cuerpo humano', 'partes de la casa', 'el cuarto del bebé', 'el baño', 'el dormitorio', 'el comedor', 'el jardín', 'la cocina', 'la sala', 'el cuarto de servicio', 'taller y herramientas', 'las flores', 'geografía', 'plantas y árboles', 'el universo y el cosmos', 'el tiempo', 'familia y parientes', 'trabajos y profesiones', 'sentimientos y emociones', 'estados de ánimo', 'ropa de hombre', 'ropa de mujer', 'personalidad (rasgos positivos)', 'personalidad (rasgos negativos)', 'países', 'delitos y justicia', 'militares y guerra', 'armas', 'nacionalidades', 'política y gobierno', 'religión', 'escuela y educación', 'colores y patrones', 'envases y cantidades', 'materiales y telas', 'formas y texturas', 'calendario y tiempo', 'puntos en el tiempo', 'aeropuerto y aviones', 'medios de transporte', 'el automóvil', 'la bicicleta', 'las embarcaciones', 'el barco']
numero = ['1.txt', '2.txt', '3.txt', '4.txt', '5.txt', '6.txt', '7.txt', '8.txt', '9.txt', '10.txt', '11.txt', '12.txt', '13.txt', '14.txt', '15.txt', '16.txt', '17.txt', '18.txt', '19.txt', '20.txt', '21.txt', '22.txt', '23.txt', '24.txt', '25.txt', '26.txt', '27.txt', '28.txt', '29.txt', '30.txt', '31.txt', '32.txt', '33.txt', '34.txt', '35.txt', '36.txt', '37.txt', '38.txt', '39.txt', '40.txt', '41.txt', '42.txt', '43.txt', '44.txt', '45.txt', '46.txt', '47.txt', '48.txt', '49.txt']
indice = ['pdlc', 'tyc', 'pdsye', 'myr', 'eh', 'ech', 'pdlc', 'ecdb', 'eb', 'ed', 'ec', 'ej', 'lc', 'ls', 'ecds', 'tyh', 'lf', 'g', 'pyá', 'euyec', 'et', 'fyp', 'typ', 'sye', 'edá', 'rdh', 'rdm', 'p(p', 'p(n', 'p', 'dyj', 'myg', 'a', 'n', 'pyg', 'r', 'eye', 'cyp', 'eyc', 'myt', 'fyt', 'cyt', 'peet', 'aya', 'mdt', 'ea', 'lb', 'le', 'eb']
for ii,xx in enumerate(titulo):
archivo_subtitulos = open(numero[ii],'r')
lista_texto = archivo_subtitulos.readlines()
archivo_subtitulos.close()
lista_texto_a_utilizar = []
for linea in lista_texto:
if linea.find('(') != -1:
menor = linea.find('(')
mayor = linea.find(')')
linea = linea[:menor] + linea[mayor+1:]
lista_texto_a_utilizar.append(linea)
ingles = []
español = []
for linea in lista_texto_a_utilizar:
if linea.find('-') != -1:
division = linea.find('-')
parte_ingles = linea[:division]
parte_español = linea[division+1:]
ingles.append(parte_ingles)
español.append(parte_español)
for i,x in enumerate(ingles):
ingles[i] = ingles[i].replace('\n', '')
ingles[i] = ingles[i].strip(' ')
español[i] = español[i].replace('\n', '')
español[i] = español[i].strip(' ')
impresor_lista_final(ingles, español, titulo[ii], indice[ii], 1, 8)
input()
|
[
"migue_ag18@hotmail.com"
] |
migue_ag18@hotmail.com
|
9970c718359628f934827e8b37e99333f5d2bb23
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/NumberIslands_20200723183716.py
|
0deddac0aacdbd85027aa37e835a7ea505d11b8a
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
def mark_current_island(grid, i, j, rows, cols):
    # Flood-fill: sink every connected "1" so each island is counted once
    if i < 0 or i >= rows or j < 0 or j >= cols or grid[i][j] != "1":
        return
    grid[i][j] = "0"
    mark_current_island(grid, i + 1, j, rows, cols)
    mark_current_island(grid, i - 1, j, rows, cols)
    mark_current_island(grid, i, j + 1, rows, cols)
    mark_current_island(grid, i, j - 1, rows, cols)
def Islands(arr):
    # the arr given is a 2-D grid of "1" (land) and "0" (water)
    if len(arr) == 0:
        return 0
    rows, cols = len(arr), len(arr[0])
    number_of_islands = 0
    for i in range(rows):
        for j in range(cols):
            if arr[i][j] == "1":
                mark_current_island(arr, i, j, rows, cols)
                number_of_islands += 1
    return number_of_islands
print(Islands([
    ["1","1","0","0","0"],["1","1","0","0","0"],["0","0","1","0","0"],["0","0","0","1","1"]
]))  # expected: 3
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
53965bb7e05ef6ca3a0598037ca3eeb3c05de87b
|
d41aa512f8ad7a28121121cf96f2286abc5391c3
|
/scrape_argos/spiders/argos_spider.py
|
5f859a470e55ca70e8b5c68d750aa910a60c08a1
|
[
"MIT"
] |
permissive
|
andyregan/scrape_argos
|
8b1757819b013bbdb0d0c67ee6b205455aff5ea7
|
a3cb44f29173cb4b64e8d73204aecfb40b9edfd9
|
refs/heads/master
| 2021-01-01T06:50:54.760280
| 2013-05-11T10:08:43
| 2013-05-11T10:08:43
| 9,894,606
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,982
|
py
|
from scrapy.contrib.loader import XPathItemLoader
from scrapy.contrib.spiders import XMLFeedSpider
from scrapy.http import Request
from scrape_argos.items import CatalogueItem
class ArgosSpider(XMLFeedSpider):
"""
A spider that crawls the argos.ie products sitemap and returns
CatalogueItems in a format that's easily indexed.
"""
name = "argos"
allowed_domains = ["argos.ie"]
start_urls = [
"http://www.argos.ie/product.xml"
]
# sitemap
namespaces = [('n', 'http://www.sitemaps.org/schemas/sitemap/0.9')]
itertag = 'n:loc'
# catalogue item xpaths
name_path = ".//*[@id='primaryproductinfo']/h1/text()"
catalogue_number_path = ".//*[@id='primaryproductinfo']/span/text()"
price_path = ".//*[@id='pdpForm']/div[2]/ul/li[2]/span/text()[1]"
image_src_path = ".//*[@id='mainimage']/@src"
details_path = ".//*[@id='producttabs']/div[1]"
def make_requests_from_url(self, url):
"""
Overrides the BaseSpider class in order to set the dont_redirect
Request meta.
The argos products sitemap contains a lot of links that return 302
and redirect to a search page. Not following these reduces the crawl
overhead.
"""
return Request(
url,
meta={'dont_redirect': True},
dont_filter=True
)
def parse_node(self, response, selector):
"""
Implements the XMLFeedSpider parse_node.
Parses argos.ie catalogue pages and scrapes Items.
"""
l = XPathItemLoader(
item=CatalogueItem(),
response=response
)
l.add_xpath('name', self.name_path)
l.add_xpath('catalogue_number', self.catalogue_number_path)
l.add_xpath('price', self.price_path)
l.add_xpath('image_src', self.image_src_path)
l.add_xpath('details', self.details_path)
l.add_value('url', response.url)
return l.load_item()
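# --- A minimal run sketch (added; not part of the original spider file). ---
# Assumes a modern Scrapy install where scrapy.crawler.CrawlerProcess and the
# FEEDS setting exist; the file above targets the legacy scrapy.contrib API,
# so its imports would need updating before this works.
from scrapy.crawler import CrawlerProcess
process = CrawlerProcess(settings={'FEEDS': {'catalogue.json': {'format': 'json'}}})
process.crawl(ArgosSpider)
process.start()  # blocks until the crawl finishes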
|
[
"andrewjregan@gmail.com"
] |
andrewjregan@gmail.com
|
d0175083db081277fb0f2d0146fdb3975c188091
|
9cac34ba9913efcf2bf9f328cd62de6541102547
|
/flask-restful/app.py
|
940be0749f7e7066055940361be7fcfd4bb2b811
|
[] |
no_license
|
gushiyu01/NLP
|
542fa037cda1ebf96ac8d129ce51e9ee086e8f2a
|
6d2d3dc15b3b38733010275c691fb3159027a4b3
|
refs/heads/master
| 2023-05-26T21:27:36.472838
| 2021-01-18T09:02:06
| 2021-01-18T09:02:06
| 255,058,732
| 0
| 0
| null | 2023-05-22T23:37:49
| 2020-04-12T10:30:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,177
|
py
|
from flask import Flask, request
from flask_restful import Api, Resource, reqparse
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False
app.config.update(RESTFUL_JSON=dict(ensure_ascii=False))
api = Api(app)
parser = reqparse.RequestParser()  # define a global request parser
# Define the 'data' argument; its type must be an integer
parser.add_argument('data', type=int, help='The data argument must be provided')
class HelloRestful(Resource):
def get(self):
return {'greet': 'Hello Flask RESTful!'}
# Initialize the todo list
todos = {
    'todo_1': "Read 《程序员的自我修养》 (Programmer's Self-Cultivation)",
    'todo_2': "Buy something to eat",
    'todo_3': "Go stargazing"
}
class Todo(Resource):
    # Get a todo item by its todo_id
@staticmethod
def get(todo_id):
return {todo_id: todos[todo_id]}
    # Add a new todo item
@staticmethod
def post(todo_id):
        # Parse and validate the arguments defined in the parser
parser.parse_args()
todos[todo_id] = request.form['data']
return {todo_id: todos[todo_id]}
api.add_resource(HelloRestful, '/')
api.add_resource(Todo, '/todo/<string:todo_id>')
if __name__ == '__main__':
app.run(debug=True, port=8080)
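# --- A minimal client sketch (added; not part of the original app). ---
# Assumes the API above is running locally on port 8080 and that the
# third-party `requests` library is installed.
import requests
base = 'http://127.0.0.1:8080'
print(requests.get(f'{base}/').json())             # {'greet': 'Hello Flask RESTful!'}
print(requests.get(f'{base}/todo/todo_1').json())  # fetch a single todo item
resp = requests.post(f'{base}/todo/todo_4', data={'data': 42})
print(resp.json())                                 # echoes the newly added item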
|
[
"gushiyu@ictbda.cn"
] |
gushiyu@ictbda.cn
|
793a5b14a11afa9be26bd490eba05252d9be3f6b
|
f065e84587501a10259e113f39fc8ea7140cdd1e
|
/Projects/p2_w2.py
|
3bc57bbb4ce6e9c5ad7631c14b752b97be714ea0
|
[] |
no_license
|
jjivad/my-tries
|
8bf7e7cb83483935c02688f8d35f7a392db57529
|
78be243c836f46d82041270906b7a380e576ff89
|
refs/heads/master
| 2020-07-26T21:18:29.135660
| 2019-09-16T10:15:09
| 2019-09-16T10:15:09
| 208,768,263
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,327
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 10 14:34:04 2019
@author: VIJ Global
"""
balance = int(input("please put your balance on your credit card: "))
annualInterestRate = float(input("please put your annual interest rate in decimal: "))
monthlyInterestRate = annualInterestRate/12
i = 0
month = 0
monthlyPaymentLower = int (round((balance/12),-1))
monthlyPaymentUpper = int (round(((balance*(1+monthlyInterestRate)**12)/12.0),-1))
amount = int(round(((monthlyPaymentLower+monthlyPaymentUpper)/2),-1))
diff = 0.01
while abs(amount-balance) >= diff:
if amount < balance and i < 12:
monthlyPaymentUpper = amount
i += 1
print("month: ", month, "amount: ",amount)
else:
break
month += 1
amount = int(round (amount,-1))
print ("Lowest Payment: ", amount, "after ", month, " months")
"""___Real code without bisection___
monthlyPaymentRate = 0
init_balance = balance
monthlyInterestRate = annualInterestRate/12
while balance > 0:
for i in range(12):
balance = balance - monthlyPaymentRate + ((balance - monthlyPaymentRate) * monthlyInterestRate)
if balance > 0:
monthlyPaymentRate += 10
balance = init_balance
elif balance <= 0:
break
print('Lowest Payment:', monthlyPaymentRate)
"""
|
[
"noreply@github.com"
] |
jjivad.noreply@github.com
|
4bc9fefe6b6810184dc76d0ee18f7fe052cc225c
|
0f6e2db3a768d3f28304f23c80ca0e6c32a0b0c9
|
/models/transaction_models.py
|
1c7f8984bb392d4d41091b9dc5fb2223d5ec365e
|
[] |
no_license
|
jorge2692/cajero-api
|
210c5058d653290f06cafc811a77255ed8ac2f8a
|
98aea31c54bc94e6c808f68de3d506c3fcc99077
|
refs/heads/master
| 2023-01-24T18:32:24.203416
| 2020-12-13T01:08:32
| 2020-12-13T01:08:32
| 318,190,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
from pydantic import BaseModel
from datetime import datetime
class TransactionIn(BaseModel):
username: str
value: int
class TransactionOut(BaseModel):
id_transaction: int
username: str
date: datetime
value: int
actual_balance: int
|
[
"jorge@MacBook-Pro.local"
] |
jorge@MacBook-Pro.local
|
a543e63968aea961343d8d01676f463aa1d85e01
|
7b41f5cdff1569dcf6b6c20fd36eb09e1c861771
|
/Game/001/001.py
|
d9df6faa4b4d57455c1e116324514cf5ef0b4304
|
[] |
no_license
|
Lynn524552751/Flynn
|
7a898fb78bce6e87e3c6e49271eb8b3953a54a05
|
b99183f2fb1b40a24212f0939c066aeac7608e3f
|
refs/heads/master
| 2018-09-26T23:59:48.288734
| 2018-09-13T10:48:25
| 2018-09-13T10:48:25
| 114,985,179
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
#coding:utf-8
num = 0
xx = [2,3,5,6,8,9]
# bw - hundreds digit, sw - tens digit, gw - units digit, h2 - last two digits
for i in range(10,100):
for j in range(10, 100):
num = i*j;
if num<2000:
bw = int(num % 1000)
bw = int(bw/100)
h2w = int(num % 100)
sw = int(h2w/10)
gw = int(h2w%10)
if sw == 4 and gw in xx and bw in xx:
isw = int(i / 10)
igw = int(i % 10)
jsw = int(j / 10)
jgw = int(j % 10)
if isw in xx and igw in xx and jsw in xx and jgw in xx:
print('%s X %s = %s'%(str(i),str(j),str(num)))
#print(num)
#28 X 66 = 1848
#32 X 39 = 1248
#33 X 56 = 1848
#39 X 32 = 1248
#56 X 33 = 1848
#66 X 28 = 1848
|
[
"524552751@qq.com"
] |
524552751@qq.com
|
1689e0603d95b671d46b7a4702509b2d8885653b
|
b1caa409422f6fe6d0802689857fde31e2bebafa
|
/cms/migrations/0003_auto_20200605_1312.py
|
1e9db50dfb361e58b14f1ee8c5828e293fb2dcb1
|
[] |
no_license
|
LelikovAlexandr/BlogCMS
|
a9df8b5d313ef54f5607817d7c3330f172d90d43
|
b081c90f359d57960baac91a8be3d8e7481ca537
|
refs/heads/master
| 2023-03-07T21:54:02.779849
| 2021-02-16T07:14:33
| 2021-02-16T07:14:33
| 266,382,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
# Generated by Django 3.0.6 on 2020-06-05 13:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0002_auto_20200605_1251'),
]
operations = [
migrations.AlterModelOptions(
name='price',
options={'ordering': ['number_of_months']},
),
]
|
[
"houda.lamsaaf@ump.ac.ma"
] |
houda.lamsaaf@ump.ac.ma
|
67e7042acd1d1694f5043145c286b876ae67e8fb
|
6d52a9760ee2118654ec6a7dbe294b6048d3a4c6
|
/Mission_to_Mars_Challenge.py
|
f2c11c6a47ece62e67366abf0ab38437212dbf05
|
[] |
no_license
|
azarowj/Mission-to-Mars
|
227bddc9e5a08b4a4b77c87a3a25fa5de1a05a3e
|
4ae153ead96cff4ce19ed6402f7a3610981a7836
|
refs/heads/main
| 2023-04-14T13:33:00.936431
| 2021-04-27T23:22:31
| 2021-04-27T23:22:31
| 357,365,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,962
|
py
|
#!/usr/bin/env python
# coding: utf-8
# Import Splinter and BeautifulSoup
from splinter import Browser
from bs4 import BeautifulSoup as soup
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
# Visit the mars nasa news site
url = 'https://redplanetscience.com'
browser.visit(url)
# Optional delay for loading the page
browser.is_element_present_by_css('div.list_text', wait_time=1)
html = browser.html
news_soup = soup(html, 'html.parser')
slide_elem = news_soup.select_one('div.list_text')
slide_elem.find('div', class_ = 'content_title')
# Use the parent element to find the first `a` tag and save it as `news_title`
news_title = slide_elem.find('div', class_= 'content_title').get_text()
news_title
# Use the parent element to find the paragraph text
news_p = slide_elem.find('div', class_='article_teaser_body').get_text()
news_p
# ### Featured Images
# Visit URL
url = 'https://spaceimages-mars.com'
browser.visit(url)
# Find and click the full image button
full_image_elem = browser.find_by_tag('button')[1]
full_image_elem.click()
# Parse the resulting html with soup
html = browser.html
img_soup = soup(html, 'html.parser')
# Find the relative image url
img_url_rel = img_soup.find('img', class_='fancybox-image').get('src')
img_url_rel
# Use the base URL to create an absolute URL
img_url = f'https://spaceimages-mars.com/{img_url_rel}'
img_url
df = pd.read_html('https://galaxyfacts-mars.com')[0]
df.columns=['description', 'Mars', 'Earth']
df.set_index('description', inplace=True)
df
df.to_html()
browser.quit()
# Beginning of Challenge Starter Code
# Import Splinter, BeautifulSoup, and Pandas
from splinter import Browser
from bs4 import BeautifulSoup as soup
import pandas as pd
from webdriver_manager.chrome import ChromeDriverManager
# Set the executable path and initialize Splinter
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path)
# Visit the NASA Mars News Site
# Visit the mars nasa news site
url = 'https://data-class-mars.s3.amazonaws.com/Mars/index.html'
browser.visit(url)
# Optional delay for loading the page
browser.is_element_present_by_css('div.list_text', wait_time=1)
# Convert the browser html to a soup object and then quit the browser
html = browser.html
news_soup = soup(html, 'html.parser')
slide_elem = news_soup.select_one('div.list_text')
slide_elem.find('div', class_='content_title')
# Use the parent element to find the first a tag and save it as `news_title`
news_title = slide_elem.find('div', class_='content_title').get_text()
news_title
# Use the parent element to find the paragraph text
news_p = slide_elem.find('div', class_='article_teaser_body').get_text()
news_p
# JPL Space Images Featured Image
# Visit URL
url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html'
browser.visit(url)
# Find and click the full image button
full_image_elem = browser.find_by_tag('button')[1]
full_image_elem.click()
# Parse the resulting html with soup
html = browser.html
img_soup = soup(html, 'html.parser')
img_soup
# find the relative image url
img_url_rel = img_soup.find('img', class_='fancybox-image').get('src')
img_url_rel
# Use the base url to create an absolute url
img_url = f'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/{img_url_rel}'
img_url
# Mars Facts
df = pd.read_html('https://data-class-mars-facts.s3.amazonaws.com/Mars_Facts/index.html')[0]
df.head()
df.columns=['Description', 'Mars', 'Earth']
df.set_index('Description', inplace=True)
df
df.to_html()
# D1: Scrape High-Resolution Mars’ Hemisphere Images and Titles
# Hemispheres
# 1. Use browser to visit the URL
url = 'https://data-class-mars-hemispheres.s3.amazonaws.com/Mars_Hemispheres/index.html'
browser.visit(url)
# 2. Create a list to hold the images and titles.
hemisphere_image_urls = []
# 3. Write code to retrieve the image urls and titles for each hemisphere.
hemi_html = browser.html
hemi_soup = soup(hemi_html, 'html.parser')
items = hemi_soup.find_all('div', class_='item')
for i in items:
url_to_image = i.find('a', class_='itemLink product-item')['href']
browser.visit(f'https://data-class-mars-hemispheres.s3.amazonaws.com/Mars_Hemispheres/{url_to_image}')
img_html = browser.html
img_soup = soup(img_html, 'html.parser')
img_url_rel = img_soup.find('img', class_='wide-image').get('src')
img_url = f'https://data-class-mars-hemispheres.s3.amazonaws.com/Mars_Hemispheres/{img_url_rel}'
title = img_soup.find('h2', class_='title').get_text()
hemisphere_image_urls.append({'img_url': img_url, 'title': title})
browser.visit(url)
# 4. Print the list that holds the dictionary of each image url and title.
hemisphere_image_urls
# 5. Quit the browser
browser.quit()
|
[
"JAzarow@azarowj.lan"
] |
JAzarow@azarowj.lan
|
e8c46ab606c7cc5f2311830a5ed1932bb0e09370
|
4f9b506cc86cbc9d1a29caea572c19fbfed64366
|
/CSD2a/python_basics/src/hello_world.py
|
a129e653a0f9420a26df93f9e31ed58ba526ffe2
|
[] |
no_license
|
w-ensink/CSD2
|
af8259d71e30f89e32ab1a1ddc81ca121feb63dd
|
a11adf905df605ae35ef23b8313327cdd8597143
|
refs/heads/master
| 2023-02-13T08:25:29.915490
| 2021-01-05T09:56:56
| 2021-01-05T09:56:56
| 291,936,876
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 92
|
py
|
print('Hello, World!')
name = input('Please enter your name: ')
print(f'Hello, {name}!')
|
[
"wouterensink3@gmail.com"
] |
wouterensink3@gmail.com
|
071ce847ec912c1a9566b2cd11c79e32f921ad72
|
2ce68cfbb8d893a29890e15e155593c357f68ffc
|
/tests/test_board_analyzer.py
|
48bf0a315774118b704778998c754ddc55e098f4
|
[] |
no_license
|
yamakanto/Domineering
|
209fad4cd06f37b3a0bd7a1067d43ba32301ad06
|
8b1553e31fd4a3836286b756afb1cbae051036f5
|
refs/heads/main
| 2023-02-15T01:33:32.288760
| 2021-01-07T12:47:11
| 2021-01-07T12:47:11
| 324,605,710
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,745
|
py
|
import unittest
from domineering.board import Board
from domineering.board_analyzer import *
class TestBoardAnalyzer(unittest.TestCase):
def test_can_make_move_horizontal(self):
board = Board.from_string('EEV;EVV;VEE')
self.assertTrue(can_make_move(board, (0, 0), False))
self.assertTrue(can_make_move(board, (2, 1), False))
self.assertFalse(can_make_move(board, (0, 1), False))
self.assertFalse(can_make_move(board, (1, 1), False))
self.assertFalse(can_make_move(board, (2, 0), False))
def test_can_make_move_vertical(self):
board = Board.from_string('EEV;EVV;VEE')
self.assertTrue(can_make_move(board, (0, 0), True))
self.assertFalse(can_make_move(board, (2, 1), True))
self.assertFalse(can_make_move(board, (0, 1), True))
self.assertFalse(can_make_move(board, (1, 1), True))
self.assertFalse(can_make_move(board, (2, 0), True))
def test_count_horizontal_moves_empty_skip(self):
board = Board.from_string('EEE;EEE;EEE')
count = count_moves_horizontal(board, True)
self.assertEqual(count, 3)
def test_count_horizontal_moves_empty(self):
board = Board.from_string('EEE;EEE;EEE')
count = count_moves_horizontal(board, False)
self.assertEqual(count, 6)
def test_count_vertical_moves_empty_skip(self):
board = Board.from_string('EEE;EEE;EEE')
count = count_moves_vertical(board, True)
self.assertEqual(count, 3)
def test_count_vertical_moves_empty(self):
board = Board.from_string('EEE;EEE;EEE')
count = count_moves_vertical(board, False)
self.assertEqual(count, 6)
def test_is_safe_move_horizontal(self):
board = Board.from_string('EEEE;HHEE;EEEE')
self.assertTrue(is_safe_move(board, (0, 0), False))
self.assertFalse(is_safe_move(board, (0, 1), False))
self.assertFalse(is_safe_move(board, (0, 2), False))
def test_count_safe_moves_horizontal_skip(self):
board = Board.from_string('EEEE;HHHE;EEEE')
count = count_safe_moves_horizontal(board, True)
self.assertEqual(count, 2)
def test_count_safe_moves_horizontal(self):
board = Board.from_string('EEEE;HHHE;EEEE')
count = count_safe_moves_horizontal(board, False)
self.assertEqual(count, 4)
def test_count_safe_moves_vertical_skip(self):
board = Board.from_string('EVE;EVE;EVE;EEE')
count = count_safe_moves_vertical(board, True)
self.assertEqual(count, 2)
def test_count_safe_moves_vertical(self):
board = Board.from_string('EVE;EVE;EVE;EEE')
count = count_safe_moves_vertical(board, False)
self.assertEqual(count, 4)
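if __name__ == '__main__':
    # Added for convenience (not in the original file): allows running this
    # test module directly, in addition to `python -m unittest`.
    unittest.main()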
|
[
"t.forner@tum.de"
] |
t.forner@tum.de
|
e3d01b107c2fe4527937f7247c7987250cb46730
|
c91c3bd6f29a0d42ede2d5516c0dad9eefcf6b18
|
/wiley_book/ch7_assortativeMating.py
|
8b546290dfc0cfcded8934249c963d6d00e06a19
|
[] |
no_license
|
BoPeng/simuPOP-examples
|
bcc76406411705977b10ed33bce4e66ad126b8c1
|
79ac604bf7ab7ffe4affcc885d521cf0ce7999be
|
refs/heads/master
| 2021-01-09T06:05:56.290019
| 2020-03-04T20:48:58
| 2020-03-04T20:48:58
| 80,897,728
| 1
| 7
| null | 2019-02-28T13:50:51
| 2017-02-04T06:04:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,941
|
py
|
import simuPOP as sim
from random import normalvariate
sigma = 1
def traits(geno):
'genotypes are arranged as a1a2b1b2c1c2... where a,b,c are specified loci'
A = sum(geno[:20]) + normalvariate(0, 2.5)
B = sum(geno[20:40]) + normalvariate(0, 2.5)
I = sum(geno[40:60]) + normalvariate(0, 2.5)
D = B + I - A + normalvariate(0, sigma**2)
return A, B, I, D
pop = sim.Population(100000, loci=[1]*40, infoFields=['A', 'B', 'I', 'D'])
pop.evolve(
initOps=[
sim.InitSex(maleProp=0.5),
sim.InitGenotype(freq=[0.5, 0.5]),
],
preOps=[
sim.PyQuanTrait(func=traits, loci=sim.ALL_AVAIL,
infoFields=['A', 'B', 'I', 'D']),
sim.PyOperator(func=lambda pop: pop.sortIndividuals('D') is None),
],
matingScheme=sim.HomoMating(
chooser=sim.SequentialParentsChooser(),
generator=sim.OffspringGenerator(
ops=sim.MendelianGenoTransmitter(),
numOffspring=2, sexMode=(sim.NUM_OF_MALES, 1))
),
finalOps=sim.PyQuanTrait(func=traits, loci=sim.ALL_AVAIL,
infoFields=['A', 'B', 'I', 'D']),
gen=10
)
from rpy import r
def genoTraitCorrelation(loc, trait):
'Calculate correlation between trait and genotype at a locus'
geno = [ind.allele(loc,0) + ind.allele(loc,1) for ind in pop.individuals()]
qtrait = pop.indInfo(trait)
return r.cor(geno, qtrait)
# correlation between genotype at A loci with trait A
AA = [genoTraitCorrelation(loc, 'A') for loc in range(10)]
print(', '.join(['%.3f' % abs(x) for x in AA]))
# correlation between genotype at A loci with trait B (spurious)
AB = [genoTraitCorrelation(loc, 'B') for loc in range(10)]
print(', '.join(['%.3f' % abs(x) for x in AB]))
# correlation between genotype at unrelated loci with trait A
UA = [genoTraitCorrelation(loc, 'A') for loc in range(30, 40)]
print(', '.join(['%.3f' % abs(x) for x in UA]))
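# --- Alternative without rpy (added; not part of the original example). ---
# The legacy `rpy` bridge above can be replaced by NumPy's corrcoef,
# assuming the same `pop` object is in scope.
import numpy as np
def genoTraitCorrelation_np(loc, trait):
    geno = [ind.allele(loc, 0) + ind.allele(loc, 1) for ind in pop.individuals()]
    qtrait = pop.indInfo(trait)
    return np.corrcoef(geno, qtrait)[0, 1]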
|
[
"ben.bog@gmail.com"
] |
ben.bog@gmail.com
|
bda8d6a4be1ebc02d63eb97491450aa5aa1cb42c
|
977d51ea06d78cfd7d75b8ab6545890843c0710c
|
/Notebooks/Advanced_Keras/NN_scratch_TF.py
|
3e9798737f6450216e50336fe860d5ffdcbf7cef
|
[
"MIT"
] |
permissive
|
Tech-at-DU/ACS-4511-Core-Apps-of-AI
|
ebf269e458f20fb02cbcd0a23c99c12ec04e0e28
|
a3c22cfe71ff9db52926905fb321b74b66834165
|
refs/heads/master
| 2023-08-22T07:51:04.851696
| 2021-10-24T01:24:16
| 2021-10-24T01:24:16
| 390,577,389
| 0
| 0
|
MIT
| 2021-10-21T18:37:55
| 2021-07-29T02:40:15
| null |
UTF-8
|
Python
| false
| false
| 3,334
|
py
|
import numpy as np
import tensorflow as tf
rng = np.random
# check this out:
# https://www.analyticsvidhya.com/blog/2017/05/neural-network-from-scratch-in-python-and-r/
# Input array
X_data=np.array([[1.0, 0.0, 1.0, 0.0],[1.0, 0.0, 1.0, 1.0],[0.0, 1.0, 0.0, 1.0]])
#Output
y_data=np.array([[1.0],[1.0],[0.0]])
#Variable initialization
epoch=5000 #Setting training iterations
lr=0.1 #Setting learning rate
# tf Graph Input
X = tf.placeholder(shape=(1, 4), dtype= tf.float64)
Y = tf.placeholder(shape=(1,), dtype= tf.float64)
# Set model hidden layer weights and bias
W_h = tf.Variable(rng.randn(4, 3), name="weight1")
b_h = tf.Variable(rng.randn(1, 3), name="bias1")
# Set model output layer weights and bias
W_o = tf.Variable(rng.randn(3, 1), name="weight2")
b_o = tf.Variable(rng.randn(1, 1), name="bias2")
# Construct a linear model
h = tf.nn.sigmoid(tf.add(tf.matmul(X, W_h), b_h))
pred = tf.nn.sigmoid(tf.add(tf.matmul(h, W_o), b_o))
# with tf.GradientTape() as t:
# t.watch([W_h])
E = tf.reduce_sum(tf.pow(pred - Y, 2))
dE_dW_h = tf.gradients(E, [W_h])[0]
dE_db_h = tf.gradients(E, [b_h])[0]
dE_dW_o = tf.gradients(E, [W_o])[0]
dE_db_o = tf.gradients(E, [b_o])[0]
# numpy implementation of sigmoid function
def sigmoid(x):
return 1 / (1 + np.exp(-x))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
W_h_i = np.random.randn(4, 3)
b_h_i = np.random.randn(1, 3)
W_o_i = np.random.randn(3, 1)
b_o_i = np.random.randn(1, 1)
for i in range(2000):
for batch in range(3):
# Feed_forward: We do not need it because we know the model as defined above
# Feed_Backward
evaluated_dE_dW_h = sess.run(dE_dW_h,
feed_dict={W_h: W_h_i, b_h: b_h_i, W_o: W_o_i, b_o: b_o_i,
X: np.array([X_data[batch]]), Y: np.array(y_data[batch])})
W_h_i = W_h_i - 0.1 * evaluated_dE_dW_h
evaluated_dE_db_h = sess.run(dE_db_h,
feed_dict={W_h: W_h_i, b_h: b_h_i, W_o: W_o_i, b_o: b_o_i,
X: np.array([X_data[batch]]), Y: np.array(y_data[batch])})
b_h_i = b_h_i - 0.1 * evaluated_dE_db_h
evaluated_dE_dW_o = sess.run(dE_dW_o,
feed_dict={W_h: W_h_i, b_h: b_h_i, W_o: W_o_i, b_o: b_o_i,
X: np.array([X_data[batch]]), Y: np.array(y_data[batch])})
W_o_i = W_o_i - 0.1 * evaluated_dE_dW_o
evaluated_dE_db_o = sess.run(dE_db_o,
feed_dict={W_h: W_h_i, b_h: b_h_i, W_o: W_o_i, b_o: b_o_i,
X: np.array([X_data[batch]]), Y: np.array(y_data[batch])})
b_o_i = b_o_i - 0.1 * evaluated_dE_db_o
print(W_h_i)
# Check that model provide good result
for i in range(3):
hidden_layer_input1 = np.dot(X_data[i], W_h_i)
hidden_layer_input = hidden_layer_input1 + b_h_i
hidden_layer_activations = sigmoid(hidden_layer_input)
output_layer_input1 = np.dot(hidden_layer_activations, W_o_i)
output_layer_input = output_layer_input1 + b_o_i
output = sigmoid(output_layer_input)
print(output)
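# Note (added): this file targets the TensorFlow 1.x graph API
# (tf.placeholder / tf.Session). On TensorFlow 2.x it can usually be run
# unchanged through the compatibility shim:
#   import tensorflow.compat.v1 as tf
#   tf.disable_v2_behavior()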
|
[
"miladtoutounchian@Milads-MacBook-Pro.local"
] |
miladtoutounchian@Milads-MacBook-Pro.local
|
a5c9cc9df4bd99702e27a912b9f37f7f32b33abe
|
d094ba0c8a9b1217fbf014aa79a283a49aabe88c
|
/env/lib/python3.6/site-packages/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py
|
328a8e327270073da52a17adab6fad01052ef8f3
|
[
"Apache-2.0"
] |
permissive
|
Raniac/NEURO-LEARN
|
d9274e0baadd97bb02da54bdfcf6ca091fc1c703
|
3c3acc55de8ba741e673063378e6cbaf10b64c7a
|
refs/heads/master
| 2022-12-25T23:46:54.922237
| 2020-09-06T03:15:14
| 2020-09-06T03:15:14
| 182,013,100
| 9
| 2
|
Apache-2.0
| 2022-12-09T21:01:00
| 2019-04-18T03:57:00
|
CSS
|
UTF-8
|
Python
| false
| false
| 2,273
|
py
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..preprocess import PRELUDE
def test_PRELUDE_inputs():
input_map = dict(
args=dict(argstr='%s', ),
complex_phase_file=dict(
argstr='--complex=%s',
mandatory=True,
xor=['magnitude_file', 'phase_file'],
),
end=dict(argstr='--end=%d', ),
environ=dict(
nohash=True,
usedefault=True,
),
label_file=dict(
argstr='--labels=%s',
hash_files=False,
),
labelprocess2d=dict(argstr='--labelslices', ),
magnitude_file=dict(
argstr='--abs=%s',
mandatory=True,
xor=['complex_phase_file'],
),
mask_file=dict(argstr='--mask=%s', ),
num_partitions=dict(argstr='--numphasesplit=%d', ),
output_type=dict(),
phase_file=dict(
argstr='--phase=%s',
mandatory=True,
xor=['complex_phase_file'],
),
process2d=dict(
argstr='--slices',
xor=['labelprocess2d'],
),
process3d=dict(
argstr='--force3D',
xor=['labelprocess2d', 'process2d'],
),
rawphase_file=dict(
argstr='--rawphase=%s',
hash_files=False,
),
removeramps=dict(argstr='--removeramps', ),
savemask_file=dict(
argstr='--savemask=%s',
hash_files=False,
),
start=dict(argstr='--start=%d', ),
threshold=dict(argstr='--thresh=%.10f', ),
unwrapped_phase_file=dict(
argstr='--unwrap=%s',
genfile=True,
hash_files=False,
),
)
inputs = PRELUDE.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_PRELUDE_outputs():
output_map = dict(unwrapped_phase_file=dict(), )
outputs = PRELUDE.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
[
"leibingye@outlook.com"
] |
leibingye@outlook.com
|
7cbbb2a8bce7bf3714e97be1e506397c1947a744
|
07b1331532346b7d423fac07fbbbf2ebc72acf98
|
/backend/project/models/dto.py
|
23fb38167249a85dc5a20845f4f082873a38597e
|
[] |
no_license
|
glebapaulina/itmProj
|
67465b16b15a7275a63b21c0b47fb0056436a988
|
695c2f7402aebee756dd98583324238a8e70cbd5
|
refs/heads/master
| 2020-03-12T07:42:05.565247
| 2018-04-21T20:54:39
| 2018-04-21T20:54:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
class WynikOperacji:
def __init__(self, id=None, wynik=False):
self.id = str(id)
self.wynik = wynik
class WynikRejestracji:
def __init__(self, accessToken=None, refreshToken=None, dodanoUzytkownika=False):
self.accessToken = accessToken
self.refreshToken = refreshToken
self.dodanoUzytkownika = dodanoUzytkownika
class WynikOdswiezeniaTokena:
def __init__(self, accessToken=None, refreshToken=None, odswiezonoPoprawnie=False):
self.accessToken = accessToken
self.refreshToken = refreshToken
self.odswiezonoPoprawnie = odswiezonoPoprawnie
|
[
"michalszmyt95@gmail.com"
] |
michalszmyt95@gmail.com
|
da8ec61d45afc243533780292ffb545c670d1743
|
3624e9f0a026b57ebdafa4e842b93f56e5a8504d
|
/Codeforces/Rockethon 2015/Problem B/a.py
|
dc6fcf72922e601ae0f17845097b2fbfcfa28ff7
|
[
"MIT"
] |
permissive
|
ailyanlu1/Competitive-Programming-2
|
54109c8644d3ac02715dc4570916b212412c25c0
|
6c990656178fb0cd33354cbe5508164207012f24
|
refs/heads/master
| 2020-03-23T07:48:20.560283
| 2018-02-15T06:49:49
| 2018-02-15T06:49:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
n, m = map(int, input().split())
l = [0] * n
p = [1]
for i in range(n - 1):
    p.append(p[-1] * 2)  # p[k] = 2**k arrangements remain after k choices
cnt = 0
first = 0
last = n - 1
i = 2
while cnt < n:
    # Place the next number at the front or the back, walking down the
    # ranks counted by the powers of two in p.
    if m <= p[n - i]:
        l[first] = cnt + 1
        first += 1
    else:
        l[last] = cnt + 1
        last -= 1
        m -= p[n - i]
    cnt += 1
    i += 1
print(' '.join(str(x) for x in l))
|
[
"adityapaliwal95@gmail.com"
] |
adityapaliwal95@gmail.com
|
7822a747b3837c20cada8ceca0c20dbeb0055a06
|
2af1e6357f51d0d08b1a991e2bd922b7bdc8c0b6
|
/baekjoon/accepted/그래프, DP/1005 ACM Craft.py
|
7457a165206368ceb76255fbd70075af10846505
|
[] |
no_license
|
grasshopperTrainer/coding_practice
|
530e9912b10952c866d35d69f12c99b96959a22d
|
d1e5e6d6fa3f71f1a0105940fff1785068aec8b0
|
refs/heads/master
| 2023-06-01T13:30:15.362657
| 2021-06-08T08:40:15
| 2021-06-08T08:40:15
| 267,359,225
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,042
|
py
|
from sys import stdin, setrecursionlimit
setrecursionlimit(101100)
def solution(N, K, T, G, E):
    # use 1-based indexing
    graph = [[] for i in range(N + 1)]  # for each building, the buildings that must be finished before it
for before, current in E:
graph[current].append(before)
# search using dfs
dp = [None for _ in range(N+1)] # None for not recorded
def dfs(at):
if dp[at] is not None:
return dp[at]
        max_time = 0  # max time to finish all prerequisite buildings
for prev in graph[at]:
max_time = max(max_time, dfs(prev))
# record to reference afterward
dp[at] = max_time + T[at-1] # -1 for 0indexing
return dp[at]
return dfs(G)
for _ in range(int(stdin.readline())):
N, K = [int(c) for c in stdin.readline().strip().split(' ')]
T, E = 0, []
for i in range(K + 1):
row = [int(c) for c in stdin.readline().strip().split(' ')]
if i == 0:
T = row
else:
E.append(row)
G = int(stdin.readline())
print(solution(N, K, T, G, E))
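# Hand-check (added; a standard sample for this task): with T = [10, 1, 100, 10]
# and rules 1->2, 1->3, 2->4, 3->4, building 4 finishes at 10 + 100 + 10 = 120.
# print(solution(4, 4, [10, 1, 100, 10], 4, [[1, 2], [1, 3], [2, 4], [3, 4]]))  # 120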
|
[
"grasshoppertrainer@gmail.com"
] |
grasshoppertrainer@gmail.com
|
887bb1aaf0e753e2feb192c0d6136742fdf0f3e9
|
127f5185a70aed31bcdb4251e46e1fbca0bafe1c
|
/util/src/util/knowledge_base/type.py
|
de745a4479aa18cdc4526f902f98e4cc4721167b
|
[] |
no_license
|
Evana13G/RAPDR_babble
|
66eaee5f4b225c46234157a3afb014d8f0454d80
|
58db492b918ae0407004c1fe7b18b5c13378cede
|
refs/heads/master
| 2023-02-17T07:20:09.012859
| 2021-01-17T20:40:46
| 2021-01-17T20:40:46
| 228,928,211
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
#!/usr/bin/env python
class Type(object):
def __init__(self, parent, children):
self.parentType = parent
self.childrenTypes = children
def getChildrenTypes(self):
return self.childrenTypes
def __str__(self):
s = ''
for t in self.childrenTypes:
s = s + t + ' '
s = s + '- ' + self.parentType
return s
|
[
"Evana13G@gmail.com"
] |
Evana13G@gmail.com
|
184f6f49d56c5a523456c098d4c4e773d37d12ae
|
6da945420ecf40a797ae46528d48f2972216164f
|
/Uppgift1 september2020.py
|
f9aa4ae90c64e9eee8b7abbfe90e3d259c97b2b9
|
[] |
no_license
|
callen1991/kyh-practice
|
4e0eb367575fa3fdc5f63ae5383168f98dd7ec37
|
dec016b3f188207ba3e6198c4c0fd12e895ed9f9
|
refs/heads/master
| 2022-12-13T00:53:06.580251
| 2020-09-03T09:08:30
| 2020-09-03T09:08:30
| 291,671,539
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
Right_answer = "print"
correct_answer = True
input("Vilken funktion används för att skriva ut saker på skärmen? ")
print("Ditt svar: print")
print ("Rätt!")
|
[
"carladam.tornkvist@student.kyh.se"
] |
carladam.tornkvist@student.kyh.se
|
0c19e7e1ce7c1b4fbad6def56d68b9d947efdb72
|
303416ce779a19dd37228d843f66b8466bba06fb
|
/benchmarks/operator_benchmark/pt/groupnorm_test.py
|
c1c638902af2c51c706bebc804c0d01a5f43185b
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
linziyi96/pytorch
|
dc3f5f4c7539a81e3a368c799065a5557af6bbd2
|
c362138f4380c11ddeb07d7e7e34d75300091597
|
refs/heads/master
| 2021-02-10T09:51:31.802098
| 2020-06-25T15:49:04
| 2020-06-25T15:54:05
| 256,582,228
| 4
| 3
|
NOASSERTION
| 2020-04-17T18:38:38
| 2020-04-17T18:38:37
| null |
UTF-8
|
Python
| false
| false
| 1,095
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import operator_benchmark as op_bench
import torch
import torch.nn.functional as F
"""Microbenchmarks for groupnorm operator."""
groupnorm_configs_short = op_bench.cross_product_configs(
dims=(
(32, 8, 16),
(32, 8, 56, 56),
),
num_groups=(2, 4),
tags=["short"],
)
class GroupNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims, num_groups):
self.X = (torch.rand(*dims) - 0.5) * 256
self.num_groups = num_groups
num_channels = dims[1]
self.weight = torch.rand(num_channels, dtype=torch.float)
self.bias = torch.rand(num_channels, dtype=torch.float)
self.eps = 1e-5
def forward(self):
return F.group_norm(
self.X, self.num_groups, weight=self.weight, bias=self.bias, eps=self.eps)
op_bench.generate_pt_test(groupnorm_configs_short, GroupNormBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
9a1d5bdebdf0044481c583792bccb114722a2f42
|
7ba48f82dac0c19d41d7da51cda3aef5173dd77c
|
/scheduler/models.py
|
b43d43e38b60bb133f1ca80f06c8be369c499f1e
|
[] |
no_license
|
saiful7/Betasmartz
|
09af8b11f816adf3c2dc41ad5a70f170d6dbb981
|
337a79b59498f42294f19e53eea9cd1c8019ee48
|
refs/heads/master
| 2022-05-10T04:14:31.382569
| 2018-11-23T06:50:45
| 2018-11-23T06:50:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,217
|
py
|
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from jsonfield.fields import JSONField
from .constants import SCHEDULE_DELIVERY_CYCLE_CHOICES, SCHEDULE_DELIVERY_DAILY, \
SCHEDULE_TYPE_CHOICES, SCHEDULE_TYPE_LIVE_PORTFOLIO_REPORT, \
SCHEDULE_WEEKDAY_CHOICES
from .utils import should_run_schedule
class Schedule(models.Model):
schedule_type = models.CharField(max_length=64,
choices=SCHEDULE_TYPE_CHOICES,
default=SCHEDULE_TYPE_LIVE_PORTFOLIO_REPORT)
delivery_cycle = models.CharField(max_length=32,
choices=SCHEDULE_DELIVERY_CYCLE_CHOICES,
default=SCHEDULE_DELIVERY_DAILY)
day = models.PositiveIntegerField(null=True, blank=True,
help_text=_('Day of week (0 Mon - 6 Sun), or month (1 - 31), or quarter (1 - 90) based on delivery cycle'))
time = models.TimeField(null=True, blank=True, help_text=_('Time'))
timezone = models.CharField(max_length=32, default='UTC', help_text=_('ISO timezone name'))
meta = JSONField(null=True, blank=True)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, db_index=True)
object_id = models.PositiveIntegerField(db_index=True)
owner = GenericForeignKey('content_type', 'object_id')
class Meta:
unique_together = ('content_type', 'object_id')
def __str__(self):
return 'Schedule for {}'.format(self.owner)
def should_run_schedule(self):
return should_run_schedule(self)
class SingleScheduleMixin(object):
@cached_property
def schedule(self):
ctype = ContentType.objects.get_for_model(self.__class__)
        try:
            return Schedule.objects.get(content_type__pk=ctype.id, object_id=self.id)
        except Schedule.DoesNotExist:
            return None
|
[
"31435513+blueskaie@users.noreply.github.com"
] |
31435513+blueskaie@users.noreply.github.com
|
ba52db75fb59ae3cea092f828953cf7d9751812d
|
da71f159a4e64b04f30438fd8d881886928241a9
|
/ACM/LintCodeInPython/string_to_integer_ii.py
|
622d7865c6191456eb007cb551dacd3718364280
|
[] |
no_license
|
zeroonechange/python
|
0155b980d2a93069a1701ac74ab51c5695388644
|
6cd3f940666657da9a4bba8c5239db84cf39928a
|
refs/heads/master
| 2021-04-18T11:23:16.999942
| 2018-11-18T05:09:31
| 2018-11-18T05:09:31
| 98,433,535
| 0
| 0
| null | 2017-07-26T14:42:26
| 2017-07-26T14:42:26
| null |
UTF-8
|
Python
| false
| false
| 888
|
py
|
# -*- coding: utf-8 -*-
class Solution:
# @param str: a string
# @return an integer
def atoi(self, str):
# write your code here
int_max = 2147483647
int_min = -2147483648
sum = 0
i = 0
str = str.strip()
if not str:
return 0
sign = 1
i = 0
if str[0] == '-':
sign = -1
i = 1
elif str[0] == '+':
i = 1
while i < len(str):
if not str[i].isdigit():
break
digit = int(str[i])
if int_max / 10 > sum:
sum *= 10
else:
return int_max if sign > 0 else int_min
if int_max - digit >= sum:
sum += digit
else:
return int_max if sign > 0 else int_min
i += 1
return sign * sum
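# --- Sanity checks (added; not part of the original solution file). ---
if __name__ == '__main__':
    s = Solution()
    print(s.atoi('   -42abc'))   # -42: skips whitespace, reads sign and digits
    print(s.atoi('2147483648'))  # 2147483647: clamps overflow to INT_MAX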
|
[
"zeroonechange@gmail.com"
] |
zeroonechange@gmail.com
|
b5fcaf8543eed0b9c5c786f8d3f3d164ee4bd170
|
5d0de63ae64fceb3f26abc6e9e5b0d48a6d0ed86
|
/10000~10999/10950_A+B - 3.py
|
0d5f0fa78f6e17c86a4852c63e5afe8b41cc4fe0
|
[] |
no_license
|
PowerNeverEnds/BaekjoonOnlineJudge_Python
|
5b5cda3a07872f15846190b91c3adf18690abded
|
c1a4aba6c6cbc731a2bc52a73048e32f6a323381
|
refs/heads/main
| 2023-02-13T14:49:53.054363
| 2021-01-01T03:25:25
| 2021-01-01T03:25:25
| 324,266,101
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 125
|
py
|
import sys
input = sys.stdin.readline
T = int(input())
for _ in range(T):
A,B = map(int,input().split())
print(A+B)
|
[
"PowerNeverEnds8@gmail.com"
] |
PowerNeverEnds8@gmail.com
|
267d9bedd292039dea2fb0b1c5f08c4fa1a2f292
|
15ae2fd8044a3ba6e8fe8004779aaab971c74257
|
/setup.py
|
79c24455e1ff38cc8a6b8e2bb08d68b0861794af
|
[
"MIT"
] |
permissive
|
cbamann/language-model
|
7997997268e6eb54d31af49cc57000336d8d7a21
|
d14410c2302bf42bb771abc4a6b859704847798e
|
refs/heads/master
| 2020-07-24T06:47:31.650362
| 2019-09-11T14:45:33
| 2019-09-11T14:45:33
| 207,834,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,129
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys, os
# DATA packaged
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
# ML packages
import tensorflow as tf
from keras import backend as K
K.clear_session()  # reset any prior Keras session state
# Word embedding
import gensim
#from gensim.models import Word2Vec
from gensim.models.keyedvectors import KeyedVectors
# Text tockenization
from nltk.tokenize import sent_tokenize, word_tokenize
# Miscellaneous
from random import sample
from functools import reduce
from collections import Counter
import itertools # itertools.repeat(x, 3)
###############################################################################
global FOLDER_NN_MODELS, DATA_FOLDER
# Directory of the folder where data and word embeddings are located
PROJECT_FOLDER = "./"
DATA_FOLDER = PROJECT_FOLDER + "data/"
FOLDER_NN_MODELS = PROJECT_FOLDER + "nn_models/"
global NUM_FOR_TEST # How many batches to use for testing
NUM_FOR_TEST = 64*5
# READ AND PREPROCESS LOCAL FILES
exec(open(PROJECT_FOLDER + "read_sentences.py").read())
###############################################################################
# Network parameters
flags = tf.app.flags
FLAGS = flags.FLAGS
# General Model Hyperparameters
tf.flags.DEFINE_integer("embedding_dim", 100, "Dimensionality of word embedding (default: 300)")
tf.flags.DEFINE_integer("vocab_size", 20000, "Vocabulary")
tf.flags.DEFINE_integer("sent_len", 30, "Maximum sentence length")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("clip_gradient", 5, "Clip the norm of the gradients to 5")
tf.flags.DEFINE_float("learning_rate", 0.001, "Default Adam learning rate")
# RNN hyperparameters
tf.flags.DEFINE_integer("hidden_units", 512, "The size of the hidden cell layer")
tf.flags.DEFINE_integer("hidden_units_large", 1024, "The size of the hidden cell layer")
#tf.flags.DEFINE_float('learning_rate', 0.01, 'Learning rate for the optimization algorithms')
# Session Configuraion parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
# TBD
tf.flags.DEFINE_integer("intra_op_parallelism_threads", 4, "Nodes that can use multiple threads to parallelize their execution will schedule the individual pieces into this pool.")
tf.flags.DEFINE_integer("inter_op_parallelism_threads", 4, "All ready nodes are scheduled in this pool.")
tf.flags.DEFINE_integer("intra_op_parallelism_threads_test", 1, "Nodes that can use multiple threads to parallelize their execution will schedule the individual pieces into this pool.")
tf.flags.DEFINE_integer("inter_op_parallelism_threads_test", 1, "All ready nodes are scheduled in this pool.")
session_conf_cluster = tf.ConfigProto(
allow_soft_placement = FLAGS.allow_soft_placement,
log_device_placement = FLAGS.log_device_placement,
intra_op_parallelism_threads = FLAGS.intra_op_parallelism_threads,
inter_op_parallelism_threads = FLAGS.inter_op_parallelism_threads,
)
session_conf_test = tf.ConfigProto(
allow_soft_placement = FLAGS.allow_soft_placement,
log_device_placement = FLAGS.log_device_placement,
intra_op_parallelism_threads = FLAGS.intra_op_parallelism_threads_test,
inter_op_parallelism_threads = FLAGS.inter_op_parallelism_threads_test,
)
###############################################################################
def prepare_batch(df_inp,
batch_size = FLAGS.batch_size,
sent_len = FLAGS.sent_len,
null_elem = vocab_dict["<pad>"]):
"""
prepare standardized batches
Example:
        df_inp = train_df_enc[:46, :]
df_out, added = prepare_batch(df_inp)
"""
df_out, added = df_inp, 0
if len(df_inp) < batch_size:
added = batch_size - len(df_inp)
        tmp = null_elem * np.ones((added, sent_len))  # pad with <pad> rows up to batch_size
df_out = np.concatenate((df_inp, tmp), axis=0)
return (df_out, added)
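# Worked illustration (added; assumes vocab_dict["<pad>"] is an integer id and
# the FLAGS defaults above, batch_size=64): a 46-row input gains 18 rows of
# <pad> ids, so prepare_batch returns an array of shape (64, sent_len) with
# added == 18.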
|
[
"cbamann@student.ethz.ch"
] |
cbamann@student.ethz.ch
|
427a474a63b08ef6de2086ab55b872c0fd16775a
|
bab8e9d07bde113869273e57945d67ee0d6de2a3
|
/apps/document_signatures/managers.py
|
c03e2b5699161607eb0afb734b35a6a2b9ba65b4
|
[
"Apache-2.0"
] |
permissive
|
trillobite/mayan
|
e0df04bf6ac4fe5010a05c2905c5fda0ea851071
|
0b6d30a50de8b0237bdc4ffe29ba65b93366e620
|
refs/heads/master
| 2021-01-10T02:45:15.710159
| 2016-02-19T23:25:56
| 2016-02-19T23:25:56
| 51,715,368
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,568
|
py
|
from __future__ import unicode_literals
import logging
from django.db import models
from django_gpg.exceptions import GPGVerificationError
from django_gpg.runtime import gpg
logger = logging.getLogger(__name__)
class DocumentVersionSignatureManager(models.Manager):
def get_document_signature(self, document_version):
document_signature, created = self.model.objects.get_or_create(
document_version=document_version,
)
return document_signature
def add_detached_signature(self, document_version, detached_signature):
document_signature = self.get_document_signature(
document_version=document_version
)
if document_signature.has_embedded_signature:
raise Exception(
'Document version already has an embedded signature'
)
else:
if document_signature.signature_file:
logger.debug('Existing detached signature')
document_signature.delete_detached_signature_file()
document_signature.signature_file = None
document_signature.save()
document_signature.signature_file = detached_signature
document_signature.save()
def has_detached_signature(self, document_version):
try:
document_signature = self.get_document_signature(
document_version=document_version
)
except ValueError:
return False
else:
if document_signature.signature_file:
return True
else:
return False
def has_embedded_signature(self, document_version):
logger.debug('document_version: %s', document_version)
try:
document_signature = self.get_document_signature(
document_version=document_version
)
except ValueError:
return False
else:
return document_signature.has_embedded_signature
def detached_signature(self, document_version):
document_signature = self.get_document_signature(
document_version=document_version
)
return document_signature.signature_file.storage.open(
document_signature.signature_file.name
)
def verify_signature(self, document_version):
document_version_descriptor = document_version.open(raw=True)
detached_signature = None
if self.has_detached_signature(document_version=document_version):
logger.debug('has detached signature')
detached_signature = self.detached_signature(
document_version=document_version
)
args = (document_version_descriptor, detached_signature)
else:
args = (document_version_descriptor,)
try:
return gpg.verify_file(*args, fetch_key=False)
except GPGVerificationError:
return None
finally:
document_version_descriptor.close()
if detached_signature:
detached_signature.close()
def clear_detached_signature(self, document_version):
document_signature = self.get_document_signature(
document_version=document_version
)
if not document_signature.signature_file:
raise Exception('document doesn\'t have a detached signature')
document_signature.delete_detached_signature_file()
document_signature.signature_file = None
document_signature.save()
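# Usage sketch (added illustration; assumes a DocumentVersionSignature model
# exposes this manager as `objects`, as the method bodies above imply):
#   DocumentVersionSignature.objects.verify_signature(document_version)
# returns the gpg verification result, or None on GPGVerificationError.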
|
[
"jparnell0@gmail.com"
] |
jparnell0@gmail.com
|
01e7953945856e906873cc5f3a96960ba79d17c5
|
4a80c8d5ab6af276e3a998643abfa9fdae1cb8cd
|
/RUN_ME.py
|
74fce4d92c22d6d4f8cb869832681afa9ae90025
|
[] |
no_license
|
denjn5/TopicStudy
|
28b64e29f38e88fc040c63b27fee2d0d84d3d36d
|
4e7c22d8fc4caeaf22e4ef8b204ddcbaf24d2637
|
refs/heads/master
| 2021-01-23T04:29:59.845832
| 2017-06-24T05:40:14
| 2017-06-24T05:40:14
| 86,205,948
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,309
|
py
|
"""
Running this file makes all of the key stuff happen.
"""
import common
import bible
import tfidf
import topic
import vec_relationships
MAX_TOPICS = 40
SAVE_SOURCE = False
USE_LOCAL_SOURCE=False
def main():
# GET THE TEXTS
bib = bible.Bible("Matthew") # Get properly formatted corpus (a python list of dictionaries).
texts = bib.get_texts(save_source=SAVE_SOURCE, use_local_source=USE_LOCAL_SOURCE)
corpus_name = bib.corpus_name
if len(bib) == 0: # calls bible.__len__
print("No data from get_. Check your args.")
return
# ADD SENTIMENT
common.add_sentiment(texts)
# FIND TOPICS
tb = topic.Topic(corpus_name, texts)
tb.detect_ngram()
tb.prune_topics_and_adopt()
# summary = tb.summarize_texts()
# tfidf.tfidf_tutorial(texts)
# vr = vec_relationships.VecRelationships(corpus_name, texts)
# vr.doc2vec()
# vr.word2vec()
# vr.export_json()
# summary['keySentences'] = fr.key_sentences(summary['text'])
# TODO: send in clean tokens to keywords
# summary['keyWords'] = fr.keywords(summary['text'])
# fr.word2vec(tb.text_token_concat_clean())
# fr.export_json()
# SEND IT TO JSON
tb.export_topics()
common.export_texts(texts, corpus_name)
if __name__ == "__main__":
main()
|
[
"denjn5@gmail.com"
] |
denjn5@gmail.com
|
cdaf1f14474fcbe19d3dea514702fa550dd021d0
|
289e359b1c40a5b434c925267db30bc8d5299807
|
/Lab2/A2_5_py.py
|
db37889a5051ef88fd23d1a4c6a14c0f583f0d3b
|
[] |
no_license
|
KandyKad/Python-3rd-Sem
|
fb960c8e018bb96d173759b10863d776d5574c8f
|
1c54cf903e466f86906828a239b008c4dbe946b0
|
refs/heads/master
| 2021-01-07T11:57:56.355322
| 2020-02-21T16:27:48
| 2020-02-21T16:27:48
| 241,684,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
s = input("Enter a string:")  # avoid shadowing the built-in str
digits, letters = 0, 0
for ch in s:
    if ch.isdigit():
        digits += 1
    elif ch.isalpha():
        letters += 1
print("The number of digits in the string is: {}".format(digits))
print("The number of letters in the string is: {}".format(letters))
|
[
"noreply@github.com"
] |
KandyKad.noreply@github.com
|
9f285253b5155effbe89ca925465b68c4d1277b8
|
d8c4854d7c4ebd3d643ff50878b6100aa34d2dc3
|
/venv/Lib/site-packages/numpoly/array_function/isclose.py
|
1da61ffe23f6db54a147e591721af120c947898a
|
[] |
no_license
|
quintelabm/PrmFitting
|
35a8b7463d5fddb834eac1901a27a258de0c0da0
|
b384b5bde0cbc2717bea936ddf151df619d8893b
|
refs/heads/master
| 2023-08-06T04:03:16.864147
| 2021-09-17T02:43:45
| 2021-09-17T02:43:45
| 221,717,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,970
|
py
|
"""Return true where two arrays are element-wise equal within a tolerance."""
import numpy
import numpoly
from ..dispatch import implements, simple_dispatch
@implements(numpy.isclose)
def isclose(a, b, rtol=1e-5, atol=1e-8, equal_nan=False):
"""
Return true where two arrays are element-wise equal within a tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference `atol`
are added together to compare against the absolute difference between `a`
and `b`.
.. warning:: The default `atol` is not appropriate for comparing numbers
that are much smaller than one (see Notes).
Args:
a, b (numpoly.ndpoly):
Input arrays to compare.
rtol (float):
The relative tolerance parameter (see Notes).
atol (float):
The absolute tolerance parameter (see Notes).
equal_nan (bool):
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
Returns:
(numpy.ndarray):
Returns a boolean array of where `a` and `b` are equal within the
given tolerance. If both `a` and `b` are scalars, returns a single
boolean value.
Notes:
For finite values, isclose uses the following equation to test whether
two floating point values are equivalent.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
Unlike the built-in `math.isclose`, the above equation is not symmetric
in `a` and `b` -- it assumes `b` is the reference value -- so that
`isclose(a, b)` might be different from `isclose(b, a)`. Furthermore,
the default value of atol is not zero, and is used to determine what
small values should be considered close to zero. The default value is
appropriate for expected values of order unity: if the expected values
are significantly smaller than one, it can result in false positives.
`atol` should be carefully selected for the use case at hand. A zero
value for `atol` will result in `False` if either `a` or `b` is zero.
Examples:
>>> q0, q1 = numpoly.variable(2)
>>> numpoly.isclose([1e10*q0, 1e-7], [1.00001e10*q0, 1e-8])
array([ True, False])
>>> numpoly.isclose([1e10*q0, 1e-8], [1.00001e10*q0, 1e-9])
array([ True, True])
>>> numpoly.isclose([1e10*q0, 1e-8], [1.00001e10*q1, 1e-9])
array([False, True])
>>> numpoly.isclose([q0, numpy.nan],
... [q0, numpy.nan], equal_nan=True)
array([ True, True])
"""
a, b = numpoly.align_polynomials(a, b)
out = numpy.ones(a.shape, dtype=bool)
for key in a.keys:
out &= numpy.isclose(
a[key], b[key], atol=atol, rtol=rtol, equal_nan=equal_nan)
return out
|
[
"46576343+jessica-garbero@users.noreply.github.com"
] |
46576343+jessica-garbero@users.noreply.github.com
|
cc36255e7abfb129b63ea9ff85c20ca44d0d3d5c
|
49c32e44a6e49b72c8454b0d165114808ee4ac90
|
/howard/GrovePi-EE250/ee250/lab08/http_client_example.py
|
e9b0e7672a07b26980456d66001b9b532a86568d
|
[
"MIT"
] |
permissive
|
wenyigao6/ee250
|
85a1991e00e019f3d91f358c2ce0df9695b1ed03
|
651f4a9de4b07df9c6c5903c7fbe22a0eead853d
|
refs/heads/master
| 2021-05-05T05:33:58.976155
| 2019-03-07T04:21:46
| 2019-03-07T04:21:46
| 118,688,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,270
|
py
|
import requests
import json
from datetime import datetime
import time
"""This file illustrates the typical calls you need from an http client.
More specifically, in your signal_processing.py code, you should have a
request.post() call everytime a movement is classified by your algorithm."""
if __name__ == '__main__':
    # This header sets the HTTP request's mimetype to `application/json`. This
    # means the payload of the HTTP message will be formatted as a json object
hdr = {
'Content-Type': 'application/json',
'Authorization': None #not using HTTP secure
}
# The payload of our message starts as a simple dictionary. Before sending
# the HTTP message, we will format this into a json object
payload = {
'time': str(datetime.now()),
'event': "Moving Right"
}
while True:
# Send an HTTP POST message and block until a response is given.
# Note: requests() is NOT the same thing as request() under the flask
# library.
response = requests.post("http://0.0.0.0:5000/post-event", headers = hdr,
data = json.dumps(payload))
# Print the json object from the HTTP response
print(response.json())
time.sleep(2)
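# Sketch of the matching server side (added illustration; the route name
# '/post-event' comes from the URL above, everything else is assumed):
#   from flask import Flask, request, jsonify
#   app = Flask(__name__)
#   @app.route('/post-event', methods=['POST'])
#   def post_event():
#       return jsonify(request.get_json())
#   app.run(host='0.0.0.0', port=5000)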
|
[
"wenyigao@usc.edu"
] |
wenyigao@usc.edu
|
8d1adff878356ce0b1320b63deda9a292ca26375
|
191a7f83d964f74a2b3c7faeb4fc47d9c63d521f
|
/.history/main_20210529115100.py
|
3a9e3953cc01895bd407acc93e9b5d1586d666ea
|
[] |
no_license
|
AndreLiu1225/Kinder-Values-Survey
|
2a317feee8d5b17c27da2b2116742656e35d8ab9
|
090c27da0c822abb7dfc0ec6e13ae1b3dcb7bbf3
|
refs/heads/master
| 2023-05-03T00:26:00.481423
| 2021-06-04T03:24:19
| 2021-06-04T03:24:19
| 371,989,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,910
|
py
|
from flask import Flask, render_template, redirect, url_for, flash, request
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from wtforms import StringField, TextField, SubmitField, IntegerField, SelectField, RadioField
from wtforms.validators import DataRequired, Email, EqualTo, Length, ValidationError
import datetime
import matplotlib.pyplot as plt
app = Flask(__name__)
app.config['SECRET_KEY'] = "0c8973c8a5e001bb0c816a7b56c84f3a"
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///site.db"
db = SQLAlchemy(app)
class Survey(db.Model):
age = db.Column(db.Integer, nullable=False, primary_key=True)
email = db.Column(db.String(50), unique=False, nullable=False)
profession = db.Column(db.String(50), nullable=False)
power = db.Column(db.Integer, nullable=False)
tradition = db.Column(db.Integer, nullable=False)
achievement = db.Column(db.Integer, nullable=False)
stimulation = db.Column(db.Integer, nullable=False)
hedonism = db.Column(db.Integer, nullable=False)
conformity = db.Column(db.Integer, nullable=False)
security = db.Column(db.Integer, nullable=False)
self_direction = db.Column(db.Integer, nullable=False)
benevolence = db.Column(db.Integer, nullable=False)
universalism = db.Column(db.Integer, nullable=False)
date_posted = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)
    def __repr__(self):
        return f"Survey('{self.age}', '{self.email}', '{self.date_posted}')"
class MCQ(FlaskForm):
email = StringField("What is your email?", validators=[DataRequired(), Email(message=('Not a valid email address')), Length(max=50)])
age = IntegerField("Please enter your age", validators=[DataRequired()])
profession = StringField("What is your profession?", validators=[DataRequired(), Length(max=30)])
# Self-Enhancement
power = IntegerField("Do you desire a higher social status and dominance over others? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
hedonism = IntegerField("Is personal gratification the most important? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
achievement = IntegerField("Is achievement according to social standards important? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
# Conservation
tradition = IntegerField("Do you care about preserving traditions? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
conformity = IntegerField("Do you think restraint of actions against social norms is important? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
security = IntegerField("Do you value safety, harmony and stability of society, of relationships, and of self? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
# Openness to change
stimulation = IntegerField("Do you prefer novel and exciting challenges in life? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
self_direction = IntegerField("Do you think independent thought and action are important (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
# Self-transcendence
benevolence = IntegerField("Are preserving and enhancing the welfare of your friends and family the most important? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
universalism = IntegerField("I find it important to understand, tolerate, appreciate and protect all ethnicities and people. (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
submit = SubmitField("Submit")
@app.route('/', methods=['POST','GET'])
def values_quiz():
form = MCQ()
if form.validate_on_submit():
post = Survey(age=form.age.data, email=form.email.data, profession=form.profession.data, power=form.power.data,
tradition=form.tradition.data, achievement=form.achievement.data, stimulation=form.stimulation.data,
hedonism=form.hedonism.data, conformity=form.conformity.data, self_direction=form.self_direction.data,
benevolence=form.benevolence.data, universalism=form.universalism.data, security=form.security.data)
# if Survey.is_email_in_database(form.email.data):
# flash(f"The user with {form.email.data} has already filled the survey", "danger")
db.session.add(post)
db.session.commit()
flash(f'Survey is completed by {form.email.data}', 'success')
return redirect(url_for('data_dashboard'))
    elif request.method == 'POST':
        # Only warn on a failed submission, not on the initial GET render
        flash('Ensure all questions are answered correctly', 'warning')
return render_template('MCQ.html', form=form)
@app.route('/results', methods=['GET', 'POST'])
def data_dashboard():
    # Form values are POSTed as strings, so cast them before doing arithmetic
    power = int(request.form['power'])
    tradition = int(request.form['tradition'])
    achievement = int(request.form['achievement'])
    stimulation = int(request.form['stimulation'])
    hedonism = int(request.form['hedonism'])
    conformity = int(request.form['conformity'])
    security = int(request.form['security'])
    self_direction = int(request.form['self_direction'])
    benevolence = int(request.form['benevolence'])
    universalism = int(request.form['universalism'])
values = [power, tradition, achievement, stimulation, hedonism, conformity, security, self_direction, benevolence, universalism]
    # Label order must match sum_v below: [open_sum, enhance_sum, trans_sum, cons_sum]
    values_labels = ['Openness to Change', 'Self-Enhancement',
                     'Self-Transcendence', 'Conservation']
openness = [hedonism, stimulation, self_direction]
self_enhancement = [hedonism, achievement, power]
conservation = [tradition, conformity, security]
self_trans = [universalism, benevolence]
total_sum = sum(values)
open_sum = round(sum(openness)/total_sum*100)
enhance_sum = round(sum(self_enhancement)/total_sum*100)
trans_sum = round(sum(self_trans)/total_sum*100)
cons_sum = round(sum(conservation)/total_sum*100)
sum_v = [open_sum, enhance_sum, trans_sum, cons_sum]
    # x positions for the four value groups (must match len(sum_v))
    ran = [20, 40, 60, 80]
    plt.xticks(ran, values_labels)
    # Calling bar plot function
    plt.bar(ran, sum_v)
plt.title('Percentage obtained on each dynamic values')
plt.ylabel('Percentage')
plt.xlabel('Dynamic value types')
return render_template('data_dashboard.html', image=plt.show())
if __name__ == "__main__":
app.run(debug=True)
|
[
"andreliu2004@gmail.com"
] |
andreliu2004@gmail.com
|
a3156288c68ba13e56fa02089ab9f1f432f33e33
|
05df89f9b5354f1b459d020bd020100a08b9b30b
|
/data/util.py
|
9767a15c05ce7eb6cd911c6f59a8a7babdad94f2
|
[] |
no_license
|
GuoDonger/graduation
|
9e8da55f6ee97fd7648630ebdf9e36ff74910690
|
bd4eb9eae9b6e58d0478a4f4c18982f6695e33e3
|
refs/heads/master
| 2020-05-05T14:03:40.459133
| 2019-05-06T00:48:49
| 2019-05-06T00:48:49
| 179,194,465
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,114
|
py
|
from urllib import request
from lxml import etree
import pymysql
HOST = '123.56.23.97'
PORT = 3306
USER = 'root'
PASSWORD = '111111'
CHARSET = 'utf8'
DATABASE = 'wumai'
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) \
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.81 Safari/537.36',
}
connect = pymysql.connect(host=HOST, port=PORT, user=USER, password=PASSWORD, database=DATABASE, charset=CHARSET)
cursor = connect.cursor()
url = 'http://www.pm25.in/'
req = request.Request(url=url, headers=HEADERS)
result = request.urlopen(req).read().decode('utf-8')
tree = etree.HTML(result)
uls = tree.xpath('//div[@class="all"]//div[@class="bottom"]/ul')
for ul in uls:
initial = ul.xpath('.//b/text()')[0]
city = ul.xpath('.//li/a/text()')
word = ul.xpath('.//li/a/@href')
cities = list(zip(city, word))
for city in cities:
sql = 'insert into data_city(initial,city,word) values(%s,%s,%s);'
result = cursor.execute(sql, [initial, city[0], city[1]])
connect.commit()
print('success')
|
[
"18235445605@163.com"
] |
18235445605@163.com
|
eb8e22f6495242dc6d530e2652a68d046074bfe0
|
2bc36cf3b249407015685726d964f7989c28e974
|
/articles/migrations/0004_article_is_draft.py
|
a890b0c4ce6c0bb0c6d9d29be1932697429f9b4c
|
[] |
no_license
|
AbhishekAmin/severus
|
5ce3f587189bb21ae15067be41bd5f9b549f4d33
|
e60ad55d3af779cdc1ad71f1026ed9ba60cea729
|
refs/heads/master
| 2023-04-29T23:17:31.949837
| 2019-08-20T02:57:45
| 2019-08-20T02:57:45
| 202,535,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
# Generated by Django 2.2.4 on 2019-08-13 16:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0003_article_created_by'),
]
operations = [
migrations.AddField(
model_name='article',
name='is_draft',
field=models.BooleanField(default=True),
),
]
|
[
"amin.abhi297@gmail.com"
] |
amin.abhi297@gmail.com
|
4688c862adbcad7d3cf9e5943df3d12fbd9f594c
|
65d40eeeb94485fb981c138e89ccfadad0387748
|
/NMF.py
|
7dcefcc92a25fdff72b47c307a2a30e4f8359240
|
[] |
no_license
|
johnforrest/MachineLearning
|
14ed7dad4d1da6ebf5db88e3154acb3dee65468b
|
42a0877a950109fd5e8fce36b18b948ebaab4a9d
|
refs/heads/master
| 2021-05-17T02:21:12.077236
| 2019-04-06T09:18:16
| 2019-04-06T09:18:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,511
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 25 17:23:41 2019
@author: Administrator
"""
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn import decomposition
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=RandomState(0))
faces = dataset.data
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest', vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.94, 0.04, 0.)
plot_gallery("First centered Olivetti faces", faces[:n_components])
estimators = [
('Eigenfaces - PCA using randomized SVD',
decomposition.PCA(n_components=6,whiten=True)),
('Non-negative components - NMF',
decomposition.NMF(n_components=6, init='nndsvda', tol=5e-3))
]
for name, estimator in estimators:
print("Extracting the top %d %s..." % (n_components, name))
print(faces.shape)
estimator.fit(faces)
components_ = estimator.components_
plot_gallery(name, components_[:n_components])
plt.show()
|
[
"870407139@qq.com"
] |
870407139@qq.com
|
ccb560c882140ca894d535fca4ac8ab4e79aaa5f
|
7f53c41182a6d9c5da0c58a15716f01725ac0316
|
/2019_1_9_public_test/q.py
|
6e818a749b82cd297051abf4cbcbe60ceacce469
|
[] |
no_license
|
1286211699/2019_1_23_pub_test
|
f6b7ee089e78ad673c56b3cd4ccee9b2154581f6
|
3aed7f4941353d48bf3407e9d30ac85c83b0ed7b
|
refs/heads/master
| 2022-12-19T14:41:15.264627
| 2019-03-21T09:46:08
| 2019-03-21T09:46:08
| 167,125,649
| 1
| 0
| null | 2022-12-08T01:33:30
| 2019-01-23T05:54:52
|
HTML
|
UTF-8
|
Python
| false
| false
| 628
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/1/9 18:05
# @Author : for
# @File : q.py
# @Software: PyCharm
url = 'http://upos-hz-mirrorks3.acgvideo.com/dspxcode/m190109ws2e8185t3erk002bimbs16gx-1-56.mp4?um_deadline=1547037041&rate=500000&oi=3683615411&um_sign=1e6832d451fcacd171232b97f2609daf&gen=dsp&wsTime=1547037041&platform=html5'
from urllib import request
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36',
}
req = request.Request(url=url, headers=headers)
res = request.urlopen(req)
print(res)
|
[
"1286211699@qq.com"
] |
1286211699@qq.com
|
d6e84a3e0ce87ad21e7f7c0a868cb25759fb1d64
|
6cdb8934e6793487a75d6a32c3b2d4c24e8aa120
|
/cluster/link_check.py
|
458ae9b92d7cd363de5850698f6b45db819d48dc
|
[] |
no_license
|
FGPullen/shiny-couscous
|
db2a23a96492ad46fa5671f06cab26cf6ac7dfb7
|
15c33f930f18cff978746c818869a34243c480a0
|
refs/heads/master
| 2021-01-01T06:46:15.373554
| 2017-07-17T17:34:21
| 2017-07-17T17:34:21
| 97,504,924
| 0
| 2
| null | 2017-09-10T09:44:11
| 2017-07-17T17:41:59
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,238
|
py
|
from pages import allPages
class link_analyzer:
def __init__(self, data_path,dataset):
self.pages = allPages([data_path])
self.dataset = dataset
prefix = data_path
self.file_set = []
for page in self.pages.pages:
self.file_set.append(page.path)
#print file_set
    def getAnchor(self):
        self.right_list = []
        self.total_list = []
        self.percentage_list = []
        for page in self.pages.pages:
            right = 0
            total = 0
            link_dict = page.getAnchor()
            for key, link in link_dict.items():
                if self.intraJudge(link):
                    for item in self.file_set:
                        if link in item:
                            right += 1
                            print(link)
                            break
                    total += 1
            if right == 0:
                print(0.0)
                self.percentage_list.append(0.0)
            else:
                link_dict["percentage"] = float(right)/float(total)
                self.percentage_list.append(float(right)/float(total))
            self.right_list.append(right)
            self.total_list.append(total)
        print("average percentage is " + str(sum(self.percentage_list)/float(len(self.percentage_list))))
        print("average inlink number is " + str(sum(self.total_list)/float(len(self.total_list))))
def intraJudge(self,url):
# oulink with http or symbol like # and /
# medhelp start from http://www.medhelp.org/user_groups/list and prefix http://www.medhelp.org/
if self.dataset == "stackexchange":
if "http" in url:
return 0
elif "//" in url:
return 0
elif url=="#" or url=="?lastactivity":
return 0
else:
return 1
elif self.dataset == "rottentomatoes":
if len(url)==1 or "http" in url:
if "rottentomatoes.com" in url:
return 1
else:
return 0
elif url[0:2]=="//":
return 0
else:
return 1
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("datasets", choices=["zhihu","stackexchange","rottentomatoes","medhelp","asp","all"], help="the dataset for experiments")
args = parser.parse_args()
if args.datasets!="all":
data_path = "../Crawler/test_data/" + args.datasets + "/"
l = link_analyzer(data_path,args.datasets)
l.getAnchor()
else:
for data in ["zhihu","stackexchange","rottentomatoes","medhelp","asp"]:
data_path = "../Crawler/test_data/" + data + "/"
l = link_analyzer(data_path)
|
[
"xky0714@163.com"
] |
xky0714@163.com
|
eefe06af9a2f5bcf8c275f7eee55686b99bee991
|
b82910ffd88fd90f9241564a7973c3a3cf46b3f7
|
/seattle/vxlan_tool.py
|
34c8fd5451500e507707f361d87c6890ec92e5dd
|
[] |
no_license
|
jlausuch/sfc-work
|
fc8ea9c8952999e788d83d701df9b6925933c4f2
|
f7fcd9648738953f96fab233e9eba20eb191d514
|
refs/heads/master
| 2021-06-05T22:51:19.328188
| 2016-08-23T14:14:36
| 2016-08-23T14:14:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 43,535
|
py
|
#
# Copyright (c) 2015 Intel, Inc., Cisco Systems, Inc. and others. All rights
# reserved.
#
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution,
# and is available at http://www.eclipse.org/legal/epl-v10.html
__author__ = "Yi Yang, Reinaldo Penno"
__copyright__ = "Copyright(c) 2015, Intel, Inc. and Cisco Systems, Inc."
__version__ = "0.2"
__email__ = "yi.y.yang@intel.com, rapenno@gmail.com"
__status__ = "beta"
import socket, sys
import pdb
import argparse
from struct import *
from ctypes import Structure, c_ubyte, c_ushort, c_uint
NSH_TYPE1_LEN = 0x6
NSH_MD_TYPE1 = 0x1
NSH_VERSION1 = int('00', 2)
NSH_NEXT_PROTO_IPV4 = int('00000001', 2)
NSH_NEXT_PROTO_OAM = int('00000100', 2)
NSH_NEXT_PROTO_ETH = int('00000011', 2)
NSH_FLAG_ZERO = int('00000000', 2)
IP_HEADER_LEN = 5
IPV4_HEADER_LEN_BYTES = 20
IPV4_VERSION = 4
IPV4_PACKET_ID = 54321
IPV4_TTL = 255
IPV4_TOS = 0
IPV4_IHL_VER = (IPV4_VERSION << 4) + IP_HEADER_LEN
UDP_HEADER_LEN_BYTES = 8
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class VXLAN(Structure):
_fields_ = [('flags', c_ubyte),
('reserved', c_uint, 16),
('next_protocol', c_uint, 8),
('vni', c_uint, 24),
('reserved2', c_uint, 8)]
def __init__(self, flags=int('00001000', 2), reserved=0, next_protocol=0,
vni=int('111111111111111111111111', 2), reserved2=0, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.flags = flags
self.reserved = reserved
self.next_protocol = next_protocol
self.vni = vni
self.reserved2 = reserved2
header_size = 8
def build(self):
return pack('!B H B I',
self.flags,
self.reserved,
self.next_protocol,
(self.vni << 8) + self.reserved2)
class ETHHEADER(Structure):
_fields_ = [('dmac0', c_ubyte),
('dmac1', c_ubyte),
('dmac2', c_ubyte),
('dmac3', c_ubyte),
('dmac4', c_ubyte),
('dmac5', c_ubyte),
('smac0', c_ubyte),
('smac1', c_ubyte),
('smac2', c_ubyte),
('smac3', c_ubyte),
('smac4', c_ubyte),
('smac5', c_ubyte),
('ethertype0', c_ubyte),
('ethertype1', c_ubyte)]
header_size = 14
def build(self):
return pack('!B B B B B B B B B B B B B B',
self.dmac0,
self.dmac1,
self.dmac2,
self.dmac3,
self.dmac4,
self.dmac5,
self.smac0,
self.smac1,
self.smac2,
self.smac3,
self.smac4,
self.smac5,
self.ethertype0,
self.ethertype1)
class BASEHEADER(Structure):
"""
Represent a NSH base header
"""
_fields_ = [('version', c_ushort, 2),
('flags', c_ushort, 8),
('length', c_ushort, 6),
('md_type', c_ubyte),
('next_protocol', c_ubyte),
('service_path', c_uint, 24),
('service_index', c_uint, 8)]
def __init__(self, service_path=1, service_index=255, version=NSH_VERSION1, flags=NSH_FLAG_ZERO,
length=NSH_TYPE1_LEN, md_type=NSH_MD_TYPE1, proto=NSH_NEXT_PROTO_ETH, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.version = version
self.flags = flags
self.length = length
self.md_type = md_type
self.next_protocol = proto
self.service_path = service_path
self.service_index = service_index
header_size = 8
def build(self):
return pack('!H B B I',
(self.version << 14) + (self.flags << 6) + self.length,
self.md_type,
self.next_protocol,
(self.service_path << 8) + self.service_index)
class CONTEXTHEADER(Structure):
_fields_ = [('network_platform', c_uint),
('network_shared', c_uint),
('service_platform', c_uint),
('service_shared', c_uint)]
header_size = 16
def __init__(self, network_platform=0x00, network_shared=0x00, service_platform=0x00, service_shared=0x00, *args,
**kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.network_platform = network_platform
self.network_shared = network_shared
self.service_platform = service_platform
self.service_shared = service_shared
def build(self):
return pack('!I I I I',
self.network_platform,
self.network_shared,
self.service_platform,
self.service_shared)
class IP4HEADER(Structure):
_fields_ = [
('ip_ihl', c_ubyte),
('ip_ver', c_ubyte),
('ip_tos', c_ubyte),
('ip_tot_len', c_ushort),
('ip_id', c_ushort),
('ip_frag_offset', c_ushort),
('ip_ttl', c_ubyte),
('ip_proto', c_ubyte),
('ip_chksum', c_ushort),
('ip_saddr', c_uint),
('ip_daddr', c_uint)]
header_size = 20
def build(self):
ip_header_pack = pack('!B B H H H B B H I I', IPV4_IHL_VER, self.ip_tos, self.ip_tot_len, self.ip_id,
self.ip_frag_offset, self.ip_ttl, self.ip_proto, self.ip_chksum, self.ip_saddr,
self.ip_daddr)
return ip_header_pack
def set_ip_checksum(self, checksum):
self.ip_chksum = checksum
class UDPHEADER(Structure):
"""
Represents a UDP header
"""
_fields_ = [
('udp_sport', c_ushort),
('udp_dport', c_ushort),
('udp_len', c_ushort),
('udp_sum', c_ushort)]
header_size = 8
def build(self):
udp_header_pack = pack('!H H H H', self.udp_sport, self.udp_dport, self.udp_len,
self.udp_sum)
return udp_header_pack
class PSEUDO_TCPHEADER(Structure):
""" Pseudoheader used in the TCP checksum."""
def __init__(self):
self.src_ip = 0
self.dest_ip = 0
self.zeroes = 0
self.protocol = 6
self.length = 0
def build(self):
""" Create a string from a pseudoheader """
p_tcp_header_pack = pack('!I I B B H', self.src_ip, self.dest_ip,
self.zeroes, self.protocol, self.length)
return p_tcp_header_pack
class PSEUDO_UDPHEADER(Structure):
""" Pseudoheader used in the UDP checksum."""
def __init__(self):
self.src_ip = 0
self.dest_ip = 0
self.zeroes = 0
self.protocol = 17
self.length = 0
def build(self):
""" Create a string from a pseudoheader """
p_udp_header_pack = pack('!I I B B H', self.src_ip, self.dest_ip,
self.zeroes, self.protocol, self.length)
return p_udp_header_pack
class TCPHEADER(Structure):
"""
Represents a TCP header
"""
_fields_ = [
('tcp_sport', c_ushort),
('tcp_dport', c_ushort),
('tcp_seq', c_uint),
('tcp_ack', c_uint),
('tcp_offset', c_ubyte),
('tcp_flags', c_ubyte),
('tcp_window', c_ushort),
('tcp_checksum', c_ushort),
('tcp_urgent', c_ushort),]
header_size = 20
def build(self):
tcp_header_pack = pack('!H H I I B B H H H', self.tcp_sport, self.tcp_dport,
self.tcp_seq, self.tcp_ack, self.tcp_offset,
self.tcp_flags, self.tcp_window, self.tcp_checksum,
self.tcp_urgent)
return tcp_header_pack
class ICMPHEADER(Structure):
"""
Represents a ICMP header
"""
_fields_ = [
('icmp_type', c_ubyte),
('icmp_code', c_ubyte),
('icmp_checksum', c_ushort),
('icmp_unused', c_ushort),
('icmp_MTU', c_ushort),
('icmp_iphead', c_uint)]
header_size = 12
def build(self):
icmp_header_pack = pack('!B B H H H I', self.icmp_type, self.icmp_code,
self.icmp_checksum, self.icmp_unused, self.icmp_MTU,
self.icmp_iphead)
return icmp_header_pack
def decode_eth(payload, offset, eth_header_values):
eth_header = payload[offset:(offset+14)]
_header_values = unpack('!B B B B B B B B B B B B B B', eth_header)
eth_header_values.dmac0 = _header_values[0]
eth_header_values.dmac1 = _header_values[1]
eth_header_values.dmac2 = _header_values[2]
eth_header_values.dmac3 = _header_values[3]
eth_header_values.dmac4 = _header_values[4]
eth_header_values.dmac5 = _header_values[5]
eth_header_values.smac0 = _header_values[6]
eth_header_values.smac1 = _header_values[7]
eth_header_values.smac2 = _header_values[8]
eth_header_values.smac3 = _header_values[9]
eth_header_values.smac4 = _header_values[10]
eth_header_values.smac5 = _header_values[11]
eth_header_values.ethertype0 = _header_values[12]
eth_header_values.ethertype1 = _header_values[13]
def decode_ip(payload, ip_header_values):
ip_header = payload[14:34]
_header_values = unpack('!B B H H H B B H I I', ip_header)
ip_header_values.ip_ihl = _header_values[0] & 0x0F
ip_header_values.ip_ver = _header_values[0] >> 4
ip_header_values.ip_tos = _header_values[1]
ip_header_values.ip_tot_len = _header_values[2]
ip_header_values.ip_id = _header_values[3]
ip_header_values.ip_frag_offset = _header_values[4]
ip_header_values.ip_ttl = _header_values[5]
ip_header_values.ip_proto = _header_values[6]
ip_header_values.ip_chksum = _header_values[7]
ip_header_values.ip_saddr = _header_values[8]
ip_header_values.ip_daddr = _header_values[9]
def decode_udp(payload, udp_header_values):
udp_header = payload[34:42]
_header_values = unpack('!H H H H', udp_header)
udp_header_values.udp_sport = _header_values[0]
udp_header_values.udp_dport = _header_values[1]
udp_header_values.udp_len = _header_values[2]
udp_header_values.udp_sum = _header_values[3]
def decode_tcp(payload, offset, tcp_header_values):
tcp_header = payload[(108+offset):(128+offset)]
_header_values = unpack('!H H I I B B H H H', tcp_header)
tcp_header_values.tcp_sport = _header_values[0]
tcp_header_values.tcp_dport = _header_values[1]
tcp_header_values.tcp_seq = _header_values[2]
tcp_header_values.tcp_ack = _header_values[3]
tcp_header_values.tcp_offset = _header_values[4]
tcp_header_values.tcp_flags = _header_values[5]
tcp_header_values.tcp_window = _header_values[6]
tcp_header_values.tcp_checksum = _header_values[7]
tcp_header_values.tcp_urgent = _header_values[8]
def decode_internal_ip(payload, offset, ip_header_values):
ip_header = payload[(88+offset):(108+offset)]
_header_values = unpack('!B B H H H B B H I I', ip_header)
ip_header_values.ip_ihl = _header_values[0] & 0x0F
ip_header_values.ip_ver = _header_values[0] >> 4
ip_header_values.ip_tos = _header_values[1]
ip_header_values.ip_tot_len = _header_values[2]
ip_header_values.ip_id = _header_values[3]
ip_header_values.ip_frag_offset = _header_values[4]
ip_header_values.ip_ttl = _header_values[5]
ip_header_values.ip_proto = _header_values[6]
ip_header_values.ip_chksum = _header_values[7]
ip_header_values.ip_saddr = _header_values[8]
ip_header_values.ip_daddr = _header_values[9]
def decode_vxlan(payload, vxlan_header_values):
"""Decode the VXLAN header for a received packets"""
vxlan_header = payload[42:50]
_header_values = unpack('!B H B I', vxlan_header)
vxlan_header_values.flags = _header_values[0]
vxlan_header_values.reserved = _header_values[1]
vxlan_header_values.next_protocol = _header_values[2]
vni_rsvd2 = _header_values[3]
vxlan_header_values.vni = vni_rsvd2 >> 8
vxlan_header_values.reserved2 = vni_rsvd2 & 0x000000FF
def decode_nsh_baseheader(payload, offset, nsh_base_header_values):
"""Decode the NSH base headers for a received packets"""
base_header = payload[offset:(offset+8)]
_header_values = unpack('!H B B I', base_header)
start_idx = _header_values[0]
nsh_base_header_values.md_type = _header_values[1]
nsh_base_header_values.next_protocol = _header_values[2]
path_idx = _header_values[3]
nsh_base_header_values.version = start_idx >> 14
nsh_base_header_values.flags = start_idx >> 6
nsh_base_header_values.length = start_idx >> 0
nsh_base_header_values.service_path = path_idx >> 8
nsh_base_header_values.service_index = path_idx & 0x000000FF
def decode_nsh_contextheader(payload, offset, nsh_context_header_values):
"""Decode the NSH context headers for a received packet"""
context_header = payload[offset:(offset+16)]
_header_values = unpack('!I I I I', context_header)
nsh_context_header_values.network_platform = _header_values[0]
nsh_context_header_values.network_shared = _header_values[1]
nsh_context_header_values.service_platform = _header_values[2]
nsh_context_header_values.service_shared = _header_values[3]
def compute_internet_checksum(data):
"""
Function for Internet checksum calculation. Works
for both IP and UDP.
"""
checksum = 0
n = len(data) % 2
# data padding
pad = bytearray('', encoding='UTF-8')
if n == 1:
pad = bytearray(b'\x00')
# for i in range(0, len(data + pad) - n, 2):
for i in range(0, len(data)-1, 2):
checksum += (ord(data[i]) << 8) + (ord(data[i + 1]))
if n == 1:
checksum += (ord(data[len(data)-1]) << 8) + (pad[0])
while checksum >> 16:
checksum = (checksum & 0xFFFF) + (checksum >> 16)
checksum = ~checksum & 0xffff
return checksum
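# Worked illustration (added; not part of the original tool): for the two
# 16-bit words 0x4500 and 0x0073 the running sum is 0x4573, there is no carry
# above bit 16 to fold back, so the function returns ~0x4573 & 0xffff == 0xba8c.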
# Implements int.from_bytes(s, byteorder='big')
def int_from_bytes(s):
return sum(ord(c) << (i * 8) for i, c in enumerate(s[::-1]))
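# e.g. int_from_bytes('\x01\x02') == 0x0102 == 258 (big-endian, Python 2 str input)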
def build_ethernet_header_swap(myethheader):
""" Build Ethernet header """
newethheader=ETHHEADER()
newethheader.smac0 = myethheader.dmac0
newethheader.smac1 = myethheader.dmac1
newethheader.smac2 = myethheader.dmac2
newethheader.smac3 = myethheader.dmac3
newethheader.smac4 = myethheader.dmac4
newethheader.smac5 = myethheader.dmac5
newethheader.dmac0 = myethheader.smac0
newethheader.dmac1 = myethheader.smac1
newethheader.dmac2 = myethheader.smac2
newethheader.dmac3 = myethheader.smac3
newethheader.dmac4 = myethheader.smac4
newethheader.dmac5 = myethheader.smac5
newethheader.ethertype0 = myethheader.ethertype0
newethheader.ethertype1 = myethheader.ethertype1
return newethheader
def build_ipv4_header(ip_tot_len, proto, src_ip, dest_ip, swap_ip):
"""
Builds a complete IP header including checksum
"""
if src_ip:
ip_saddr = socket.inet_aton(src_ip)
else:
ip_saddr = socket.inet_aton(socket.gethostbyname(socket.gethostname()))
if (swap_ip == True):
new_ip_daddr = int_from_bytes(ip_saddr)
new_ip_saddr = socket.inet_aton(dest_ip)
new_ip_saddr = int_from_bytes(new_ip_saddr)
else:
new_ip_saddr = int_from_bytes(ip_saddr)
new_ip_daddr = int_from_bytes(socket.inet_aton(dest_ip))
ip_header = IP4HEADER(IP_HEADER_LEN, IPV4_VERSION, IPV4_TOS, ip_tot_len, IPV4_PACKET_ID, 0, IPV4_TTL, proto, 0, new_ip_saddr, new_ip_daddr)
checksum = compute_internet_checksum(ip_header.build())
ip_header.set_ip_checksum(checksum)
ip_header_pack = ip_header.build()
return ip_header, ip_header_pack
def build_tcp_reset(mytcpheader, ip_header):
"""
Building an TCP header requires fields from
IP header in order to perform checksum calculation
"""
# build TCP header with sum = 0
tcp_header = TCPHEADER()
tcp_header.tcp_flags = 20
tcp_header.tcp_offset = 80
source_port = mytcpheader.tcp_sport
tcp_header.tcp_sport = mytcpheader.tcp_dport
tcp_header.tcp_dport = source_port
tcp_header.tcp_window = 0
tcp_header.tcp_urgent = 0
ack = mytcpheader.tcp_seq + 1
tcp_header.tcp_seq = 0
tcp_header.tcp_ack = ack
tcp_header.tcp_checksum = 0
tcp_header_pack = tcp_header.build()
# build Pseudo Header
p_header = PSEUDO_TCPHEADER()
p_header.dest_ip = ip_header.ip_daddr
p_header.src_ip = ip_header.ip_saddr
p_header.length = 20
p_header_pack = p_header.build()
tcp_checksum = compute_internet_checksum(p_header_pack + tcp_header_pack)
tcp_header.tcp_checksum = tcp_checksum
# pack TCP header again but this time with checksum
tcp_header_pack = tcp_header.build()
return tcp_header, tcp_header_pack
def build_udp_header(src_port, dest_port, ip_header, data):
"""
Building an UDP header requires fields from
IP header in order to perform checksum calculation
"""
# build UDP header with sum = 0
udp_header = UDPHEADER(src_port, dest_port, UDP_HEADER_LEN_BYTES + len(data), 0)
udp_header_pack = udp_header.build()
# build Pseudo Header
p_header = PSEUDO_UDPHEADER()
p_header.dest_ip = ip_header.ip_daddr
p_header.src_ip = ip_header.ip_saddr
p_header.length = udp_header.udp_len
p_header_pack = p_header.build()
udp_checksum = compute_internet_checksum(p_header_pack + udp_header_pack + data)
udp_header.udp_sum = udp_checksum
# pack UDP header again but this time with checksum
udp_header_pack = udp_header.build()
return udp_header, udp_header_pack
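# Note on the checksum above: the UDP sum covers a pseudo-header of source IP,
# destination IP, a zero byte, protocol 17 and the UDP length, so the same
# payload sent between different addresses yields a different udp_sum.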
def build_udp_packet(src_ip, dest_ip, src_port, dest_port, data, swap_ip):
"""
Data needs to encoded as Python bytes. In the case of strings
this means a bytearray of an UTF-8 encoding
"""
total_len = len(data) + IPV4_HEADER_LEN_BYTES + UDP_HEADER_LEN_BYTES
# First we build the IP header
ip_header, ip_header_pack = build_ipv4_header(total_len, socket.IPPROTO_UDP, src_ip, dest_ip, swap_ip)
# Build UDP header
udp_header, udp_header_pack = build_udp_header(src_port, dest_port, ip_header, data)
udp_packet = ip_header_pack + udp_header_pack + data
return udp_packet
def getmac(interface):
try:
mac = open('/sys/class/net/'+interface+'/address').readline()
    except (IOError, OSError):  # interface missing or unreadable
mac = None
return mac
def print_ethheader(ethheader):
print("Eth Dst MAC: %.2x:%.2x:%.2x:%.2x:%.2x:%.2x, Src MAC: %.2x:%.2x:%.2x:%.2x:%.2x:%.2x, Ethertype: 0x%.4x" % (ethheader.dmac0, ethheader.dmac1, ethheader.dmac2, ethheader.dmac3, ethheader.dmac4, ethheader.dmac5, ethheader.smac0, ethheader.smac1, ethheader.smac2, ethheader.smac3, ethheader.smac4, ethheader.smac5, (ethheader.ethertype0<<8) | ethheader.ethertype1))
def print_ipheader(ipheader):
print("IP Version: %s IP Header Length: %s, TTL: %s, Protocol: %s, Src IP: %s, Dst IP: %s" % (ipheader.ip_ver, ipheader.ip_ihl, ipheader.ip_ttl, ipheader.ip_proto, str(socket.inet_ntoa(pack('!I', ipheader.ip_saddr))), str(socket.inet_ntoa(pack('!I', ipheader.ip_daddr)))))
def print_udpheader(udpheader):
print ("UDP Src Port: %s, Dst Port: %s, Length: %s, Checksum: %s" % (udpheader.udp_sport, udpheader.udp_dport, udpheader.udp_len, udpheader.udp_sum))
def print_vxlanheader(vxlanheader):
print("VxLAN/VxLAN-gpe VNI: %s, flags: %.2x, Next: %s" % (vxlanheader.vni, vxlanheader.flags, vxlanheader.next_protocol))
def print_nsh_baseheader(nshbaseheader):
print("NSH base nsp: %s, nsi: %s" % (nshbaseheader.service_path, nshbaseheader.service_index))
def print_nsh_contextheader(nshcontextheader):
print("NSH context c1: 0x%.8x, c2: 0x%.8x, c3: 0x%.8x, c4: 0x%.8x" % (nshcontextheader.network_platform, nshcontextheader.network_shared, nshcontextheader.service_platform, nshcontextheader.service_shared))
def main():
parser = argparse.ArgumentParser(description='This is a VxLAN/VxLAN-gpe + NSH dump and forward tool, you can use it to dump and forward VxLAN/VxLAN-gpe + NSH packets, it can also act as an NSH-aware SF for SFC test when you use --forward option, in that case, it will automatically decrease nsi by one.', prog='vxlan_tool.py')
parser.add_argument('-i', '--interface',
help='Specify the interface to listen')
    parser.add_argument('-d', '--do', choices=['dump', 'forward', 'send'],
                        help='dump/forward/send VxLAN/VxLAN-gpe + NSH or Eth + NSH packet')
parser.add_argument('-t', '--type', choices=['eth_nsh', 'vxlan_gpe_nsh'], default='vxlan_gpe_nsh',
help='Specify packet type for send: eth_nsh or vxlan_gpe_nsh')
parser.add_argument('--outer-source-mac',
help='Specify outer source MAC for packet send')
parser.add_argument('--outer-destination-mac',
help='Specify outer destination MAC for packet send')
parser.add_argument('--outer-source-ip',
help='Specify outer source IP address for packet send')
parser.add_argument('--outer-destination-ip',
help='Specify outer destination IP address for packet send')
parser.add_argument('--outer-source-udp-port', type=int,
help='Specify outer source UDP port for packet send')
parser.add_argument('--inner-source-mac',
help='Specify inner source MAC for packet send')
parser.add_argument('--inner-destination-mac',
help='Specify inner destination MAC for packet send')
parser.add_argument('--inner-source-ip',
help='Specify inner source IP address for packet send')
parser.add_argument('--inner-destination-ip',
help='Specify inner destination IP address for packet send')
parser.add_argument('--inner-source-udp-port', type=int,
help='Specify inner source UDP port for packet send')
parser.add_argument('--inner-destination-udp-port', type=int,
help='Specify inner destination UDP port for packet send')
parser.add_argument('-n', '--number', type=int,
help='Specify number of packet to send')
parser.add_argument('--no-swap-ip', dest='swap_ip', default=True, action='store_false',
help="won't swap ip if provided")
parser.add_argument('-v', '--verbose', choices=['on', 'off'],
help='dump packets when in forward mode')
parser.add_argument('--forward-inner', '-f', dest='forward_inner',
default=False, action='store_true',
help='Strip the outer encapsulation and forward the inner packet')
parser.add_argument('--block', '-b', type=int, default=0,
help='Acts as a firewall dropping packets that match this TCP dst port')
args = parser.parse_args()
macaddr = None
try:
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0003))
if args.interface is not None:
s.bind((args.interface, 0))
if ((args.do == "forward") or (args.do == "send")):
if args.interface is None:
print("Error: you must specify the interface by -i or --interface for forward and send")
sys.exit(-1)
send_s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
send_s.bind((args.interface, 0))
if args.interface is not None:
macstring = getmac(args.interface)
if (macstring is not None):
macaddr = macstring.split(':')
if (args.do == "send"):
if (args.inner_source_mac is None):
args.inner_source_mac = macstring
if (args.inner_destination_mac is None):
print("Error: you must specify inner destination MAC for packet send")
sys.exit(-1)
if (args.inner_source_ip is None) or (args.inner_destination_ip is None):
print("Error: you must specify inner source IP and inner destination IP for packet send")
sys.exit(-1)
if (args.outer_source_mac is None):
args.outer_source_mac = args.inner_source_mac
if (args.outer_destination_mac is None):
args.outer_destination_mac = args.inner_destination_mac
if (args.outer_source_ip is None):
args.outer_source_ip = args.inner_source_ip
if (args.outer_destination_ip is None):
args.outer_destination_ip = args.inner_destination_ip
if (args.outer_source_udp_port is None):
args.outer_source_udp_port = 55651
if (args.inner_source_udp_port is None):
args.inner_source_udp_port = args.outer_source_udp_port
if (args.inner_destination_udp_port is None):
args.inner_destination_udp_port = 25
if (args.number is None):
args.number = 10
except OSError as e:
print("{}".format(e) + " '%s'" % args.interface)
sys.exit(-1)
do_print = ((args.do != "forward") or (args.verbose == "on"))
vxlan_gpe_udp_ports = [4790, 6633]
vxlan_udp_ports = [4789] + vxlan_gpe_udp_ports
#header len
eth_length = 14
ip_length = 20
udp_length = 8
vxlan_length = 8
nshbase_length = 8
nshcontext_length = 16
""" Send VxLAN/VxLAN-gpe + NSH packet """
if (args.do == "send"):
myethheader = ETHHEADER()
myipheader = IP4HEADER()
myudpheader = UDPHEADER()
myvxlanheader = VXLAN()
mynshbaseheader = BASEHEADER()
mynshcontextheader = CONTEXTHEADER()
""" Set Ethernet header """
dstmacaddr = args.outer_destination_mac.split(":")
myethheader.dmac0 = int(dstmacaddr[0], 16)
myethheader.dmac1 = int(dstmacaddr[1], 16)
myethheader.dmac2 = int(dstmacaddr[2], 16)
myethheader.dmac3 = int(dstmacaddr[3], 16)
myethheader.dmac4 = int(dstmacaddr[4], 16)
myethheader.dmac5 = int(dstmacaddr[5], 16)
myethheader.smac0 = int(macaddr[0], 16)
myethheader.smac1 = int(macaddr[1], 16)
myethheader.smac2 = int(macaddr[2], 16)
myethheader.smac3 = int(macaddr[3], 16)
myethheader.smac4 = int(macaddr[4], 16)
myethheader.smac5 = int(macaddr[5], 16)
myethheader.ethertype0 = 0x08
myethheader.ethertype1 = 0x00
""" Set VxLAN header """
myvxlanheader.flags = 0
myvxlanheader.reserved = 0
myvxlanheader.next_protocol = 0x04
myvxlanheader.vni = 0x1234
myvxlanheader.reserved2 = 0
""" Set NSH base header """
mynshbaseheader.flags = NSH_FLAG_ZERO
mynshbaseheader.length = NSH_TYPE1_LEN
mynshbaseheader.md_type = NSH_MD_TYPE1
mynshbaseheader.next_protocol = NSH_NEXT_PROTO_ETH
mynshbaseheader.service_path = 23
mynshbaseheader.service_index = 45
""" Set NSH context header """
mynshcontextheader.network_platform = int_from_bytes(socket.inet_aton(args.outer_destination_ip))
mynshcontextheader.network_shared = 0x1234
mynshcontextheader.service_platform = 0x12345678
mynshcontextheader.service_shared = 0x87654321
innerippack = build_udp_packet(args.inner_source_ip, args.inner_destination_ip, args.inner_source_udp_port, args.inner_destination_udp_port, "Hellow, World!!!".encode('utf-8'), False)
if (args.type == "vxlan_gpe_nsh"):
outerippack = build_udp_packet(args.outer_source_ip, args.outer_destination_ip, args.outer_source_udp_port, 4790, myvxlanheader.build() + mynshbaseheader.build() + mynshcontextheader.build() + myethheader.build() + innerippack, False)
elif (args.type == "eth_nsh"):
outerippack = mynshbaseheader.build() + mynshcontextheader.build() + myethheader.build() + innerippack
myethheader.ethertype0 = 0x89
myethheader.ethertype1 = 0x4f
""" Build Ethernet packet """
ethpkt = myethheader.build() + outerippack
""" Decode ethernet header """
decode_eth(ethpkt, 0, myethheader)
if (args.type == "eth_nsh"):
offset = eth_length
decode_nsh_baseheader(ethpkt, offset, mynshbaseheader)
decode_nsh_contextheader(ethpkt, offset + nshbase_length, mynshcontextheader)
elif (args.type == "vxlan_gpe_nsh"):
""" Decode IP header """
decode_ip(ethpkt, myipheader)
""" Decode UDP header """
decode_udp(ethpkt, myudpheader)
offset = eth_length + ip_length + udp_length + vxlan_length
decode_nsh_baseheader(ethpkt, offset, mynshbaseheader)
decode_nsh_contextheader(ethpkt, offset + nshbase_length, mynshcontextheader)
pktnum = 0
while (args.number > 0):
""" Send it and make sure all the data is sent out """
pkt = ethpkt
while pkt:
sent = send_s.send(pkt)
pkt = pkt[sent:]
pktnum += 1
if (do_print):
print("\n\nPacket #%d" % pktnum)
""" Print ethernet header """
if (do_print):
print_ethheader(myethheader)
if (args.type == "vxlan_gpe_nsh"):
""" Print IP header """
if (do_print):
print_ipheader(myipheader)
""" Print UDP header """
if (do_print):
print_udpheader(myudpheader)
""" Print VxLAN/VxLAN-gpe header """
if (do_print):
print_vxlanheader(myvxlanheader)
""" Print NSH base header """
if (do_print):
print_nsh_baseheader(mynshbaseheader)
""" Print NSH context header """
if (do_print):
print_nsh_contextheader(mynshcontextheader)
args.number -= 1
sys.exit(0)
# receive a packet
pktnum=0
while True:
packet = s.recvfrom(65565)
#packet string from tuple
packet = packet[0]
myethheader = ETHHEADER()
myinsertedethheader = ETHHEADER()
has_inserted_eth = False
""" Decode ethernet header """
decode_eth(packet, 0, myethheader)
if ((myethheader.ethertype0 != 0x08) or (myethheader.ethertype1 != 0x00)):
if ((myethheader.ethertype0 != 0x89) or (myethheader.ethertype1 != 0x4f)):
continue
if (macaddr is not None):
if ((myethheader.dmac4 != int(macaddr[4], 16)) or (myethheader.dmac5 != int(macaddr[5], 16))):
continue
""" Check if the received packet was ETH + NSH """
        if ((myethheader.ethertype0 == 0x89) and (myethheader.ethertype1 == 0x4f)):
pktnum = pktnum + 1
print("\n\nPacket #%d" % pktnum)
""" Eth + NSH """
mynshbaseheader = BASEHEADER()
mynshcontextheader = CONTEXTHEADER()
offset = eth_length
decode_nsh_baseheader(packet, offset, mynshbaseheader)
decode_nsh_contextheader(packet, offset + nshbase_length, mynshcontextheader)
""" Print ethernet header """
print_ethheader(myethheader)
""" Print NSH base header """
print_nsh_baseheader(mynshbaseheader)
""" Print NSH context header """
print_nsh_contextheader(mynshcontextheader)
""" Check if Firewall checking is enabled, and block/drop if its the same TCP port """
if (args.block != 0):
mytcpheader = TCPHEADER()
decode_tcp(packet, 0, mytcpheader)
if (mytcpheader.tcp_dport == args.block):
                    print(bcolors.WARNING + "TCP packet dropped on port: " + str(args.block) + bcolors.ENDC)
continue
if ((args.do == "forward") and (args.interface is not None)):
""" nsi minus one for send """
mynshbaseheader.service_index = mynshbaseheader.service_index - 1
""" Build Ethernet header """
newethheader = build_ethernet_header_swap(myethheader)
""" Build Ethernet packet """
pkt = newethheader.build() + mynshbaseheader.build() + mynshcontextheader.build() + packet[eth_length+nshbase_length+nshcontext_length:]
""" Send it and make sure all the data is sent out """
while pkt:
sent = send_s.send(pkt)
pkt = pkt[sent:]
continue
pktnum = pktnum + 1
# if (do_print):
# print("\n\nPacket #%d" % pktnum)
""" Print ethernet header """
# if (do_print):
# print_ethheader(myethheader)
myipheader = IP4HEADER()
""" Decode IP header """
decode_ip(packet, myipheader)
""" Print IP header """
# if (do_print):
# print_ipheader(myipheader)
if (myipheader.ip_proto != 17):
continue
myudpheader = UDPHEADER()
""" Decode UDP header """
decode_udp(packet, myudpheader)
""" Print UDP header """
if (do_print):
print_udpheader(myudpheader)
if (myudpheader.udp_dport not in vxlan_udp_ports):
continue
myvxlanheader = VXLAN()
""" Decode VxLAN/VxLAN-gpe header """
decode_vxlan(packet, myvxlanheader)
""" Print VxLAN/VxLAN-gpe header """
if (do_print):
print_vxlanheader(myvxlanheader)
mynshbaseheader = BASEHEADER()
mynshcontextheader = CONTEXTHEADER()
""" Print NSH header """
if (myudpheader.udp_dport in vxlan_gpe_udp_ports):
offset = eth_length + ip_length + udp_length + vxlan_length
""" Decode inserted ethernet header before NSH """
decode_eth(packet, offset, myinsertedethheader)
if ((myinsertedethheader.ethertype0 == 0x89) and (myinsertedethheader.ethertype1 == 0x4f)):
has_inserted_eth = True
offset += eth_length
decode_nsh_baseheader(packet, offset, mynshbaseheader)
offset += nshbase_length
decode_nsh_contextheader(packet, offset, mynshcontextheader)
offset += nshcontext_length
""" Print NSH base header """
if (do_print):
print_nsh_baseheader(mynshbaseheader)
""" Print NSH context header """
if (do_print):
print_nsh_contextheader(mynshcontextheader)
""" Check if Firewall checking is enabled, and block/drop if its the same TCP port """
if (args.block != 0):
mytcpheader = TCPHEADER()
decode_tcp(packet, eth_length, mytcpheader)
print bcolors.OKBLUE + "FLAGS" + str(mytcpheader.tcp_flags) + bcolors.ENDC
print bcolors.OKBLUE + "OFFSET " + str(mytcpheader.tcp_offset) + bcolors.ENDC
print bcolors.OKBLUE + "SEQ " + str(mytcpheader.tcp_seq) + bcolors.ENDC
print bcolors.OKBLUE + "ACK " + str(mytcpheader.tcp_ack) + bcolors.ENDC
if (mynshcontextheader.service_platform == 0):
if (mytcpheader.tcp_dport == args.block):
print bcolors.WARNING + "TCP packet dropped on port: " + str(args.block) + bcolors.ENDC
continue
else:
print bcolors.WARNING + "TCP packet dropped: " + str(args.block) + " and RESET sent" + bcolors.ENDC
"Activate the RESET flag and exchange tcp ports"
# "We create the ICMP packet"
# print bcolors.WARNING + "TCP packet dropped: " + str(args.block) + " and ICMP sent" + bcolors.ENDC
# old_packet = packet
# myicmpheader = ICMPHEADER()
# myicmpheader.icmp_type = 3
# myicmpheader.icmp_code = 1
# myicmpheader.icmp_checksum = 0
# myicmpheader.icmp_unused = 0
# myicmpheader.icmp_MTU = 1400
# myicmpheader.icmp_iphead = 0
## ip_header_and_data = old_packet[(88+eth_length):(108+eth_length)] + packet[(108+eth_length):(116+eth_length)]
# ip_header_and_data = old_packet[(88+eth_length):]
# icmp_header_aux = myicmpheader.build()
# icmp_header = icmp_header_aux[:8] + ip_header_and_data
# icmp_checksum = compute_internet_checksum(icmp_header)
# myicmpheader.icmp_checksum = icmp_checksum
# icmp_header_aux = myicmpheader.build()
# icmp_header = icmp_header_aux[:8] + ip_header_and_data
# packet_aux = packet[:(108+eth_length)] + icmp_header
# packet = packet_aux
"We do the same but with IP"
myinternalipheader = IP4HEADER()
decode_internal_ip(packet, eth_length, myinternalipheader)
"Use the following parameters for ICMP"
# myinternalipheader.ip_tot_len = 88
# myinternalipheader.ip_tos = 192
# myinternalipheader.ip_proto = 1
myinternalipheader.ip_id = 0
myinternalipheader.ip_tot_len = 40
ip_source = myinternalipheader.ip_saddr
myinternalipheader.ip_saddr = myinternalipheader.ip_daddr
myinternalipheader.ip_daddr = ip_source
new_internalipheader = myinternalipheader.build()
old_internalipheader = packet[(88+eth_length):(108+eth_length)]
packet_aux = packet[:(88+eth_length)] + new_internalipheader + packet[(108+eth_length):]
packet = packet_aux
"We build the new tcp header with the RESET=1"
tcp_header, new_tcpheader = build_tcp_reset(mytcpheader, myinternalipheader)
# "We build the new tcp header with the RESET=1"
# new_tcpheader = mytcpheader.build()
old_tcpheader = packet[(108+eth_length):(128+eth_length)]
"We create an auxiliar variable because strings are immutable"
# packet_aux = packet[:(108+eth_length)] + new_tcpheader + packet[(128+eth_length):]
packet_aux = packet[:(108+eth_length)] + new_tcpheader
"We replace the packet with the new tcp header and save the original one"
packet = packet_aux
# "We do the same but with MAC"
inner_internal_ethheader = ETHHEADER()
inner_offset = eth_length + ip_length + udp_length + vxlan_length + eth_length + nshbase_length + nshcontext_length
decode_eth(packet, inner_offset, inner_internal_ethheader)
newethheader = build_ethernet_header_swap(inner_internal_ethheader)
new_ether_header = newethheader.build()
old_ether_header = packet[(74+eth_length):(88+eth_length)]
packet_aux = packet[:inner_offset] + new_ether_header + packet[inner_offset + eth_length:]
packet = packet_aux
#
#
# "We get the nsp of the symmetric chain which is in the metadata"
# nsp_symm = mynshcontextheader.service_platform
# mynshbaseheader.service_path = nsp_symm
if ((args.do == "forward") and (args.interface is not None) and (mynshbaseheader.service_index > 1)):
""" Build Ethernet header """
newethheader = build_ethernet_header_swap(myethheader)
""" Build the packet, either encapsulated, or the original inner packet """
pkt = None
if args.forward_inner:
""" Just build the original, inner packet """
inner_offset = eth_length + ip_length + udp_length + vxlan_length + nshbase_length + nshcontext_length
inner_ethheader = ETHHEADER()
# Get the inner ethernet header
decode_eth(packet[inner_offset:], inner_ethheader)
# The new SourceMac should be the outer dest, and the new DestMac should be the inner dest
# This call sets the new SourceMac to be the outer dest
newethheader = build_ethernet_header_swap(myethheader)
# Now set the DestMac to be the inner dest
newethheader.dmac0 = inner_ethheader.dmac0
newethheader.dmac1 = inner_ethheader.dmac1
newethheader.dmac2 = inner_ethheader.dmac2
newethheader.dmac3 = inner_ethheader.dmac3
newethheader.dmac4 = inner_ethheader.dmac4
newethheader.dmac5 = inner_ethheader.dmac5
pkt = newethheader.build() + packet[inner_offset + eth_length:]
else:
""" Build IP packet"""
if (myudpheader.udp_dport in vxlan_gpe_udp_ports):
""" nsi minus one """
mynshbaseheader.service_index = mynshbaseheader.service_index - 1
if (has_inserted_eth is True):
ippack = build_udp_packet(str(socket.inet_ntoa(pack('!I', myipheader.ip_saddr))), str(socket.inet_ntoa(pack('!I', myipheader.ip_daddr))), myudpheader.udp_sport, myudpheader.udp_dport, myvxlanheader.build() + myinsertedethheader.build() + mynshbaseheader.build() + mynshcontextheader.build() + packet[offset:], args.swap_ip)
else:
ippack = build_udp_packet(str(socket.inet_ntoa(pack('!I', myipheader.ip_saddr))), str(socket.inet_ntoa(pack('!I', myipheader.ip_daddr))), myudpheader.udp_sport, myudpheader.udp_dport, myvxlanheader.build() + mynshbaseheader.build() + mynshcontextheader.build() + packet[offset:], args.swap_ip)
else:
ippack = build_udp_packet(str(socket.inet_ntoa(pack('!I', myipheader.ip_saddr))), str(socket.inet_ntoa(pack('!I', myipheader.ip_daddr))), myudpheader.udp_sport, myudpheader.udp_dport, packet[eth_length+ip_length+udp_length:], args.swap_ip)
""" Build Ethernet packet """
pkt = newethheader.build() + ippack
""" Send it and make sure all the data is sent out """
while pkt:
sent = send_s.send(pkt)
pkt = pkt[sent:]
if __name__ == "__main__":
main()
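# Illustrative invocation sketch (the flag names are an assumption inferred from
# the args.* attributes used above; the authoritative names live in this script's
# argparse setup, defined earlier in the file):
#   sudo python this_script.py --interface eth0 --do forward --block 80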
|
[
"root@fuel.domain.tld"
] |
root@fuel.domain.tld
|
2a4be14f4fce6bea15a6e689acb94e58aff1b21c
|
fc4625297dd6ffcee239bf332d46c483c42d5e74
|
/02-Database-Socket/day10/exit.py
|
fab05a74c5ebd5d106ccf7d7f45b62d998053be1
|
[] |
no_license
|
Healer0616/aid1902
|
cd6a23b0340b9c6739380377b866051dc8236c75
|
069991fd503931ea889d69a26e3f2819b44c2450
|
refs/heads/master
| 2021-07-08T14:39:21.879983
| 2021-03-19T06:55:37
| 2021-03-19T06:55:37
| 232,581,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 116
|
py
|
import os,sys
# ends the process immediately, without executing the statements below
#os._exit(0)
sys.exit("Process exiting")
print("Process over")
|
[
"healer0616@126.com"
] |
healer0616@126.com
|
e4dfb94a30f28fbb76f4e0e88c4a07ccf19c28b9
|
e9a82ed691c23fdb9c3792832dc0137b3cf3bf8c
|
/Tutorial_Backup/circles.py
|
a68551b64fe198f00a847280162e927eeb58b38e
|
[] |
no_license
|
psshankar64/PiGitFolderFDC
|
55375c6b13e79a5c82e98fad58c5512478161ccf
|
5ed7d5b8e8b0f2b936c0661ae5af7f5fd80b88d7
|
refs/heads/master
| 2020-06-25T03:42:49.846552
| 2019-09-25T09:36:39
| 2019-09-25T09:36:39
| 199,189,933
| 1
| 0
| null | 2019-09-07T10:03:47
| 2019-07-27T16:38:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,609
|
py
|
# This script will demonstrate how to draw circles
import sys
import random
import math
import pygame
import pygame.gfxdraw
from pygame.locals import *
#Define some standard colors
FUCHSIA = (255, 0, 255)
PURPLE = (128, 0, 128)
TEAL = (0, 128, 128)
LIME = (0, 255, 0)
GREEN = (0, 255, 0)
OLIVE = (128, 128, 0)
YELLOW = (255, 255, 0)
ORANGE = (255, 165, 0)
RED = (255, 0, 0)
MAROON = (128, 0, 0)
SILVER = (192, 192, 192)
GRAY = (128, 128, 128)
BLUE = (0, 0, 255)
NAVY = (0, 0, 128)
AQUA = (0, 255, 255)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
pygame.init()
DISPLAY_WIDTH = 800
DISPLAY_HEIGHT = 600
DISPLAY_AREA = DISPLAY_WIDTH * DISPLAY_HEIGHT
DS = pygame.display.set_mode((DISPLAY_WIDTH, DISPLAY_HEIGHT))
# FUNCTIONS ------------------------------------------------------------------------------------------------ FUNCTIONS
def event_handler():
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
while True:
event_handler()
    # draw a filled green circle in the center of the display surface with a radius of 150 pixels
    # (Python 3's / always returns a float, so use // to get the integer coordinates pygame expects)
    pygame.draw.circle(DS, GREEN, (DISPLAY_WIDTH // 2, DISPLAY_HEIGHT // 2), 150, 0)
    # draw a smaller filled red circle on top of it (a width of 0 fills the circle)
    pygame.draw.circle(DS, RED, (DISPLAY_WIDTH // 2, DISPLAY_HEIGHT // 2), 120, 0)
pygame.draw.circle(DS, GREEN, (DISPLAY_WIDTH // 2, DISPLAY_HEIGHT // 2), 50, 0)
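    # For a genuinely hollow circle, pass a positive width as the outline
    # thickness instead of 0, e.g. (illustrative):
    # pygame.draw.circle(DS, RED, (DISPLAY_WIDTH // 2, DISPLAY_HEIGHT // 2), 120, 3)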
pygame.display.update()
DS.fill([0,0,0])
|
[
"psshankar64@yahoo.com"
] |
psshankar64@yahoo.com
|
eb78f117f39445b509a128d00ce2509547912f46
|
1ee7b843d18834d9bdab122da0a3641bad832515
|
/WebServicesPython/WebServicesPython/asgi.py
|
f8f16dea5f5526f70c12c43e05d0b0d978c229b8
|
[] |
no_license
|
CarlosAlmeida2000/WebServicesPython
|
8da5dbf8e70c7e52fe09f0209c42bd61a0df436e
|
bd2a4f2c53df8fa3724f2e0ecdd47b5018ac75f0
|
refs/heads/main
| 2023-07-08T13:11:03.946832
| 2021-08-11T00:06:16
| 2021-08-11T00:06:16
| 394,281,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
"""
ASGI config for WebServicesPython project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'WebServicesPython.settings')
application = get_asgi_application()
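# A common way to serve this application (illustrative; assumes an ASGI server
# such as uvicorn is installed, which is not part of this file):
#   uvicorn WebServicesPython.asgi:application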
|
[
"carlos.almeida2017@uteq.edu.ec"
] |
carlos.almeida2017@uteq.edu.ec
|
388843ec540100a8577f65678c60bc08e35de2e7
|
08e2f659aeac18351078468d28fbd39a19ba129f
|
/interpreterSample/Expresiones/Casteo.py
|
4228c71f296e746542e7a097bf97dd38742a2aab
|
[] |
no_license
|
josejfss/OLC1_Junio2021
|
fa31b81b8a1248d84759882a9a26fd9056a365d4
|
36437d7da3594b3876a534f0ce9d6ed1fa0ad996
|
refs/heads/main
| 2023-06-06T01:04:27.235343
| 2021-06-30T20:46:25
| 2021-06-30T20:46:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,310
|
py
|
from Abstract.NodoAST import NodoAST
from Abstract.Instruccion import Instruccion
from TS.Excepcion import Excepcion
from TS.Tipo import TIPO, OperadorLogico
class Casteo(Instruccion):
def __init__(self, tipo, expresion, fila, columna):
self.expresion = expresion
self.fila = fila
self.columna = columna
self.tipo = tipo
def interpretar(self, tree, table):
val = self.expresion.interpretar(tree, table)
if self.tipo == TIPO.DECIMAL:
if self.expresion.tipo == TIPO.ENTERO:
try:
return float(self.obtenerVal(self.expresion.tipo, val))
except:
return Excepcion("Semantico", "No se puede castear para Float.", self.fila, self.columna)
elif self.expresion.tipo == TIPO.CADENA:
try:
return float(self.obtenerVal(self.expresion.tipo, val))
except:
return Excepcion("Semantico", "No se puede castear para Float.", self.fila, self.columna)
return Excepcion("Semantico", "Tipo Erroneo de casteo para Double.", self.fila, self.columna)
if self.tipo == TIPO.ENTERO:
if self.expresion.tipo == TIPO.DECIMAL:
try:
return int(self.obtenerVal(self.expresion.tipo, val))
except:
return Excepcion("Semantico", "No se puede castear para Int.", self.fila, self.columna)
elif self.expresion.tipo == TIPO.CADENA:
try:
return int(self.obtenerVal(self.expresion.tipo, val))
except:
return Excepcion("Semantico", "No se puede castear para Int.", self.fila, self.columna)
return Excepcion("Semantico", "Tipo Erroneo de casteo para Int.", self.fila, self.columna)
def getNodo(self):
nodo = NodoAST("CASTEO")
nodo.agregarHijo(str(self.tipo))
nodo.agregarHijoNodo(self.expresion.getNodo())
return nodo
def obtenerVal(self, tipo, val):
if tipo == TIPO.ENTERO:
return int(val)
elif tipo == TIPO.DECIMAL:
return float(val)
elif tipo == TIPO.BOOLEANO:
return bool(val)
return str(val)
|
[
"puac235@gmail.com"
] |
puac235@gmail.com
|
1e7f842df8f57de2892f6c63477347bd518f94bf
|
ba489597ca034481f3446767e7e29606d944780a
|
/main_gui.py
|
176cf0192d916382288975130d761eaca9c2b15b
|
[] |
no_license
|
mkelley88/BiddergyBrowser
|
8dc8105dcc2f5c32e1df8c01e848366a973fb2c6
|
db35aab244be8a01d7a5b35a17614f41f5bd297c
|
refs/heads/master
| 2021-07-11T14:44:46.284517
| 2021-03-26T07:00:48
| 2021-03-26T07:00:48
| 45,760,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28,387
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'biddergy3.ui'
#
# Created: Mon Nov 14 18:44:41 2016
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(869, 784)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_2 = QtGui.QGridLayout(self.centralwidget)
self.gridLayout_2.setObjectName("gridLayout_2")
self.widget_2 = QtGui.QWidget(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_2.sizePolicy().hasHeightForWidth())
self.widget_2.setSizePolicy(sizePolicy)
self.widget_2.setObjectName("widget_2")
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.widget_2)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.imgBiddergyLogo = QtGui.QLabel(self.widget_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.imgBiddergyLogo.sizePolicy().hasHeightForWidth())
self.imgBiddergyLogo.setSizePolicy(sizePolicy)
self.imgBiddergyLogo.setText("")
self.imgBiddergyLogo.setPixmap(QtGui.QPixmap("img/biddergy_new_logo.png"))
self.imgBiddergyLogo.setScaledContents(True)
self.imgBiddergyLogo.setAlignment(QtCore.Qt.AlignCenter)
self.imgBiddergyLogo.setObjectName("imgBiddergyLogo")
self.horizontalLayout_2.addWidget(self.imgBiddergyLogo)
self.txtSearch = QtGui.QLineEdit(self.widget_2)
self.txtSearch.setObjectName("txtSearch")
self.horizontalLayout_2.addWidget(self.txtSearch)
self.comSearchType = QtGui.QComboBox(self.widget_2)
self.comSearchType.setObjectName("comSearchType")
self.comSearchType.addItem("")
self.comSearchType.addItem("")
self.comSearchType.addItem("")
self.horizontalLayout_2.addWidget(self.comSearchType)
self.pushButton = QtGui.QPushButton(self.widget_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton.sizePolicy().hasHeightForWidth())
self.pushButton.setSizePolicy(sizePolicy)
self.pushButton.setSizeIncrement(QtCore.QSize(0, 0))
self.pushButton.setObjectName("pushButton")
self.horizontalLayout_2.addWidget(self.pushButton)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.gridLayout_2.addWidget(self.widget_2, 0, 1, 1, 1)
self.tab_myAccount = QtGui.QTabWidget(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tab_myAccount.sizePolicy().hasHeightForWidth())
self.tab_myAccount.setSizePolicy(sizePolicy)
self.tab_myAccount.setMinimumSize(QtCore.QSize(460, 410))
self.tab_myAccount.setTabShape(QtGui.QTabWidget.Rounded)
self.tab_myAccount.setTabsClosable(False)
self.tab_myAccount.setObjectName("tab_myAccount")
self.tabSummary = QtGui.QWidget()
self.tabSummary.setObjectName("tabSummary")
self.gridLayout_4 = QtGui.QGridLayout(self.tabSummary)
self.gridLayout_4.setObjectName("gridLayout_4")
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.tabWidget = QtGui.QTabWidget(self.tabSummary)
self.tabWidget.setObjectName("tabWidget")
self.tab_2 = QtGui.QWidget()
self.tab_2.setObjectName("tab_2")
self.btnRefreshSummary = QtGui.QPushButton(self.tab_2)
self.btnRefreshSummary.setGeometry(QtCore.QRect(360, 290, 85, 27))
self.btnRefreshSummary.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.btnRefreshSummary.setObjectName("btnRefreshSummary")
self.widget = QtGui.QWidget(self.tab_2)
self.widget.setGeometry(QtCore.QRect(0, 10, 87, 134))
self.widget.setObjectName("widget")
self.gridLayout_5 = QtGui.QGridLayout(self.widget)
self.gridLayout_5.setContentsMargins(0, 0, 0, 0)
self.gridLayout_5.setObjectName("gridLayout_5")
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.lblWatching = QtGui.QLabel(self.widget)
self.lblWatching.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.lblWatching.setObjectName("lblWatching")
self.gridLayout.addWidget(self.lblWatching, 0, 0, 1, 1)
self.lblBidding = QtGui.QLabel(self.widget)
self.lblBidding.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.lblBidding.setObjectName("lblBidding")
self.gridLayout.addWidget(self.lblBidding, 1, 0, 1, 1)
self.lblWon = QtGui.QLabel(self.widget)
self.lblWon.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.lblWon.setObjectName("lblWon")
self.gridLayout.addWidget(self.lblWon, 2, 0, 1, 1)
self.lblNotWon = QtGui.QLabel(self.widget)
self.lblNotWon.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.lblNotWon.setObjectName("lblNotWon")
self.gridLayout.addWidget(self.lblNotWon, 3, 0, 1, 1)
self.lblPurchases = QtGui.QLabel(self.widget)
self.lblPurchases.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.lblPurchases.setObjectName("lblPurchases")
self.gridLayout.addWidget(self.lblPurchases, 4, 0, 1, 1)
self.lblInvoices = QtGui.QLabel(self.widget)
self.lblInvoices.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.lblInvoices.setObjectName("lblInvoices")
self.gridLayout.addWidget(self.lblInvoices, 5, 0, 1, 1)
self.gridLayout_5.addLayout(self.gridLayout, 0, 0, 1, 1)
self.gridLayout_3 = QtGui.QGridLayout()
self.gridLayout_3.setObjectName("gridLayout_3")
self.valWatching = QtGui.QLabel(self.widget)
self.valWatching.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.valWatching.setObjectName("valWatching")
self.gridLayout_3.addWidget(self.valWatching, 0, 0, 1, 1)
self.valBidding = QtGui.QLabel(self.widget)
self.valBidding.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.valBidding.setObjectName("valBidding")
self.gridLayout_3.addWidget(self.valBidding, 1, 0, 1, 1)
self.valWon = QtGui.QLabel(self.widget)
self.valWon.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.valWon.setObjectName("valWon")
self.gridLayout_3.addWidget(self.valWon, 2, 0, 1, 1)
self.valNotWon = QtGui.QLabel(self.widget)
self.valNotWon.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.valNotWon.setObjectName("valNotWon")
self.gridLayout_3.addWidget(self.valNotWon, 3, 0, 1, 1)
self.valPurchases = QtGui.QLabel(self.widget)
self.valPurchases.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.valPurchases.setObjectName("valPurchases")
self.gridLayout_3.addWidget(self.valPurchases, 4, 0, 1, 1)
self.valInvoices = QtGui.QLabel(self.widget)
self.valInvoices.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.valInvoices.setObjectName("valInvoices")
self.gridLayout_3.addWidget(self.valInvoices, 5, 0, 1, 1)
self.gridLayout_5.addLayout(self.gridLayout_3, 0, 1, 1, 1)
self.tabWidget.addTab(self.tab_2, "")
self.tab_3 = QtGui.QWidget()
self.tab_3.setObjectName("tab_3")
self.tabWidget.addTab(self.tab_3, "")
self.tab_4 = QtGui.QWidget()
self.tab_4.setObjectName("tab_4")
self.tabWidget.addTab(self.tab_4, "")
self.tab_5 = QtGui.QWidget()
self.tab_5.setObjectName("tab_5")
self.tabWidget.addTab(self.tab_5, "")
self.tab_6 = QtGui.QWidget()
self.tab_6.setObjectName("tab_6")
self.tabWidget.addTab(self.tab_6, "")
self.tab_7 = QtGui.QWidget()
self.tab_7.setObjectName("tab_7")
self.tabWidget.addTab(self.tab_7, "")
self.horizontalLayout.addWidget(self.tabWidget)
self.gridLayout_4.addLayout(self.horizontalLayout, 1, 1, 1, 2)
self.tab_myAccount.addTab(self.tabSummary, "")
self.tab = QtGui.QWidget()
self.tab.setObjectName("tab")
self.gridLayout_6 = QtGui.QGridLayout(self.tab)
self.gridLayout_6.setObjectName("gridLayout_6")
self.splitter = QtGui.QSplitter(self.tab)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setObjectName("splitter")
self.wid_statusBlock = QtGui.QWidget(self.splitter)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.wid_statusBlock.sizePolicy().hasHeightForWidth())
self.wid_statusBlock.setSizePolicy(sizePolicy)
self.wid_statusBlock.setMinimumSize(QtCore.QSize(270, 160))
self.wid_statusBlock.setObjectName("wid_statusBlock")
self.val_itemTitle = QtGui.QLabel(self.wid_statusBlock)
self.val_itemTitle.setGeometry(QtCore.QRect(10, 5, 241, 31))
font = QtGui.QFont()
font.setPointSize(14)
font.setWeight(75)
font.setBold(True)
self.val_itemTitle.setFont(font)
self.val_itemTitle.setObjectName("val_itemTitle")
self.val_itemFormat = QtGui.QLabel(self.wid_statusBlock)
self.val_itemFormat.setGeometry(QtCore.QRect(10, 26, 41, 16))
font = QtGui.QFont()
font.setPointSize(8)
font.setWeight(75)
font.setBold(True)
self.val_itemFormat.setFont(font)
self.val_itemFormat.setObjectName("val_itemFormat")
self.val_itemCurrentPrice = QtGui.QLabel(self.wid_statusBlock)
self.val_itemCurrentPrice.setGeometry(QtCore.QRect(180, 50, 71, 16))
font = QtGui.QFont()
font.setPointSize(12)
font.setWeight(75)
font.setBold(True)
self.val_itemCurrentPrice.setFont(font)
self.val_itemCurrentPrice.setFrameShadow(QtGui.QFrame.Plain)
self.val_itemCurrentPrice.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.val_itemCurrentPrice.setObjectName("val_itemCurrentPrice")
self.lbl_itemBids = QtGui.QLabel(self.wid_statusBlock)
self.lbl_itemBids.setGeometry(QtCore.QRect(10, 50, 31, 16))
self.lbl_itemBids.setObjectName("lbl_itemBids")
self.lbl_itemLocation = QtGui.QLabel(self.wid_statusBlock)
self.lbl_itemLocation.setGeometry(QtCore.QRect(10, 100, 51, 16))
self.lbl_itemLocation.setObjectName("lbl_itemLocation")
self.lbl_itemStarts = QtGui.QLabel(self.wid_statusBlock)
self.lbl_itemStarts.setGeometry(QtCore.QRect(10, 120, 31, 16))
self.lbl_itemStarts.setObjectName("lbl_itemStarts")
self.lbl_itemEnds = QtGui.QLabel(self.wid_statusBlock)
self.lbl_itemEnds.setGeometry(QtCore.QRect(10, 140, 31, 16))
self.lbl_itemEnds.setObjectName("lbl_itemEnds")
self.lbl_itemHighBidder = QtGui.QLabel(self.wid_statusBlock)
self.lbl_itemHighBidder.setGeometry(QtCore.QRect(10, 70, 71, 16))
self.lbl_itemHighBidder.setObjectName("lbl_itemHighBidder")
self.val_itemBids = QtGui.QLabel(self.wid_statusBlock)
self.val_itemBids.setGeometry(QtCore.QRect(40, 50, 31, 16))
self.val_itemBids.setObjectName("val_itemBids")
self.val_itemHighBidder = QtGui.QLabel(self.wid_statusBlock)
self.val_itemHighBidder.setGeometry(QtCore.QRect(80, 70, 171, 16))
self.val_itemHighBidder.setObjectName("val_itemHighBidder")
self.val_itemLocation = QtGui.QLabel(self.wid_statusBlock)
self.val_itemLocation.setGeometry(QtCore.QRect(60, 100, 191, 16))
self.val_itemLocation.setObjectName("val_itemLocation")
self.val_itemStart = QtGui.QLabel(self.wid_statusBlock)
self.val_itemStart.setGeometry(QtCore.QRect(50, 120, 191, 16))
self.val_itemStart.setObjectName("val_itemStart")
self.val_itemEnd = QtGui.QLabel(self.wid_statusBlock)
self.val_itemEnd.setGeometry(QtCore.QRect(50, 140, 191, 16))
self.val_itemEnd.setObjectName("val_itemEnd")
self.lbl_itemLotNumber = QtGui.QLabel(self.wid_statusBlock)
self.lbl_itemLotNumber.setGeometry(QtCore.QRect(50, 26, 21, 16))
font = QtGui.QFont()
font.setPointSize(8)
font.setWeight(75)
font.setBold(True)
self.lbl_itemLotNumber.setFont(font)
self.lbl_itemLotNumber.setObjectName("lbl_itemLotNumber")
self.val_itemLotNumber = QtGui.QLabel(self.wid_statusBlock)
self.val_itemLotNumber.setGeometry(QtCore.QRect(74, 26, 41, 16))
font = QtGui.QFont()
font.setPointSize(8)
font.setWeight(75)
font.setBold(True)
self.val_itemLotNumber.setFont(font)
self.val_itemLotNumber.setObjectName("val_itemLotNumber")
self.lbl_itemListingNumber = QtGui.QLabel(self.wid_statusBlock)
self.lbl_itemListingNumber.setGeometry(QtCore.QRect(110, 26, 41, 16))
font = QtGui.QFont()
font.setPointSize(8)
font.setWeight(75)
font.setBold(True)
self.lbl_itemListingNumber.setFont(font)
self.lbl_itemListingNumber.setObjectName("lbl_itemListingNumber")
self.val_itemListingNumber = QtGui.QLabel(self.wid_statusBlock)
self.val_itemListingNumber.setGeometry(QtCore.QRect(146, 26, 41, 16))
font = QtGui.QFont()
font.setPointSize(8)
font.setWeight(75)
font.setBold(True)
self.val_itemListingNumber.setFont(font)
self.val_itemListingNumber.setObjectName("val_itemListingNumber")
self.txt_itemDescription = QtGui.QTextBrowser(self.splitter)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.txt_itemDescription.sizePolicy().hasHeightForWidth())
self.txt_itemDescription.setSizePolicy(sizePolicy)
self.txt_itemDescription.setAutoFillBackground(False)
self.txt_itemDescription.setFrameShape(QtGui.QFrame.StyledPanel)
self.txt_itemDescription.setFrameShadow(QtGui.QFrame.Sunken)
self.txt_itemDescription.setObjectName("txt_itemDescription")
self.gridLayout_6.addWidget(self.splitter, 0, 0, 1, 1)
self.web_itemImage = QtWebKit.QWebView(self.tab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.web_itemImage.sizePolicy().hasHeightForWidth())
self.web_itemImage.setSizePolicy(sizePolicy)
self.web_itemImage.setUrl(QtCore.QUrl("about:blank"))
self.web_itemImage.setObjectName("web_itemImage")
self.gridLayout_6.addWidget(self.web_itemImage, 0, 1, 1, 1)
self.tab_myAccount.addTab(self.tab, "")
self.tab_browse = QtGui.QWidget()
self.tab_browse.setObjectName("tab_browse")
self.gridLayout_7 = QtGui.QGridLayout(self.tab_browse)
self.gridLayout_7.setObjectName("gridLayout_7")
self.widget_3 = QtGui.QWidget(self.tab_browse)
self.widget_3.setObjectName("widget_3")
self.gridLayout_8 = QtGui.QGridLayout(self.widget_3)
self.gridLayout_8.setContentsMargins(0, 0, 0, 0)
self.gridLayout_8.setObjectName("gridLayout_8")
self.listWidget = QtGui.QListWidget(self.widget_3)
self.listWidget.setObjectName("listWidget")
self.gridLayout_8.addWidget(self.listWidget, 0, 0, 1, 1)
self.gridLayout_7.addWidget(self.widget_3, 0, 0, 1, 1)
self.tab_myAccount.addTab(self.tab_browse, "")
self.gridLayout_2.addWidget(self.tab_myAccount, 4, 0, 1, 4)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 869, 22))
self.menubar.setObjectName("menubar")
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuHelp = QtGui.QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionPreferences = QtGui.QAction(MainWindow)
self.actionPreferences.setObjectName("actionPreferences")
self.actionExit = QtGui.QAction(MainWindow)
self.actionExit.setObjectName("actionExit")
self.actionGeneral_Help = QtGui.QAction(MainWindow)
self.actionGeneral_Help.setObjectName("actionGeneral_Help")
self.actionAbout = QtGui.QAction(MainWindow)
self.actionAbout.setObjectName("actionAbout")
self.actionSummary_Refresh = QtGui.QAction(MainWindow)
self.actionSummary_Refresh.setObjectName("actionSummary_Refresh")
self.menuFile.addAction(self.actionPreferences)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionExit)
self.menuHelp.addAction(self.actionGeneral_Help)
self.menuHelp.addAction(self.actionAbout)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
self.tab_myAccount.setCurrentIndex(2)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(
QtGui.QApplication.translate("MainWindow", "Biddergy", None, QtGui.QApplication.UnicodeUTF8))
self.txtSearch.setPlaceholderText(
QtGui.QApplication.translate("MainWindow", "Search", None, QtGui.QApplication.UnicodeUTF8))
self.comSearchType.setItemText(0, QtGui.QApplication.translate("MainWindow", "Lot #", None,
QtGui.QApplication.UnicodeUTF8))
self.comSearchType.setItemText(1, QtGui.QApplication.translate("MainWindow", "Listing #", None,
QtGui.QApplication.UnicodeUTF8))
self.comSearchType.setItemText(2, QtGui.QApplication.translate("MainWindow", "Keyword", None,
QtGui.QApplication.UnicodeUTF8))
self.pushButton.setText(
QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.tab_myAccount.setToolTip(
QtGui.QApplication.translate("MainWindow", "<html><head/><body><p><br/></p></body></html>", None,
QtGui.QApplication.UnicodeUTF8))
self.tabSummary.setToolTip(
QtGui.QApplication.translate("MainWindow", "<html><head/><body><p><br/></p></body></html>", None,
QtGui.QApplication.UnicodeUTF8))
self.btnRefreshSummary.setText(
QtGui.QApplication.translate("MainWindow", "Refresh", None, QtGui.QApplication.UnicodeUTF8))
self.lblWatching.setText(
QtGui.QApplication.translate("MainWindow", "Watching:", None, QtGui.QApplication.UnicodeUTF8))
self.lblBidding.setText(
QtGui.QApplication.translate("MainWindow", "Bidding:", None, QtGui.QApplication.UnicodeUTF8))
self.lblWon.setText(QtGui.QApplication.translate("MainWindow", "Won:", None, QtGui.QApplication.UnicodeUTF8))
self.lblNotWon.setText(
QtGui.QApplication.translate("MainWindow", "Not Won:", None, QtGui.QApplication.UnicodeUTF8))
self.lblPurchases.setText(
QtGui.QApplication.translate("MainWindow", "Purchases:", None, QtGui.QApplication.UnicodeUTF8))
self.lblInvoices.setText(
QtGui.QApplication.translate("MainWindow", "Invoices:", None, QtGui.QApplication.UnicodeUTF8))
self.valWatching.setText(QtGui.QApplication.translate("MainWindow", "-", None, QtGui.QApplication.UnicodeUTF8))
self.valBidding.setText(QtGui.QApplication.translate("MainWindow", "-", None, QtGui.QApplication.UnicodeUTF8))
self.valWon.setText(QtGui.QApplication.translate("MainWindow", "-", None, QtGui.QApplication.UnicodeUTF8))
self.valNotWon.setText(QtGui.QApplication.translate("MainWindow", "-", None, QtGui.QApplication.UnicodeUTF8))
self.valPurchases.setText(QtGui.QApplication.translate("MainWindow", "-", None, QtGui.QApplication.UnicodeUTF8))
self.valInvoices.setText(QtGui.QApplication.translate("MainWindow", "-", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2),
QtGui.QApplication.translate("MainWindow", "Summary", None,
QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3),
QtGui.QApplication.translate("MainWindow", "Watching", None,
QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4),
QtGui.QApplication.translate("MainWindow", "Bidding", None,
QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_5),
QtGui.QApplication.translate("MainWindow", "Won", None,
QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_6),
QtGui.QApplication.translate("MainWindow", "Not Won", None,
QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_7),
QtGui.QApplication.translate("MainWindow", "Purchases", None,
QtGui.QApplication.UnicodeUTF8))
self.tab_myAccount.setTabText(self.tab_myAccount.indexOf(self.tabSummary),
QtGui.QApplication.translate("MainWindow", "My Account", None,
QtGui.QApplication.UnicodeUTF8))
self.val_itemTitle.setText(
QtGui.QApplication.translate("MainWindow", "Item Title", None, QtGui.QApplication.UnicodeUTF8))
self.val_itemFormat.setText(
QtGui.QApplication.translate("MainWindow", "format", None, QtGui.QApplication.UnicodeUTF8))
self.val_itemCurrentPrice.setText(
QtGui.QApplication.translate("MainWindow", "$0", None, QtGui.QApplication.UnicodeUTF8))
self.lbl_itemBids.setText(
QtGui.QApplication.translate("MainWindow", "Bids:", None, QtGui.QApplication.UnicodeUTF8))
self.lbl_itemLocation.setText(
QtGui.QApplication.translate("MainWindow", "Location:", None, QtGui.QApplication.UnicodeUTF8))
self.lbl_itemStarts.setText(
QtGui.QApplication.translate("MainWindow", "Starts:", None, QtGui.QApplication.UnicodeUTF8))
self.lbl_itemEnds.setText(
QtGui.QApplication.translate("MainWindow", "Ends:", None, QtGui.QApplication.UnicodeUTF8))
self.lbl_itemHighBidder.setText(
QtGui.QApplication.translate("MainWindow", "High Bidder:", None, QtGui.QApplication.UnicodeUTF8))
self.val_itemBids.setText(QtGui.QApplication.translate("MainWindow", "-", None, QtGui.QApplication.UnicodeUTF8))
self.val_itemHighBidder.setText(
QtGui.QApplication.translate("MainWindow", "-", None, QtGui.QApplication.UnicodeUTF8))
self.val_itemLocation.setText(
QtGui.QApplication.translate("MainWindow", "-", None, QtGui.QApplication.UnicodeUTF8))
self.val_itemStart.setText(
QtGui.QApplication.translate("MainWindow", "-", None, QtGui.QApplication.UnicodeUTF8))
self.val_itemEnd.setText(QtGui.QApplication.translate("MainWindow", "-", None, QtGui.QApplication.UnicodeUTF8))
self.lbl_itemLotNumber.setText(
QtGui.QApplication.translate("MainWindow", "Lot #", None, QtGui.QApplication.UnicodeUTF8))
self.val_itemLotNumber.setText(
QtGui.QApplication.translate("MainWindow", "000000", None, QtGui.QApplication.UnicodeUTF8))
self.lbl_itemListingNumber.setText(
QtGui.QApplication.translate("MainWindow", "Listing #", None, QtGui.QApplication.UnicodeUTF8))
self.val_itemListingNumber.setText(
QtGui.QApplication.translate("MainWindow", "000000", None, QtGui.QApplication.UnicodeUTF8))
self.tab_myAccount.setTabText(self.tab_myAccount.indexOf(self.tab),
QtGui.QApplication.translate("MainWindow", "Item", None,
QtGui.QApplication.UnicodeUTF8))
self.tab_myAccount.setTabText(self.tab_myAccount.indexOf(self.tab_browse),
QtGui.QApplication.translate("MainWindow", "Browse", None,
QtGui.QApplication.UnicodeUTF8))
self.menuFile.setTitle(
QtGui.QApplication.translate("MainWindow", "&File", None, QtGui.QApplication.UnicodeUTF8))
self.menuHelp.setTitle(
QtGui.QApplication.translate("MainWindow", "&Help", None, QtGui.QApplication.UnicodeUTF8))
self.actionPreferences.setText(
QtGui.QApplication.translate("MainWindow", "&Preferences", None, QtGui.QApplication.UnicodeUTF8))
self.actionExit.setText(
QtGui.QApplication.translate("MainWindow", "&Exit", None, QtGui.QApplication.UnicodeUTF8))
self.actionGeneral_Help.setText(
QtGui.QApplication.translate("MainWindow", "&General Help", None, QtGui.QApplication.UnicodeUTF8))
self.actionAbout.setText(
QtGui.QApplication.translate("MainWindow", "&About", None, QtGui.QApplication.UnicodeUTF8))
self.actionSummary_Refresh.setText(
QtGui.QApplication.translate("MainWindow", "Summary Refresh", None, QtGui.QApplication.UnicodeUTF8))
from PySide import QtWebKit
|
[
"mkelley88@gmail.com"
] |
mkelley88@gmail.com
|
ce9db6f9e843b7aa7979f8e3123b354b1a7549d8
|
b1b492715300bee008eacc2708ff3aa9f6ff34ab
|
/mps_shape_completion/shape_completion_training/src/shape_completion_training/metric.py
|
66a8c8d0064e050027826f98451b43f4c3214978
|
[] |
no_license
|
minlattnwe/unreliable-deform-manipulation
|
1dd60a1f91af37c30e699b218ed374c3b65d0f9b
|
b04485ed98d78a5f35f5b4d29aac715e2c0ef6a5
|
refs/heads/master
| 2023-03-19T20:16:57.552013
| 2020-12-09T21:13:25
| 2020-12-09T21:13:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,131
|
py
|
import tensorflow as tf
class Metric:
@staticmethod
def is_better_than(a, b):
raise NotImplementedError()
@staticmethod
def key():
raise NotImplementedError()
@staticmethod
def worst():
raise NotImplementedError()
class LossMetric(Metric):
@staticmethod
def is_better_than(a, b):
return a < b
@staticmethod
def key():
return "loss"
@staticmethod
def worst():
return 1000
class AccuracyMetric(Metric):
@staticmethod
def is_better_than(a, b):
if b is None:
return True
return a > b
@staticmethod
def key():
return "accuracy"
@staticmethod
def worst():
return 0
# TODO make tests for these
def fp(y_true, y_pred, threshold=0.5):
return tf.cast(tf.math.count_nonzero((1 - y_true) * tf.cast(y_pred > threshold, tf.float32)), tf.float32)
def tn(y_true, y_pred, threshold=0.5):
return tf.cast(tf.math.count_nonzero((1 - y_true) * tf.cast(y_pred <= threshold, tf.float32)), tf.float32)
def fn(y_true, y_pred, threshold=0.5):
return tf.cast(tf.math.count_nonzero(y_true * tf.cast(y_pred <= threshold, tf.float32)), tf.float32)
def tp(y_true, y_pred, threshold=0.5):
return tf.cast(tf.math.count_nonzero(y_true * tf.cast(y_pred > threshold, tf.float32)), tf.float32)
def accuracy_on_negatives(y_true, y_pred, threshold=0.5):  # i.e. specificity (true-negative rate)
true_negatives = tn(y_true, y_pred, threshold=threshold)
false_positives = fp(y_true, y_pred, threshold=threshold)
return tf.math.divide_no_nan(true_negatives, true_negatives + false_positives)
def recall(y_true, y_pred, threshold=0.5):
true_positives = tp(y_true, y_pred, threshold=threshold)
false_negatives = fn(y_true, y_pred, threshold=threshold)
return tf.math.divide_no_nan(true_positives, true_positives + false_negatives)
def precision(y_true, y_pred, threshold=0.5):
true_positives = tp(y_true, y_pred, threshold=threshold)
false_positives = fp(y_true, y_pred, threshold=threshold)
return tf.math.divide_no_nan(true_positives, true_positives + false_positives)
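# Quick sanity check (an illustrative addition, not part of the original module):
# with y_true = tf.constant([1., 0., 1., 1.]) and y_pred = tf.constant([0.9, 0.8, 0.3, 0.6])
# at the default threshold of 0.5, tp = 2, fp = 1, fn = 1, so
# precision(y_true, y_pred) == recall(y_true, y_pred) == 2/3.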
|
[
"pmitrano@armstorm"
] |
pmitrano@armstorm
|
6d98248215769200c25572a4147a419d46be734e
|
dad40dc4fdba73b1dc074f3fb8373b8a3335cabf
|
/7.py
|
d78c77d1e482a9276de407b0c94d1bb5b63a139c
|
[] |
no_license
|
chronosvv/exercise
|
cdab7c09fefc63f8ddd942d0246b277ea644866c
|
9056d7a59af8b4cc447b88089415556000516d85
|
refs/heads/master
| 2020-03-12T23:35:12.191339
| 2018-04-24T14:43:34
| 2018-04-24T14:43:34
| 130,869,000
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,835
|
py
|
class Solution(object):
def convert(self, s, numRows):
if not numRows or numRows == 1 or len(s) <= numRows:
return s
dic = {}
count = 0
reverse = False
for i in range(len(s)):
if count < numRows and not reverse:
if count in dic:
dic[count] += s[i]
else:
dic[count] = s[i]
count += 1
if count == numRows:
reverse = True
else:
if count == numRows:
count -= 2
else:
count -= 1
dic[count] += s[i]
if count == 0:
count += 1
reverse = False
rtn = ""
for i in range(numRows):
rtn += dic[i]
return rtn
# dic = {}
# s = "anmksced"
# dic[0] = s[1]
# dic[0] += s[2]
# print(dic)
class Solution2(object):
def convert(self, s, numRows):
if not numRows or numRows == 1 or len(s) <= numRows:
return s
dic = {}
count = 0
reverse = False
for i in range(len(s)):
if count < numRows and not reverse:
if count in dic:
dic[count] += s[i]
else:
dic[count] = s[i]
count += 1
if count == numRows:
reverse = True
else:
if count == numRows:
count -= 2
else:
count -= 1
dic[count] += s[i]
if count == 0:
count += 1
reverse = False
rtn = ""
for i in range(numRows):
rtn += dic[i]
return rtn
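# Expected behavior of the zigzag conversion above, for illustration:
# Solution().convert("PAYPALISHIRING", 3) -> "PAHNAPLSIIGYIR"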
|
[
"2662282459@qq.com"
] |
2662282459@qq.com
|
fcd162157a215cb564359d193535f5473bd0fe4d
|
11c68a5008331b94c6dad3b6c99a43355fa7fc3f
|
/test_package/person.py
|
cd3dbe038442c7305b4cacbe099c65779db268de
|
[] |
no_license
|
sean0923/python3-net-ninja
|
27b30268b3a960e2a62c9f64df00626e7c33cc04
|
d6c93829d25034b85c720504a85e856b924d0e21
|
refs/heads/master
| 2020-03-19T13:11:56.378690
| 2018-06-11T01:08:39
| 2018-06-11T01:08:39
| 136,567,088
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 448
|
py
|
class Person:
# class level attribute
isHuman = True
def __init__(self, name, age, weight):
# instance attributes
self.name = name
self.age = age
self.weight = weight
def eat_food(self, food):
print(f'{self.name} is eating {food}')
@classmethod
def print_class_method(cls):
print('print class method')
@staticmethod
def render_static_method():
print('I am staticmethod and I do not need arguments')
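# Minimal usage sketch (illustrative; the instance values are arbitrary):
# p = Person('Ada', 36, 60)
# p.eat_food('toast')              # -> "Ada is eating toast"
# Person.print_class_method()      # classmethod: receives the class, not an instance
# Person.render_static_method()    # staticmethod: takes no implicit argument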
|
[
"iamseanhong@gmail.com"
] |
iamseanhong@gmail.com
|
9b09e7a645c9a90cda1d2800058bb219cecad885
|
4f98ccf8edab76c10a526b83837c79185b9e0132
|
/applications/persona/views.py
|
4fca4d1b1e3e592c43ebacf0390b02cdf374340b
|
[] |
no_license
|
armandoSandino/empleado_dj
|
f1f8b86187ad9fe7218a5e95afc2eae01edc0772
|
5f652706308746b97a6a230f618062193a5c1dcb
|
refs/heads/master
| 2022-12-03T21:00:29.197532
| 2020-09-01T00:10:36
| 2020-09-01T00:10:36
| 290,268,814
| 2
| 0
| null | 2020-09-01T00:10:38
| 2020-08-25T16:40:08
|
Python
|
UTF-8
|
Python
| false
| false
| 8,875
|
py
|
from django.shortcuts import render
# Lets us reference routes by name
from django.urls import reverse_lazy
from django.views.generic import (
    ListView,
    DetailView,
    CreateView,
    TemplateView,
    UpdateView,
    DeleteView
)
# models
from .models import Empleado
from django.http import HttpResponseRedirect
# Import the custom form
from .forms import EmpleadoForm
class InitView(TemplateView):
    """ Home page """
    template_name = 'index.html'
class ListAllEmplados(ListView):
    template_name = 'persona/list_all.html'
    # Add pagination
    # when pagination is added, 'page_obj' and 'paginator' objects are generated implicitly
    paginate_by = 5
    # Order the results
    ordering = 'first_name'
    # Define the model
    # model = Empleado
    # Define the variable used to access the resulting employee list
    context_object_name = 'listaEmpleado'
    # the model data can be reached through 'context_object_name' or 'object_list'
    # context_object_name = 'data'
    def get_queryset(self):
        # get the values passed in a form secured with 'csrf_token'
        palabra_clave = self.request.GET.get('termino','')
        # __icontains checks whether one string occurs inside another, like a SQL 'like'
        return Empleado.objects.filter(
            full_name__icontains = palabra_clave
        )
class ListaEmpladosAdmin(ListView):
    template_name = 'persona/lista_empleados.html'
    # Add pagination
    # when pagination is added, 'page_obj' and 'paginator' objects are generated implicitly
    paginate_by = 10
    # Order the results
    ordering = 'first_name'
    # Define the model
    model = Empleado
    # Define the variable used to access the resulting employee list
    context_object_name = 'listaEmpleado'
class ListByAreaEmpleado(ListView):
    """ List all the employees of one area of the company """
    # Define the template
    template_name = 'persona/list_by_area.html'
    # Define the data list handled from the view
    context_object_name = 'listEmployee'
    def get_context_data(self, **kwargs):
        context = super(ListByAreaEmpleado, self).get_context_data(**kwargs)
        context['title'] = 'Empleados en el area de ' + self.kwargs['termino']
        return context
    # filter employees by department
    """
    queryset = Empleado.objects.filter(
        departamento__short_name ='AC'
    )
    """
    def get_queryset(self):
        # through self.kwargs['parametro'] we can receive parameters passed in the URL of a route
        term = self.kwargs['termino']
        lista = Empleado.objects.filter(
            departamento__short_name=term
        )
        return lista
class ListarEmpleadoByKword(ListView):
    """ List employees by keyword """
    template_name = 'persona/by_kword.html'
    context_object_name = 'me_data'
    def get_queryset(self):
        # get the values passed in a form secured with 'csrf_token'
        palabra_clave = self.request.GET.get('termino','')
        return Empleado.objects.filter(
            first_name = palabra_clave
        )
class ListarEmpleadoByWorks(ListView):
    """ List employees by occupation/job """
    # Declare the template
    template_name = 'persona/filter_by_works.html'
    # Add pagination
    paginate_by = 5
    # Add ordering
    ordering = 'first_name'
    def get_queryset(self):
        # Get the parameter passed in the URL
        trabajo = self.kwargs['your_work']
        return Empleado.objects.filter(
            job=trabajo
        )
class ListarHabilidadesEmpleados(ListView):
    """ List the skills of an employee """
    template_name = 'persona/habilidades.html'
    context_object_name = 'dataEmpleado'
    def get_queryset(self):
        try:
            # Get the parameter
            id_empleado = self.kwargs['key']
            # Get the employee
            empleado = Empleado.objects.get(id=id_empleado)
            # Return their skills; it is a many-to-many relation
            return empleado.habilidades.all()
        except ValueError:
            return []
class EmpleadoDetailView(DetailView):
    # In a DetailView the model to work with must be specified
    model = Empleado
    # Declare the template
    template_name = 'persona/detail_empleado.html'
    # Lets us send extra variables to the template, fields that are not in our model
    def get_context_data(self, **kwargs):
        context = super(EmpleadoDetailView, self).get_context_data(**kwargs)
        # or this
        # context = super().get_context_data(**kwargs)
        context['title'] = 'Detalle del empleado '
        return context
class SuccessViewEmpleadoCreateView(TemplateView):
    # Define the template
    template_name = 'persona/success_add_employee.html'
class EmpleadoCreateView(CreateView):
    # Define the template
    template_name = 'persona/add_employee.html'
    # Defining the model to use is mandatory
    model = Empleado
    # Define the model fields we want to work with
    # All of the model's fields can be used, like this
    # fields = (__all__)
    # Or specific model fields can be listed
    #fields = ['first_name','last_name','job', 'departamento', 'habilidades', 'avatar']
    # fields = ('__all__')
    # Define the custom form to use
    form_class = EmpleadoForm
    # Define the redirect route once the record is added successfully; with '.' the same page is reloaded
    # success_url = '/success-add-employe'
    success_url = reverse_lazy('persona_app:empleados-admin')
    # Define extra variables to pass to the template
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['title'] = 'Agregar empleado'
        return context
    # validate the data to be processed for the model
    def form_valid(self, form):
        # Get the values of the form fields
        # empleado = form.save()
        empleado = form.save(commit=False)
        # Update the full_name field
        empleado.full_name = empleado.first_name + ' ' + empleado.last_name
        # Save the changes
        empleado.save()
        return super(EmpleadoCreateView, self).form_valid(form)
class EmpleadoUpdateView(UpdateView):
    # Define the template
    template_name = 'persona/update_employee.html'
    # Define the model
    model = Empleado
    # Define the fields to work with
    fields = ['first_name','last_name','job', 'departamento', 'habilidades']
    # Define the redirect URL
    success_url = reverse_lazy('persona_app:empleados-admin')
    # Define extra variables to pass to the template
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['title_page'] = 'Actualizar empleado'
        return context
    # Run some processing or data validation before the data is saved
    # Both 'post' and 'form_valid' can perform the same task in that case.
    # When a request reaches the route backed by the UpdateView, 'post' runs before 'form_valid'
    def form_valid(self, form):
        # Get the values
        employee = form.save(commit=False)
        # Update certain fields explicitly
        full = [employee.first_name,' ', employee.last_name]
        employee.full_name = ''.join(full)
        # Save the changes
        employee.save()
        return super(EmpleadoUpdateView, self).form_valid(form)
    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        # Get the form values from the request
        # print(request.POST), print(request.POST['first_name'])
        return super().post(request, *args, **kwargs)
class EmpleadoDeteleView(DeleteView):
    # Define the template
    template_name = 'persona/delete_employee.html'
    # Define the model
    model = Empleado
    # Define the redirect route
    # success_url = reverse_lazy('persona_app:success-employe')
    # Define extra variables to pass to the template
    def get_context_data(self, **kwargs):
        context = super(EmpleadoDeteleView, self).get_context_data(**kwargs)
        context['title_delete'] = 'Borrar empleado'
        return context
    def delete(self, request, *args, **kwargs):
        # Get the record to delete
        self.object = self.get_object()
        # success_url = self.get_success_url()
        # Redirect route
        success_url = reverse_lazy('persona_app:empleados-admin')
        # Delete the record
        self.object.delete()
        return HttpResponseRedirect(success_url)
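# Minimal sketch of how these views might be wired up in the app's urls.py
# (illustrative only; the 'empleados-admin' name is taken from the reverse_lazy()
# calls above, while the path strings are assumptions):
# from django.urls import path
# from . import views
# app_name = 'persona_app'
# urlpatterns = [
#     path('empleados-admin/', views.ListaEmpladosAdmin.as_view(), name='empleados-admin'),
#     path('detalle/<pk>/', views.EmpleadoDetailView.as_view(), name='detalle-empleado'),
# ]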
|
[
"jsandino@aimservices.tech"
] |
jsandino@aimservices.tech
|
acafc2d39a45e3a0656ed56466fa2f8fb1875bc8
|
efa2158f1666ab0fc22fbc16912a95a1f2bfb8ce
|
/pandas_inputoutput.py
|
30cdb3dc86cbddce37cd9c866fdb5a6641b09b6b
|
[] |
no_license
|
parthpm/PowerfulPandas
|
c90a42a780d38e6a85f480c3542134f60480355c
|
80c1e1bb9e1ce22882af5ee8a86aad3ef40c7b23
|
refs/heads/master
| 2020-03-31T03:20:29.382971
| 2018-10-07T18:15:15
| 2018-10-07T18:15:15
| 151,861,289
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
# pandas can read a variety of file types using its pd.read_ methods
import pandas as pd
df=pd.read_csv('example.csv')
print(df)
# reading an Excel file (needs an Excel engine such as openpyxl or xlrd installed)
df1=pd.read_excel('Excel_Sample.xlsx',sheet_name='sheet1')
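# Writing goes through the matching df.to_ methods (illustrative addition;
# the output filenames are arbitrary):
# df.to_csv('example_out.csv', index=False)
# df1.to_excel('Excel_Sample_out.xlsx', sheet_name='sheet1')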
|
[
"noreply@github.com"
] |
parthpm.noreply@github.com
|
c2f77ebb3a5adfe51742f05b51d786453271740b
|
f5a2376a1051c223af80bd6645aaf19a2c78711c
|
/project/Python/User.py
|
1fc7f20a15635d924edc986ec7a226d30c0617ad
|
[] |
no_license
|
matthewjhoward/cmps203
|
6b204fc431e9baa0ac358862e3ea3dfa5ca7bea4
|
0dfbe4029f8e531a3a5b480c05258e10fb79f08e
|
refs/heads/master
| 2020-05-06T20:04:10.250340
| 2019-06-21T01:52:50
| 2019-06-21T01:52:50
| 180,219,279
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 883
|
py
|
class User:
# Fields
# firstname = None
# lastname = None
# username = None
# password = None
# Constructor
def __init__(self, firstname, lastname, username, password):
self.firstname = firstname
self.lastname = lastname
self.username = username
self.password = password
# Setters
def setFirstname(self, firstname):
self.firstname = firstname
def setLastname(self, lastname):
self.lastname = lastname
def setUsername(self, username):
self.username = username
def setPassword(self, password):
self.password = password
# Getters
def getFirstname(self):
return self.firstname
def getLastname(self):
return self.lastname
def getUsername(self):
return self.username
def getPassword(self):
return self.password
|
[
"alexps2master@sbcglobal.net"
] |
alexps2master@sbcglobal.net
|
667df5b8b5587191f37edc36f70b6425da2df46f
|
da63007b563f46da41bed7c770d3b4163bebb114
|
/divisiors.py
|
0a892d759a6b28a8a85e02188d4fef2ecca3e630
|
[] |
no_license
|
rajabade01/study
|
8bac38d35126a41bef46077b21ad0a9fbf409029
|
24766508b668de944dcaffee79cbcc994a49289c
|
refs/heads/master
| 2020-04-12T19:12:28.515253
| 2020-02-24T07:10:28
| 2020-02-24T07:10:28
| 162,702,723
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
def accum(s):
    # build the accumulated string: each character is uppercased once, then
    # repeated in lowercase as many times as its index, with '-' between groups
    str1 = ""
for i, value in enumerate(s):
final = (value.upper())+(i * value) + "-"
str1 = str1 + final
return str1[:-1]
if __name__ == "__main__":
string = "abcdef"
output = accum(string)
print(output)
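# Expected output for the example above:
# A-Bb-Ccc-Dddd-Eeeee-Ffffff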
|
[
"noreply@github.com"
] |
rajabade01.noreply@github.com
|
be38242ac0b77a4b5205087ba42cf818e451a4d1
|
3ed65ce239f2db5de08b5c45caa97525a7379beb
|
/src/websocketpp_02/examples/echo_server/SConscript
|
abaa7c030c78827750ab09806c38a3b00872a1c8
|
[
"BSD-3-Clause",
"MIT-Wu",
"ISC",
"BSL-1.0",
"MIT"
] |
permissive
|
moorecoin/MooreCoinService
|
9466aac3683ccc52e7ea89906e2bc6c90dae9922
|
6de5f5032972147c0d43c3ae48a088f9e1fa7d28
|
refs/heads/master
| 2021-01-10T06:05:51.501738
| 2015-11-14T13:18:47
| 2015-11-14T13:18:47
| 46,175,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
## echo_server
##
Import('env')
Import('boostlibs')
Import('wslib')
Import('platform_libs')
localenv = env.Clone()
sources = ["echo_server.cpp"]
libs = [wslib, platform_libs] + boostlibs(['system',
                                           'date_time',
                                           'regex',
                                           'thread'])
prg = localenv.Program('echo_server', sources, LIBS = libs)
Return('prg')
|
[
"mooreccc@foxmail.com"
] |
mooreccc@foxmail.com
|
|
f8ca460657ca11ed2132d963217761141a03d7ad
|
44dbb043e52f00c9a797b1bea8f1df50dd621842
|
/builtin-eval-example-2.py
|
558669a927a77e9ef2f20d4d97d2669c3e51bef6
|
[] |
no_license
|
peterdocter/standardmodels
|
140c238d3bef31db59641087e3f3d5413d4baba1
|
7addc313c16b416d0970461998885833614570ad
|
refs/heads/master
| 2020-12-30T16:59:30.489486
| 2016-12-13T06:32:03
| 2016-12-13T06:32:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 105
|
py
|
print eval("__import__('os').getcwd()")
print eval("__import__('os').remove('file')",{'__builtins__':{}})
|
[
"415074476@qq.com"
] |
415074476@qq.com
|
c488091f73195a4278f7b079f1cae84b9bd6af6b
|
0eb5c5a8324200affb0ddc076c1115e802415595
|
/练习/2.糗事百科.py
|
5ad9139f35654a54d8d3af5c0531b18dace7bdeb
|
[] |
no_license
|
jiangsy163/pythonProject
|
5b7986fb5e89943fc949301c22d03e97bc34b41d
|
b27f0a5a09ca36063fb45d61ca6ebd06a494ea67
|
refs/heads/master
| 2023-05-12T03:42:45.885487
| 2021-06-04T09:21:53
| 2021-06-04T09:21:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
import random
import time
import requests
from utils.headers import headers_with_ChromeUA
for page in range(1, 21):
    time.sleep(random.randint(2, 5))
    requests.adapters.DEFAULT_RETRIES = 5  # increase the retry count
    s = requests.session()
    s.keep_alive = False  # close redundant connections
    url = f"https://m2.qiushibaike.com/article/list/text?page={page}&count=12"
    s.get(url)  # the URL you need
    # pass the Chrome UA headers via the 'headers' keyword argument
    response = requests.get(url, headers=headers_with_ChromeUA, verify=False)
    for item in response.json()["items"]:
        print(item)
|
[
"1247371788@qq.com"
] |
1247371788@qq.com
|
653b4f86e1043a6ee11d346d1b4a9a030501f633
|
87e90dff9ff6f0f7af3a2d0f4c2464a9d4dca337
|
/Do-it-first-python/mission/6-01.py
|
6200b9a244c644f4202e49ab55171ab8ebf4f9be
|
[
"MIT"
] |
permissive
|
siyoon210/Python-Practice
|
462b0a0f33488bc8434f6328fd55f03e30c2d4f7
|
778922a8be2faaa564915bcbcab761d39753b1f8
|
refs/heads/master
| 2022-08-03T21:27:43.186667
| 2020-05-31T11:59:18
| 2020-05-31T11:59:18
| 267,727,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
jeju_word = ['혼저 옵서', '지꺼지게', '놀당 갑서양']
# Create a dictionary whose keys are the Jeju dialect phrases and whose values are standard Korean
jeju_dict = {'혼저 옵서':'어서오세요', '지꺼지게':'즐겁게', '놀당 갑서양':'놀다 가세요'}
# Complete a loop that prints the standard-Korean form of each Jeju dialect phrase in jeju_word, one per line
for i in jeju_word:
print(jeju_dict[i])
|
[
"siyoon210@gmail.com"
] |
siyoon210@gmail.com
|
3088c132d5c5c3a5203113ab711859901af846d5
|
25249760f40553495f7281a5f6c94f5bbba18e85
|
/Numpy_notes/numpy_broadcasting.py
|
6420d517543417faea2f874a8ea496247ba5e43a
|
[] |
no_license
|
mandarspringboard/notes
|
98d592b26a3eba4280e6a94abcfb93dba560feec
|
28b5da39b90773b378303903cc46326b04dfa790
|
refs/heads/master
| 2023-08-11T05:34:33.979449
| 2021-09-30T07:26:05
| 2021-09-30T07:26:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,551
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 1 14:22:55 2020
@author: Gokhale
"""
# the * operator in numpy is elementwise
# use the dot method or the @ operator for matrix products
# what happens when we try to take * products when array sizes don't match?
# answer: broadcasting!
# other elementwise operations, +,-,/ are also broadcast
# https://numpy.org/devdocs/user/basics.broadcasting.html#basics-broadcasting
# two dimensions are compatible when: 1) they are equal
# 2) one of them is 1
# also
# https://numpy.org/devdocs/user/quickstart.html#broadcasting-rules
import numpy as np
import itertools
import sys
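# A quick illustrative check of the compatibility rule before the worked
# cases below (added sketch): trailing dimensions must match or be 1.
print(np.ones((3, 2)) * np.ones(2))        # (3,2) * (2,)  -> fine, shape (3,2)
try:
    np.ones((3, 2)) * np.ones(3)           # (3,2) * (3,)  -> incompatible
except ValueError as err:
    print('broadcast error:', err)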
# Case 1
a = np.arange(1,7).reshape(3,2)
b = np.arange(1,3).reshape(2)
c = a*b
# a and b do not have the same dimensions. So how is a*b defined?
# step 1: arrange the arrays so that dimensions are right justified
# i.e. a.shape = (3,2)
# b.shape = ( 2)
# step 2: add a 'fake' first dimension to b to make it two dimensional
# b.shape = (1,2)
# (this is allowed since we are not increasing the number of elements)
b=b.reshape(1,2)
# step 3: expand the first dimension to 3
# the entries (0,0), (0,1) in b are already defined
# we define the entries (i,0) and (i,1) to be equal to (0,0) and (0,1)
# for i > 0
# to make things simple we can define an array d to take the place
# of expanded b
d = np.zeros(6).reshape(3,2)
for i,j in itertools.product(range(3),range(2)):
d[i][j] = b[0][j]
e = a*d
#print('c=\n',c)
#print('e=\n',e)
#print('norm(c-e)=',np.linalg.norm(c-e))
#print('-'*50)
c = a+b
e = a+d
#print('c=\n',c)
#print('e=\n',e)
#print('norm(c-e)=',np.linalg.norm(c-e))
#print('-'*50)
#sys.exit()
# take a more general case
a = np.arange(48).reshape(8,1,6,1)
b = np.arange(35).reshape(7,1,5)
c = a*b
# as before, we right justify the matrices
# a.shape = (8,1,6,1)
# b.shape = ( 7,1,5)
# add a dummy dimension to b
b=b.reshape(1,7,1,5)
# now that the number of dimensions are the same
# a.shape = (8,1,6,1)
# b.shape = (1,7,1,5)
# we need to expand a and b
# we define two expanded arrays
a_ex = np.zeros(8*7*6*5).reshape(8,7,6,5)
b_ex = np.zeros(8*7*6*5).reshape(8,7,6,5)
# using the power of numpy, the previous expansion algorithm can be written as
a_ex[:,:,:,:] = a[:,0:1,:,0:1]
b_ex[:,:,:,:] = b[0:1,:,0:1,:]
# using a[:,0,:,0] will not work
# this itself is also referred to as broadcasting
# one can change 0:1 to 0:2, 0:3, in fact 0:n for n >= 1, because only one
# element exists in that range (out-of-range slices are clipped)
a_ex_manual = np.zeros(8*7*6*5).reshape(8,7,6,5)
b_ex_manual = np.zeros(8*7*6*5).reshape(8,7,6,5)
# nested for loops can be simplified using itertools
for i,j,k,l in itertools.product(range(8),range(7),range(6),range(5)):
a_ex_manual[i,j,k,l]=a[i][0][k][0]
b_ex_manual[i,j,k,l]=b[0][j][0][l]
print('norm(a_ex - a_ex_manual) =',np.linalg.norm(a_ex - a_ex_manual))
print('norm(b_ex - b_ex_manual) =',np.linalg.norm(b_ex - b_ex_manual))
c_check = a_ex*b_ex
print('norm(c-c_check) =',np.linalg.norm(c-c_check))
# check 2 - explicitly perform the elementwise product
# each entry of c is simply the product of the matching entries of a_ex and b_ex
c_check_2 = np.zeros(8*7*6*5).reshape(8,7,6,5)
for i,j,k,l in itertools.product(range(8),range(7),range(6),range(5)):
c_check_2[i,j,k,l]=a_ex[i,j,k,l]*b_ex[i,j,k,l]
print('norm(c-c_check_2) =',np.linalg.norm(c-c_check_2))
|
[
"gokhalen@gmail.com"
] |
gokhalen@gmail.com
|
6b05b22a7ebab49d2eccf8fa2c311828e7cb5235
|
02870a6deae799b1689afe75885aaa841f8a912e
|
/mongo.py
|
215688717120fd0d27f07c64754567a9d91ff725
|
[
"MIT"
] |
permissive
|
tbkraf08/xml2json
|
c3c0babafe68585437633bc4be02d5b771067011
|
9c3ff9f15f069e87252d120b647c88d333d7f803
|
refs/heads/master
| 2021-01-15T20:52:33.985200
| 2013-08-22T13:16:34
| 2013-08-22T13:16:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,674
|
py
|
#!/usr/bin/env python
# Author: Toma Kraft
# Date: July 9th, 2013
import logging
import pymongo
import sys
import traceback #traceback.print_exc(file=sys.stdout)
import nltk
import csv
import types
import math
import datetime
from dateutil.relativedelta import relativedelta
def removeNonAscii(s): return "".join(i for i in s if ord(i)<128)
class MongoBase(object):
def __init__(self, host=None, port=None, db_name=None, verbose=True):
#logging.basicConfig(filename='logs/mongo.log',level=logging.DEBUG)
self.mTag = '[MongoBase]'
self.verbose = verbose
if not host:
host = 'localhost'
if not port:
port = '27017'
if not db_name:
db_name = 'default'
# set class vars
self.host = host
self.port = port
self.db_name = db_name
mongodb_uri = 'mongodb://'+host+':'+port
try:
# pymongo objects
self.conn = pymongo.Connection(mongodb_uri)
self.db = self.conn[self.db_name]
if verbose:
print self.mTag,'successfully connected to:', mongodb_uri, 'using db:', db_name
logging.info('[CREATED] '+self.__str__())
except:
print self.mTag,'[CONNECTION ERROR] [__init__]'
traceback.print_exc(file=sys.stdout)
self.conn = None
def close(self):
if self.conn:
self.conn.disconnect()
if self.verbose:
print self.mTag, 'Closed connection to', self.host,'db:',self.db_name
def setMtag(self, mTag):
self.mTag = mTag
def __str__(self):
host = self.host
port = self.port
db_name = self.db_name
mTag = self.mTag
return mTag+' object: '+'mongodb://'+host+':'+port+' db: '+db_name
def __repr__(self):
return self.__str__()
def __exit__(self):
self.close()
def __del__(self):
self.__exit__()
class MongoDict(MongoBase):
def __init__(self, dictName, persistant=True, host=None, port=None, db='dict', verbose=False):
# All MongoDicts stored in db='dict' unless specified otherwise
MongoBase.__init__(self, host, port, db, verbose)
self.setMtag('[MongoDict]')
self.persistant = persistant
self.dictName = dictName
#self.cache = {}
#self.cache[dictName] = {}
def __call__(self, newName):
self.dictName = newName
#if not newName in self.cache:
# self.cache[newName] = {}
def __setitem__(self, key, value):
name = self.dictName
mTag = self.mTag
db = self.db
conn = self.conn
verbose = self.verbose
#cache = self.cache
if conn:
value['_id'] = key
db[name].save(value)
# save for future reference
#cache[name][key] = value
if verbose:
print mTag, '[__setitem__] id:', key, 'doc:', value
else:
print mTag, '[CONNECTION ERROR] [__getitem__]'
def __getitem__(self, key):
name = self.dictName
mTag = self.mTag
db = self.db
conn = self.conn
verbose = self.verbose
#cache = self.cache
if conn:
# lookup the cache first
#if name in cache:
#if key in cache[name]:
#return cache[name][key]
#else:
#cache[name] = {}
            # key not in cache, load from db if available
result = list(db[name].find({'_id':key}).limit(1))
if result:
if verbose:
print mTag, '[__getitem__] _id:',key, 'doc:', result[0]
                # so that future requests for key will be faster
#cache[name][key] = result[0]
return result[0]
else:
if verbose:
print mTag, '[__getitem__] _id:', key, 'not found'
return False
else:
print mTag, '[CONNECTION ERROR] [__getitem__]'
def __iter__(self):
name = self.dictName
mTag = self.mTag
db = self.db
conn = self.conn
verbose = self.verbose
if conn:
results = list(db[name].find({},{'_id':1}))
if results:
for doc in results:
yield doc['_id']
else:
yield
def __len__(self):
name = self.dictName
mTag = self.mTag
db = self.db
conn = self.conn
verbose = self.verbose
if conn:
return db[name].count()
else:
return 0
def __contains__(self, item):
name = self.dictName
mTag = self.mTag
db = self.db
conn = self.conn
verbose = self.verbose
#cache = self.cache
if conn:
#if name in cache:
#if item in cache[name]:
#return True
#else:
#cache[name] = {}
result = list(db[name].find({'_id':item}).limit(1))
if result:
                # already made a request to db, might as well save the result
#cache[name][item] = result[0]
return True
else:
return False
else:
return False
def __exit__(self):
name = self.dictName
mTag = self.mTag
db = self.db
conn = self.conn
verbose = self.verbose
        persistant = self.persistant # if data is persistent leave intact, otherwise drop collection after deletion
if conn:
if not persistant:
db[name].drop()
if verbose:
print mTag, '[__exit__] dropped collection:', name
self.close()
def add(self, doc):
name = self.dictName
mTag = self.mTag
db = self.db
conn = self.conn
verbose = self.verbose
if conn:
print self.mTag, 'added:', doc,'\n'
db[name].insert(doc)
# verbose is set to true
class Mongo(MongoDict):
def __init__(self, host=None, port=None, db=None, default_collection='default'):
# dict name, persistant, host, port, db, verbose
MongoDict.__init__(self, default_collection, True, host, port, db, True)
self.setMtag('[Mongo]')
def __getattr__(self, name):
mTag = self.mTag
db = self.db
conn = self.conn
verbose = self.verbose
if not conn:
logging.debug(self.__str__()+' -- no conn!')
# sets the collection name
self(name)
def handle_doc(_id=None, doc=None, update=None, bulk=None, agg=None):
# add a document to mongo
if doc:
if not _id:
_id = doc['_id']
self.__setitem__(_id, doc)
return
            # retrieves a document from mongo
if _id:
return self.__getitem__(_id)
# updates the _id given the update mongo query
if update:
if '_id' in update:
_id = update['_id']
del update['_id']
query = {
'_id':_id
}
self.db[name].update(query, update, upsert=True) # True for upsert
return
# need to increment the fields passed in
# adds a list of documents
if bulk:
self.db[name].insert(bulk)
return
            # should be a list of dictionaries specifying the aggregation pipeline
if agg:
return self.db[name].aggregate(agg)
return handle_doc
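# Illustrative usage sketch (not in the original file; assumes a local
# mongod on the default port):
#   d = MongoDict('fruits', persistant=False)
#   d['apple'] = {'color': 'red'}
#   print d['apple']     # -> {'_id': 'apple', 'color': 'red'}
#   print 'apple' in d   # -> True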
def main(args):
m = Mongo('caprica.uncc.edu','27017','xml', 'wiki')
if __name__ == '__main__':
argv = sys.argv[1:]
args = {}
if argv:
args = {}
        # there should be a specifier for each parameter
# so there should always be an even number of args
# set the first item to be the specifier of the dict
# and the second is the value
if len(argv) % 2 == 0:
            # step through the args two at a time: specifier, then value
            for i in range(0, len(argv), 2):
                args[argv[i]] = argv[i+1]
main(args)
|
[
"tkraft3@uncc.edu"
] |
tkraft3@uncc.edu
|
c1856835b721605747ed512749780fb027a7b384
|
51ec23083c01ad26f489ca0b033de0743286a93f
|
/app/routers/game.py
|
ea007c914809f48e84d20e18df8c2ed98f639fcc
|
[] |
no_license
|
jads-dev/joegamevoting
|
ea8c292569b9976ea30c312a8b3ff6f42d1f0019
|
5ea768b708211457bd29b28e5aaacdf5c0436753
|
refs/heads/master
| 2023-03-14T15:15:42.698967
| 2021-03-10T23:59:31
| 2021-03-10T23:59:31
| 346,521,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,671
|
py
|
from fastapi import Depends, APIRouter
from app.models.game import get_game_search, get_game, get_game_platforms, calc_votes, vote_game, get_game_voters, pitch_game, get_game_pitches
from app.routers.auth import User, get_userid, get_optional_current_user
from pydantic import BaseModel, constr
class ParamsVote(BaseModel):
upvote: bool
poll: int = 0
class ParamsPitch(BaseModel):
pitch: constr(max_length=2000)
router = APIRouter()
@router.get("/game/search")
async def _get_games(search_term: str):
return get_game_search(search_term)
@router.get("/game/{id}")
async def _get_game(id: int, user_id: int = Depends(get_userid)):
return get_game(id, user_id)
@router.get("/game/{id}/voters")
async def _get_game_voters(id: int):
return get_game_voters(poll=0, game_id=id)
@router.get("/game/{id}/pitches")
async def _get_game_pitches(id: int):
return get_game_pitches(game_id=id)
@router.post("/game/{id}/vote")
async def _vote_game(id: int, params: ParamsVote, current_user: User = Depends(get_optional_current_user)):
if current_user["can_vote"]:
vote_game(params.poll, id, current_user["user_id"], params.upvote)
await calc_votes()
@router.post("/game/{id}/pitch")
async def _pitch_game(id: int, params: ParamsPitch, current_user: User = Depends(get_optional_current_user)):
if current_user["can_vote"]:
pitch_game(id, current_user["user_id"], params.pitch)
await calc_votes()
@router.get("/game/platforms/{id}")
async def _get_game_platforms(id: int):
return get_game_platforms(id)
@router.get("/game_test")
async def test_shit():
await calc_votes()
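# Illustrative request sketch for the routes above (URLs and payloads
# assumed, not part of the original file):
#   GET  /game/search?search_term=doom        -> matching games
#   GET  /game/42                             -> game 42 details
#   POST /game/42/vote  {"upvote": true}      -> records a vote, then recounts
#   POST /game/42/pitch {"pitch": "..."}      -> stores a pitch for game 42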
|
[
"me@nodja.com"
] |
me@nodja.com
|
7de00d560f70d9f34b9a9d1ecf15a69abf3af1a3
|
71b15561bde017c0dd4903e42f6b38312c5b0f40
|
/dateandtimelibrary/datetimedemo.py
|
a76ae7cd8ed638ee7123abbc8c6521a181f1a401
|
[] |
no_license
|
Aditya-Lamaniya/PythonLearning
|
1fc0d68f56bef045f1076706be047976fbcace12
|
fa8500eaf9d7b67129829b4801ebd71284f99a0c
|
refs/heads/main
| 2023-05-27T00:05:24.401305
| 2021-06-19T12:28:04
| 2021-06-19T12:28:04
| 378,397,766
| 0
| 0
| null | 2021-06-19T12:18:41
| 2021-06-19T11:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 267
|
py
|
import time
import datetime
epochseconds = time.time()  # seconds since the Unix epoch, as a float
print(epochseconds)
t = time.ctime(epochseconds)  # human-readable local-time string
print(t)
dt = datetime.datetime.today()
print('current date : {}/{}/{}'.format(dt.day, dt.month, dt.year))
print('current time : {}:{}:{}'.format(dt.hour, dt.minute, dt.second))
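# The same output can be produced with strftime (an added sketch; note
# strftime zero-pads the fields, unlike the format() calls above):
print(dt.strftime('current date : %d/%m/%Y'))
print(dt.strftime('current time : %H:%M:%S'))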
|
[
"lamaniya.aditya@gmail.com"
] |
lamaniya.aditya@gmail.com
|
ee3b63a377f084f45bb320521be752079041b9e6
|
a0a787923477b8c944b0973c932aaef379b573f5
|
/DualBlock_fc.py
|
6dfef3549f6adf977b40bf7ed30925406c84441d
|
[] |
no_license
|
bdus/Action-Recognition
|
553e0b91ce54c0b049c826273b8c16df733075a1
|
e2081963afbb89c4db12034f0168377d0369b789
|
refs/heads/master
| 2022-10-15T08:56:23.448630
| 2020-06-16T14:34:52
| 2020-06-16T14:34:52
| 218,713,321
| 1
| 0
| null | 2020-01-22T10:49:04
| 2019-10-31T07:57:05
|
HTML
|
UTF-8
|
Python
| false
| false
| 4,998
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
2020-2-5 18:17:19
@author: bdus
this is the model for idea1 experiment 4
video frames are split into foregrounds and backgrounds,
which are then fed into a dual-stream network respectively;
the network fuses the two results using a configurable method
the inputs are the bgs and fgs frames
"""
import os
import mxnet as mx
from mxnet import init
from mxnet.gluon import nn
from mxnet.gluon.nn import HybridBlock
from model_zoo import get_model as myget
__all__ = ['DualBlock','get_dualnet']
class DualBlock(HybridBlock):
def __init__(self,nclass,num_segments,fgs_model,bgs_model,fusion_method='avg',num_crop=1,input_channel=3,dropout_ratio=0.9, init_std=0.001,feat_dim=4096,**kwargs):
super(DualBlock, self).__init__(**kwargs)
self.nclass = nclass
self.num_segments = num_segments
self.feat_dim = feat_dim
self.dropout_ratio=dropout_ratio
self.init_std=init_std
self.num_crop=num_crop
self.fusion_method = fusion_method
self.fgs_model = fgs_model
self.bgs_model = bgs_model
print('fusion_method:',fusion_method)
with self.name_scope():
self.pretrained_model_bgs = myget(name=self.bgs_model, nclass=self.nclass, num_segments=self.num_segments,input_channel=input_channel,pretrained=True)
self.dp = nn.Dropout(rate=self.dropout_ratio)
self.fc = nn.HybridSequential(prefix='')
self.fc.add( nn.Dense(units=512, in_units=512+1024,
weight_initializer=init.Normal(sigma=self.init_std)),
nn.Dense(units=self.nclass, in_units=512,
weight_initializer=init.Normal(sigma=self.init_std)) )
self.pretrained_model_fgs = myget(name=self.fgs_model,nclass=self.nclass,num_segments=self.num_segments,input_channel=input_channel,pretrained=True)
self.fc.initialize()
def hybrid_forward(self, F, x_bgs, x_fgs):
#print(x_bgs.shape)#(80, 3, 224, 224)
#print(x_fgs.shape)#(80, 3, 224, 224)
#x_bgs = self.pretrained_model_bgs(x_bgs)
#x_fgs = self.pretrained_model_fgs(x_fgs)
if 'resnet18_v1b' in self.bgs_model:
x_bgs = self.pretrained_model_bgs.conv1(x_bgs)
x_bgs = self.pretrained_model_bgs.bn1(x_bgs)
x_bgs = self.pretrained_model_bgs.relu(x_bgs)
x_bgs = self.pretrained_model_bgs.maxpool(x_bgs)
x_bgs = self.pretrained_model_bgs.layer1(x_bgs)
x_bgs = self.pretrained_model_bgs.layer2(x_bgs)
x_bgs = self.pretrained_model_bgs.layer3(x_bgs)
x_bgs = self.pretrained_model_bgs.layer4(x_bgs)
x_bgs = self.pretrained_model_bgs.avgpool(x_bgs)
x_bgs = self.pretrained_model_bgs.flat(x_bgs)
x_bgs = self.pretrained_model_bgs.drop(x_bgs)
#print('res:',x_bgs.shape) (160, 512)
x_bgs = F.reshape(x_bgs, shape=(-1, self.num_segments * self.num_crop, 512))
x_bgs = F.mean(x_bgs, axis=1)
else:
            raise ValueError('bgs_model not supported')
if 'eco_resnet18_v1b' in self.fgs_model:
x_fgs = self.pretrained_model_fgs.conv1(x_fgs)
x_fgs = self.pretrained_model_fgs.bn1(x_fgs)
x_fgs = self.pretrained_model_fgs.relu(x_fgs)
x_fgs = self.pretrained_model_fgs.maxpool(x_fgs)
x_fgs = self.pretrained_model_fgs.layer1(x_fgs)
x_fgs = self.pretrained_model_fgs.layer2(x_fgs)
x_fgs = x_fgs.reshape((-1,self.num_segments,128,28,28))
x_fgs = x_fgs.transpose(axes=(0,2,1,3,4))
x_fgs = self.pretrained_model_fgs.features_3d(x_fgs)
x_fgs = F.flatten(x_fgs)
else:
            raise ValueError('fgs_model not supported')
#print(x_bgs.shape) (20, 512)
#print(x_fgs.shape) (20, 1024)
x = F.concat(x_bgs,x_fgs,dim=1)
#print('x:',x.shape)
#if self.fusion_method == 'avg':
#x = F.mean(x, axis=1)
#elif self.fusion_method == 'max':
#x = F.max(x,axis=1)
#else:
#raise ValueError('fusion_method not supported')
#print('x:',x.shape)
x = self.dp(x)
x = self.fc(x)
return x
def dualnet_avg(fgs_model,bgs_model,fgs_path,bgs_path,**kwargs):
return get_dualnet(fgs_model,bgs_model,fgs_path,bgs_path,fusion_method='avg',**kwargs)
def dualnet_max(fgs_model,bgs_model,fgs_path,bgs_path,**kwargs):
return get_dualnet(fgs_model,bgs_model,fgs_path,bgs_path,fusion_method='max',**kwargs)
def get_dualnet(fgs_model,bgs_model,fgs_path,bgs_path, **kwargs):
net = DualBlock(fgs_model=fgs_model,bgs_model=bgs_model,**kwargs)
print(bgs_path,',',os.path.exists(bgs_path))
#net.pretrained_model_bgs.load_parameters(bgs_path)
#net.pretrained_model_fgs.load_parameters(fgs_path)
return net
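# Illustrative construction sketch (model names and parameter paths are
# hypothetical; they only need to contain the substrings checked in
# hybrid_forward above):
#   net = get_dualnet(fgs_model='eco_resnet18_v1b_kinetics400',
#                     bgs_model='resnet18_v1b_kinetics400',
#                     fgs_path='fgs.params', bgs_path='bgs.params',
#                     nclass=101, num_segments=8)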
|
[
"rovingthrough@163.com"
] |
rovingthrough@163.com
|
f0ee178adb59a9cacbdca23a12c8f609140aea58
|
42241260923cd24dac14a908be892064e9d29158
|
/training.py
|
5044d02852e37f1a1135f96e440c36d48df01bba
|
[] |
no_license
|
yaoweihu/ImageClassification
|
ce1ae7eb9724d8ab33ade4c1febc5bf80a8c9fc0
|
c91ed04a7781b8159aeea4e7ae06a7dc920708b0
|
refs/heads/master
| 2020-09-02T09:26:33.410827
| 2019-11-18T05:25:00
| 2019-11-18T05:25:00
| 219,190,019
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,221
|
py
|
import torch
from metrics import accuracy
from utils import AverageMeter
def train(train_loader, model, criterion, optimizer):
losses = AverageMeter()
top1 = AverageMeter()
model.train()
for input, target in train_loader:
if torch.cuda.is_available():
input, target = input.cuda(), target.cuda()
output = model(input)
loss = criterion(output, target)
prec = accuracy(output, target)[0]
losses.update(loss.item(), input.size(0))
top1.update(prec.item(), input.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
return top1.avg, losses.avg
def validate(val_loader, model, criterion):
losses = AverageMeter()
top1 = AverageMeter()
model.eval()
with torch.no_grad():
for input, target in val_loader:
if torch.cuda.is_available():
input, target = input.cuda(), target.cuda()
output = model(input)
loss = criterion(output, target)
prec = accuracy(output, target)[0]
losses.update(loss.item(), input.size(0))
top1.update(prec.item(), input.size(0))
return top1.avg, losses.avg
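# Minimal epoch-loop sketch using the two helpers above (model, loaders,
# criterion and optimizer are assumed to be built elsewhere):
#   for epoch in range(num_epochs):
#       train_acc, train_loss = train(train_loader, model, criterion, optimizer)
#       val_acc, val_loss = validate(val_loader, model, criterion)
#       print(f'epoch {epoch}: train {train_acc:.2f}%, val {val_acc:.2f}%')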
|
[
"huyaowei1992@gmail.com"
] |
huyaowei1992@gmail.com
|
b306ba76b5775d50c493b8e1fb27a87afcf4b412
|
c6fa5a9806d219b9fb56aef7e08c07cdcff4772f
|
/model.py
|
132879e39dac8130b423e45a1caae3dc5e260952
|
[
"MIT"
] |
permissive
|
mengkunzhao/tf2-unet
|
c0b84bc9af9c660382c232b0a43acbc864ce3ff5
|
552fba0d234a69a40c11447aff59fde2ddd11d29
|
refs/heads/master
| 2022-03-15T11:32:03.390877
| 2019-12-05T12:13:38
| 2019-12-05T12:13:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,460
|
py
|
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
def unet(input_size=(64, 80, 1), num_classes=10, use_sep_conv=False, use_deconv=False):
inputs = Input(input_size)
if use_sep_conv:
conv1 = Conv2D(8, 1, padding='same')(inputs)
conv1 = Conv2D(16, 1, padding='same',
kernel_initializer='he_normal')(DepthwiseConv2D(3,
padding='same',
kernel_initializer='he_normal')(conv1))
conv1 = BatchNormalization()(conv1)
conv1 = Activation('relu')(conv1)
conv1 = Conv2D(16, 1, padding='same',
kernel_initializer='he_normal')(DepthwiseConv2D(3,
padding='same',
kernel_initializer='he_normal')(conv1))
conv1 = BatchNormalization()(conv1)
conv1 = Activation('relu')(conv1)
else:
conv1 = Conv2D(8, 3, padding='same', kernel_initializer='he_normal')(inputs)
conv1 = BatchNormalization()(conv1)
conv1 = Activation('relu')(conv1)
conv1 = Conv2D(8, 3, padding='same', kernel_initializer='he_normal')(conv1)
conv1 = BatchNormalization()(conv1)
conv1 = Activation('relu')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
if use_sep_conv:
conv2 = Conv2D(20, 1, padding='same',
kernel_initializer='he_normal')(DepthwiseConv2D(3,
padding='same',
kernel_initializer='he_normal')(pool1))
conv2 = BatchNormalization()(conv2)
conv2 = Activation('relu')(conv2)
conv2 = Conv2D(20, 1, padding='same',
kernel_initializer='he_normal')(DepthwiseConv2D(3,
padding='same',
kernel_initializer='he_normal')(conv2))
conv2 = BatchNormalization()(conv2)
conv2 = Activation('relu')(conv2)
else:
conv2 = Conv2D(12, 3, padding='same', kernel_initializer='he_normal')(pool1)
conv2 = BatchNormalization()(conv2)
conv2 = Activation('relu')(conv2)
conv2 = Conv2D(12, 3, padding='same', kernel_initializer='he_normal')(conv2)
conv2 = BatchNormalization()(conv2)
conv2 = Activation('relu')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
if use_sep_conv:
conv3 = Conv2D(32, 1, padding='same',
kernel_initializer='he_normal')(DepthwiseConv2D(3,
padding='same',
kernel_initializer='he_normal')(pool2))
conv3 = BatchNormalization()(conv3)
conv3 = Activation('relu')(conv3)
conv3 = Conv2D(32, 1, padding='same',
kernel_initializer='he_normal')(DepthwiseConv2D(3,
padding='same',
kernel_initializer='he_normal')(conv3))
conv3 = BatchNormalization()(conv3)
conv3 = Activation('relu')(conv3)
else:
conv3 = Conv2D(16, 3, padding='same', kernel_initializer='he_normal')(pool2)
conv3 = BatchNormalization()(conv3)
conv3 = Activation('relu')(conv3)
conv3 = Conv2D(16, 3, padding='same', kernel_initializer='he_normal')(conv3)
conv3 = BatchNormalization()(conv3)
conv3 = Activation('relu')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
if use_sep_conv:
conv4 = Conv2D(32, 1, padding='same',
kernel_initializer='he_normal')(DepthwiseConv2D(3,
padding='same',
kernel_initializer='he_normal')(pool3))
conv4 = BatchNormalization()(conv4)
conv4 = Activation('relu')(conv4)
conv4 = Conv2D(32, 1, padding='same',
kernel_initializer='he_normal')(DepthwiseConv2D(3,
padding='same',
kernel_initializer='he_normal')(conv4))
conv4 = BatchNormalization()(conv4)
conv4 = Activation('relu')(conv4)
else:
conv4 = Conv2D(16, 3, padding='same', kernel_initializer='he_normal')(pool3)
conv4 = BatchNormalization()(conv4)
conv4 = Activation('relu')(conv4)
conv4 = Conv2D(16, 3, padding='same', kernel_initializer='he_normal')(conv4)
conv4 = BatchNormalization()(conv4)
conv4 = Activation('relu')(conv4)
if use_sep_conv:
up5 = Conv2D(48, 1, padding='same', kernel_initializer='he_normal')(DepthwiseConv2D(
3, padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2),
interpolation='bilinear')(conv4)))
elif use_deconv:
up5 = Conv2DTranspose(12, 3, 2, activation='relu', padding='same', kernel_initializer='he_normal')((conv4))
else:
up5 = Conv2D(12, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2), interpolation='bilinear')(conv4))
up5 = BatchNormalization()(up5)
up5 = Activation('relu')(up5)
merge5 = Concatenate(axis=3)([conv3, up5])
conv5 = Conv2D(12, 3, padding='same', kernel_initializer='he_normal')(merge5)
conv5 = BatchNormalization()(conv5)
conv5 = Activation('relu')(conv5)
conv5 = Conv2D(12, 3, padding='same', kernel_initializer='he_normal')(conv5)
conv5 = BatchNormalization()(conv5)
conv5 = Activation('relu')(conv5)
if use_sep_conv:
up6 = Conv2D(36, 1, padding='same', kernel_initializer='he_normal')(DepthwiseConv2D(
3, padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2),
interpolation='bilinear')(conv5)))
elif use_deconv:
up6 = Conv2DTranspose(12, 3, 2, padding='same', kernel_initializer='he_normal')((conv5))
else:
up6 = Conv2D(12, 3, padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2), interpolation='bilinear')(conv5))
up6 = BatchNormalization()(up6)
up6 = Activation('relu')(up6)
merge6 = Concatenate(axis=3)([conv2, up6])
conv6 = Conv2D(12, 3, padding='same', kernel_initializer='he_normal')(merge6)
conv6 = BatchNormalization()(conv6)
conv6 = Activation('relu')(conv6)
conv6 = Conv2D(12, 3, padding='same', kernel_initializer='he_normal')(conv6)
conv6 = BatchNormalization()(conv6)
conv6 = Activation('relu')(conv6)
if use_sep_conv:
up7 = Conv2D(24, 1, padding='same', kernel_initializer='he_normal')(DepthwiseConv2D(
3, padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2),
interpolation='bilinear')(conv6)))
elif use_deconv:
up7 = Conv2DTranspose(8, 3, 2, padding='same', kernel_initializer='he_normal')((conv6))
else:
up7 = Conv2D(8, 3, padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2), interpolation='bilinear')(conv6))
up7 = BatchNormalization()(up7)
up7 = Activation('relu')(up7)
merge7 = Concatenate(axis=3)([conv1, up7])
conv7 = Conv2D(8, 3, padding='same', kernel_initializer='he_normal')(merge7)
conv7 = BatchNormalization()(conv7)
conv7 = Activation('relu')(conv7)
conv7 = Conv2D(8, 3, padding='same', kernel_initializer='he_normal')(conv7)
conv7 = BatchNormalization()(conv7)
conv7 = Activation('relu')(conv7)
conv8 = Conv2D(num_classes, 1, activation='softmax')(conv7)
model = Model(inputs=inputs, outputs=conv8)
return model
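# Minimal sanity-check sketch (added; builds the default model and prints
# its layer summary):
if __name__ == '__main__':
    model = unet()
    model.summary()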
|
[
"zhengankun@163.com"
] |
zhengankun@163.com
|
a5a2b893eee8228ae7d67d54efdd3555d3ba7b7d
|
dedf3f08e8fe8fbf3445e40ad85f4ea53f92b7f7
|
/his_diary/diary/admin.py
|
5babb89ae834920ad578cfb962ee5c3b4957078f
|
[] |
no_license
|
timo7656/New-Project
|
606fe8116012b399653f3edef221c1c08f049dc9
|
3b01f628fe6eb2f0c846b166baf31c5580025fbf
|
refs/heads/master
| 2021-01-23T03:54:33.573531
| 2017-03-25T08:32:53
| 2017-03-25T08:32:53
| 86,137,305
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 205
|
py
|
from django.contrib import admin
from .models import Article, Comment
# Register your models here.
# Add the admin entries!!
@admin.register(Article, Comment)
class DiaryAdmin(admin.ModelAdmin):
pass
|
[
"Sungjunwg@choeseongjun-ui-MacBook-Pro.local"
] |
Sungjunwg@choeseongjun-ui-MacBook-Pro.local
|
dedf38ac505de5262415143ffbc2b42322fedceb
|
afb8489bf5c16d47472eedf017f2d76016d6c386
|
/blog/models.py
|
32fbf436f11896956e5a3346f5704dbfd1403c9b
|
[] |
no_license
|
FugaHosomi/my-first-blog
|
8bf04a07136ed5b30dc2995ac48acafad441ca2e
|
652be98f118f93e72af25d1d4a1c21c1b3be9855
|
refs/heads/master
| 2023-08-17T18:03:24.263968
| 2021-10-12T13:05:42
| 2021-10-12T13:05:42
| 396,006,063
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 583
|
py
|
from django.conf import settings
from django.db import models
from django.utils import timezone
# Create your models here.
class Post(models.Model):
author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
|
[
"m.hosomi@main.domain.local"
] |
m.hosomi@main.domain.local
|
e1a5f9f329b21a96c5fd92d94a2f4f61246a1101
|
952c71fea9dbab69aba63f56a4a6d1da538fdba4
|
/accounts/migrations/0006_auto_20180506_2231.py
|
3dea182e89fce70e685572099b0c318e1b8f12e8
|
[] |
no_license
|
Jd0824um/Django--VideoStreamingSite
|
40e29a54e851da80ebbdc60027242f4a150d0498
|
69df77df7d64b2c8a853fb988aa7146961dc3d36
|
refs/heads/master
| 2022-12-12T23:38:11.015258
| 2018-05-08T03:31:51
| 2018-05-08T03:31:51
| 131,908,246
| 1
| 0
| null | 2022-11-22T02:28:55
| 2018-05-02T21:34:36
|
Python
|
UTF-8
|
Python
| false
| false
| 516
|
py
|
# Generated by Django 2.0.5 on 2018-05-07 03:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_auto_20180506_2205'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"jd0824um@go.minneapolis.edu"
] |
jd0824um@go.minneapolis.edu
|
a609e0105364c9a594fdd0f5bcc9bc447c9635de
|
f094dad8af03ddb18a4357b09105e55f1362f040
|
/FlowersML/utils.py
|
609ca247b317984902ddd407ec97a1efceb98a42
|
[] |
no_license
|
hmilien/UdacityML
|
5fa301561df3d5810142529ec80a5297286f3142
|
7b661f2a45c0ecc42711e83ba022936c8744d9d7
|
refs/heads/master
| 2020-12-31T11:13:45.346640
| 2020-04-03T16:35:26
| 2020-04-03T16:35:26
| 239,014,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,473
|
py
|
import torch
from torchvision import datasets, transforms
from PIL import Image
import numpy as np
import json
def load_data(isTrainMode, dirPath):
if(isTrainMode):
dataTransforms = transforms.Compose([transforms.Resize([224,224]),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])])
else:
dataTransforms = transforms.Compose([transforms.Resize([224,224]),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])])
# TODO: Load the datasets with ImageFolder
imageData = datasets.ImageFolder(dirPath, transform=dataTransforms)
if(isTrainMode):
return imageData, torch.utils.data.DataLoader(imageData, batch_size=64, shuffle=True)
else:
return imageData, torch.utils.data.DataLoader(imageData, batch_size=32)
def load_category(cat_to_name):
if(cat_to_name != ''):
        # open the path that was passed in, not a hardcoded filename
        with open(cat_to_name, 'r') as f:
cat_to_name = json.load(f)
return cat_to_name
def process_image(img):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
    returns a Numpy array
'''
# TODO: Process a PIL image for use in a PyTorch model
pil_image = Image.open(img)
pil_image.load()
#Resizing
if pil_image.size[0] > pil_image.size[1]:
pil_image.thumbnail((10000, 256))
else:
pil_image.thumbnail((256, 10000))
#Cropping
size = pil_image.size
pil_image = pil_image.crop((size[0]//2 -(224/2),
size[1]//2 - (224/2),
size[0]//2 +(224/2),
size[1]//2 + (224/2)))
np_image = np.array(pil_image)/255
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_image =(np_image - mean)/std
np_image = np_image.transpose((2, 0, 1))
return np_image
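# Illustrative usage sketch (directory and file paths are hypothetical):
#   train_data, train_loader = load_data(True, 'flowers/train')
#   cat_to_name = load_category('cat_to_name.json')
#   img = process_image('flowers/test/1/image_06743.jpg')  # (3, 224, 224) array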
|
[
"heavens.milien@gmail.com"
] |
heavens.milien@gmail.com
|
0073c2800cb9c1491b9cba4519ee1a388aa6e2d6
|
b84e7da785a09d010edaca222fe19b0d52184c63
|
/work_dir/experimento/faster_rcnn_r101_fpn_bn_50e_coco_pre/faster_rcnn_r101_fpn_bn_50e_coco_pre.py
|
9888b50bfd745e4ea85664a9b8f36600b573e2b1
|
[] |
no_license
|
camilaandrad/object-detect
|
78f5d38d632c1107be3a788898a10ff888232e14
|
7c9435a3f492320af5dfbf68874eda6f39198ef1
|
refs/heads/master
| 2022-11-06T22:31:26.368140
| 2020-07-20T15:03:35
| 2020-07-20T15:03:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,137
|
py
|
model = dict(
type='FasterRCNN',
pretrained='torchvision://resnet101',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))))
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=1,
workers_per_gpu=2,
train=dict(
type='CocoDataset',
ann_file=
'/home/Documentos/doutorado/experimentos/train.json',
img_prefix=
'/home/Documentos/doutorado/experimentos/Images',
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
],
classes=('capacete', 'colete', 'trabalhador')),
val=dict(
type='CocoDataset',
ann_file=
'/home/Documentos/doutorado/experimentos/val.json',
img_prefix=
'/home/Documentos/doutorado/experimentos/Images',
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
],
classes=('capacete', 'colete', 'trabalhador')),
test=dict(
type='CocoDataset',
ann_file=
'/home/Documentos/doutorado/experimentos/test.json',
img_prefix=
'/home/Documentos/doutorado/experimentos/Images',
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
],
classes=('capacete', 'colete', 'trabalhador')))
evaluation = dict(interval=1, metric='bbox')
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[16, 19])
total_epochs = 50
checkpoint_config = dict(interval=10)
log_config = dict(interval=1, hooks=[dict(type='TextLoggerHook')])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
classes = ('capacete', 'colete', 'trabalhador')
prefix_path = '/home/Documentos/doutorado/experimentos/'
work_dir = '/home/Documentos/doutorado/experimentos/mmdetection/work_dir/faster_rcnn_r101_fpn_bn_50e_coco_pre'
gpu_ids = range(0, 1)
|
[
"noreply@github.com"
] |
camilaandrad.noreply@github.com
|
2c79b23142060d76510c32b2e38cb7be973b788b
|
75d89d8f3a6b6b06da95105243eebb7101922d07
|
/test.py
|
24fcb2666798184e4509ed7d9218ef46819e69c6
|
[] |
no_license
|
kelvinL3/kMeans-Fitting-Lines-to-Points
|
28d63d0d852c2d2b07cb2011cf3e96c340c4d214
|
049bd63541707a929163346e5758104894915003
|
refs/heads/master
| 2020-03-09T08:54:54.062583
| 2018-04-12T16:29:51
| 2018-04-12T16:29:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
import matplotlib.pyplot as plt
import numpy as np
import math
import scipy.io as sio
# for graphing
def graph(function):
x = np.array(range(0,80))
y = eval(function)
plt.plot(x, y)
Il = sio.loadmat('edges.mat')
I = Il['bw05']
# print(I)
# plt.matshow(I)
plt.imshow(I, cmap='Greys')
graph('-3.75/0.45 + (0.88/0.45)*x')
graph('6/0.91 + (0.41/0.91)*x')
graph('87/0.61 - (0.73/0.61)*x')
plt.show()
|
[
"kelvinliu1234@gmail.com"
] |
kelvinliu1234@gmail.com
|
f9bc8486e87a590776570961aba3fe8dbfcc4259
|
c855cbe713f247b59562c5045273766ee288dd6a
|
/user_profile/migrations/0002_userprofile_avatar.py
|
92da5c9c12d3237a28c3b36487fd30c3afb14e0d
|
[] |
no_license
|
underpr00f/drfreact
|
27c3709c155579708d5e379a6973c1292f7479da
|
47fd7a6c1f8340583ac3bcbc154a81a37b24d6fd
|
refs/heads/master
| 2020-03-24T01:42:09.927601
| 2019-05-12T12:35:49
| 2019-05-12T12:35:49
| 142,347,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
# Generated by Django 2.1.7 on 2019-03-15 20:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_profile', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='avatar',
field=models.ImageField(blank=True, null=True, upload_to='static/images'),
),
]
|
[
"underproof2014@gmail.com"
] |
underproof2014@gmail.com
|
8dcae2e44dc7f1cba2e8821e9cdb60f5b0619eee
|
2d39088748666f7a07337a6d7fd69a1b1718caf2
|
/testfile/test_CornerNetCls.py
|
7a1389a46abc6e28764adbd43438c7bf049aaaf2
|
[
"BSD-3-Clause"
] |
permissive
|
philiptzou/DeepRule
|
d74f3c4731cb06d890096ef1d919b06ebbff1220
|
ab309ceb7232e9e9c46aaff99116d8265c5bd45e
|
refs/heads/master
| 2023-09-02T17:19:02.603433
| 2021-11-12T16:24:28
| 2021-11-12T16:24:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,965
|
py
|
import os
import cv2
import json
import numpy as np
import torch
import matplotlib.pyplot as plt
from tqdm import tqdm
from config import system_configs
from utils import crop_image, normalize_
import external.nms as nms
def _rescale_points(dets, ratios, borders, sizes):
xs, ys = dets[:, :, 2], dets[:, :, 3]
xs /= ratios[0, 1]
ys /= ratios[0, 0]
xs -= borders[0, 2]
ys -= borders[0, 0]
np.clip(xs, 0, sizes[0, 1], out=xs)
np.clip(ys, 0, sizes[0, 0], out=ys)
def save_image(data, fn):
sizes = np.shape(data)
height = float(sizes[0])
width = float(sizes[1])
fig = plt.figure()
fig.set_size_inches(width/height, 1, forward=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(data)
plt.savefig(fn, dpi = height)
plt.close()
def kp_decode(nnet, images, K, ae_threshold=0.5, kernel=3):
with torch.no_grad():
detections, time_backbone, time_psn = nnet.test([images], ae_threshold=ae_threshold, K=K, kernel=kernel)
#print(detections)
detections_tl = detections[0]
detections_br = detections[1]
cls = detections[2].cpu().numpy()
offset = detections[3].cpu().numpy()
detections_tl = detections_tl.data.cpu().numpy().transpose((2, 1, 0))
detections_br = detections_br.data.cpu().numpy().transpose((2, 1, 0))
return detections_tl, detections_br, cls, offset, True
def kp_detection(image, db, nnet, debug=False, decode_func=kp_decode, cuda_id=0):
K = db.configs["top_k"]
ae_threshold = db.configs["ae_threshold"]
nms_kernel = db.configs["nms_kernel"]
categories = db.configs["categories"]
nms_threshold = db.configs["nms_threshold"]
max_per_image = db.configs["max_per_image"]
if True:
height, width = image.shape[0:2]
detections_point_tl = []
detections_point_br = []
scale = 1.0
new_height = int(height * scale)
new_width = int(width * scale)
new_center = np.array([new_height // 2, new_width // 2])
inp_height = new_height | 127
inp_width = new_width | 127
images = np.zeros((1, 3, inp_height, inp_width), dtype=np.float32)
ratios = np.zeros((1, 2), dtype=np.float32)
borders = np.zeros((1, 4), dtype=np.float32)
sizes = np.zeros((1, 2), dtype=np.float32)
out_height, out_width = (inp_height + 1) // 4, (inp_width + 1) // 4
height_ratio = out_height / inp_height
width_ratio = out_width / inp_width
resized_image = cv2.resize(image, (new_width, new_height))
resized_image, border, offset = crop_image(resized_image, new_center, [inp_height, inp_width])
resized_image = resized_image / 255.
#normalize_(resized_image, db.mean, db.std)
images[0] = resized_image.transpose((2, 0, 1))
borders[0] = border
sizes[0] = [int(height * scale), int(width * scale)]
ratios[0] = [height_ratio, width_ratio]
if torch.cuda.is_available():
images = torch.from_numpy(images).cuda(cuda_id)
else:
images = torch.from_numpy(images)
dets_tl, dets_br, cls, offset, flag = decode_func(nnet, images, K, ae_threshold=ae_threshold, kernel=nms_kernel)
offset = (offset + 1) * 100
image_info = {'data_type': int(cls), 'offset': float(offset)}
_rescale_points(dets_tl, ratios, borders, sizes)
_rescale_points(dets_br, ratios, borders, sizes)
detections_point_tl.append(dets_tl)
detections_point_br.append(dets_br)
detections_point_tl = np.concatenate(detections_point_tl, axis=1)
detections_point_br = np.concatenate(detections_point_br, axis=1)
#print('1')
#print(detections_point.shape)
classes_p_tl = detections_point_tl[:, 0, 1]
classes_p_br = detections_point_br[:, 0, 1]
#print('2')
#print(classes_p.shape)
# reject detections with negative scores
keep_inds_p = (detections_point_tl[:, 0, 0] > 0)
detections_point_tl = detections_point_tl[keep_inds_p, 0]
classes_p_tl = classes_p_tl[keep_inds_p]
keep_inds_p = (detections_point_br[:, 0, 0] > 0)
detections_point_br = detections_point_br[keep_inds_p, 0]
classes_p_br = classes_p_br[keep_inds_p]
#print('3')
#print(detections_point.shape)
top_points_tl = {}
top_points_br = {}
for j in range(categories):
keep_inds_p = (classes_p_tl == j)
top_points_tl[j + 1] = detections_point_tl[keep_inds_p].astype(np.float32)
keep_inds_p = (classes_p_br == j)
top_points_br[j + 1] = detections_point_br[keep_inds_p].astype(np.float32)
#print(top_points[image_id][j + 1][0])
scores = np.hstack([
top_points_tl[j][:, 0]
for j in range(1, categories + 1)
])
if len(scores) > max_per_image:
kth = len(scores) - max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, categories + 1):
keep_inds = (top_points_tl[j][:, 0] >= thresh)
top_points_tl[j] = top_points_tl[j][keep_inds]
scores = np.hstack([
top_points_br[j][:, 0]
for j in range(1, categories + 1)
])
if len(scores) > max_per_image:
kth = len(scores) - max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, categories + 1):
keep_inds = (top_points_br[j][:, 0] >= thresh)
top_points_br[j] = top_points_br[j][keep_inds]
return image_info, top_points_tl, top_points_br
def testing(image, db, nnet, debug=False, cuda_id=0):
return globals()[system_configs.sampling_function](image, db, nnet, debug=debug, cuda_id=cuda_id)
|
[
"t-juluo@microsoft.com"
] |
t-juluo@microsoft.com
|
6ff8f63a04c13564fb138d5caa95de69b78959ac
|
7d54cc2fe2720000ee83e6825cfaf7c3eabd38c2
|
/epaxos/tests/test_preaccept.py
|
e8fe64e48b37481577fbb122d9b3199047fa87c4
|
[] |
no_license
|
bdeggleston/cassandra_epaxos_prototype
|
5401f17d80e21fd550614ba235176ffd1c4e2599
|
85c3775ca9318f7e1dce96fb96fb29ab75810f2a
|
refs/heads/master
| 2016-09-06T13:43:37.389641
| 2014-09-24T00:57:49
| 2014-09-24T00:57:49
| 24,395,111
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,143
|
py
|
from base_epaxos_test import EpaxosTestCase
from epaxos.replica import *
class PreacceptTest(EpaxosTestCase):
def test_replica_leader_instance(self):
replicas = self.create_replicas(3)
leader = replicas[0]
remote = replicas[1:]
instance = leader.get_instance(replicas)
leader.preaccept(instance)
self.assertIn(instance.iid, leader.instances)
self.assertEqual(instance.dependencies, set())
for replica in remote:
self.assertIn(instance.iid, replica.instances)
self.assertEqual(replica.instances[instance.iid].dependencies, set())
def test_non_replica_leader_instance(self):
replicas = self.create_replicas(3)
leader = self.create_replicas(1)[0]
instance = leader.get_instance(replicas)
leader.preaccept(instance)
self.assertNotIn(instance.iid, leader.instances)
# shouldn't be anything until after accept
self.assertIsNone(instance.dependencies)
for replica in replicas:
self.assertIn(instance.iid, replica.instances)
self.assertEqual(replica.instances[instance.iid].dependencies, set())
def test_handle_preaccept_agreeing_deps(self):
replicas = self.create_replicas(2)
leader, remote = replicas
committed = remote.get_instance(replicas)
committed.commit()
remote.instances[committed.iid] = committed
expected_deps = remote.current_deps()
self.assertEqual(expected_deps, {committed.iid})
instance = remote.get_instance(replicas)
instance.state = Instance.State.PREACCEPTED
instance.dependencies = {committed.iid}
response = remote.handle_preaccept(PreacceptRequest(instance))
self.assertIsInstance(response, PreacceptResponse)
remote_instance = remote.instances[instance.iid]
self.assertIsNot(remote_instance, instance)
self.assertEqual(remote_instance.dependencies, expected_deps)
def test_handle_preaccept_disagreeing_deps(self):
replicas = self.create_replicas(2)
leader, remote = replicas
committed = remote.get_instance(replicas)
committed.commit()
remote.instances[committed.iid] = committed
expected_deps = remote.current_deps()
self.assertEqual(expected_deps, {committed.iid})
instance = remote.get_instance(replicas)
instance.state = Instance.State.PREACCEPTED
instance.dependencies = set()
response = remote.handle_preaccept(PreacceptRequest(instance))
self.assertIsInstance(response, PreacceptResponse)
remote_instance = remote.instances[instance.iid]
self.assertIsNot(remote_instance, instance)
self.assertEqual(remote_instance.dependencies, expected_deps)
def test_quorum_failure_marks_instance_as_fast_path_impossible(self):
pass
def test_disagreeing_responses_marks_fast_path_impossible(self):
pass
def test_less_than_fast_path_quorum_responses_marks_fast_path_impossible(self):
pass
class PreacceptIntegrationTest(EpaxosTestCase):
pass
|
[
"bdeggleston@gmail.com"
] |
bdeggleston@gmail.com
|
d46b5bb9580871761d21e3059135c26129baf9b4
|
4a18e1a0baa8a9057dbaa2a7d2eacb994137088f
|
/app/grids/StaticGrid.py
|
bae7692bedb1c64a00209d3e4d7be0df6bbc922c
|
[] |
no_license
|
CoDen256/TicTacToe
|
3fd6f5d5e11de1fc84175e363d54dba4775804a0
|
979af9ba6d93ac12014a7751505ce37981e2c31f
|
refs/heads/master
| 2022-10-19T22:36:15.045252
| 2019-06-02T22:32:45
| 2019-06-02T22:32:45
| 183,956,187
| 0
| 1
| null | 2022-10-09T11:40:28
| 2019-04-28T20:35:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,915
|
py
|
from grids.Grid import Grid
import pygame
class StaticGrid(Grid):
def __init__(self, parent, position, cell_size, columns=3, rows=3):
super().__init__(parent, columns, rows, cell_size, position, scale=1)
self.scores = [0, 0]
self.touched = False
self.last_pos = None
def update_input(self, event):
# Only handling the touching: grid is not resizable
# Handles each event in loop
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
self.last_pos = pygame.mouse.get_pos()
if event.type == pygame.MOUSEBUTTONUP:
if event.button == 1:
self.touched = True
def update(self):
# Nothing to update
pass
def _check_winner(self, grid):
# rows
for x in range(0, 3):
row = set([grid[x][0], grid[x][1], grid[x][2]])
if len(row) == 1 and grid[x][0] != 0:
return grid[x][0]
# columns
for x in range(0, 3):
column = set([grid[0][x], grid[1][x], grid[2][x]])
if len(column) == 1 and grid[0][x] != 0:
return grid[0][x]
# diagonals
diag1 = set([grid[0][0], grid[1][1], grid[2][2]])
diag2 = set([grid[0][2], grid[1][1], grid[2][0]])
if (len(diag1) == 1 or len(diag2) == 1) and grid[1][1] != 0:
return grid[1][1]
return 0
def render(self):
# Parent surface blits grid surface at the position
self.parent.blit(self.surface, (self.x, self.y))
self.render_grid()
self.render_cells()
self.touched = False
@property
def is_just_pressed(self):
        # If not touched, returns False
        # otherwise returns the position of the touch
if not self.touched:
return False
return self.last_pos
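# _check_winner sketch (illustrative): any row, column, or diagonal of
# identical non-zero marks wins, e.g.
#   grid = [[1, 1, 1],
#           [0, 2, 0],
#           [2, 0, 2]]   ->  returns 1 (top row)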
|
[
"den.blackshov@gmail.com"
] |
den.blackshov@gmail.com
|
72d053e7e01fe59bef3403b40576e6e3d075992c
|
7826b4421030a0be0284cb664f88539162b33b0c
|
/accounts/tests.py
|
0a0f2873e9a100b8f98429c6c9ce495341a5928b
|
[] |
no_license
|
donkey-engine/donkey-engine
|
13dbc2ba1abc712ae2a4c6c55846a5b239c052a3
|
82a2537c2ae841edec591a36c3f76da97f99701b
|
refs/heads/master
| 2023-04-04T12:11:15.843476
| 2022-02-26T17:58:37
| 2022-02-26T17:58:37
| 336,823,392
| 7
| 0
| null | 2023-03-31T15:04:57
| 2021-02-07T15:43:05
|
Python
|
UTF-8
|
Python
| false
| false
| 6,887
|
py
|
from unittest.mock import patch
from django.contrib.auth.models import User
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.test import Client, TestCase
from accounts.models import Profile
class AuthTestCase(TestCase):
def test_auth(self):
user = User.objects.create_user(
username='username',
email='e@mail.ru',
password='password',
)
c = Client()
response = c.post('/api/auth/', {}, content_type='application/json')
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(),
{
'username': ['This field is required.'],
'password': ['This field is required.'],
},
)
response = c.post(
'/api/auth/',
{
'username': 'username',
'password': 'wrong_password'
},
content_type='application/json',
)
self.assertEqual(response.status_code, 403)
self.assertEqual(response.json(), {'error': ['Bad credentials']})
response = c.post(
'/api/auth/',
{
'username': 'username',
'password': 'password'
},
content_type='application/json',
)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.json(),
{
'id': user.id,
'username': user.username,
}
)
def test_signup(self):
c = Client()
response = c.post('/api/signup/', {}, content_type='application/json')
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(),
{
'email': ['This field is required.'],
'username': ['This field is required.'],
'password': ['This field is required.'],
}
)
response = c.post(
'/api/signup/',
{
'username': 'username',
'password': 'password',
'email': 'e@mail.ru',
},
content_type='application/json',
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {'status': 'ok'})
response = c.post(
'/api/signup/',
{
'username': 'new_username',
'password': 'password',
'email': 'e@mail.ru',
},
content_type='application/json',
)
self.assertEqual(response.status_code, 422)
self.assertEqual(response.json(), {'email': ['Already exists']})
response = c.post(
'/api/signup/',
{
'username': 'username',
'password': 'password',
'email': 'new_e@mail.ru',
},
content_type='application/json',
)
self.assertEqual(response.status_code, 422)
self.assertEqual(response.json(), {'username': ['Already exists']})
def test_created_user_is_not_active(self):
client = Client()
client.post(
'/api/signup/',
{
'username': 'test_username',
'email': 'test@email.ua',
'password': 'test_password'
},
content_type='application/json'
)
created_user = User.objects.get(username='test_username')
self.assertFalse(created_user.is_active)
def test_create_user_with_similar_to_email_username(self):
client = Client()
response = client.post(
'/api/signup/',
{
'username': 'username@mail.ru',
'email': 'test@email.ua',
'password': 'test_password',
},
content_type='application/json'
)
self.assertEqual(response.status_code, 422)
self.assertEqual(
response.json(),
{'username': ['May contain only english letters, numbers, and "." or "_"']}
)
def test_create_user_with_non_ascii_username(self):
client = Client()
response = client.post(
'/api/signup/',
{
'username': 'username✡',
'email': 'test@email.ua',
'password': 'test_password',
},
content_type='application/json'
)
self.assertEqual(response.status_code, 422)
self.assertEqual(
response.json(),
{'username': ['May contain only english letters, numbers, and "." or "_"']}
)
def test_create_user_with_white_space_username(self):
client = Client()
response = client.post(
'/api/signup/',
{
'username': ' username',
'email': 'test@email.ua',
'password': 'test_password',
},
content_type='application/json'
)
self.assertEqual(response.status_code, 422)
self.assertEqual(
response.json(),
{'username': ['May contain only english letters, numbers, and "." or "_"']}
)
class DiscordAuthTestCase(TestCase):
    # TODO: test case with an existing username
@patch(
'accounts.views.DiscordAuthView.exchange_code',
return_value={'access_token': 'test_access_token'}
)
@patch(
'accounts.views.DiscordAuthView.get_current_user',
return_value={
'id': 'discord_id',
'username': 'discord_user',
'email': 'email@test.com'
}
)
    # stacked @patch decorators inject mocks bottom-up, so the
    # get_current_user mock is the first argument
    def test_signup_will_create_profile(self, get_current_user_mock, exchange_code_mock):
client = Client()
client.get('/api/auth/discord/redirect/?code=test_code')
profile = Profile.objects.last()
self.assertIsNotNone(profile)
self.assertEqual(profile.discord_id, 'discord_id') # type: ignore
class EmailConfirmationTestCase(TestCase):
def test_wrong_confirmation_token(self):
user = User(username='test_user', is_active=False)
user.set_password('test_password')
user.save()
client = Client()
response = client.get('/api/confirm_email/', {'token': 'token', 'username': user.username})
self.assertTrue(response.status_code, 403)
def test_user_confirmation(self):
user = User(username='test_user', is_active=False)
user.set_password('test_password')
user.save()
token = PasswordResetTokenGenerator().make_token(user=user)
client = Client()
client.get('/api/confirm_email/', {'token': token, 'username': user.username})
user.refresh_from_db()
self.assertTrue(user.is_active)
|
[
"noreply@github.com"
] |
donkey-engine.noreply@github.com
|
c4c5d04ede303aa6c745065061dfa0d24afdd47a
|
aa62a7825b0fd18c11de409d082bcd392939a335
|
/assignment1/misc.py
|
79bda343f8ae576f1a8b5995ea375b7f8c271448
|
[] |
no_license
|
davidrosenberg/nn-practice
|
fb438fe06f22c91308544ff9f60de22fc878a78a
|
cbe1ba3c3523e4bafc6c347145d7a59cae69e316
|
refs/heads/master
| 2016-08-11T12:49:39.181178
| 2015-12-24T20:23:25
| 2015-12-24T20:23:25
| 44,833,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,532
|
py
|
def iter_over_pairs(pairs):
"""
Return an iterator over pairs present in the 'pairs' input.
:type pairs: dictionary or iterable
:param pairs: The pairs to iterate upon. These may be stored either as
(key, value) items in a dictionary, or directly as pairs in any kind of
iterable structure
:rtype: iterable
:returns: an iterable yielding pairs
"""
if isinstance(pairs, dict):
        return pairs.items()
else:
return pairs
import matplotlib.pyplot as plt
import matplotlib
def bgd_visualization(X, y, theta_hist, loss_function, X_validation=None, y_validation=None):
"""
visulaize the loss in batch gradient descent
X-axis: iteration
y-axis: the loss function value
"""
#TODO
num_iter = theta_hist.shape[0]
loss_hist = np.log([loss_function(X, y, theta_hist[i]) for i in range(num_iter)])
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.title("Convergence plot")
plt.plot(range(num_iter), loss_hist)
plt.legend(["Training set"])
print "Training: %r" %loss_function(X, y, theta_hist[num_iter-1])
if (X_validation != None) and (y_validation != None):
loss_hist_val = np.log([loss_function(X_validation, y_validation, theta_hist[i]) for i in range(num_iter)])
print "Validation: %r" %loss_function(X_validation, y_validation, theta_hist[num_iter-1])
plt.plot(range(num_iter), loss_hist_val)
plt.legend(["Training set", "Validation set"])
plt.show()
#plt.savefig()
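# A hedged usage sketch with synthetic data; `square_loss` is a stand-in for
# the assignment's loss function and is not part of the original file.
if __name__ == "__main__":
    def square_loss(X, y, theta):
        return np.mean((X.dot(theta) - y) ** 2)
    X = np.random.randn(100, 3)
    theta_true = np.array([1.0, -2.0, 0.5])
    y = X.dot(theta_true)
    # Fake optimization history: theta moving toward theta_true each epoch
    theta_hist = np.array([theta_true * (1 - 0.9 ** i) for i in range(50)])
    bgd_visualization(X, y, theta_hist, square_loss)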
|
[
"david.davidr@gmail.com"
] |
david.davidr@gmail.com
|
2684c3785482beb266ff8dce7a4d9a7caefe2f3c
|
742a423cf08422e343023d2aed657fd969261411
|
/src/far/tests/__init__.py
|
d8668d4649905816f97704fa1bf552e6fb2c4c24
|
[
"MIT"
] |
permissive
|
ksheedlo/far
|
ad7a6f54db2576d09bee9048704fa2c7f3ff9194
|
9e7819fd7163140a166936f09abf409a1b221006
|
refs/heads/master
| 2016-09-05T16:56:17.385947
| 2015-06-26T23:23:49
| 2015-06-26T23:23:49
| 32,540,006
| 1
| 0
| null | 2015-06-26T00:23:55
| 2015-03-19T18:46:08
|
Python
|
UTF-8
|
Python
| false
| false
| 188
|
py
|
'''
far: tests module.
Meant for use with py.test.
Organize tests into files, each named xxx_test.py
Read more here: http://pytest.org/
Copyright 2015, Ken Sheedlo
Licensed under MIT
'''
|
[
"ken.sheedlo@rackspace.com"
] |
ken.sheedlo@rackspace.com
|
1101a26ce213f56cfaa0e15b3aa9c4f46694f888
|
a8e6569e0f91acc9278b273b6b83fc351a9b54e7
|
/src/pokewiki/migrations/0004_auto_20201020_2010.py
|
32c9bf83d8314191f1999589de73f947da8c8a95
|
[] |
no_license
|
Kyvin234/CS411poke
|
4106bfb0d014f7223e4c5db0e1fb74eb2f5d89e9
|
0dc8a0807aed615b6ba0da035fa2f9fbdfc018cc
|
refs/heads/master
| 2023-01-28T21:20:03.283191
| 2020-12-06T01:13:33
| 2020-12-06T01:13:33
| 302,526,800
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,201
|
py
|
# Generated by Django 3.1.1 on 2020-10-20 20:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pokewiki', '0003_auto_20201020_1943'),
]
operations = [
migrations.AlterField(
model_name='f_table',
name='atk',
field=models.IntegerField(default=0, null=True),
),
migrations.AlterField(
model_name='f_table',
name='df',
field=models.IntegerField(default=0, null=True),
),
migrations.AlterField(
model_name='f_table',
name='hp',
field=models.IntegerField(default=0, null=True),
),
migrations.AlterField(
model_name='f_table',
name='spatk',
field=models.IntegerField(default=0, null=True),
),
migrations.AlterField(
model_name='f_table',
name='spd',
field=models.IntegerField(default=0, null=True),
),
migrations.AlterField(
model_name='f_table',
name='spdf',
field=models.IntegerField(default=0, null=True),
),
]
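# To apply this migration (standard Django workflow, manage.py assumed):
#   python manage.py migrate pokewiki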
|
[
"yuey8@illinois.edu"
] |
yuey8@illinois.edu
|
5b34dbc66c35275a7e2e6aedcd1e816a1be639ec
|
073bba6af041505ccf6b23e88764bf42fbfea361
|
/model_SEENET_SAD.py
|
3e972299f1ead889afed69339fd638dc2ec18605
|
[
"MIT"
] |
permissive
|
carlinl/SEENET-SAD
|
1aaf4cdaa87b970a22e6e1d0a2da3c585b0c239e
|
cf2989d0e571391b9b0cdf81bd4a480490a92bbb
|
refs/heads/main
| 2023-02-19T00:29:00.304128
| 2021-01-26T08:56:38
| 2021-01-26T08:56:38
| 333,016,106
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36,279
|
py
|
# Enet pytorch code retrieved from https://github.com/davidtvs/PyTorch-ENet/blob/master/models/enet.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from utils.utils import mIoULoss, to_one_hot
from torchsummary import summary
class SELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y.expand_as(x)
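# Shape sketch (illustrative, not part of the original file): for (B, C, H, W)
# input, avg_pool squeezes to (B, C), the bottleneck FC produces per-channel
# weights in (0, 1), and the weights are broadcast back over H x W:
#   >>> se = SELayer(channel=32)
#   >>> se(torch.randn(2, 32, 16, 16)).shape
#   torch.Size([2, 32, 16, 16])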
class InitialBlock(nn.Module):
"""The initial block is composed of two branches:
1. a main branch which performs a regular convolution with stride 2;
2. an extension branch which performs max-pooling.
Doing both operations in parallel and concatenating their results
allows for efficient downsampling and expansion. The main branch
outputs 13 feature maps while the extension branch outputs 3, for a
total of 16 feature maps after concatenation.
Keyword arguments:
- in_channels (int): the number of input channels.
- out_channels (int): the number output channels.
- kernel_size (int, optional): the kernel size of the filters used in
the convolution layer. Default: 3.
- padding (int, optional): zero-padding added to both sides of the
input. Default: 0.
- bias (bool, optional): Adds a learnable bias to the output if
``True``. Default: False.
- relu (bool, optional): When ``True`` ReLU is used as the activation
function; otherwise, PReLU is used. Default: True.
"""
def __init__(self,
in_channels,
out_channels,
bias=False,
relu=True):
super().__init__()
if relu:
activation = nn.ReLU
else:
activation = nn.PReLU
# Main branch - As stated above the number of output channels for this
# branch is the total minus 3, since the remaining channels come from
# the extension branch
self.main_branch = nn.Conv2d(
in_channels,
out_channels - 3,
kernel_size=3,
stride=2,
padding=1,
bias=bias)
# Extension branch
self.ext_branch = nn.MaxPool2d(3, stride=2, padding=1)
# Initialize batch normalization to be used after concatenation
self.batch_norm = nn.BatchNorm2d(out_channels)
# PReLU layer to apply after concatenating the branches
self.out_activation = activation()
def forward(self, x):
main = self.main_branch(x)
ext = self.ext_branch(x)
# Concatenate branches
out = torch.cat((main, ext), 1)
# Apply batch normalization
out = self.batch_norm(out)
return self.out_activation(out)
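# Shape sketch (illustrative): the conv branch contributes out_channels - 3
# maps and the max-pool branch the 3 input maps, both at half resolution:
#   >>> block = InitialBlock(3, 16)
#   >>> block(torch.randn(1, 3, 64, 64)).shape
#   torch.Size([1, 16, 32, 32])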
class RegularBottleneck(nn.Module):
"""Regular bottlenecks are the main building block of ENet.
Main branch:
1. Shortcut connection.
Extension branch:
1. 1x1 convolution which decreases the number of channels by
``internal_ratio``, also called a projection;
2. regular, dilated or asymmetric convolution;
3. 1x1 convolution which increases the number of channels back to
``channels``, also called an expansion;
4. dropout as a regularizer.
Keyword arguments:
- channels (int): the number of input and output channels.
- internal_ratio (int, optional): a scale factor applied to
``channels`` used to compute the number of
channels after the projection. eg. given ``channels`` equal to 128 and
internal_ratio equal to 2 the number of channels after the projection
is 64. Default: 4.
- kernel_size (int, optional): the kernel size of the filters used in
the convolution layer described above in item 2 of the extension
branch. Default: 3.
- padding (int, optional): zero-padding added to both sides of the
input. Default: 0.
- dilation (int, optional): spacing between kernel elements for the
convolution described in item 2 of the extension branch. Default: 1.
asymmetric (bool, optional): flags if the convolution described in
item 2 of the extension branch is asymmetric or not. Default: False.
- dropout_prob (float, optional): probability of an element to be
zeroed. Default: 0 (no dropout).
- bias (bool, optional): Adds a learnable bias to the output if
``True``. Default: False.
- relu (bool, optional): When ``True`` ReLU is used as the activation
function; otherwise, PReLU is used. Default: True.
"""
def __init__(self,
channels,
internal_ratio=4,
kernel_size=3,
padding=0,
dilation=1,
asymmetric=False,
dropout_prob=0,
bias=False,
relu=True):
super().__init__()
        # Check if the internal_ratio parameter is within the expected range
# [1, channels]
if internal_ratio <= 1 or internal_ratio > channels:
raise RuntimeError("Value out of range. Expected value in the "
"interval [1, {0}], got internal_scale={1}."
.format(channels, internal_ratio))
internal_channels = channels // internal_ratio
if relu:
activation = nn.ReLU
else:
activation = nn.PReLU
# Main branch - shortcut connection
# Extension branch - 1x1 convolution, followed by a regular, dilated or
# asymmetric convolution, followed by another 1x1 convolution, and,
# finally, a regularizer (spatial dropout). Number of channels is constant.
# 1x1 projection convolution
self.ext_conv1 = nn.Sequential(
nn.Conv2d(
channels,
internal_channels,
kernel_size=1,
stride=1,
bias=bias), nn.BatchNorm2d(internal_channels), activation())
# If the convolution is asymmetric we split the main convolution in
# two. Eg. for a 5x5 asymmetric convolution we have two convolution:
# the first is 5x1 and the second is 1x5.
if asymmetric:
self.ext_conv2 = nn.Sequential(
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=(kernel_size, 1),
stride=1,
padding=(padding, 0),
dilation=dilation,
bias=bias), nn.BatchNorm2d(internal_channels), activation(),
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=(1, kernel_size),
stride=1,
padding=(0, padding),
dilation=dilation,
bias=bias), nn.BatchNorm2d(internal_channels), activation())
else:
self.ext_conv2 = nn.Sequential(
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=kernel_size,
stride=1,
padding=padding,
dilation=dilation,
bias=bias), nn.BatchNorm2d(internal_channels), activation())
# 1x1 expansion convolution
self.ext_conv3 = nn.Sequential(
nn.Conv2d(
internal_channels,
channels,
kernel_size=1,
stride=1,
bias=bias), nn.BatchNorm2d(channels), activation())
self.selayer = SELayer(channels)
self.ext_regul = nn.Dropout2d(p=dropout_prob)
# PReLU layer to apply after adding the branches
self.out_activation = activation()
def forward(self, x):
# Main branch shortcut
main = x
# Extension branch
ext = self.ext_conv1(x)
ext = self.ext_conv2(ext)
ext = self.ext_conv3(ext)
ext = self.ext_regul(ext)
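        # Note: SELayer returns the gated features (x * gate), not the raw
        # attention map, so the "ext * ext1" product below applies the channel
        # attention on top of the (twice-regularized) extension features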
ext1 = self.selayer(ext)
ext = self.ext_regul(ext)
# Add main and extension branches
out = main + ext*ext1
return self.out_activation(out)
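# Usage sketch (illustrative): a 5x5 asymmetric bottleneck preserves both the
# channel count and the spatial size:
#   >>> blk = RegularBottleneck(128, kernel_size=5, padding=2, asymmetric=True)
#   >>> blk(torch.randn(1, 128, 32, 32)).shape
#   torch.Size([1, 128, 32, 32])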
class DownsamplingBottleneck(nn.Module):
"""Downsampling bottlenecks further downsample the feature map size.
Main branch:
1. max pooling with stride 2; indices are saved to be used for
unpooling later.
Extension branch:
1. 2x2 convolution with stride 2 that decreases the number of channels
by ``internal_ratio``, also called a projection;
2. regular convolution (by default, 3x3);
3. 1x1 convolution which increases the number of channels to
``out_channels``, also called an expansion;
4. dropout as a regularizer.
Keyword arguments:
- in_channels (int): the number of input channels.
- out_channels (int): the number of output channels.
- internal_ratio (int, optional): a scale factor applied to ``channels``
used to compute the number of channels after the projection. eg. given
``channels`` equal to 128 and internal_ratio equal to 2 the number of
channels after the projection is 64. Default: 4.
- return_indices (bool, optional): if ``True``, will return the max
indices along with the outputs. Useful when unpooling later.
- dropout_prob (float, optional): probability of an element to be
zeroed. Default: 0 (no dropout).
- bias (bool, optional): Adds a learnable bias to the output if
``True``. Default: False.
- relu (bool, optional): When ``True`` ReLU is used as the activation
function; otherwise, PReLU is used. Default: True.
"""
def __init__(self,
in_channels,
out_channels,
internal_ratio=4,
return_indices=False,
dropout_prob=0,
bias=False,
relu=True):
super().__init__()
# Store parameters that are needed later
self.return_indices = return_indices
        # Check if the internal_ratio parameter is within the expected range
# [1, channels]
if internal_ratio <= 1 or internal_ratio > in_channels:
raise RuntimeError("Value out of range. Expected value in the "
"interval [1, {0}], got internal_scale={1}. "
.format(in_channels, internal_ratio))
internal_channels = in_channels // internal_ratio
if relu:
activation = nn.ReLU
else:
activation = nn.PReLU
# Main branch - max pooling followed by feature map (channels) padding
self.main_max1 = nn.MaxPool2d(
2,
stride=2,
return_indices=return_indices)
# Extension branch - 2x2 convolution, followed by a regular, dilated or
# asymmetric convolution, followed by another 1x1 convolution. Number
# of channels is doubled.
# 2x2 projection convolution with stride 2
self.ext_conv1 = nn.Sequential(
nn.Conv2d(
in_channels,
internal_channels,
kernel_size=2,
stride=2,
bias=bias), nn.BatchNorm2d(internal_channels), activation())
# Convolution
self.ext_conv2 = nn.Sequential(
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=3,
stride=1,
padding=1,
bias=bias), nn.BatchNorm2d(internal_channels), activation())
# 1x1 expansion convolution
self.ext_conv3 = nn.Sequential(
nn.Conv2d(
internal_channels,
out_channels,
kernel_size=1,
stride=1,
bias=bias), nn.BatchNorm2d(out_channels), activation())
self.ext_regul = nn.Dropout2d(p=dropout_prob)
# PReLU layer to apply after concatenating the branches
self.out_activation = activation()
def forward(self, x):
# Main branch shortcut
        if self.return_indices:
            main, max_indices = self.main_max1(x)
        else:
            main = self.main_max1(x)
            max_indices = None  # keeps the (out, max_indices) return consistent
# Extension branch
ext = self.ext_conv1(x)
ext = self.ext_conv2(ext)
ext = self.ext_conv3(ext)
ext = self.ext_regul(ext)
# Main branch channel padding
n, ch_ext, h, w = ext.size()
ch_main = main.size()[1]
padding = torch.zeros(n, ch_ext - ch_main, h, w)
# Before concatenating, check if main is on the CPU or GPU and
# convert padding accordingly
if main.is_cuda:
padding = padding.cuda()
# Concatenate
main = torch.cat((main, padding), 1)
# Add main and extension branches
out = main + ext
return self.out_activation(out), max_indices
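# Usage sketch (illustrative): with return_indices=True the block halves the
# spatial size, pads the pooled channels with zeros up to out_channels, and
# returns the max-pool indices for later unpooling:
#   >>> down = DownsamplingBottleneck(16, 64, return_indices=True)
#   >>> out, idx = down(torch.randn(1, 16, 64, 64))
#   >>> out.shape
#   torch.Size([1, 64, 32, 32])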
class UpsamplingBottleneck(nn.Module):
"""The upsampling bottlenecks upsample the feature map resolution using max
pooling indices stored from the corresponding downsampling bottleneck.
Main branch:
1. 1x1 convolution with stride 1 that decreases the number of channels by
``internal_ratio``, also called a projection;
2. max unpool layer using the max pool indices from the corresponding
downsampling max pool layer.
Extension branch:
1. 1x1 convolution with stride 1 that decreases the number of channels by
``internal_ratio``, also called a projection;
2. transposed convolution (by default, 3x3);
3. 1x1 convolution which increases the number of channels to
``out_channels``, also called an expansion;
4. dropout as a regularizer.
Keyword arguments:
- in_channels (int): the number of input channels.
- out_channels (int): the number of output channels.
- internal_ratio (int, optional): a scale factor applied to ``in_channels``
used to compute the number of channels after the projection. eg. given
``in_channels`` equal to 128 and ``internal_ratio`` equal to 2 the number
of channels after the projection is 64. Default: 4.
- dropout_prob (float, optional): probability of an element to be zeroed.
Default: 0 (no dropout).
- bias (bool, optional): Adds a learnable bias to the output if ``True``.
Default: False.
- relu (bool, optional): When ``True`` ReLU is used as the activation
function; otherwise, PReLU is used. Default: True.
"""
def __init__(self,
in_channels,
out_channels,
internal_ratio=4,
dropout_prob=0,
bias=False,
relu=True):
super().__init__()
        # Check if the internal_ratio parameter is within the expected range
# [1, channels]
if internal_ratio <= 1 or internal_ratio > in_channels:
raise RuntimeError("Value out of range. Expected value in the "
"interval [1, {0}], got internal_scale={1}. "
.format(in_channels, internal_ratio))
internal_channels = in_channels // internal_ratio
if relu:
activation = nn.ReLU
else:
activation = nn.PReLU
# Main branch - max pooling followed by feature map (channels) padding
self.main_conv1 = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(out_channels))
# Remember that the stride is the same as the kernel_size, just like
# the max pooling layers
self.main_unpool1 = nn.MaxUnpool2d(kernel_size=2)
# Extension branch - 1x1 convolution, followed by a regular, dilated or
# asymmetric convolution, followed by another 1x1 convolution. Number
# of channels is doubled.
# 1x1 projection convolution with stride 1
self.ext_conv1 = nn.Sequential(
nn.Conv2d(
in_channels, internal_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(internal_channels), activation())
# Transposed convolution
self.ext_tconv1 = nn.ConvTranspose2d(
internal_channels,
internal_channels,
kernel_size=2,
stride=2,
bias=bias)
self.ext_tconv1_bnorm = nn.BatchNorm2d(internal_channels)
self.ext_tconv1_activation = activation()
# 1x1 expansion convolution
self.ext_conv2 = nn.Sequential(
nn.Conv2d(
internal_channels, out_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(out_channels), activation())
self.ext_regul = nn.Dropout2d(p=dropout_prob)
# PReLU layer to apply after concatenating the branches
self.out_activation = activation()
def forward(self, x, max_indices, output_size):
# Main branch shortcut
main = self.main_conv1(x)
main = self.main_unpool1(
main, max_indices, output_size=output_size)
# Extension branch
ext = self.ext_conv1(x)
ext = self.ext_tconv1(ext, output_size=output_size)
ext = self.ext_tconv1_bnorm(ext)
ext = self.ext_tconv1_activation(ext)
ext = self.ext_conv2(ext)
ext = self.ext_regul(ext)
# Add main and extension branches
out = main + ext
return self.out_activation(out)
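# Usage sketch (illustrative): the indices saved by a DownsamplingBottleneck
# drive the MaxUnpool2d here, restoring the pre-downsampling resolution:
#   >>> x = torch.randn(1, 16, 64, 64)
#   >>> down = DownsamplingBottleneck(16, 64, return_indices=True)
#   >>> y, idx = down(x)
#   >>> up = UpsamplingBottleneck(64, 16)
#   >>> up(y, idx, output_size=x.size()).shape
#   torch.Size([1, 16, 64, 64])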
class ENet(nn.Module):
"""Generate the ENet model.
Keyword arguments:
- num_classes (int): the number of classes to segment.
- encoder_relu (bool, optional): When ``True`` ReLU is used as the
activation function in the encoder blocks/layers; otherwise, PReLU
is used. Default: False.
- decoder_relu (bool, optional): When ``True`` ReLU is used as the
activation function in the decoder blocks/layers; otherwise, PReLU
is used. Default: True.
"""
def __init__(self, num_classes, encoder_relu=False, decoder_relu=True):
super().__init__()
self.initial_block = InitialBlock(3, 16, relu=encoder_relu)
# Stage 1 - Encoder
self.downsample1_0 = DownsamplingBottleneck(
16,
64,
return_indices=True,
dropout_prob=0.01,
relu=encoder_relu)
self.regular1_1 = RegularBottleneck(
64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_2 = RegularBottleneck(
64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_3 = RegularBottleneck(
64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_4 = RegularBottleneck(
64, padding=1, dropout_prob=0.01, relu=encoder_relu)
# Stage 2 - Encoder
self.downsample2_0 = DownsamplingBottleneck(
64,
128,
return_indices=True,
dropout_prob=0.1,
relu=encoder_relu)
self.regular2_1 = RegularBottleneck(
128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated2_2 = RegularBottleneck(
128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric2_3 = RegularBottleneck(
128,
kernel_size=5,
padding=2,
asymmetric=True,
dropout_prob=0.1,
relu=encoder_relu)
self.dilated2_4 = RegularBottleneck(
128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)
self.regular2_5 = RegularBottleneck(
128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated2_6 = RegularBottleneck(
128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric2_7 = RegularBottleneck(
128,
kernel_size=5,
asymmetric=True,
padding=2,
dropout_prob=0.1,
relu=encoder_relu)
self.dilated2_8 = RegularBottleneck(
128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)
# Stage 3 - Encoder
self.regular3_0 = RegularBottleneck(
128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated3_1 = RegularBottleneck(
128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric3_2 = RegularBottleneck(
128,
kernel_size=5,
padding=2,
asymmetric=True,
dropout_prob=0.1,
relu=encoder_relu)
self.dilated3_3 = RegularBottleneck(
128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)
self.regular3_4 = RegularBottleneck(
128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated3_5 = RegularBottleneck(
128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric3_6 = RegularBottleneck(
128,
kernel_size=5,
asymmetric=True,
padding=2,
dropout_prob=0.1,
relu=encoder_relu)
self.dilated3_7 = RegularBottleneck(
128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)
# Stage 4 - Decoder
self.upsample4_0 = UpsamplingBottleneck(
128, 64, dropout_prob=0.1, relu=decoder_relu)
self.regular4_1 = RegularBottleneck(
64, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.regular4_2 = RegularBottleneck(
64, padding=1, dropout_prob=0.1, relu=decoder_relu)
# Stage 5 - Decoder
self.upsample5_0 = UpsamplingBottleneck(
64, 16, dropout_prob=0.1, relu=decoder_relu)
self.regular5_1 = RegularBottleneck(
16, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.transposed_conv = nn.ConvTranspose2d(
16,
num_classes,
kernel_size=3,
stride=2,
padding=1,
bias=False)
def forward(self, x):
# Initial block
input_size = x.size()
x = self.initial_block(x)
# Stage 1 - Encoder
stage1_input_size = x.size()
x, max_indices1_0 = self.downsample1_0(x)
x = self.regular1_1(x)
x = self.regular1_2(x)
x = self.regular1_3(x)
x = self.regular1_4(x)
# Stage 2 - Encoder
stage2_input_size = x.size()
x, max_indices2_0 = self.downsample2_0(x)
x = self.regular2_1(x)
x = self.dilated2_2(x)
x = self.asymmetric2_3(x)
x = self.dilated2_4(x)
x = self.regular2_5(x)
x = self.dilated2_6(x)
x = self.asymmetric2_7(x)
x = self.dilated2_8(x)
# Stage 3 - Encoder
x = self.regular3_0(x)
x = self.dilated3_1(x)
x = self.asymmetric3_2(x)
x = self.dilated3_3(x)
x = self.regular3_4(x)
x = self.dilated3_5(x)
x = self.asymmetric3_6(x)
x = self.dilated3_7(x)
# Stage 4 - Decoder
x = self.upsample4_0(x, max_indices2_0, output_size=stage2_input_size)
x = self.regular4_1(x)
x = self.regular4_2(x)
# Stage 5 - Decoder
x = self.upsample5_0(x, max_indices1_0, output_size=stage1_input_size)
x = self.regular5_1(x)
x = self.transposed_conv(x, output_size=input_size)
return x
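# Usage sketch (illustrative): plain ENet maps an image batch to per-pixel
# class scores at the input resolution:
#   >>> net = ENet(num_classes=5)
#   >>> net(torch.randn(1, 3, 288, 800)).shape
#   torch.Size([1, 5, 288, 800])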
class SpatialSoftmax(nn.Module):
def __init__(self, temperature=1, device='cpu'):
super(SpatialSoftmax, self).__init__()
if temperature:
self.temperature = Parameter(torch.ones(1) * temperature).to(device)
else:
self.temperature = 1.
def forward(self, feature):
feature = feature.view(feature.shape[0], -1, feature.shape[1] * feature.shape[2])
softmax_attention = F.softmax(feature / self.temperature, dim=-1)
return softmax_attention
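# Usage sketch (illustrative): the softmax runs over the flattened spatial
# dimensions, so each returned attention map sums to 1:
#   >>> sps = SpatialSoftmax()
#   >>> att = sps(torch.randn(4, 32, 32))   # (B, H, W) attention input
#   >>> att.shape, float(att[0].sum())
#   (torch.Size([4, 1, 1024]), 1.0)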
class SEENet_SAD(nn.Module):
"""Generate the ENet model.
Keyword arguments:
- num_classes (int): the number of classes to segment.
- encoder_relu (bool, optional): When ``True`` ReLU is used as the
activation function in the encoder blocks/layers; otherwise, PReLU
is used. Default: False.
- decoder_relu (bool, optional): When ``True`` ReLU is used as the
activation function in the decoder blocks/layers; otherwise, PReLU
is used. Default: True.
    - sad (bool, optional): When ``True``, self attention distillation (SAD)
      losses are added to the model; when ``False``, they are omitted.
      Default: False.
"""
def __init__(self, input_size, pretrained=True, sad=False, encoder_relu=False, decoder_relu=True, weight_share=True):
super().__init__()
# Init parameter
input_w, input_h = input_size
self.fc_input_feature = 5 * int(input_w / 16) * int(input_h / 16)
self.num_classes = 5
self.pretrained = pretrained
self.scale_background = 0.4
# Loss scale factor for ENet w/o SAD
self.scale_seg = 1.0
self.scale_exist = 0.1
# Loss scale factor for ENet w SAD
self.scale_sad_seg = 1.0
self.scale_sad_iou = 0.1
self.scale_sad_exist = 0.1
self.scale_sad_distill = 0.1
# Loss function
self.ce_loss = nn.CrossEntropyLoss(weight=torch.tensor([self.scale_background, 1, 1, 1, 1]))
self.bce_loss = nn.BCELoss()
self.iou_loss = mIoULoss(n_classes=4)
# Stage 0 - Initial block
self.initial_block = InitialBlock(3, 16, relu=encoder_relu)
self.sad = sad
# Stage 1 - Encoder (E1)
self.downsample1_0 = DownsamplingBottleneck(16, 64, return_indices=True, dropout_prob=0.01, relu=encoder_relu)
self.regular1_1 = RegularBottleneck(64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_2 = RegularBottleneck(64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_3 = RegularBottleneck(64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_4 = RegularBottleneck(64, padding=1, dropout_prob=0.01, relu=encoder_relu)
# Shared Encoder (E2~E4)
# Stage 2 - Encoder (E2)
self.downsample2_0 = DownsamplingBottleneck(64, 128, return_indices=True, dropout_prob=0.1, relu=encoder_relu)
self.regular2_1 = RegularBottleneck(128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated2_2 = RegularBottleneck(128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric2_3 = RegularBottleneck(128, kernel_size=5, padding=2, asymmetric=True, dropout_prob=0.1, relu=encoder_relu)
self.dilated2_4 = RegularBottleneck(128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)
self.regular2_5 = RegularBottleneck(128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated2_6 = RegularBottleneck(128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric2_7 = RegularBottleneck(128, kernel_size=5, asymmetric=True, padding=2, dropout_prob=0.1, relu=encoder_relu)
self.dilated2_8 = RegularBottleneck(128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)
# Stage 3 - Encoder (E3)
if weight_share:
self.regular3_0 = self.regular2_1
self.dilated3_1 = self.dilated2_2
self.asymmetric3_2 = self.asymmetric2_3
self.dilated3_3 = self.dilated2_4
self.regular3_4 = self.regular2_5
self.dilated3_5 = self.dilated2_6
self.asymmetric3_6 = self.asymmetric2_7
self.dilated3_7 = self.dilated2_8
else:
self.regular3_0 = RegularBottleneck(128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated3_1 = RegularBottleneck(128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric3_2 = RegularBottleneck(128, kernel_size=5, padding=2, asymmetric=True, dropout_prob=0.1, relu=encoder_relu)
self.dilated3_3 = RegularBottleneck(128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)
self.regular3_4 = RegularBottleneck(128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated3_5 = RegularBottleneck(128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric3_6 = RegularBottleneck(128, kernel_size=5, asymmetric=True, padding=2, dropout_prob=0.1, relu=encoder_relu)
self.dilated3_7 = RegularBottleneck(128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)
# Stage 4 - Encoder (E4)
if weight_share:
self.regular4_0 = self.regular2_1
self.dilated4_1 = self.dilated2_2
self.asymmetric4_2 = self.asymmetric2_3
self.dilated4_3 = self.dilated2_4
self.regular4_4 = self.regular2_5
self.dilated4_5 = self.dilated2_6
self.asymmetric4_6 = self.asymmetric2_7
self.dilated4_7 = self.dilated2_8
else:
self.regular4_0 = RegularBottleneck(128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated4_1 = RegularBottleneck(128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric4_2 = RegularBottleneck(128, kernel_size=5, padding=2, asymmetric=True, dropout_prob=0.1, relu=encoder_relu)
self.dilated4_3 = RegularBottleneck(128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)
self.regular4_4 = RegularBottleneck(128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated4_5 = RegularBottleneck(128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric4_6 = RegularBottleneck(128, kernel_size=5, asymmetric=True, padding=2, dropout_prob=0.1, relu=encoder_relu)
self.dilated4_7 = RegularBottleneck(128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)
# Stage 5 - Decoder (D1)
# self.upsample4_0 = UpsamplingBottleneck(128, 64, dropout_prob=0.1, relu=decoder_relu)
self.upsample4_0 = UpsamplingBottleneck(256, 64, dropout_prob=0.1, relu=decoder_relu)
self.regular4_1 = RegularBottleneck(64, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.regular4_2 = RegularBottleneck(64, padding=1, dropout_prob=0.1, relu=decoder_relu)
# Stage 6 - Decoder (D2)
self.upsample5_0 = UpsamplingBottleneck(64, 16, dropout_prob=0.1, relu=decoder_relu)
self.regular5_1 = RegularBottleneck(16, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.transposed_conv = nn.ConvTranspose2d(16, self.num_classes, kernel_size=3, stride=2, padding=1, bias=False)
# AT_GEN
if self.sad:
self.at_gen_upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
self.at_gen_l2_loss = nn.MSELoss(reduction='mean')
# Lane exist (P1)
self.layer3 = nn.Sequential(
nn.Conv2d(128, 5, 1),
nn.Softmax(dim=1),
nn.AvgPool2d(2, 2),
)
self.fc = nn.Sequential(
nn.Linear(self.fc_input_feature, 128),
nn.ReLU(),
nn.Linear(128, 4),
nn.Sigmoid()
)
def at_gen(self, x1, x2):
"""
x1 - previous encoder step feature map
x2 - current encoder step feature map
"""
# G^2_sum
sps = SpatialSoftmax(device=x1.device)
if x1.size() != x2.size():
x1 = torch.sum(x1 * x1, dim=1)
x1 = sps(x1)
x2 = torch.sum(x2 * x2, dim=1, keepdim=True)
x2 = torch.squeeze(self.at_gen_upsample(x2), dim=1)
x2 = sps(x2)
else:
x1 = torch.sum(x1 * x1, dim=1)
x1 = sps(x1)
x2 = torch.sum(x2 * x2, dim=1)
x2 = sps(x2)
loss = self.at_gen_l2_loss(x1, x2)
return loss
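    # Sketch (illustrative): inside forward, successive encoder outputs are
    # compared pairwise, e.g. loss_2 = self.at_gen(x_1, x_2); the scalar L2
    # terms are later summed into the distillation loss.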
def forward(self, img, seg_gt=None, exist_gt=None, sad_loss=False):
# Stage 0 - Initial block
input_size = img.size()
x_0 = self.initial_block(img)
# AT-GEN after each E2, E3, E4
# Stage 1 - Encoder (E1)
stage1_input_size = x_0.size()
x, max_indices1_0 = self.downsample1_0(x_0)
x = self.regular1_1(x)
x = self.regular1_2(x)
x = self.regular1_3(x)
x_1 = self.regular1_4(x)
# if self.sad:
# loss_1 = self.at_gen(x_0, x_1)
# Stage 2 - Encoder (E2)
stage2_input_size = x_1.size()
x, max_indices2_0 = self.downsample2_0(x_1)
x = self.regular2_1(x)
x = self.dilated2_2(x)
x = self.asymmetric2_3(x)
x = self.dilated2_4(x)
x = self.regular2_5(x)
x = self.dilated2_6(x)
x = self.asymmetric2_7(x)
x_2 = self.dilated2_8(x)
if self.sad:
loss_2 = self.at_gen(x_1, x_2)
# Stage 3 - Encoder (E3)
x = self.regular3_0(x_2)
x = self.dilated3_1(x)
x = self.asymmetric3_2(x)
x = self.dilated3_3(x)
x = self.regular3_4(x)
x = self.dilated3_5(x)
x = self.asymmetric3_6(x)
x_3 = self.dilated3_7(x)
if self.sad:
loss_3 = self.at_gen(x_2, x_3)
# Stage 4 - Encoder (E4)
        x = self.regular4_0(x_3)
        x = self.dilated4_1(x)
        x = self.asymmetric4_2(x)
        x = self.dilated4_3(x)
        x = self.regular4_4(x)
        x = self.dilated4_5(x)
        x = self.asymmetric4_6(x)
        x_4 = self.dilated4_7(x)
if self.sad:
loss_4 = self.at_gen(x_3, x_4)
# Concatenate E3, E4
x_34 = torch.cat((x_3, x_4), dim=1)
# Stage 4 - Decoder (D1)
x = self.upsample4_0(x_34, max_indices2_0, output_size=stage2_input_size)
x = self.regular4_1(x)
x = self.regular4_2(x)
# Stage 5 - Decoder (D2)
x = self.upsample5_0(x, max_indices1_0, output_size=stage1_input_size)
x = self.regular5_1(x)
seg_pred = self.transposed_conv(x, output_size=input_size)
# lane exist
y = self.layer3(x_4)
y = y.view(-1, self.fc_input_feature)
exist_pred = self.fc(y)
# loss calculation
if seg_gt is not None and exist_gt is not None:
# L = L_seg + a * L_iou + b * L_exist + c * L_distill
if self.sad:
loss_seg = self.ce_loss(seg_pred, seg_gt)
seg_gt_onehot = to_one_hot(seg_gt, 5)
loss_iou = self.iou_loss(seg_pred[:, 1:self.num_classes, :, :], seg_gt_onehot[:, 1:self.num_classes, :, :])
loss_exist = self.bce_loss(exist_pred, exist_gt)
loss_distill = loss_2 + loss_3 + loss_4
loss = loss_seg * self.scale_sad_seg + loss_iou * self.scale_sad_iou + loss_exist * self.scale_sad_exist
# Add SAD loss after 40K episodes
if sad_loss:
loss += loss_distill * self.scale_sad_distill
else:
loss_seg = self.ce_loss(seg_pred, seg_gt)
loss_exist = self.bce_loss(exist_pred, exist_gt)
loss = loss_seg * self.scale_seg + loss_exist * self.scale_exist
else:
loss_seg = torch.tensor(0, dtype=img.dtype, device=img.device)
loss_exist = torch.tensor(0, dtype=img.dtype, device=img.device)
loss = torch.tensor(0, dtype=img.dtype, device=img.device)
return seg_pred, exist_pred, loss_seg, loss_exist, loss
if __name__ == '__main__':
tensor = torch.ones((8, 3, 288, 800))
seg_gt = torch.zeros((8, 288, 800)).long()
exist_gt = torch.ones((8, 4))
enet_sad = SEENet_SAD((800, 288), sad=False)
enet_sad.train(mode=True)
result = enet_sad(tensor, seg_gt, exist_gt, sad_loss=True)
summary(enet_sad, input_size=(3, 288, 800))
|
[
"noreply@github.com"
] |
carlinl.noreply@github.com
|
fe07f77c9691f0a4bea7feb8ad64497a3866cbb7
|
1362b977fd45dcdc773c836e9895701a20152bba
|
/multilayer/1d/setplot_oscillatory.py
|
a987c6bc8d824cbc5c8b74a5b13651beb9185be3
|
[] |
no_license
|
nthakkar/apps
|
4cceacf85e5bdb505f7593fcb7e5c5f4bc5bc371
|
f195821e4c8d153a93062af3ecb0c787ed51207f
|
refs/heads/master
| 2021-01-18T11:59:18.972898
| 2013-08-13T00:28:33
| 2013-08-13T00:28:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,785
|
py
|
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
import os
import numpy as np
# Plot customization
import matplotlib
import matplotlib.pyplot as plt
# Markers and line widths
matplotlib.rcParams['lines.linewidth'] = 2.0
matplotlib.rcParams['lines.markersize'] = 8
# Font Sizes
matplotlib.rcParams['font.size'] = 16
matplotlib.rcParams['axes.labelsize'] = 15
matplotlib.rcParams['legend.fontsize'] = 12
matplotlib.rcParams['xtick.labelsize'] = 12
matplotlib.rcParams['ytick.labelsize'] = 12
# DPI of output images
matplotlib.rcParams['savefig.dpi'] = 100
# Need to do this after the above
import matplotlib.pyplot as mpl
from clawpack.pyclaw.solution import Solution
from clawpack.visclaw import geoplot, colormaps
from clawpack.clawutil.oldclawdata import Data
from multilayer.aux import bathy_index,kappa_index,wind_index
import multilayer.plot as plot
#--------------------------
def setplot(plotdata,xlower,xupper,rho,dry_tolerance):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
Output: a modified version of plotdata.
"""
# Load bathymetry
b = Solution(0,path=plotdata.outdir,read_aux=True).state.aux[bathy_index,:]
def hurricane_afterframe(current_data):
# Draw line for eye of hurricane
pass
def bathy(cd):
return b
def kappa(cd):
return Solution(cd.frameno,path=plotdata.outdir,read_aux=True).state.aux[kappa_index,:]
def wind(cd):
return Solution(cd.frameno,path=plotdata.outdir,read_aux=True).state.aux[wind_index,:]
def h_1(cd):
return cd.q[0,:] / rho[0]
def h_2(cd):
return cd.q[2,:] / rho[1]
def eta_2(cd):
return h_2(cd) + bathy(cd)
def eta_1(cd):
return h_1(cd) + eta_2(cd)
def u_1(cd):
index = np.nonzero(h_1(cd) > dry_tolerance)
u_1 = np.zeros(h_1(cd).shape)
u_1[index] = cd.q[1,index] / cd.q[0,index]
return u_1
def u_2(cd):
index = np.nonzero(h_2(cd) > dry_tolerance)
u_2 = np.zeros(h_2(cd).shape)
u_2[index] = cd.q[3,index] / cd.q[2,index]
return u_2
plotdata.clearfigures() # clear any old figures,axes,items data
xlimits = [xlower,xupper]
ylimits_velocities = (-0.15,0.15)
ylimits_depth = [-1.0,0.1]
ylimits_wind = [-5,5]
ylimits_kappa = [0.0,1.2]
# ========================================================================
# Depth, Momenta, and Kappa
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='Depth, Momenta, and Kappa',figno=14)
def twin_axes(cd):
fig = mpl.gcf()
fig.clf()
# Get x coordinate values
x = cd.patch.dimensions[0].centers
# Draw fill plot
depth_axes = fig.add_subplot(211)
vel_axes = fig.add_subplot(212,sharex=depth_axes) # the velocity scale
kappa_axes = vel_axes.twinx()
# Bottom layer
depth_axes.fill_between(x,bathy(cd),eta_1(cd),color=plot.bottom_color)
# Top Layer
depth_axes.fill_between(x,eta_1(cd),eta_2(cd),color=plot.top_color)
# Plot bathy
depth_axes.plot(x,bathy(cd),'k',linestyle=plot.bathy_linestyle)
# Plot internal layer
depth_axes.plot(x,eta_2(cd),'k',linestyle=plot.internal_linestyle)
# Plot surface
depth_axes.plot(x,eta_1(cd),'k',linestyle=plot.surface_linestyle)
# Remove ticks from top plot
locs,labels = mpl.xticks()
        labels = ['' for i in range(len(locs))]
mpl.xticks(locs,labels)
depth_axes.set_title('Oscillatory Wind at t = %3.2f' % cd.t)
depth_axes.set_xlim(xlimits)
depth_axes.set_ylim(ylimits_depth)
depth_axes.set_ylabel('Depth (m)')
# Draw velocity and kappa plot
# Bottom layer velocity
bottom_layer = vel_axes.plot(cd.x,u_2(cd),'k-',label="Bottom Layer Velocity")
# Top Layer velocity
        top_layer = vel_axes.plot(cd.x,u_1(cd),'b--',label="Top Layer Velocity")
# Kappa
kappa_line = kappa_axes.plot(cd.x,kappa(cd),'r-.')
kappa_axes.plot(cd.x,np.ones(cd.x.shape),'r:')
plot.add_legend(vel_axes,'Kappa',color='r',linestyle='-.',location=4)
vel_axes.set_xlim(xlimits)
vel_axes.set_ylim(ylimits_velocities)
kappa_axes.set_ylim(ylimits_kappa)
vel_axes.set_title('')
# vel_axes.set_title('Layer Velocities and Kappa')
vel_axes.set_ylabel('Velocities (m/s)')
kappa_axes.set_ylabel('Kappa')
# This does not work on all versions of matplotlib
try:
mpl.subplots_adjust(hspace=0.1)
except:
pass
plotaxes = plotfigure.new_plotaxes()
plotaxes.afteraxes = twin_axes
# ========================================================================
# Plot Wind Velocity
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='Wind Field',figno=2)
plotfigure.show = True
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = "Wind Velocity"
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits_wind
def wind_afteraxes(cd):
plt.xlabel("x (m)")
plt.ylabel("Velocity (m/s)")
plotaxes.afteraxes = wind_afteraxes
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = wind
plotitem.color = 'r'
plotitem.show = True
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
return plotdata
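# Hedged usage sketch: the Clawpack plotting driver is expected to import this
# module and call setplot with its ClawPlotData instance, e.g.
#   plotdata = setplot(plotdata, xlower, xupper, rho, dry_tolerance)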
|
[
"kyle.mandli@gmail.com"
] |
kyle.mandli@gmail.com
|
0986397d64d691c050782ebd15070a81944e55bc
|
f16015e0beeb1cfdb855d1cf90ce136fe21d2f32
|
/sparta_algorithm/week_2/03_add_node_linked_list.py
|
6c01a49ba0803add7c4d34d02a8e5896aa3a99d5
|
[] |
no_license
|
linuxlight/sparta_projects
|
ca432ac8df5d4174d0c6a8f9609f82e121b24a9f
|
d553a4473f54a70e33f6f7d820765a550afc5e60
|
refs/heads/master
| 2023-03-26T04:34:13.795524
| 2021-03-25T11:04:43
| 2021-03-25T11:04:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,219
|
py
|
class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self, value):
self.head = Node(value)
def append(self, value):
cur = self.head
while cur.next is not None:
cur = cur.next
cur.next = Node(value)
def print_all(self):
cur = self.head
while cur is not None:
print(cur.data)
cur = cur.next
def get_node(self, index):
node = self.head
count = 0
while count < index:
node = node.next
count += 1
return node
def add_node(self, index, value):
new_node = Node(value) # [6]
if index == 0:
new_node.next = self.head # [6] -> [5] -> -> ...
self.head = new_node # head -> [6]
return
node = self.get_node(index - 1)
next_node = node.next
node.next = new_node
new_node.next = next_node
linked_list = LinkedList(5)
linked_list.append(12)
linked_list.append(8)
# print(linked_list.get_node(1).data) # -> should return the node holding 5!
linked_list.add_node(0, 6)
linked_list.print_all()
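# Inserting in the middle works the same way (illustrative):
#   linked_list.add_node(2, 7)   # 6 -> 5 -> 7 -> 12 -> 8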
|
[
"lnxlht4j@gmail.com"
] |
lnxlht4j@gmail.com
|
0bbbee650ea275eee04941656d20cd732df4c30b
|
83da842d99587e99f217828b4f04a492b8ec109c
|
/meinheld/__init__.py
|
21b3af519902aa609e6166bab8540863c191e88a
|
[
"BSD-3-Clause"
] |
permissive
|
vuuvv/meinheld
|
5878e75ff0ac9ed782ef0fa506563347630af86d
|
b3d64688c07cc60e30d522d8c378f8c40a49ba4c
|
refs/heads/master
| 2021-01-18T06:21:22.415409
| 2012-10-22T13:02:57
| 2012-10-22T13:02:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 82
|
py
|
from meinheld.server import *
from meinheld import mlogging
__version__ = '0.5.2'
|
[
"yutaka.matsubara@gmail.com"
] |
yutaka.matsubara@gmail.com
|
38cef0612ba99f94b008f4aadf2fe5083785e04f
|
eb4f895182e796d49b707d5b9b5ba32a0edc420e
|
/Indexacion/10.py
|
ce96c72db00cccf7ef50bdbb9d0606337ab8572b
|
[] |
no_license
|
ricardo2027/Trabajo02
|
6cbed72fbfdabbce44336909057aee95bfe00128
|
7fd9d21bfda4983de9489e6bc3e691a7130a8f7c
|
refs/heads/master
| 2020-09-01T21:45:34.774994
| 2019-11-05T20:20:52
| 2019-11-05T20:20:52
| 219,066,326
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20
|
py
|
d={0:10}
print(d[0])
|
[
"ricardo20chamba@gmail.com"
] |
ricardo20chamba@gmail.com
|