hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d7702a9b2e58db18587254af35a4b4199fcc47d0 | 1,386 | py | Python | plio/sqlalchemy_json/alchemy.py | kaitlyndlee/plio | 99f0852d8eb92efeba72f366077bd023a7da7cdd | [
"Unlicense"
] | 11 | 2018-02-01T02:56:26.000Z | 2022-02-21T12:08:12.000Z | plio/sqlalchemy_json/alchemy.py | kaitlyndlee/plio | 99f0852d8eb92efeba72f366077bd023a7da7cdd | [
"Unlicense"
] | 151 | 2016-06-15T21:31:37.000Z | 2021-11-15T16:55:53.000Z | plio/sqlalchemy_json/alchemy.py | kaitlyndlee/plio | 99f0852d8eb92efeba72f366077bd023a7da7cdd | [
"Unlicense"
] | 21 | 2016-06-17T17:02:39.000Z | 2021-03-08T20:47:50.000Z | # Third-party modules
try:
import simplejson as json
except ImportError:
import json
import sqlalchemy
from sqlalchemy.ext import mutable
# Custom modules
from . import track
class NestedMutable(mutable.MutableDict, track.TrackedDict):
    """SQLAlchemy `mutable` extension dictionary with nested change tracking.

    Values set on the dict are converted into change-tracking types so that
    mutations made deep inside nested containers still flag the parent
    column as changed.
    """
    def __setitem__(self, key, value):
        """Ensure that items set are converted to change-tracking types."""
        super(NestedMutable, self).__setitem__(key, self.convert(value, self))
    @classmethod
    def coerce(cls, key, value):
        """Convert plain dictionary to NestedMutable."""
        if isinstance(value, cls):
            return value
        if isinstance(value, dict):
            return cls(value)
        # BUG FIX: `super(cls)` builds an *unbound* super object, so calling
        # `.coerce` on it raises AttributeError instead of delegating.  The
        # two-argument form correctly falls through to MutableDict.coerce,
        # which raises ValueError for values it cannot coerce.
        return super(NestedMutable, cls).coerce(key, value)
class _JsonTypeDecorator(sqlalchemy.TypeDecorator):
    """Enables JSON storage by encoding and decoding on the fly."""
    impl = sqlalchemy.String
    def process_bind_param(self, value, dialect):
        """Serialize the Python value to its JSON text form for storage."""
        return json.dumps(value)
    def process_result_value(self, value, dialect):
        """Deserialize the stored JSON text back into a Python value.

        BUG FIX: columns holding SQL NULL hand ``None`` to this hook, and
        ``json.loads(None)`` raises TypeError; pass NULL through unchanged.
        """
        if value is None:
            return None
        return json.loads(value)
class JsonObject(_JsonTypeDecorator):
    """JSON object type for SQLAlchemy with change tracking as base level."""
class NestedJsonObject(_JsonTypeDecorator):
    """JSON object type for SQLAlchemy with nested change tracking."""
# Register the mutation trackers with the column types: plain MutableDict
# notices top-level key assignments on JsonObject columns, while
# NestedMutable also tracks changes made inside nested containers on
# NestedJsonObject columns.
mutable.MutableDict.associate_with(JsonObject)
NestedMutable.associate_with(NestedJsonObject)
| 27.176471 | 78 | 0.752525 | 1,097 | 0.791486 | 0 | 0 | 243 | 0.175325 | 0 | 0 | 430 | 0.310245 |
d77119de6b793cbac0f2940da62fd48d954f6aef | 165 | py | Python | squirrel/__main__.py | egxdigital/squirrel | f4c5dbead63788a088d24b28b6cd8ad283585eaa | [
"MIT"
] | null | null | null | squirrel/__main__.py | egxdigital/squirrel | f4c5dbead63788a088d24b28b6cd8ad283585eaa | [
"MIT"
] | null | null | null | squirrel/__main__.py | egxdigital/squirrel | f4c5dbead63788a088d24b28b6cd8ad283585eaa | [
"MIT"
] | null | null | null | """Squirrel Main
This module contains the entry point code for the Squirrel program.
"""
from squirrel.squirrel import main
if __name__ == '__main__':
main() | 16.5 | 67 | 0.727273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.6 |
d7741c04bf8fcb98f38a8a2f9343cab79bc9f668 | 394 | py | Python | clairvoyance/preprocessing/__init__.py | ZhaozhiQIAN/SyncTwin-NeurIPS-2021 | 78eff91d0287c7f1f66c76ca24834c7d1029ad3b | [
"MIT"
] | 5 | 2021-11-23T08:41:08.000Z | 2022-03-06T16:20:37.000Z | clairvoyance/preprocessing/__init__.py | ZhaozhiQIAN/SyncTwin-NeurIPS-2021 | 78eff91d0287c7f1f66c76ca24834c7d1029ad3b | [
"MIT"
] | null | null | null | clairvoyance/preprocessing/__init__.py | ZhaozhiQIAN/SyncTwin-NeurIPS-2021 | 78eff91d0287c7f1f66c76ca24834c7d1029ad3b | [
"MIT"
] | 2 | 2021-11-16T16:10:53.000Z | 2021-12-28T07:13:03.000Z | from .encoding import (
MinMaxNormalizer,
Normalizer,
OneHotEncoder,
ProblemMaker,
ReNormalizer,
StandardNormalizer,
)
from .outlier_filter import FilterNegative, FilterOutOfRange
__all__ = [
"FilterNegative",
"FilterOutOfRange",
"OneHotEncoder",
"MinMaxNormalizer",
"StandardNormalizer",
"ReNormalizer",
"Normalizer",
"ProblemMaker",
]
| 18.761905 | 60 | 0.687817 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 127 | 0.322335 |
d7751bcbe755b9830a28eda231e1c57944ba867e | 3,659 | py | Python | csrc/layers/cfc2.py | radu-dogaru/numpyCNN | efe8749d7a35156ff9e67e7cc6df62a8077bf2ea | [
"MIT"
] | null | null | null | csrc/layers/cfc2.py | radu-dogaru/numpyCNN | efe8749d7a35156ff9e67e7cc6df62a8077bf2ea | [
"MIT"
] | null | null | null | csrc/layers/cfc2.py | radu-dogaru/numpyCNN | efe8749d7a35156ff9e67e7cc6df62a8077bf2ea | [
"MIT"
] | null | null | null | import cupy as cp
from csrc.activation import SoftMax
from csrc.layers.layer import Layer
# Cu sinapsa comparativa GPU
from csrc.comp_syn import cp_comp
class C2FullyConnected(Layer):
    """Densely connected layer (comparative).

    Forward pass replaces the usual matrix product with the GPU
    "comparative synapse" operator ``cp_comp``; weights and biases are kept
    as float32 cupy arrays so that operator can be applied.

    Attributes
    ----------
    size : int
        Number of neurons.
    activation : Activation
        Neurons' activation's function.
    is_softmax : bool
        Whether or not the activation is softmax.
    cache : dict
        Cache.
    w : numpy.ndarray
        Weights.
    b : numpy.ndarray
        Biases.
    """
    def __init__(self, size, activation):
        super().__init__()
        self.size = size
        self.activation = activation
        self.is_softmax = isinstance(self.activation, SoftMax)
        self.cache = {}
        self.w = None
        self.b = None

    def init(self, in_dim):
        """Allocate weights/biases for an input of width ``in_dim``."""
        # He initialization
        self.w = (cp.random.randn(self.size, in_dim) * cp.sqrt(2 / in_dim)).astype('float32')
        # Switched to float32 type in order to be able to call the cp_comp operator
        self.b = cp.zeros((1, self.size)).astype('float32')

    def forward(self, a_prev, training):
        """Forward pass; caches activations when ``training`` is True."""
        #print('Forma1: ',cp.shape(a_prev))
        #print('Forma1: ',cp.shape(self.w.T))
        z = cp_comp(a_prev, self.w.T) + self.b # comparative layer (instead of a_prev @ w.T)
        a = self.activation.f(z)
        if training:
            # Cache for backward pass
            self.cache.update({'a_prev': a_prev, 'z': z, 'a': a})
        return a

    def backward(self, da):
        """Backward pass: return (da_prev, dw, db) for upstream gradient
        ``da``; gradients are averaged over the batch."""
        a_prev, z, a = (self.cache[key] for key in ('a_prev', 'z', 'a'))
        batch_size = a_prev.shape[0]
        # ------- propagate the error `da` through the activation nonlinearity
        if self.is_softmax:
            # Get back y from the gradient wrt the cost of this layer's activations
            # That is get back y from - y/a = da
            y = da * (-a)
            dz = a - y
        else:
            dz = da * self.activation.df(z, cached_y=a)
        #---------- weight and bias gradient computation --------
        dw = 1 / batch_size * cp.dot(dz.T, a_prev)
        '''
        # aici ar trebui inlocuit dz.T = (clase,batch) * (batch, intrari)
        m1=cp.shape(dz.T)[0]
        n1=cp.shape(a_prev)[0]
        n2=cp.shape(a_prev)[1]
        dw=cp.zeros((m1,n2))
        for k in range(m1):
            dw[k,:]=cp.sum(dz.T[k,:] * a_prev.T, axis=1)
            #dw[k,:]=0.5*cp.sum(cp.abs(dz.T[k,:]+a_prev.T)-cp.abs(dz.T[k,:]-a_prev.T),axis=1)
            #dw[k,:]=0.002*cp.sum(cp.sign(dz.T[k,:]+a_prev.T)+cp.sign(dz.T[k,:]-a_prev.T),axis=1)
        dw = 1 / batch_size * dw
        #print('Forma dz.T : ',cp.shape(dz.T))
        #print('Forma a_prev : ',cp.shape(a_prev))
        # NOTA: antrenarea cu sign() functioneaza numai cu gamma=0.002
        # optimizer=grad_descent si eta 1..10 --> rezulta max 83%
        # pe fully connected cu USPS
        # Cu un strat suplimentar merge "rau"
        # Pentru train e rcmd. sa ramana vechile formule !!
        # sign() cu tanh() devine antrenarea mai lenta
        #----------- R.D. 26 iul 2021 ----------------
        '''
        db = 1 / batch_size * dz.sum(axis=0, keepdims=True)
        #------------ back-propagate the error to the previous layer
        da_prev = cp.dot(dz, self.w)
        #print('Forma dz: ',cp.shape(dz))
        #print('Forma w: ',cp.shape(self.w))
        return da_prev, dw, db

    def update_params(self, dw, db):
        """Apply pre-scaled parameter updates (optimizer computes dw/db)."""
        self.w -= dw
        self.b -= db

    def get_params(self):
        """Return the current (weights, biases) pair."""
        return self.w, self.b

    def get_output_dim(self):
        """Output width equals the number of neurons."""
        return self.size
| 30.239669 | 97 | 0.543591 | 3,498 | 0.955999 | 0 | 0 | 0 | 0 | 0 | 0 | 2,023 | 0.552883 |
d7756e6a29e9091ac15a875449e344d6705dafd8 | 4,640 | py | Python | ryu/app/network_ding/network_loss.py | nicePaul521/Ryu | fc1b5b79dbeac5164d0bc29006439bebb94a891c | [
"Apache-2.0"
] | null | null | null | ryu/app/network_ding/network_loss.py | nicePaul521/Ryu | fc1b5b79dbeac5164d0bc29006439bebb94a891c | [
"Apache-2.0"
] | null | null | null | ryu/app/network_ding/network_loss.py | nicePaul521/Ryu | fc1b5b79dbeac5164d0bc29006439bebb94a891c | [
"Apache-2.0"
] | null | null | null | from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER,DEAD_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.base.app_manager import lookup_service_brick
from ryu.lib import hub
from ryu.base import app_manager
from operator import attrgetter
import setting
class PortMonitor(app_manager.RyuApp):
    """Poll per-port packet counters on every connected switch and derive
    per-link packet-loss ratios.

    Loss for a directed link (src, dst) is
    (tx_packets at src port - rx_packets at dst port) / tx_packets and is
    stored on the topology graph built by the ``awareness`` service brick.
    Only active while ``setting.WEIGHT == 'loss'``.
    """
    def __init__(self, *args, **kwargs):
        super(PortMonitor, self).__init__(*args, **kwargs)
        self.datapaths = {}    # dpid -> datapath object of a live switch
        self.link_loss = {}    # dpid -> {port_no: [rx_packets, tx_packets]}
        self.awareness = lookup_service_brick('awareness')
        self.graph = None      # latest loss-annotated topology graph
        self.loss_thread = hub.spawn(self._monitor)
        self.save_loss_thread = hub.spawn(self._save_loss)

    @set_ev_cls(ofp_event.EventOFPStateChange,
                [MAIN_DISPATCHER, DEAD_DISPATCHER])
    def _state_change_handler(self, ev):
        """Register/unregister switches as they connect and disconnect."""
        datapath = ev.datapath
        if ev.state == MAIN_DISPATCHER:
            if datapath.id not in self.datapaths:
                self.logger.debug('Register datapath: %016x', datapath.id)
                self.datapaths[datapath.id] = datapath
                self.link_loss.setdefault(datapath.id, {})
        elif ev.state == DEAD_DISPATCHER:
            if datapath.id in self.datapaths:
                self.logger.debug('Unregister datapath: %016x', datapath.id)
                del self.datapaths[datapath.id]

    def _monitor(self):
        """Green-thread loop: poll port stats from every switch, then print
        the current loss table."""
        while setting.WEIGHT == 'loss':
            for dp in self.datapaths.values():
                self._request_stats(dp)
            hub.sleep(10)
            self.show_loss_graph()
            hub.sleep(1)

    def _request_stats(self, datapath):
        """Ask *datapath* for statistics of all of its ports."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)
        datapath.send_msg(req)

    def _save_loss(self):
        """Green-thread loop: periodically refresh the cached loss graph."""
        while setting.WEIGHT == 'loss':
            self.graph = self.get_loss()
            hub.sleep(setting.LOSS_PERIOD)

    def get_loss(self):
        """Annotate each known link in the topology graph with its packet
        loss ratio and return the graph."""
        graph = self.awareness.graph
        link_to_port = self.awareness.link_to_port
        for link in link_to_port:
            (src_dpid, dst_dpid) = link
            (src_port, dst_port) = link_to_port[link]
            if src_dpid in self.link_loss and dst_dpid in self.link_loss:
                src_stats = self.link_loss[src_dpid]
                dst_stats = self.link_loss[dst_dpid]
                # BUG FIX: the original indexed the per-port dicts and divided
                # by tx_packets unconditionally, raising KeyError before the
                # first stats reply for a port arrived and ZeroDivisionError
                # while a port had not transmitted any packet yet.
                if src_port in src_stats and dst_port in dst_stats:
                    tx_packets = src_stats[src_port][1]
                    rx_packets = dst_stats[dst_port][0]
                    if tx_packets > 0:
                        graph[src_dpid][dst_dpid]['loss'] = (tx_packets - rx_packets) / float(tx_packets)
                    else:
                        graph[src_dpid][dst_dpid]['loss'] = 0.0
                else:
                    graph[src_dpid][dst_dpid]['loss'] = 0.0
        return graph

    def show_loss_graph(self):
        """Pretty-print the loss ratio of every annotated link (honours
        ``setting.TOSHOW``)."""
        if setting.TOSHOW is False:
            return
        print('-----------------------Link Loss---------------------------------------')
        print('src '' dst '' loss ratio ')
        print('-------------------------------------------------------------------------')
        graph = self.awareness.graph
        link_to_port = self.awareness.link_to_port
        for link in link_to_port:
            (src_dpid, dst_dpid) = link
            (src_port, dst_port) = link_to_port[link]
            if 'loss' in graph[src_dpid][dst_dpid]:
                link_los = graph[src_dpid][dst_dpid]['loss']
                print('%016x:%2x---->%016x:%2x %5.12f'%(src_dpid,src_port,dst_dpid,dst_port,link_los))

    @set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
    def _port_stats_reply_handler(self, ev):
        """Record [rx_packets, tx_packets] per (dpid, port) from a stats
        reply and log the raw counters."""
        if setting.WEIGHT == 'loss':
            body = ev.msg.body
            self.logger.info('---------------------------------------------------------------')
            self.logger.info('datapath port '
                             'rx-pkts rx-bytes tx-pkts tx-bytes')
            self.logger.info('----------------------------------------------------------------')
            # setdefault guards against a stats reply racing ahead of the
            # state-change registration for this datapath.
            port_stats = self.link_loss.setdefault(ev.msg.datapath.id, {})
            for stat in sorted(body, key=attrgetter('port_no')):
                self.logger.info('%016x %8x %8d %8d %8d %8d', ev.msg.datapath.id, stat.port_no, stat.rx_packets, stat.rx_bytes,
                                 stat.tx_packets, stat.tx_bytes)
                port_stats[stat.port_no] = [stat.rx_packets, stat.tx_packets]
| 45.048544 | 123 | 0.561422 | 4,329 | 0.932974 | 0 | 0 | 1,512 | 0.325862 | 0 | 0 | 761 | 0.164009 |
d7760b7f6c4a005bf5f4aee31cc5261044469478 | 875 | py | Python | dealWithDataNpy.py | ItGirls/autoencoding_vi_for_topic_models | 10e47a3dffc92ed9373f7dd55dc66ea034097a32 | [
"MIT"
] | null | null | null | dealWithDataNpy.py | ItGirls/autoencoding_vi_for_topic_models | 10e47a3dffc92ed9373f7dd55dc66ea034097a32 | [
"MIT"
] | null | null | null | dealWithDataNpy.py | ItGirls/autoencoding_vi_for_topic_models | 10e47a3dffc92ed9373f7dd55dc66ea034097a32 | [
"MIT"
] | null | null | null | #!/usr/local/bin/python3
# -*-coding:utf-8 -*-
"""
@Date : 2020/7/28 下午7:01
@Author : zhutingting
@Desc : ==============================================
Blowing in the wind. ===
# ======================================================
@Project : autoencoding_vi_for_topic_models
@FileName: dealWithDataNpy.py
@Software: PyCharm
"""
import pickle
import numpy as np
from run import onehot

# Paths to the preprocessed 20-newsgroups test split and its vocabulary.
dataset_tr = 'data/20news_clean/test.txt.npy'
vocab = 'data/20news_clean/vocab.pkl'
# NOTE(review): `vocab` is rebound from the path string to the loaded object,
# and the open() file handle is never closed explicitly.
vocab = pickle.load(open(vocab, 'rb'))
# print(vocab)
vocab_size = len(vocab)
if __name__ == "__main__":
    # latin1 encoding lets Python 3 load arrays pickled under Python 2.
    arr = np.load(dataset_tr,allow_pickle=True,encoding="latin1")
    print(arr[0])
    print(len(arr[0]))
    print(type(arr))
    print(arr)
    # One-hot encode every non-empty document into a bag-of-words vector.
    data_tr = np.array([onehot(doc.astype('int'), vocab_size) for doc in arr if np.sum(doc) != 0])
    print(data_tr[0])
    # print(arr.size)
d7761ae1988375a09b4acc44806bcabebad35bcd | 557 | py | Python | yoga/project/yoga/Database/migrations/0001_initial.py | sherlklee/yoga | fcfdfa2b326f20f2218b69fce6f881ff5d11d47b | [
"MIT"
] | null | null | null | yoga/project/yoga/Database/migrations/0001_initial.py | sherlklee/yoga | fcfdfa2b326f20f2218b69fce6f881ff5d11d47b | [
"MIT"
] | null | null | null | yoga/project/yoga/Database/migrations/0001_initial.py | sherlklee/yoga | fcfdfa2b326f20f2218b69fce6f881ff5d11d47b | [
"MIT"
] | 1 | 2019-06-04T01:53:52.000Z | 2019-06-04T01:53:52.000Z | # Generated by Django 2.2.1 on 2019-06-03 11:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Customer`` table.

    Django generated this file; prefer adding a new migration over editing
    these operations by hand.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Customer',
            fields=[
                # username doubles as the primary key (no auto id column).
                ('username', models.CharField(max_length=20, primary_key=True, serialize=False)),
                ('password', models.CharField(max_length=20)),
                ('identity', models.CharField(default='customer', max_length=12)),
            ],
        ),
    ]
| 24.217391 | 97 | 0.578097 | 464 | 0.833034 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.174147 |
d776fa923d096d8bed7a6623cbd0a90df2ecdfdb | 9,604 | py | Python | 1GDL - Newmark.py | ZibraMax/tkinter-y-sus-cosas | 713e0b5e4771fdc31f55dc29f3aeb58795587208 | [
"MIT"
] | null | null | null | 1GDL - Newmark.py | ZibraMax/tkinter-y-sus-cosas | 713e0b5e4771fdc31f55dc29f3aeb58795587208 | [
"MIT"
] | null | null | null | 1GDL - Newmark.py | ZibraMax/tkinter-y-sus-cosas | 713e0b5e4771fdc31f55dc29f3aeb58795587208 | [
"MIT"
] | null | null | null | import math
from tkinter import Tk, Canvas, W, E, NW
from tkinter.filedialog import askopenfilename
from tkinter import messagebox
from scipy.interpolate import interp1d
import time
import numpy as np
# Definición recursiva, requiere numpy
# def B(t,P):
# if len(P)==1:
# return np.array(P[0])
# else:
# return (1-t)*B(t,P[:-1])+t*B(t,P[1:])
def getCubic(L, disp):
    """Return coefficients (a, b) of the cubic y = a*x**3 + b*x**2 whose
    tip at x = L reaches deflection *disp* with end slope atan2(disp, L);
    used to draw the deformed cantilever shape."""
    stiffness = np.array([[3 * L ** 2, 2 * L],
                          [L ** 3, L ** 2]])
    load = np.array([[np.arctan2(disp, L)],
                     [disp]])
    coefficients = np.linalg.solve(stiffness, load)
    return coefficients.ravel()
def graficarBola():
    """Redraw the scene: acceleration window, parameter texts, the
    undeformed axis, and the deformed member with the mass at its tip.

    Reads the simulation globals (u, L, m, k) and the display scale
    factors (mult, multu); physical units are converted to pixels.
    """
    my_canvas.delete("all")
    graphAcel()
    ponerTextos()
    global width, height, mult, u, L, m, k, multu
    up = u*mult*multu          # tip displacement in pixels (exaggerated by multu)
    Lp = L*mult                # member length in pixels
    xi, yi = Lp, 0
    centrox, centroy = width/2, height-100   # screen position of the base
    r = m*2                    # ball radius drawn proportional to the mass
    # Dashed gray line: the undeformed (vertical) member axis.
    my_canvas.create_line(centrox, centroy, centrox,
                          centroy-Lp+r, fill='gray', width=1, dash=[3, 3])
    # Hollow circle: rest position of the mass.
    my_canvas.create_oval(yi-r+centrox, centroy-(xi-r), yi +
                          r+centrox, centroy-(xi+r), fill="")
    # Project length and deflection onto the rotated chord before fitting
    # the cubic shape function.
    theta = np.arctan2(up, Lp)
    Lp = Lp*np.cos(theta)
    up = up*np.cos(theta)
    a, b = getCubic(Lp, up)
    x, y = 0, 0
    # Draw the deformed shape as 50 straight segments of the fitted cubic;
    # the line width grows with the stiffness k.
    for i in range(51):
        equis = Lp/50*i
        xi, yi = equis, a*equis**3+b*equis**2
        my_canvas.create_line(y+centrox, centroy-x, yi+centrox,
                              centroy-xi, fill='red', width=max(1, int(k/100)))
        x, y = xi, yi
    # Filled blue circle: current position of the mass at the free end.
    my_canvas.create_oval(y-r+centrox, centroy-(x-r), y +
                          r+centrox, centroy-(x+r), fill="blue")
def editPoint(event):
    """Mouse-click handler: toggle drag-edit mode.  Leaving edit mode
    resumes the time-stepping loop."""
    global editando
    editando = not editando
    if editando:
        return
    drawBezier()
def drawBezier():
    """Run the SDOF time-stepping loop until edit mode is entered.

    Integrates m*u'' + c*u' + k*u = -m*ag(t) with the Newmark-beta method
    (velocity via average acceleration, displacement via the selected beta
    from ``betas``), iterating each step until the acceleration converges
    within ``tol``; the converged state is appended to the history lists
    and the scene is redrawn.
    """
    global editando, u, v, acel, L, z, f, m, k, dt, t, height, width, T, U, V, ACEL, tol, betai, betas
    omega = np.sqrt(k/m)       # natural circular frequency
    acel = -f(0)*9.81          # initial acceleration from ground motion (g -> m/s^2)
    beta = eval(betas[betai])  # betas holds literals like '1/4'; eval of trusted strings only
    while not editando:
        t += dt
        err = 1
        ud1 = 0
        u1 = 0
        udd1 = acel            # predictor: start fixed-point iteration at last acceleration
        while err > tol:
            # Velocity update (gamma = 1/2, average acceleration).
            ud1 = v + ((acel + udd1)/2)*dt
            # Effective acceleration weighted by beta for the displacement update.
            ah = (1-2*beta)*acel+2*beta*udd1
            u1 = u + v * dt + 1/2*ah*dt**2
            # Equation of motion solved for the new acceleration.
            udd2 = (-m*f(t)*9.81 - 2*m*omega*z*ud1-k*u1)/m
            err = abs(udd1-udd2)
            udd1 = udd2
        v = ud1
        u = u1
        acel = udd1
        # Append the converged state to the response histories.
        U += [u]
        V += [v]
        ACEL += [acel]
        T += [t]
        # time.sleep(dt/10)
        graficarBola()
        graficasNewmark()
        my_canvas.update()
def movePoint(event):
    """While in edit mode, drag the mass: the mouse position sets the tip
    displacement ``u`` and member length ``L`` (descaled from pixels),
    then the scene is redrawn."""
    global editando, u, L, width, height, U
    if editando:
        centrox, centroy = width/2, height-100   # screen position of the base
        my_canvas.delete("all")
        x = centrox-event.x
        y = centroy-event.y
        P[0] = x
        P[1] = y
        # Convert pixels back to physical units (undo mult/multu scaling).
        u, L = -x/mult/multu, y/mult
        U[-1] = u     # overwrite the last history sample with the dragged value
        graficarBola()
        graficasNewmark()
def ponerTextos():
    """Render the parameter read-outs (natural frequency, period, display
    scale, damping, stiffness, mass, time step, Newmark beta) plus the
    help text ``strt`` onto the canvas."""
    global z, k, m, dt, height, multu, strt, betai, betas
    my_canvas.create_text(100, height-90, fill="black",
                          font='20', text=f"omega={format(np.sqrt(k/m),'.2f')}", anchor=W)
    my_canvas.create_text(200, height-90, fill="black",
                          font='20', text=f"T={format(2*np.pi/np.sqrt(k/m),'.2f')}", anchor=W)
    my_canvas.create_text(100, height-110, fill="black",
                          font='20', text=f"multu={format(multu,'.2f')}", anchor=W)
    my_canvas.create_text(100, height-130, fill="black",
                          font='20', text=f"z={format(z,'.2f')}", anchor=W)
    my_canvas.create_text(100, height-150, fill="black",
                          font='20', text=f"k={format(k,'.2f')}", anchor=W)
    my_canvas.create_text(100, height-170, fill="black",
                          font='20', text=f"m={format(m,'.2f')}", anchor=W)
    my_canvas.create_text(100, height-190, fill="black",
                          font='20', text=f"dt={format(dt,'.2f')}", anchor=W)
    my_canvas.create_text(100, height-210, fill="black",
                          font='20', text=f"beta={betas[betai]}", anchor=W)
    # Help/instructions block in the top-left corner.
    my_canvas.create_text(100, 50, fill="black",
                          font='20', text=strt, anchor=NW)
def graficasNewmark():
    """Draw the three response-history plots (displacement, velocity and
    acceleration) stacked on the right-hand side of the canvas."""
    global U, V, ACEL, T
    margin_x = 100
    base_y = height - 90
    plot_w = 300
    plot_h = 100
    plot_x = width - plot_w - margin_x
    series = [(U, 'u [m]', False),
              (V, 'v [m/s]', False),
              (ACEL, 'a [m²/s]', True)]
    for row, (history, label, show_time) in enumerate(series, start=1):
        createGraph(plot_x, base_y - row * 150, plot_w, plot_h, T, history,
                    title=label, alert=True, time=show_time)
def createGraph(x0, y0, b, h, X, Y, maxs=None, color='red', title='', alert=False, time=False):
    """Draw a small line plot on the canvas.

    (x0, y0) is the bottom-left corner; b/h are width/height in pixels.
    ``maxs`` optionally pins the symmetric y-range, ``alert=True``
    decimates long series to roughly 70 points for speed, and
    ``time=True`` also prints the current simulation time above the plot.

    NOTE(review): the local ``np = 70`` shadows the numpy module and the
    parameter ``time`` shadows the imported time module inside this
    function body.
    """
    XC = []
    YC = []
    np = 70            # target number of plotted points after decimation
    if alert:
        if len(X) > np+1:
            # Keep every len(X)//np-th sample plus the final one.
            for i in range(0, len(X), int(len(X)/np)):
                XC += [X[i]]
                YC += [Y[i]]
            XC += [X[-1]]
            YC += [Y[-1]]
        else:
            XC = X
            YC = Y
    else:
        XC = X
        YC = Y
    X = XC
    Y = YC
    xf = x0+b
    yf = y0-h
    ym = y0-h/2        # vertical center of the plot (y == 0 axis)
    xmax = max(X)
    xmin = min(X)
    if maxs:
        ymax, ymin = maxs
    else:
        ymax = max(Y)
        ymin = min(Y)
    # Symmetric scale about zero; guard against flat/constant series.
    ymax = max(abs(ymax), abs(ymin))
    if ymax == 0:
        ymax = 1
    dx = xmax-xmin
    if dx == 0:
        dx = 1
    def z(x): return (x)/ymax    # normalize y into [-1, 1] (shadows global z)
    X = [(i-xmin)/dx*b for i in X]
    Y = [z(i) for i in Y]
    # Axes: left border and horizontal zero line.
    my_canvas.create_line(x0, y0, x0, yf, fill='gray', width=1)
    my_canvas.create_line(x0, ym, xf, ym, fill='gray', width=1)
    if time:
        my_canvas.create_text(x0-20, yf-20, fill="black",
                              font='20', text=f"t={format(t,'.2f')}", anchor=W)
    # Title and y-range labels at the left edge.
    my_canvas.create_text(x0-5, ym, fill="black",
                          font='20', text=title, anchor=E)
    my_canvas.create_text(x0-5, y0, fill="black",
                          font='5', text=format(-ymax, '.4f'), anchor=E)
    my_canvas.create_text(x0-5, y0-h, fill="black",
                          font='5', text=format(ymax, '.4f'), anchor=E)
    # Poly-line through the scaled points.
    for i in range(len(X)-1):
        my_canvas.create_line(x0+X[i], ym-Y[i]*h/2, x0 +
                              X[i+1], ym-Y[i+1]*h/2, fill=color, width=2)
def graphAcel():
    """Plot the upcoming window of ground acceleration ag(t): n+1 samples
    of the interpolated record starting at the current time ``t``.  While
    the record is still playing, the y-axis is pinned to the record's
    global extremes so the scale does not jump between frames."""
    global f, dt, height, t, data, width
    n = 20             # number of look-ahead samples in the window
    x0 = 100
    y0 = height-90
    b = 300
    h = 100
    dx = b/n
    maxs = None
    X = []
    Y = []
    for i in range(n+1):
        X += [i*dx]
        Y += [f(t+i*dt)]
    try:
        eq = data[:, 0]
        ey = data[:, 1]
        if t < np.max(eq):
            maxs = [np.max(ey), np.min(ey)]
    except:
        # `data` is None until a record is loaded; fall back to autoscale.
        pass
    createGraph(width-b-x0, y0, b, h, X, Y, maxs, color='blue', title='ag [g]')
def importarArchivo():
    """Open a file-chooser dialog and load the selected ground-motion record."""
    global ARCHIVO
    ARCHIVO = askopenfilename()
    parseArchivo()
def parseArchivo():
    """Parse the chosen record (CSV: time, acceleration in g; one header
    row) into the interpolator ``f``, reset the state, and restart the
    simulation loop."""
    global f, u, v, editando, data
    data = np.loadtxt(ARCHIVO, skiprows=1, delimiter=',')
    # Linear interpolation of the record; zero acceleration outside its span.
    f = interp1d(data[:, 0], data[:, 1], kind='linear',
                 fill_value=(0, 0), bounds_error=False)
    u = 0
    v = 0
    graficarBola()
    drawBezier()
def kpup(e):
    """Keyboard handler.

    'a' resets the state and asks for an acceleration record; 'r' resets
    the whole state; 't' resets only time and history.  Any other key is
    stored in ``actual`` to select which parameter the mouse wheel adjusts.
    """
    global editando, actual, f, u, v, t, U, T, V, ACEL, acel
    if e.char.lower() == 'a':
        U, T, V, ACEL = [], [], [], []
        u, v, t = 0, 0, 0
        def f(x): return 0
        importarArchivo()
    if e.char.lower() == 'r':
        u, v, acel, t = 0, 0, 0, 0
        def f(x): return 0
        U, T, V, ACEL = [], [], [], []
    if e.char.lower() == 't':
        u, v, acel, t = 0, 0, 0, 0
        U, T, V, ACEL = [], [], [], []
    else:
        # NOTE(review): this `else` pairs only with the 't' test above, so
        # the 'a' and 'r' branches also fall through here and overwrite
        # `actual` — confirm whether that is intended.
        actual = e.char.lower()
def wheel(event):
    """Mouse-wheel handler: adjust the parameter currently selected via
    ``actual`` (z damping, k stiffness, m mass, d time step, u display
    scale, b Newmark beta index), clamped to its valid range, then
    restart the simulation loop."""
    global z, k, m, dt, height, actual, multu, editando, betai
    editando = True        # pause the integration loop while editing
    delta = event.delta    # wheel direction; only its sign is used
    if actual == 'z':
        z += 0.05*np.sign(delta)
        z = max(z, 0)
    elif actual == 'k':
        k += 10*np.sign(delta)
        k = max(k, 0)
    elif actual == 'm':
        m += np.sign(delta)
        m = max(m, 0)
    elif actual == 'd':
        dt += 0.01*np.sign(delta)
        dt = max(dt, 0)
    elif actual == 'u':
        multu += 5*np.sign(delta)
        multu = max(multu, 1)
    elif actual == 'b':
        betai += np.sign(delta)
        betai = max(betai, 0)
        betai = min(betai, 2)    # only indices 0..2 exist in `betas`
    graficarBola()
    editando = False
    drawBezier()
my_window = Tk()
ARCHIVO = ''                     # path of the loaded ground-motion record
def f(t): return 0               # ground acceleration [g]; replaced once a record is loaded
actual = 'z'                     # parameter currently controlled by the mouse wheel
mult = 500                       # pixels per meter when drawing lengths
t = 0                            # current simulation time [s]
u = 0                            # tip displacement [m]
v = 0                            # velocity [m/s]
acel = 0                         # acceleration [m/s^2]
L = 1.5                          # member length [m]
z = 0.05                         # damping ratio
m = 20                           # mass [kg]
k = 1500                         # stiffness [N/m]
dt = 0.01                        # integration time step [s]
multu = 100                      # extra display exaggeration of displacements
data = None                      # raw (time, accel) record, set by parseArchivo
tol = 1*10**(-6)                 # convergence tolerance of the Newmark iteration
betas = ['1/8', '1/6', '1/4']    # selectable Newmark beta values (as literals)
betai = 1                        # index into `betas`
U = []                           # displacement history
V = []                           # velocity history
T = []                           # time history
ACEL = []                        # acceleration history
P = [u*mult/multu, L*mult]       # last edited point, in pixels
strt = "Controles:\nClick: Mover la masa\nA: Seleccionar archivo de aceleración\n\nPara cambiar las propiedades, use una de las siguientes letras\ny cambielas usando la rueda del mouse:\n\nK: Rigidez\nM: Masa\nZ: Amortiguamiento\nd: Paso en el tiempo\nu: Multiplicador de desplazamientos (solo para graficar)\nb: Beta de Newmark\n\nR: Reiniciar todo\nT: Reiniciar tiempo"
# Full-screen canvas for all drawing.
width = my_window.winfo_screenwidth()
height = my_window.winfo_screenheight()
my_canvas = Canvas(my_window, width=width, height=height,
                   background='white')
my_canvas.grid(row=0, column=0)
# Event wiring: click toggles edit mode, motion drags the mass, the wheel
# tweaks parameters, and key release handles commands.
my_canvas.bind('<Button-1>', editPoint)
my_canvas.bind('<Motion>', movePoint)
my_canvas.bind('<MouseWheel>', wheel)
my_window.bind('<KeyRelease>', kpup)
editando = False
my_window.title('Amortiguada')
my_window.state('zoomed')
drawBezier()
my_window.mainloop()
| 29.733746 | 372 | 0.500729 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,151 | 0.119808 |
d7771b4290c405990f615c022afc0dd3a2f27b5e | 3,216 | py | Python | python/parserDev/brothon/live_simulator.py | jzadeh/aktaion | 485488908e88212e615cd8bde04c6b1b63403cd0 | [
"Apache-2.0"
] | 112 | 2017-07-26T00:30:29.000Z | 2021-11-09T14:02:12.000Z | python/parserDev/brothon/live_simulator.py | jzadeh/aktaion | 485488908e88212e615cd8bde04c6b1b63403cd0 | [
"Apache-2.0"
] | null | null | null | python/parserDev/brothon/live_simulator.py | jzadeh/aktaion | 485488908e88212e615cd8bde04c6b1b63403cd0 | [
"Apache-2.0"
] | 38 | 2017-07-28T03:09:01.000Z | 2021-05-07T03:21:32.000Z | """LiveSimulator: This class reads in various Bro IDS logs. The class utilizes
the BroLogReader and simply loops over the static bro log
file, replaying rows and changing any time stamps
Args:
eps (int): Events Per Second that the simulator will emit events (default = 10)
max_rows (int): The maximum number of rows to generate (default = None (go forever))
"""
from __future__ import print_function
import os
import time
import datetime
import itertools
# Third party
import numpy as np
# Local Imports
from brothon import bro_log_reader
from brothon.utils import file_utils
class LiveSimulator(object):
    """LiveSimulator: This class reads in various Bro IDS logs. The class utilizes the
       BroLogReader and simply loops over the static bro log file
       replaying rows at the specified EPS and changing timestamps to 'now()'
    """
    def __init__(self, filepath, eps=10, max_rows=None):
        """Initialization for the LiveSimulator Class

           Args:
               filepath (str): Path of the static Bro log file to replay
               eps (int): Events Per Second that the simulator will emit events (default = 10)
               max_rows (int): The maximum number of rows to generate (default = None (go forever))
        """
        # Compute EPS timer
        # Logic:
        # - Normal distribution centered around 1.0/eps
        # - Make sure never less than 0
        # - Precompute 1000 deltas and then just cycle around
        self.eps_timer = itertools.cycle([max(0, delta) for delta in np.random.normal(1.0/float(eps), .5/float(eps), size=1000)])
        # Initialize the Bro log reader (tail=False: read the static file)
        self.log_reader = bro_log_reader.BroLogReader(filepath, tail=False)
        # Store max_rows
        self.max_rows = max_rows
    def readrows(self):
        """Using the BroLogReader this method yields each row of the log file
           replacing timestamps, looping and emitting rows based on EPS rate
        """
        # Loop forever (replaying the file each time it is exhausted) or
        # until max_rows is reached
        num_rows = 0
        while True:
            # Yield the rows from the internal reader
            for row in self.log_reader.readrows():
                yield self.replace_timestamp(row)
                # Sleep (per the randomized EPS delta) and count rows
                time.sleep(next(self.eps_timer))
                num_rows += 1
                # Check for max_rows
                if self.max_rows and (num_rows >= self.max_rows):
                    return
    @staticmethod
    def replace_timestamp(row):
        """Replace the timestamp with now()"""
        if 'ts' in row:
            row['ts'] = datetime.datetime.utcnow()
        return row
def test():
    """Test for LiveSimulator Python Class"""
    # Grab a test file
    data_path = file_utils.relative_dir(__file__, '../data')
    test_path = os.path.join(data_path, 'conn.log')
    print('Opening Data File: {:s}'.format(test_path))
    # Create a LiveSimulator reader (capped at 10 rows so the test terminates)
    reader = LiveSimulator(test_path, max_rows=10)
    for line in reader.readrows():
        print(line)
    print('Read with max_rows Test successful!')
if __name__ == '__main__':
    # Run the test for easy testing/debugging
    test()
| 33.852632 | 129 | 0.627799 | 2,039 | 0.634017 | 686 | 0.213308 | 186 | 0.057836 | 0 | 0 | 1,757 | 0.546331 |
d7774dc77e51cb47753ab4f85b00f22b278e1195 | 2,488 | py | Python | dataset_scripts/merge_results_as_csv.py | contec-korong/r3det-on-mmdetection | 4a78a0b3330d0fcb9c017a5c97d06a92cf85ebac | [
"Apache-2.0"
] | null | null | null | dataset_scripts/merge_results_as_csv.py | contec-korong/r3det-on-mmdetection | 4a78a0b3330d0fcb9c017a5c97d06a92cf85ebac | [
"Apache-2.0"
] | null | null | null | dataset_scripts/merge_results_as_csv.py | contec-korong/r3det-on-mmdetection | 4a78a0b3330d0fcb9c017a5c97d06a92cf85ebac | [
"Apache-2.0"
] | null | null | null | from glob import glob
import os
import pandas as pd
import argparse
CATEGORIES_5 = ('background', 'small ship', 'large ship', 'individual container', 'grouped container', 'crane')
CATEGORIES_15 = ('background', 'small ship', 'large ship', 'civilian aircraft', 'military aircraft', 'small car', 'bus', 'truck', 'train',
                 'crane', 'bridge', 'oil tank', 'dam', 'athletic field', 'helipad', 'roundabout')
CATEGORIES_16 = ('background', 'small ship', 'large ship', 'civilian aircraft', 'military aircraft', 'small car', 'bus', 'truck', 'train',
                 'crane', 'bridge', 'oil tank', 'dam', 'indoor playground', 'outdoor playground', 'helipad', 'roundabout')
CATEGORIES_20 = ('background', 'small ship', 'large ship', 'civilian aircraft', 'military aircraft', 'small car', 'bus', 'truck', 'train',
                 'crane', 'bridge', 'oil tank', 'dam', 'indoor playground', 'outdoor playground', 'helipad', 'roundabout',
                 'helicopter', 'individual container', 'grouped container', 'swimming pool')

# Maps the class count of a model run to its ordered category tuple; a
# detection's class_id is the index of its category name in that tuple.
category_map = {
    5 : CATEGORIES_5,
    15 : CATEGORIES_15,
    16 : CATEGORIES_16,
    20 : CATEGORIES_20}


def main(srcpath, dstpath, classes=16):
    """Merge per-class DOTA-style result files into a single CSV.

    Each ``<category>.txt`` in *srcpath* holds whitespace-separated rows:
    image stem, confidence, and the four corner points of the rotated box.

    Parameters
    ----------
    srcpath : str
        Directory containing the per-class ``*.txt`` result files.
    dstpath : str
        Path of the merged CSV file to write.
    classes : int or str, optional
        Number of classes (5, 15, 16 or 20) selecting the category list.
        Strings are accepted so values straight from argparse work.
    """
    # BUG FIX: argparse delivers --classes as a string; coerce to int so the
    # category_map lookup does not raise KeyError on e.g. '16'.
    categories = category_map[int(classes)]
    text_files = glob(os.path.join(srcpath, '*.txt'))
    header_names = ['file_name', 'confidence',
                    'point1_x', 'point1_y',
                    'point2_x', 'point2_y',
                    'point3_x', 'point3_y',
                    'point4_x', 'point4_y']
    dfs = []
    for txt in text_files:
        # sep=r'\s+' replaces the deprecated delim_whitespace=True flag.
        df = pd.read_csv(txt, sep=r'\s+',
                         names=header_names)
        # The class name is the file stem; use basename/splitext instead of
        # splitting on '/' so Windows paths work too.
        class_name = os.path.splitext(os.path.basename(txt))[0]
        df['class_id'] = categories.index(class_name)
        df['file_name'] = df['file_name'] + '.png'
        dfs.append(df)

    full_df = pd.concat(dfs)
    # Fixed output column order: class_id right after the file name.
    full_df = full_df[['file_name', 'class_id', 'confidence',
                       'point1_x', 'point1_y',
                       'point2_x', 'point2_y',
                       'point3_x', 'point3_y',
                       'point4_x', 'point4_y']]
    full_df.to_csv(dstpath, index=False)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='merge dota class results to csv file')
    parser.add_argument('--srcpath',
                        help='directory holding the per-class *.txt result files')
    # BUG FIX: parse --classes as an int; without type=int the string value
    # made category_map[classes] raise KeyError whenever the flag was given.
    parser.add_argument('--classes', type=int, default=16,
                        help='number of classes: 5, 15, 16 or 20')
    parser.add_argument('--dstpath', default='result.csv',
                        help='path of the merged CSV to write')
    args = parser.parse_args()
    main(args.srcpath, args.dstpath, args.classes)
| 43.649123 | 138 | 0.58963 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,068 | 0.42926 |
d777be6f240c93857c87bee44f62377cde598f0d | 12,511 | py | Python | testcases/OpTestIPMILockMode.py | vaibhav92/op-test-framework | 792fa18d3f09fd8c28073074815ff96d373ab96d | [
"Apache-2.0"
] | null | null | null | testcases/OpTestIPMILockMode.py | vaibhav92/op-test-framework | 792fa18d3f09fd8c28073074815ff96d373ab96d | [
"Apache-2.0"
] | null | null | null | testcases/OpTestIPMILockMode.py | vaibhav92/op-test-framework | 792fa18d3f09fd8c28073074815ff96d373ab96d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# $Source: op-test-framework/testcases/OpTestIPMILockMode.py $
#
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2015
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# IBM_PROLOG_END_TAG
# @package OpTestIPMILockMode.py
# It will test in-band ipmi white-listed commands when ipmi is in locked mode
#
# IPMI whitelist
# These are the commands that will be available over an unauthenticated
# interface when the BMC is in IPMI lockdown mode.
# Generally one can access all in-band ipmi commands, But if we issue ipmi
# lock command then one can access only specific whitelisted in-band ipmi commands.
import time
import subprocess
import re, sys
from common.OpTestConstants import OpTestConstants as BMC_CONST
import unittest
import OpTestConfiguration
from common.OpTestUtil import OpTestUtil
from common.OpTestSystem import OpSystemState
class OpTestIPMILockMode(unittest.TestCase):
    def setUp(self):
        """Grab the shared test configuration and cache the host, IPMI and
        system handles used throughout the test."""
        conf = OpTestConfiguration.conf
        self.cv_HOST = conf.host()      # OS-side helper (runs commands on the host)
        self.cv_IPMI = conf.ipmi()      # out-of-band (authenticated) IPMI interface
        self.cv_SYSTEM = conf.system()
        self.util = OpTestUtil()
        self.platform = conf.platform()
##
# @brief This function will cover following test steps
# 1. It will get the OS level installed on power platform
# 2. It will check for kernel version installed on the Open Power Machine
# 3. It will check for ipmitool command existence and ipmitool package
# 4. Load the necessary ipmi modules based on config values
# 5. Issue a ipmi lock command through out-of-band authenticated interface
# 6. Now BMC IPMI is in locked mode, at this point only white listed
# in-band ipmi commands sholud work(No other in-band ipmi command should work)
# 7. Execute and test the functionality of whitelisted in-band ipmi
# commands in locked mode
# 8. At the end of test issue a ipmi unlock command to revert the availablity of all
# in-band ipmi commands in unlocked mode.
def runTest(self):
if not self.platform in ['habanero','firestone','garrison', 'p9dsu']:
raise unittest.SkipTest("Platform %s doesn't support IPMI Lockdown mode" % self.platform)
self.cv_SYSTEM.goto_state(OpSystemState.OS)
# Get OS level
l_oslevel = self.cv_HOST.host_get_OS_Level()
# Get kernel version
l_kernel = self.cv_HOST.host_get_kernel_version()
# Checking for ipmitool command and lm_sensors package
self.cv_HOST.host_check_command("ipmitool")
l_pkg = self.cv_HOST.host_check_pkg_for_utility(l_oslevel, "ipmitool")
print "Installed package: %s" % l_pkg
# loading below ipmi modules based on config option
# ipmi_devintf, ipmi_powernv and ipmi_masghandler
self.cv_HOST.host_load_module_based_on_config(l_kernel, BMC_CONST.CONFIG_IPMI_DEVICE_INTERFACE,
BMC_CONST.IPMI_DEV_INTF)
self.cv_HOST.host_load_module_based_on_config(l_kernel, BMC_CONST.CONFIG_IPMI_POWERNV,
BMC_CONST.IPMI_POWERNV)
self.cv_HOST.host_load_module_based_on_config(l_kernel, BMC_CONST.CONFIG_IPMI_HANDLER,
BMC_CONST.IPMI_MSG_HANDLER)
# Issue a ipmi lock command through authenticated interface
print "Issuing ipmi lock command through authenticated interface"
l_res = self.cv_IPMI.enter_ipmi_lockdown_mode()
try:
self.run_inband_ipmi_whitelisted_cmds()
except:
l_msg = "One of white listed in-band ipmi command execution failed"
print sys.exc_info()
finally:
# Issue a ipmi unlock command at the end of test.
print "Issuing ipmi unlock command through authenticated interface"
self.cv_IPMI.exit_ipmi_lockdown_mode()
##
# @brief This function will execute whitelisted in-band ipmi commands
# and test the functionality in locked mode.
def run_inband_ipmi_whitelisted_cmds(self):
l_con = self.cv_SYSTEM.sys_get_ipmi_console()
self.cv_SYSTEM.host_console_login()
self.cv_SYSTEM.host_console_unique_prompt()
l_con.run_command("uname -a")
# Test IPMI white listed commands those should be allowed through un-authenticated
# in-band interface
# 1.[App] Get Device ID
print "Testing Get Device ID command"
l_res = l_con.run_command(BMC_CONST.HOST_GET_DEVICE_ID)
# 2.[App] Get Device GUID
print "Testing Get Device GUID"
l_res = l_con.run_command(BMC_CONST.HOST_GET_DEVICE_GUID)
# 3.[App] Get System GUID
print "Testing Get system GUID"
l_res = l_con.run_command(BMC_CONST.HOST_GET_SYSTEM_GUID)
# 4.[Storage] Get SEL info
print "Testing Get SEL info"
l_res = l_con.run_command(BMC_CONST.HOST_GET_SEL_INFO)
# 5.[Storage] Get SEL time
print "Testing Get SEL time"
l_res = l_con.run_command(BMC_CONST.HOST_GET_SEL_TIME_RAW)
# 6. [Storage] Reserve SEL
print "Testing Reserve SEL"
l_res = l_con.run_command(BMC_CONST.HOST_RESERVE_SEL)
# 7. [Storage] Set SEL time (required for RTC)
print "Testing Set SEL time"
l_res = l_con.run_command(BMC_CONST.HOST_GET_SEL_TIME)
l_res = l_con.run_command(BMC_CONST.HOST_SET_SEL_TIME + " \'" + l_res[-1] + "\'")
l_con.run_command(BMC_CONST.HOST_GET_SEL_TIME)
# 8. [Transport] Get LAN parameters
print "Testing Get LAN parameters"
l_res = l_con.run_command(BMC_CONST.HOST_GET_LAN_PARAMETERS)
# 9.[Chassis] Get System Boot Options
print "Testing Get System Boot Options"
l_res = l_con.run_command(BMC_CONST.HOST_GET_SYSTEM_BOOT_OPTIONS)
# 10.[Chassis] Set System Boot Options
print "Testing Set System Boot Options"
l_res = l_con.run_command(BMC_CONST.HOST_SET_SYTEM_BOOT_OPTIONS)
l_con.run_command(BMC_CONST.HOST_GET_SYSTEM_BOOT_OPTIONS)
# 11. [App] Get BMC Global Enables
print "Testing Get BMC Global Enables"
l_res = l_con.run_command(BMC_CONST.HOST_GET_BMC_GLOBAL_ENABLES_RAW)
l_con.run_command(BMC_CONST.HOST_GET_BMC_GLOBAL_ENABLES)
# 12. [App] Set BMC Global Enables
print "Testing Set BMC Global Enables"
l_res = l_con.run_command(BMC_CONST.HOST_SET_BMC_GLOBAL_ENABLES_SEL_OFF)
l_con.run_command(BMC_CONST.HOST_GET_BMC_GLOBAL_ENABLES)
l_con.run_command(BMC_CONST.HOST_SET_BMC_GLOBAL_ENABLES_SEL_ON)
# 13.[App] Get System Interface Capabilities
if not self.platform in ['p9dsu']:
print "Testing Get System Interface Capabilities"
l_res = l_con.run_command(BMC_CONST.HOST_GET_SYSTEM_INTERFACE_CAPABILITIES_SSIF)
l_res = l_con.run_command(BMC_CONST.HOST_GET_SYSTEM_INTERFACE_CAPABILITIES_KCS)
# 14.[App] Get Message Flags
print "Testing Get Message Flags"
l_res = l_con.run_command(BMC_CONST.HOST_GET_MESSAGE_FLAGS)
# 15. [App] Get BT Capabilities
print "Testing Get BT Capabilities"
l_res = l_con.run_command(BMC_CONST.HOST_GET_BT_CAPABILITIES)
# 16. [App] Clear Message Flags
print "Testing Clear Message Flags"
l_res = l_con.run_command_ignore_fail(BMC_CONST.HOST_CLEAR_MESSAGE_FLAGS)
if not self.platform in ['p9dsu']:
# 17. [OEM] PNOR Access Status
print "Testing the PNOR Access Status"
l_res = l_con.run_command(BMC_CONST.HOST_PNOR_ACCESS_STATUS_DENY)
l_res = l_con.run_command(BMC_CONST.HOST_PNOR_ACCESS_STATUS_GRANT)
# 18. [Storage] Add SEL Entry
print "Testing Add SEL Entry"
print "Clearing the SEL list"
self.cv_IPMI.ipmi_sdr_clear()
l_res = l_con.run_command(BMC_CONST.HOST_ADD_SEL_ENTRY)
time.sleep(1)
l_res = self.cv_IPMI.last_sel()
print "Checking for Reserved entry creation in SEL"
print l_res
if "eserved" not in l_res:
raise Exception("IPMI: Add SEL Entry command, doesn't create an SEL event")
# 19. [App] Set Power State
print "Testing Set Power State"
l_res = l_con.run_command(BMC_CONST.HOST_SET_ACPI_POWER_STATE)
# 20.[Sensor/Event] Platform Event (0x02)
print "Testing Platform Event"
self.cv_IPMI.ipmi_sdr_clear()
l_res = l_con.run_command(BMC_CONST.HOST_PLATFORM_EVENT)
l_res = self.cv_IPMI.last_sel()
if "eserved" not in l_res:
raise Exception("IPMI: Platform Event command failed to log SEL event")
# 21.[Chassis] Chassis Control
print "Testing chassis power on"
l_res = l_con.run_command(BMC_CONST.HOST_CHASSIS_POWER_ON)
# 22. [App] Get ACPI Power State (0x06)
print "Testing Get ACPI Power State"
l_res = l_con.run_command(BMC_CONST.HOST_GET_ACPI_POWER_STATE)
# 23. [App] Set watchdog
print "Testing Set watchdog"
l_res = l_con.run_command(BMC_CONST.HOST_SET_WATCHDOG)
self.cv_IPMI.mc_get_watchdog()
if self.platform in ['p9dsu']:
return
# 24. [Sensor/Event] Get Sensor Type
print "Testing Get Sensor Type"
l_res = self.cv_IPMI.sdr_get_watchdog()
matchObj = re.search( "Watchdog \((0x\d{1,})\)", l_res)
if matchObj:
print "Got sensor Id for watchdog: %s" % matchObj.group(1)
else:
raise Exception("Failed to get sensor id for watchdog sensor")
l_res = l_con.run_command(BMC_CONST.HOST_GET_SENSOR_TYPE_FOR_WATCHDOG + " " + matchObj.group(1))
# 25.[Sensor/Event] Get Sensor Reading
print "Testing Get Sensor Reading"
l_res = self.cv_IPMI.sdr_get_watchdog()
matchObj = re.search( "Watchdog \((0x\d{1,})\)", l_res)
if matchObj:
print "Got sensor Id for watchdog: %s" % matchObj.group(1)
else:
raise Exception("Failed to get sensor id for watchdog sensor")
l_res = l_con.run_command(BMC_CONST.HOST_GET_SENSOR_READING + " " + matchObj.group(1))
# 26. [OEM] PNOR Access Response (0x08)
print "Testing PNOR Access Response"
l_con.run_command(BMC_CONST.HOST_PNOR_ACCESS_STATUS_GRANT)
l_res = l_con.run_command(BMC_CONST.HOST_PNOR_ACCESS_RESPONSE)
l_con.run_command(BMC_CONST.HOST_PNOR_ACCESS_STATUS_DENY)
l_res = l_con.run_command(BMC_CONST.HOST_PNOR_ACCESS_RESPONSE)
# 27.[App] 0x38 Get Channel Authentication Cap
print "Testing Get Channel Authentication Capabilities"
l_res = l_con.run_command(BMC_CONST.HOST_GET_CHANNEL_AUTH_CAP)
# 28.[App] Reset Watchdog (0x22)
print "Testing reset watchdog"
self.cv_IPMI.ipmi_sdr_clear()
l_res = l_con.run_command(BMC_CONST.HOST_RESET_WATCHDOG)
l_res = ''
for x in range(0,25):
# Reset watchdog should create a SEL event log
print "# Looking for Watchdog SEL event try %d" % x
l_res = self.cv_IPMI.last_sel()
print l_res
if "Watchdog" in l_res:
break
time.sleep(1)
if "Watchdog" not in l_res:
raise Exception("IPMI: Reset Watchdog command, doesn't create an SEL event")
# Below commands will effect sensors and fru values and some care to be taken for
# executing.
# 29.[Storage] Write FRU
# 30.[Sensor/Event] Set Sensor Reading
# 31. [OEM] Partial Add ESEL (0xF0)
# This is testsed by kernel itself, it will send messages to BMC internally
# 32.[App] Send Message
| 41.842809 | 104 | 0.671809 | 10,992 | 0.878587 | 0 | 0 | 0 | 0 | 0 | 0 | 5,522 | 0.441372 |
d779c2a2f911575752519902fdcf73487b5c0405 | 1,446 | py | Python | scripts/update_pins.py | machow/gh-projects-cli | f0e414f7d900ac5546bab0ec6f1448a0f36bf300 | [
"MIT"
] | null | null | null | scripts/update_pins.py | machow/gh-projects-cli | f0e414f7d900ac5546bab0ec6f1448a0f36bf300 | [
"MIT"
] | 6 | 2022-01-03T18:07:01.000Z | 2022-01-04T01:22:07.000Z | scripts/update_pins.py | machow/gh-projects-cli | f0e414f7d900ac5546bab0ec6f1448a0f36bf300 | [
"MIT"
] | null | null | null | import jq
from dotenv import load_dotenv
from gh_projects import (
update_project_with_repo_issues,
fetch_all_issues,
push_issues_to_project_next,
)
load_dotenv()
PROJECT_ID = "PN_kwHOACdIos4AAto7"
# fetch_project_item_issue_ids("PN_kwHOACdIos4AAYbQ")
all_issues = fetch_all_issues("machow", "pins-python", ["projectNext(number: 1) { id }"])
need_project = (
jq.compile(".[] | select(.projectNext.id == null) | .id").input(all_issues).all()
)
push_issues_to_project_next(PROJECT_ID, need_project)
update_project_with_repo_issues(
"machow",
"pins-python",
PROJECT_ID,
{
".updatedAt": "MDE2OlByb2plY3ROZXh0RmllbGQyNjI0ODEw",
".createdAt": "MDE2OlByb2plY3ROZXh0RmllbGQyNjI0ODM4",
".closedAt": "MDE2OlByb2plY3ROZXh0RmllbGQyNjI0ODM5",
".author.login": "MDE2OlByb2plY3ROZXh0RmllbGQyNjI0ODQ5",
".comments.totalCount": "MDE2OlByb2plY3ROZXh0RmllbGQyNjI0ODk4",
".comments.nodes[] | .createdAt": "MDE2OlByb2plY3ROZXh0RmllbGQyNjI0ODk3",
".comments.nodes[] | .author.login": "MDE2OlByb2plY3ROZXh0RmllbGQyNjI0ODg3",
".isReadByViewer": "MDE2OlByb2plY3ROZXh0RmllbGQyNjI0ODc3",
},
query_fragment="""
updatedAt
createdAt
closedAt
author { login }
isReadByViewer
comments(last: 1) {
totalCount
nodes {
createdAt
author {
login
}
}
}
""",
)
| 25.821429 | 89 | 0.667358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 902 | 0.62379 |
d77c6fdb8c4f6bbd8699fdd1ff1c22e620654dfe | 7,061 | py | Python | aiosvc/amqp/pool.py | acsnem/aiosvc | 5bb316fe6958d4143bc0434f6dfbe9bfa9328916 | [
"MIT"
] | null | null | null | aiosvc/amqp/pool.py | acsnem/aiosvc | 5bb316fe6958d4143bc0434f6dfbe9bfa9328916 | [
"MIT"
] | null | null | null | aiosvc/amqp/pool.py | acsnem/aiosvc | 5bb316fe6958d4143bc0434f6dfbe9bfa9328916 | [
"MIT"
] | null | null | null | import logging
import asyncio
from aiosvc import Componet
from .simple import Publisher
# class Pool(Componet):
#
# def __init__(self, exchange, *, publish_timeout=5, try_publish_interval=.9, size=1, max_size=2, loop=None, start_priority=1):
# super().__init__(loop=loop, start_priority=start_priority)
# # Publisher(exchange=exchange, publish_timeout=publish_timeout, try_publish_interval=try_publish_interval)
class Pool(Componet):
    """A connection pool.

    Connection pool can be used to manage a set of Publisher connections to
    the AMQP server. Connections are first acquired from the pool, then used,
    and then released back to the pool.

    The pool starts with ``min_size`` connections and grows on demand up to
    ``max_size``. Released connections are parked in an asyncio queue.
    """

    def __init__(self,
                 exchange,
                 min_size,
                 max_size,
                 publish_timeout=5,
                 try_publish_interval=.9,
                 loop=None,
                 start_priority=1):
        """Create the pool.

        :param exchange: AMQP exchange handed to every Publisher.
        :param min_size: number of connections opened eagerly on start.
        :param max_size: hard upper bound on open connections.
        :param publish_timeout: per-publish timeout forwarded to Publisher.
        :param try_publish_interval: retry interval forwarded to Publisher.
        :param loop: event loop (defaults to the component's loop handling).
        :param start_priority: component start ordering priority.
        :raises ValueError: if min_size/max_size are not positive or
            min_size > max_size.
        """
        super().__init__(loop=loop, start_priority=start_priority)
        self._exchange = exchange
        self._publish_timeout = publish_timeout
        self._try_publish_interval = try_publish_interval
        if max_size <= 0:
            raise ValueError('max_size is expected to be greater than zero')
        if min_size <= 0:
            raise ValueError('min_size is expected to be greater than zero')
        if min_size > max_size:
            raise ValueError('min_size is greater than max_size')
        self._minsize = min_size
        self._maxsize = max_size
        self._reset()
        self._closed = False

    async def _start(self):
        """Component start hook: open the initial connections."""
        await self._init()

    async def _before_stop(self):
        """Propagate the pre-stop phase to every pooled connection."""
        await asyncio.gather(*[con._before_stop() for con in self._connections], loop=self._loop)

    async def _stop(self):
        """Propagate the stop phase to every pooled connection."""
        await asyncio.gather(*[con._stop() for con in self._connections], loop=self._loop)

    async def _new_connection(self):
        """Create, start and register a new Publisher connection.

        On startup failure the half-started connection is torn down on a
        best-effort basis and the original exception is re-raised so callers
        can roll back their ``_con_count`` accounting.
        """
        con = Publisher(self._exchange, publish_timeout=self._publish_timeout,
                        try_publish_interval=self._try_publish_interval, loop=self._loop)
        try:
            await con._start()
        except Exception as e:
            logging.exception(e)
            # Best-effort teardown; ignore secondary failures.
            try:
                await con._before_stop()
                await con._stop()
            except Exception:
                pass
            # BUGFIX: previously the failure was swallowed and the dead
            # connection was still registered in self._connections and
            # returned to the caller; re-raise instead so _init() and
            # _acquire_impl() can undo their _con_count increment.
            raise
        self._connections.add(con)
        return con

    async def _init(self):
        """Open the initial ``min_size`` connections (idempotent)."""
        if self._initialized:
            return
        if self._closed:
            raise Exception('pool is closed')
        for _ in range(self._minsize):
            self._con_count += 1
            try:
                con = await self._new_connection()
            except:
                # Roll back the optimistic count before propagating.
                self._con_count -= 1
                raise
            self._queue.put_nowait(con)
        self._initialized = True
        return self

    def acquire(self, *, timeout=None):
        """Acquire a AMQP connection from the pool.

        :param float timeout: A timeout for acquiring a Connection.
        :return: a context helper usable either as
            ``async with pool.acquire() as con:`` or
            ``con = await pool.acquire()`` (release manually in that case).
        """
        return PoolAcquireContext(self, timeout)

    async def _acquire(self, timeout):
        """Acquire a connection, optionally bounded by ``timeout`` seconds."""
        if timeout is None:
            return await self._acquire_impl()
        else:
            return await asyncio.wait_for(self._acquire_impl(),
                                          timeout=timeout,
                                          loop=self._loop)

    async def _acquire_impl(self):
        """Take a free connection, creating one if under ``max_size``."""
        self._check_init()
        try:
            con = self._queue.get_nowait()
        except asyncio.QueueEmpty:
            con = None
        if con is None:
            if self._con_count < self._maxsize:
                # Room to grow: open a fresh connection.
                self._con_count += 1
                try:
                    con = await self._new_connection()
                except:
                    self._con_count -= 1
                    raise
            else:
                # At capacity: wait for a release.
                con = await self._queue.get()
        return con

    async def release(self, connection):
        """Release a AMQP connection back to the pool."""
        self._check_init()
        self._queue.put_nowait(connection)

    async def close(self):
        """Gracefully close all connections in the pool."""
        if self._closed:
            return
        self._check_init()
        self._closed = True
        coros = []
        for con in self._connections:
            coros.append(con._before_stop())
        await asyncio.gather(*coros, loop=self._loop)
        coros = []
        for con in self._connections:
            coros.append(con._stop())
        await asyncio.gather(*coros, loop=self._loop)
        self._reset()

    def _check_init(self):
        """Raise unless the pool is initialized and still open."""
        if not self._initialized:
            raise Exception('pool is not initialized')
        if self._closed:
            raise Exception('pool is closed')

    def _reset(self):
        """(Re)create the empty pool state."""
        self._connections = set()
        self._con_count = 0
        self._initialized = False
        self._queue = asyncio.Queue(maxsize=self._maxsize, loop=self._loop)

    def __await__(self):
        # ``await pool`` initializes and returns the pool itself.
        return self._init().__await__()

    async def __aenter__(self):
        await self._init()
        return self

    async def __aexit__(self, *exc):
        await self.close()
class PoolAcquireContext:
    """Helper returned by ``Pool.acquire()``.

    Supports two usage styles: ``async with pool.acquire() as con:`` (the
    connection is released automatically on exit) and
    ``con = await pool.acquire()`` (the caller must release it via
    ``pool.release(con)``).
    """

    # pool: owning Pool; timeout: max seconds to wait for a connection;
    # connection: the acquired connection while inside the context;
    # done: guards against re-using this one-shot helper.
    __slots__ = ('timeout', 'connection', 'done', 'pool')

    def __init__(self, pool, timeout):
        self.pool = pool
        self.timeout = timeout
        self.connection = None
        self.done = False

    async def __aenter__(self):
        # Entering twice (or after a plain await) would leak a connection.
        if self.connection is not None or self.done:
            raise Exception('a connection is already acquired')
        self.connection = await self.pool._acquire(self.timeout)
        return self.connection

    async def __aexit__(self, *exc):
        self.done = True
        con = self.connection
        self.connection = None
        await self.pool.release(con)

    def __await__(self):
        # Plain-await form: hand the raw connection straight to the caller.
        self.done = True
        return self.pool._acquire(self.timeout).__await__()
d77d2bb48907dd464ade365bdb00dd8e8c032d0d | 2,364 | py | Python | src/result.py | danbailo/T2-Analise-Algoritmos | 5335207307e68594f1669653fe871624cd2f3163 | [
"MIT"
] | 1 | 2019-05-16T16:04:01.000Z | 2019-05-16T16:04:01.000Z | src/result.py | danbailo/T2-Analise-Algoritmos-I | 5335207307e68594f1669653fe871624cd2f3163 | [
"MIT"
] | null | null | null | src/result.py | danbailo/T2-Analise-Algoritmos-I | 5335207307e68594f1669653fe871624cd2f3163 | [
"MIT"
] | null | null | null | from knapsack import Knapsack, read_instances, organize_instances
from os import path,mkdir
from platform import system
import json
def number_solutions(n):
    """Persist the requested number of result runs and validate it.

    Writes the raw request string *n* to ./number_of_results.txt so a later
    ``get_sol`` invocation can pick it up, then parses it.

    Returns the parsed positive integer, ``False`` when zero was requested,
    or ``None`` when *n* is not a number.
    """
    # Record the raw request first, exactly as received.
    with open('./number_of_results.txt', 'w') as out_file:
        out_file.write(n)
    try:
        count = int(n)
    except ValueError as err:
        print('ERROR:', err)
        print('Please, only numbers!')
        print('View the README to see how to execute the code!')
        return None
    if count == 0:
        print('\n0 solutions?\n')
        return False
    print('\nSuccess!')
    print("run '$ python3 main.py get_sol' to get your results and plot them.\n")
    return count
def get_solutions(all_instances, number_items, weight_max, values_items, weight_items):
    """Run the knapsack solvers repeatedly and dump each run to JSON.

    Reads how many result files to produce from ./number_of_results.txt
    (written beforehand by number_solutions). For each run n, solves every
    instance with both the top-down and bottom-up algorithms and writes
    ../result/result<n>.json.

    Returns False when zero results were requested, otherwise None.
    """
    try:
        with open('./number_of_results.txt', 'r') as result_txt:
            n_result = int(result_txt.readline())
    except (FileNotFoundError, UnboundLocalError):
        print("Please, run '$ python3 main.py n_sol 1' by default, before execute 'get_sol'")
        exit(-1)
    if n_result == 0:
        print('\n0 results? OK! Done...\n')
        return False
    # BUGFIX: the output directory used to be created only when
    # platform.system() was Linux/Windows/Darwin (three identical branches),
    # so on any other platform the open() below crashed. Create it
    # unconditionally, and once - outside the loop (it is loop-invariant).
    if not path.isdir('../result'):
        mkdir('../result')
    print('\nGenerating result...')
    print('Generated 0/{} result done!'.format(n_result))
    for n in range(1, n_result + 1):
        result_bottomUp, time_bottomUp, result_topDown, time_topDown = \
            Knapsack().get_result(all_instances, number_items, weight_max, values_items, weight_items)
        data = {}
        # Per-instance results are positional: entry k belongs to instance k.
        for k, instance in enumerate(all_instances):
            data[instance] = {
                'result topDown': result_topDown[k], 'time topDown': time_topDown[k],
                'result bottomUp': result_bottomUp[k], 'time bottomUp': time_bottomUp[k]}
        with open('../result/result' + str(n) + '.json', 'w') as out_file:
            out_file.write(json.dumps(data, indent=4))
        print('Generated {}/{} result done!'.format(n, n_result))
    print('\nSuccess!')
    print("run '$ python3 statistic.py' to get the statistics")
    print("run '$ python3 plot.py' to see the graphics\n")
print("run '$ python3 plot.py' to see the graphics\n") | 44.603774 | 103 | 0.615059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 772 | 0.326565 |
d77d6e7fb86f611447c66f19c0c89a6591454f10 | 162 | py | Python | parse.py | lpmi-13/telegramStressBot | d6968108347e1215a97c6f8a2a2801e3770874d7 | [
"MIT"
] | null | null | null | parse.py | lpmi-13/telegramStressBot | d6968108347e1215a97c6f8a2a2801e3770874d7 | [
"MIT"
] | 2 | 2017-05-08T21:03:37.000Z | 2020-10-25T05:16:55.000Z | parse.py | lpmi-13/telegramStressBot | d6968108347e1215a97c6f8a2a2801e3770874d7 | [
"MIT"
] | null | null | null | import nltk
from nltk import word_tokenize
def create_POS_tags(sentence):
    """Tokenize *sentence* and return its list of (token, POS-tag) pairs."""
    tokens = word_tokenize(sentence)
    return nltk.pos_tag(tokens)
| 18 | 44 | 0.796296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d77e06fa86733c3b6164292916851025cf9ee6e3 | 472 | py | Python | matplotlib_examples/examples_src/pylab_examples/ellipse_demo.py | xzlmark/webspider | 133c620c65aa45abea1718b0dada09618c2115bf | [
"Apache-2.0"
] | 3 | 2020-04-09T02:35:26.000Z | 2021-02-27T17:00:21.000Z | matplotlib_examples/examples_src/pylab_examples/ellipse_demo.py | colorworlds/webspider | 133c620c65aa45abea1718b0dada09618c2115bf | [
"Apache-2.0"
] | null | null | null | matplotlib_examples/examples_src/pylab_examples/ellipse_demo.py | colorworlds/webspider | 133c620c65aa45abea1718b0dada09618c2115bf | [
"Apache-2.0"
] | 1 | 2020-04-09T02:35:08.000Z | 2020-04-09T02:35:08.000Z | import matplotlib.pyplot as plt
import numpy.random as rnd
from matplotlib.patches import Ellipse

# Number of random ellipses to draw.
NUM = 250

# Random ellipses: centres uniform in [0, 10) x [0, 10), width and height
# in [0, 1), rotation angle in [0, 360) degrees.
ells = [Ellipse(xy=rnd.rand(2)*10, width=rnd.rand(), height=rnd.rand(), angle=rnd.rand()*360)
        for i in range(NUM)]

fig = plt.figure(0)
# Equal aspect ratio so the ellipses are not distorted.
ax = fig.add_subplot(111, aspect='equal')
for e in ells:
    # Patches are added as artists, clipped to the axes box, and given a
    # random transparency and RGB face colour each.
    ax.add_artist(e)
    e.set_clip_box(ax.bbox)
    e.set_alpha(rnd.rand())
    e.set_facecolor(rnd.rand(3))

ax.set_xlim(0, 10)
ax.set_ylim(0, 10)
plt.show()
| 21.454545 | 93 | 0.684322 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.014831 |
d77e372f71687020e3c52a4156b4641851c6ee87 | 2,779 | py | Python | messageAnalysis.py | brennanmcmicking/message-counter | 912abb960ce3e67648c766ddadac829ad80033cb | [
"MIT"
] | null | null | null | messageAnalysis.py | brennanmcmicking/message-counter | 912abb960ce3e67648c766ddadac829ad80033cb | [
"MIT"
] | null | null | null | messageAnalysis.py | brennanmcmicking/message-counter | 912abb960ce3e67648c766ddadac829ad80033cb | [
"MIT"
] | null | null | null |
"""Count Facebook messages per friend per month and plot/export the totals."""

# Standard library imports
import glob
import json
import argparse

# Third-party imports
import pandas as pd

# Parse command line parameters
parser = argparse.ArgumentParser(description='''
Process facebook json message data. The messages directory from the data download must
be in the current working directory.
''')
parser.add_argument(
    "--name",
    help="Name of the owner facebook, as it appear in messages download.",
    required=True
)
parser.add_argument(
    "--friends",
    nargs="+",
    help="Name of friends to include in stats.",
    required=True
)
args = parser.parse_args()
# BUGFIX: removed `filePath = args.data` - no --data argument is defined, so
# accessing args.data raised AttributeError; filePath was only ever used as
# the glob loop variable below anyway.
my_name = args.name
friends = args.friends

# Iterate over every message file, grabbing one row per text message.
rows = []
for file_path in glob.glob("messages/inbox/**/message_*.json"):
    # Read the message file into a dictionary.
    with open(file_path) as message_fp:
        message_file = json.load(message_fp)

    # BUGFIX: the original wrapped this body in `for thread in message_file:`,
    # which iterates the dict's top-level keys while the body indexes
    # message_file directly - every message was counted once per key.
    participants = message_file["participants"]
    messages = message_file["messages"]
    # Discard group chats: only 1-on-1 threads are counted.
    if len(participants) != 2:
        continue
    # NOTE(review): assumes the friend is listed first in participants -
    # confirm against the Facebook download format.
    friend_name = participants[0]["name"]

    # Create a row for each message.
    for message in messages:
        # Ignore non-text messages e.g. pictures, shares, calls etc.
        if message.get('type') != 'Generic':
            continue
        sent_by_friend = message.get("sender_name") != my_name
        timestamp = message.get("timestamp_ms")
        content = message.get("content")
        length = len(content) if content else 0
        rows.append({
            'friend': friend_name,
            'sent_by_friend': sent_by_friend,
            'length': length,
            'timestamp': timestamp
        })

df = pd.DataFrame(rows)
df['timestamp'] = pd.to_datetime(df['timestamp'], unit="ms")
df['friend'] = pd.Categorical(df.friend)
# Show every friend found so the user can copy exact names into --friends.
for x in df['friend'].unique():
    print(x)
df = df[df['friend'].isin(friends)]

# Monthly message counts per friend, pivoted to one column per friend.
date_index = df.set_index('timestamp')
month_sums = date_index.groupby(
    [pd.Grouper(freq="M"), 'friend']).count().dropna()
del month_sums['sent_by_friend']
month_sums = month_sums.reset_index().pivot(
    index='timestamp', columns='friend').fillna(0)
print(month_sums.to_string())

# Export a stacked-area chart, a line chart and the raw counts.
ax = month_sums.plot.area(figsize=(10, 5), linewidth=0)
ax.get_figure().savefig('stacked.png', dpi=300)
lines = month_sums.plot.line(figsize=(10, 5))
lines.get_figure().savefig('lines.png', dpi=300)
month_sums.to_csv('messagedata.csv')
| 28.357143 | 92 | 0.652393 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 962 | 0.346168 |
d77fd3ef68e31fafb19d287504f750c6ea163eef | 14,162 | py | Python | robogen/rgkit/backup bots/KarenRoper10.py | andrewgailey/robogen | 7e96cfa26d2e6dc383c5d205816ddd98f8f100d7 | [
"Unlicense"
] | null | null | null | robogen/rgkit/backup bots/KarenRoper10.py | andrewgailey/robogen | 7e96cfa26d2e6dc383c5d205816ddd98f8f100d7 | [
"Unlicense"
] | null | null | null | robogen/rgkit/backup bots/KarenRoper10.py | andrewgailey/robogen | 7e96cfa26d2e6dc383c5d205816ddd98f8f100d7 | [
"Unlicense"
] | null | null | null | # Karen Roper 1.0 by Adam
# http://robotgame.net/viewrobot/7819
import rg
escapeSquares = []
globTurn = 0
class Robot:
    """Robot Game bot: one ``act`` call decides this robot's move each turn.

    Strategy is a priority list of early-return rules: survive respawn
    waves, finish weak adjacent enemies, flee bad fights, vacate spawn
    squares, intercept approaching enemies, then drift toward the centre.
    The module-level ``escapeSquares`` list coordinates teammates so two
    friendly robots never move into the same square in one turn.
    """

    def act(self, game):
        """Return one action: ['move', loc], ['attack', loc], ['suicide'] or ['guard']."""
        # reset the escape squares for this turn
        global escapeSquares
        global globTurn
        if globTurn != game.turn:
            globTurn = game.turn
            # refresh list of used escape squares
            escapeSquares = []
        badSpawnLocs = [(3, 3), (3, 15), (15, 3), (15, 15)]
        # NOTE(review): (15, 4) appears twice below; (15, 14) may have been
        # intended - confirm against the board layout.
        goodSpawnLocs = [(3, 4), (4, 3), (3, 14), (4, 15), (14, 3), (15, 4), (14, 15), (15, 4), (2, 6), (6, 2), (2, 12), (6, 16), (12, 2), (16, 6), (12, 16), (16, 12)]
        # set the location that would take us towards the centre
        towardCentre = rg.toward(self.location, rg.CENTER_POINT)
        # build info about adjacent (distance 1) and close (distance 2) robots;
        # *Targets lists hold squares next to us that those robots could enter.
        adjEnemyCount = 0
        adjEnemyLocs = []
        closeEnemyCount = 0
        closeEnemyLocs = []
        closeEnemyTargets = []
        adjFriendlyCount = 0
        adjFriendlyLocs = []
        closeFriendlyCount = 0
        closeFriendlyLocs = []
        closeFriendlyTargets = []
        nearbyFriendlyCount = 0
        nearbyFriendlyLocs = []
        for loc, bot in game.robots.iteritems():
            if bot.player_id != self.player_id:
                if rg.wdist(loc, self.location) == 1:
                    adjEnemyCount += 1
                    adjEnemyLocs = adjEnemyLocs + [loc]
                if rg.wdist(loc, self.location) == 2:
                    closeEnemyCount += 1
                    closeEnemyLocs = closeEnemyLocs + [loc]
                    for dest in rg.locs_around(self.location, filter_out=('invalid', 'obstacle')):
                        for poss in rg.locs_around(loc, filter_out=('invalid', 'obstacle')):
                            if poss == dest:
                                closeEnemyTargets = closeEnemyTargets + [poss]
            if bot.player_id == self.player_id:
                if rg.wdist(loc, self.location) == 1:
                    adjFriendlyCount += 1
                    adjFriendlyLocs = adjFriendlyLocs + [loc]
                if rg.wdist(loc, self.location) == 2:
                    closeFriendlyCount += 1
                    closeFriendlyLocs = closeFriendlyLocs + [loc]
                    for dest in rg.locs_around(self.location, filter_out=('invalid', 'obstacle')):
                        for poss in rg.locs_around(loc, filter_out=('invalid', 'obstacle')):
                            if poss == dest:
                                closeFriendlyTargets = closeFriendlyTargets + [poss]
                if rg.wdist(loc, self.location) <= 3:
                    if loc != self.location:
                        nearbyFriendlyCount += 1
                        nearbyFriendlyLocs = nearbyFriendlyLocs + [loc]
        # if it's nearly respawning time...
        if game.turn % 10 in [9, 0] and game.turn != 99:
            # if we're on the edge, move away from spawn locations
            if 'spawn' in rg.loc_types(self.location):
                for dest in rg.locs_around(self.location, filter_out=('invalid', 'obstacle', 'spawn')):
                    if dest not in game.robots:
                        if dest not in escapeSquares:
                            escapeSquares = escapeSquares + [dest]
                            return ['move', dest]
                # if this isn't possible and we have a spare turn, try a new spawn location
                if game.turn % 10 == 9:
                    if 'spawn' in rg.loc_types(towardCentre):
                        if towardCentre not in game.robots:
                            if towardCentre not in escapeSquares:
                                escapeSquares = escapeSquares + [towardCentre]
                                return ['move', towardCentre]
                # otherwise commit suicide
                if game.turn % 10 == 0:
                    return ['suicide']
        # if it's nearly respawning time...
        if game.turn % 10 in [9, 0] and game.turn != 99:
            # try to bump spawning robots
            for loc in closeEnemyLocs:
                if 'spawn' in rg.loc_types(loc):
                    if game.turn % 10 == 0 or self.hp >= 9:
                        # try to attack the square on its path to the centre
                        for dest in rg.locs_around(self.location, filter_out=('invalid', 'obstacle', 'spawn')):
                            if rg.toward(loc, rg.CENTER_POINT) == dest:
                                if dest not in game.robots:
                                    if dest not in escapeSquares:
                                        escapeSquares = escapeSquares + [dest]
                                        return ['move', dest]
                        # if not, and it's turn 10, try to attack any square it could move to
                        if game.turn % 10 == 0:
                            for dest in rg.locs_around(self.location, filter_out=('invalid', 'obstacle', 'spawn')):
                                for poss in rg.locs_around(loc, filter_out=('invalid', 'obstacle')):
                                    if poss == dest:
                                        if dest not in game.robots:
                                            if dest not in escapeSquares:
                                                escapeSquares = escapeSquares + [dest]
                                                return ['move', dest]
        # if we're next to 3+ enemy bots, and low on health, commit suicide
        if adjEnemyCount >= 3:
            if self.hp <= adjEnemyCount * 9:
                return ['suicide']
        # if we're next to one enemy bot on low health, try to kill it (as long as we're not more likely to die ourselves)
        if adjEnemyCount == 1:
            for loc, bot in game.robots.iteritems():
                if loc in adjEnemyLocs:
                    if bot.hp <= 7 or self.hp >= 10:
                        return ['attack', loc]
                    if bot.hp <= self.hp:
                        return ['attack', loc]
        # if we're next to 2 enemy bots, or next to one enemy bot and low on health, run away (but not next to an enemy robot)
        if adjEnemyCount >= 1:
            if self.hp <= 9 or adjEnemyCount >= 2:
                for dest in rg.locs_around(self.location, filter_out=('invalid', 'obstacle', 'spawn')):
                    if dest not in game.robots:
                        if dest not in closeEnemyTargets:
                            if dest not in escapeSquares:
                                escapeSquares = escapeSquares + [dest]
                                return ['move', dest]
                # allow spawn squares if absolutely necessary and we're not near respawn time
                if game.turn % 10 not in [8, 9, 0] or game.turn in [98, 99]:
                    for dest in rg.locs_around(self.location, filter_out=('invalid', 'obstacle')):
                        if dest not in game.robots:
                            if dest not in closeEnemyTargets:
                                if dest not in escapeSquares:
                                    escapeSquares = escapeSquares + [dest]
                                    return ['move', dest]
        # if we're next to an ally in a spawn square, try to free it up by moving towards the centre
        if 'spawn' not in rg.loc_types(self.location):
            for loc in adjFriendlyLocs:
                if 'spawn' in rg.loc_types(loc):
                    if towardCentre not in game.robots:
                        if towardCentre not in escapeSquares:
                            # only step there if we can absorb a hit from every
                            # close enemy that could also reach that square
                            surplusHP = self.hp
                            for dest in closeEnemyTargets:
                                if dest == towardCentre:
                                    surplusHP -= 9
                            if surplusHP > 0 or closeEnemyCount == 0:
                                escapeSquares = escapeSquares + [towardCentre]
                                return ['move', towardCentre]
        # if we're next to an enemy bot, attack it
        for loc in adjEnemyLocs:
            return ['attack', loc]
        # if we're in a spawn square, try to escape to a safe square
        if 'spawn' in rg.loc_types(self.location):
            for dest in rg.locs_around(self.location, filter_out=('invalid', 'obstacle', 'spawn')):
                if dest not in game.robots:
                    if dest not in closeEnemyTargets:
                        if dest not in escapeSquares:
                            escapeSquares = escapeSquares + [dest]
                            return ['move', dest]
            # if this isn't possible, try a 'good' spawn location
            for dest in goodSpawnLocs:
                if dest in rg.locs_around(self.location, filter_out=('invalid', 'obstacle')):
                    if dest not in game.robots:
                        if dest not in closeEnemyTargets:
                            if dest not in closeFriendlyTargets:
                                if dest not in escapeSquares:
                                    escapeSquares = escapeSquares + [dest]
                                    return ['move', dest]
            # if this isn't possible, try a non-bad spawn location
            for dest in rg.locs_around(self.location, filter_out=('invalid', 'obstacle')):
                if 'spawn' in rg.loc_types(dest):
                    if dest not in badSpawnLocs:
                        if dest not in game.robots:
                            if dest not in closeEnemyTargets:
                                if dest not in closeFriendlyTargets:
                                    if dest not in escapeSquares:
                                        escapeSquares = escapeSquares + [dest]
                                        return ['move', dest]
        # if we're close to another bot who's in a battle, help attack it, unless this would bring us into a big battle!
        if game.turn != 99:
            for loc in closeEnemyLocs:
                # NOTE(review): filter_out=('invalid') is a bare string, not a
                # one-element tuple - confirm rg.locs_around accepts this.
                for ally in rg.locs_around(loc, filter_out=('invalid')):
                    if ally in nearbyFriendlyLocs:
                        for dest in rg.locs_around(self.location, filter_out=('invalid', 'obstacle')):
                            for poss in rg.locs_around(loc, filter_out=('invalid', 'obstacle')):
                                if poss == dest:
                                    if dest not in game.robots:
                                        if dest not in escapeSquares:
                                            # check for other enemies around the square we're about to move into
                                            moveIn = 1
                                            for enemy in rg.locs_around(dest, filter_out=('invalid')):
                                                if enemy in closeEnemyLocs:
                                                    if enemy != loc:
                                                        moveIn = 0
                                            if moveIn == 1:
                                                escapeSquares = escapeSquares + [dest]
                                                return ['move', dest]
        # if we're close to another bot, attack the square we think it's going to move into (provided there isn't another bot in it)
        for loc in closeEnemyLocs:
            # try to attack the square on its path to the centre
            for dest in rg.locs_around(self.location, filter_out=('invalid', 'obstacle')):
                if rg.toward(loc, rg.CENTER_POINT) == dest:
                    if dest not in game.robots:
                        return ['attack', dest]
            # if not, try to attack any square it could move to
            for dest in rg.locs_around(self.location, filter_out=('invalid', 'obstacle')):
                for poss in rg.locs_around(loc, filter_out=('invalid', 'obstacle')):
                    if poss == dest:
                        if dest not in game.robots:
                            return ['attack', dest]
        # if we're next to friends, try to move away from them
        if adjFriendlyCount >=1:
            for dest in rg.locs_around(self.location, filter_out=('invalid', 'obstacle', 'spawn')):
                if dest not in game.robots:
                    if dest not in closeEnemyTargets:  # it won't be, but there's no harm in double checking
                        if dest not in closeFriendlyTargets:
                            if dest not in escapeSquares:  # it won't be by the above condition, but there's no harm in double checking
                                escapeSquares = escapeSquares + [dest]
                                return ['move', dest]
        # if we're in the center, stay put
        if self.location == rg.CENTER_POINT:
            return ['guard']
        # move toward the centre if there's a bot that needs room, even if there's a friend there that might be moving
        for loc in adjFriendlyLocs:
            if rg.toward(loc, rg.CENTER_POINT) == self.location:
                for dest in rg.locs_around(self.location, filter_out=('invalid', 'obstacle', 'spawn')):
                    if rg.wdist(dest, rg.CENTER_POINT) < rg.wdist(self.location, rg.CENTER_POINT):
                        if dest not in escapeSquares:
                            escapeSquares = escapeSquares + [towardCentre]
                            return ['move', towardCentre]
                # if there's no free escape squares, just try to go towards the centre
                if towardCentre not in escapeSquares:
                    escapeSquares = escapeSquares + [towardCentre]
                    return ['move', towardCentre]
        # move toward the centre (as long as we won't then be next to a friend)
        if towardCentre not in closeFriendlyTargets:
            if towardCentre not in escapeSquares:  # it won't be by the above condition
                escapeSquares = escapeSquares + [towardCentre]
                return ['move', towardCentre]
        return ['guard']
| 55.105058 | 167 | 0.492303 | 14,052 | 0.992233 | 0 | 0 | 0 | 0 | 0 | 0 | 2,860 | 0.201949 |
d7803ad66a3a0cc62d5dfd23d899e392e7609904 | 10,150 | py | Python | tests/cli/tools.py | CNR-ITTIG/plasodfaxp | 923797fc00664fa9e3277781b0334d6eed5664fd | [
"Apache-2.0"
] | 1 | 2019-09-26T08:16:30.000Z | 2019-09-26T08:16:30.000Z | tests/cli/tools.py | CNR-ITTIG/plasodfaxp | 923797fc00664fa9e3277781b0334d6eed5664fd | [
"Apache-2.0"
] | null | null | null | tests/cli/tools.py | CNR-ITTIG/plasodfaxp | 923797fc00664fa9e3277781b0334d6eed5664fd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the CLI tools classes."""
import argparse
import io
import sys
import unittest
from plaso.cli import tools
from plaso.lib import errors
from tests.cli import test_lib
class CLIToolTest(test_lib.CLIToolTestCase):
  """Tests for the CLI tool base class."""

  _EXPECTED_BASIC_OPTIONS = u'\n'.join([
      u'usage: tool_test.py [-h] [-V]',
      u'',
      u'Test argument parser.',
      u'',
      u'optional arguments:',
      u' -h, --help show this help message and exit.',
      u' -V, --version show the version information.',
      u''])

  _EXPECTED_DATA_OPTION = u'\n'.join([
      u'usage: tool_test.py [--data PATH]',
      u'',
      u'Test argument parser.',
      u'',
      u'optional arguments:',
      u' --data PATH the location of the data files.',
      u''])

  _EXPECTED_INFORMATIONAL_OPTIONS = u'\n'.join([
      u'usage: tool_test.py [-d] [-q]',
      u'',
      u'Test argument parser.',
      u'',
      u'optional arguments:',
      u' -d, --debug enable debug output.',
      u' -q, --quiet disable informational output.',
      u''])

  _EXPECTED_TIMEZONE_OPTION = u'\n'.join([
      u'usage: tool_test.py [-z TIMEZONE]',
      u'',
      u'Test argument parser.',
      u'',
      u'optional arguments:',
      u' -z TIMEZONE, --zone TIMEZONE, --timezone TIMEZONE',
      (u' explicitly define the timezone. Typically '
       u'the timezone'),
      (u' is determined automatically where possible. '
       u'Use "-z'),
      u' list" to see a list of available timezones.',
      u''])

  def testAddBasicOptions(self):
    """Tests the AddBasicOptions function."""
    argument_parser = argparse.ArgumentParser(
        prog=u'tool_test.py', description=u'Test argument parser.',
        add_help=False, formatter_class=argparse.RawDescriptionHelpFormatter)

    test_tool = tools.CLITool()
    test_tool.AddBasicOptions(argument_parser)

    output = self._RunArgparseFormatHelp(argument_parser)
    self.assertEqual(output, self._EXPECTED_BASIC_OPTIONS)

  def testAddDataLocationOption(self):
    """Tests the AddDataLocationOption function."""
    argument_parser = argparse.ArgumentParser(
        prog=u'tool_test.py', description=u'Test argument parser.',
        add_help=False, formatter_class=argparse.RawDescriptionHelpFormatter)

    test_tool = tools.CLITool()
    test_tool.AddDataLocationOption(argument_parser)

    output = self._RunArgparseFormatHelp(argument_parser)
    self.assertEqual(output, self._EXPECTED_DATA_OPTION)

  def testAddInformationalOptions(self):
    """Tests the AddInformationalOptions function."""
    argument_parser = argparse.ArgumentParser(
        prog=u'tool_test.py', description=u'Test argument parser.',
        add_help=False, formatter_class=argparse.RawDescriptionHelpFormatter)

    test_tool = tools.CLITool()
    test_tool.AddInformationalOptions(argument_parser)

    output = self._RunArgparseFormatHelp(argument_parser)
    self.assertEqual(output, self._EXPECTED_INFORMATIONAL_OPTIONS)

  def testAddTimezoneOption(self):
    """Tests the AddTimezoneOption function."""
    argument_parser = argparse.ArgumentParser(
        prog=u'tool_test.py', description=u'Test argument parser.',
        add_help=False, formatter_class=argparse.RawDescriptionHelpFormatter)

    test_tool = tools.CLITool()
    test_tool.AddTimezoneOption(argument_parser)

    output = self._RunArgparseFormatHelp(argument_parser)
    self.assertEqual(output, self._EXPECTED_TIMEZONE_OPTION)

  def testGetCommandLineArguments(self):
    """Tests the GetCommandLineArguments function."""
    cli_tool = tools.CLITool()
    cli_tool.preferred_encoding = u'UTF-8'

    command_line_arguments = cli_tool.GetCommandLineArguments()
    self.assertIsNotNone(command_line_arguments)

  def testListTimeZones(self):
    """Tests the ListTimeZones function."""
    output_writer = test_lib.TestOutputWriter()
    cli_tool = tools.CLITool(output_writer=output_writer)

    cli_tool.ListTimeZones()

    string = output_writer.ReadOutput()
    expected_string = (
        b'\n'
        b'************************************ Zones '
        b'*************************************\n'
        b' Timezone : UTC Offset\n'
        b'----------------------------------------'
        b'----------------------------------------\n')
    self.assertTrue(string.startswith(expected_string))

  def testParseStringOption(self):
    """Tests the ParseStringOption function."""
    encoding = sys.stdin.encoding

    # Note that sys.stdin.encoding can be None.
    if not encoding:
      # NOTE(review): presumably provided by CLIToolTestCase -- confirm.
      encoding = self.preferred_encoding

    cli_tool = tools.CLITool()
    cli_tool.preferred_encoding = u'UTF-8'

    expected_string = u'Test Unicode string'

    options = test_lib.TestOptions()
    options.test = expected_string

    string = cli_tool.ParseStringOption(options, u'test')
    self.assertEqual(string, expected_string)

    options = test_lib.TestOptions()

    string = cli_tool.ParseStringOption(options, u'test')
    self.assertIsNone(string)

    string = cli_tool.ParseStringOption(
        options, u'test', default_value=expected_string)
    self.assertEqual(string, expected_string)

    options = test_lib.TestOptions()
    options.test = expected_string.encode(encoding)

    string = cli_tool.ParseStringOption(options, u'test')
    self.assertEqual(string, expected_string)

    # BUG FIX: the original condition `not sys.stdin.encoding and
    # sys.stdin.encoding.upper() == u'UTF-8'` could never be true (and would
    # raise AttributeError when the encoding is None), so the invalid-bytes
    # check below was dead code. The byte sequence is only guaranteed to fail
    # decoding under UTF-8, hence the guard.
    if sys.stdin.encoding and sys.stdin.encoding.upper() == u'UTF-8':
      options = test_lib.TestOptions()
      options.test = (
          b'\xad\xfd\xab\x73\x99\xc7\xb4\x78\xd0\x8c\x8a\xee\x6d\x6a\xcb\x90')

      with self.assertRaises(errors.BadConfigOption):
        cli_tool.ParseStringOption(options, u'test')

  def testPrintSeparatorLine(self):
    """Tests the PrintSeparatorLine function."""
    output_writer = test_lib.TestOutputWriter()
    cli_tool = tools.CLITool(output_writer=output_writer)

    cli_tool.PrintSeparatorLine()
    string = output_writer.ReadOutput()
    expected_string = (
        b'----------------------------------------'
        b'----------------------------------------\n')
    self.assertEqual(string, expected_string)
class StdinInputReaderTest(unittest.TestCase):
  """The unit test case for a stdin input reader."""

  _TEST_DATA = (
      b'A first string\n'
      b'A 2nd string\n'
      b'\xc3\xberi\xc3\xb0ja string\n'
      b'\xff\xfef\x00j\x00\xf3\x00r\x00\xf0\x00a\x00 \x00b\x00a\x00n\x00d\x00')

  def testReadAscii(self):
    """Tests the Read function with ASCII encoding."""
    original_stdin = sys.stdin
    sys.stdin = io.BytesIO(self._TEST_DATA)
    # Restore sys.stdin even when an assertion fails, so later tests are not
    # left reading from the byte buffer (the original leaked it on failure).
    try:
      input_reader = tools.StdinInputReader(encoding=u'ascii')

      string = input_reader.Read()
      self.assertEqual(string, u'A first string\n')

      string = input_reader.Read()
      self.assertEqual(string, u'A 2nd string\n')

      # UTF-8 string with non-ASCII characters.
      string = input_reader.Read()
      self.assertEqual(string, u'\ufffd\ufffdri\ufffd\ufffdja string\n')

      # UTF-16 string with non-ASCII characters.
      string = input_reader.Read()
      expected_string = (
          u'\ufffd\ufffdf\x00j\x00\ufffd\x00r\x00\ufffd\x00a\x00 '
          u'\x00b\x00a\x00n\x00d\x00')
      self.assertEqual(string, expected_string)
    finally:
      sys.stdin = original_stdin

  def testReadUtf8(self):
    """Tests the Read function with UTF-8 encoding."""
    original_stdin = sys.stdin
    sys.stdin = io.BytesIO(self._TEST_DATA)
    try:
      input_reader = tools.StdinInputReader()

      string = input_reader.Read()
      self.assertEqual(string, u'A first string\n')

      string = input_reader.Read()
      self.assertEqual(string, u'A 2nd string\n')

      # UTF-8 string with non-ASCII characters.
      string = input_reader.Read()
      self.assertEqual(string, u'þriðja string\n')

      # UTF-16 string with non-ASCII characters.
      string = input_reader.Read()
      expected_string = (
          u'\ufffd\ufffdf\x00j\x00\ufffd\x00r\x00\ufffd\x00a\x00 '
          u'\x00b\x00a\x00n\x00d\x00')
      self.assertEqual(string, expected_string)
    finally:
      sys.stdin = original_stdin
class FileObjectOutputWriterTest(unittest.TestCase):
  """The unit test case for a file-like object output writer."""

  def testWriteAscii(self):
    """Tests the Write function with ASCII encoding."""
    writer = test_lib.TestOutputWriter(encoding=u'ascii')

    # A Unicode string that only contains ASCII characters round-trips.
    writer.Write(u'A first string\n')
    self.assertEqual(writer.ReadOutput(), b'A first string\n')

    # A byte string with ASCII characters is passed through unchanged.
    writer.Write(b'A 2nd string\n')
    self.assertEqual(writer.ReadOutput(), b'A 2nd string\n')

    # Non-ASCII characters in a Unicode string are replaced by '?'.
    writer.Write(u'þriðja string\n')
    self.assertEqual(writer.ReadOutput(), b'?ri?ja string\n')

    # A byte string with non-ASCII characters cannot be converted to
    # a Unicode string before the call to encode(), hence the error.
    with self.assertRaises(UnicodeDecodeError):
      writer.Write(b'\xc3\xberi\xc3\xb0ja string\n')

  def testWriteUtf8(self):
    """Tests the Write function with UTF-8 encoding."""
    writer = test_lib.TestOutputWriter()

    # A Unicode string that only contains ASCII characters round-trips.
    writer.Write(u'A first string\n')
    self.assertEqual(writer.ReadOutput(), b'A first string\n')

    # A byte string with ASCII characters is passed through unchanged.
    writer.Write(b'A 2nd string\n')
    self.assertEqual(writer.ReadOutput(), b'A 2nd string\n')

    # Non-ASCII characters in a Unicode string are encoded as UTF-8.
    writer.Write(u'þriðja string\n')
    self.assertEqual(writer.ReadOutput(), b'\xc3\xberi\xc3\xb0ja string\n')

    # A byte string with non-ASCII characters cannot be converted to
    # a Unicode string before the call to encode(), hence the error.
    with self.assertRaises(UnicodeDecodeError):
      writer.Write(b'\xc3\xberi\xc3\xb0ja string\n')
if __name__ == '__main__':
unittest.main()
| 33.278689 | 79 | 0.670936 | 9,875 | 0.972332 | 0 | 0 | 0 | 0 | 0 | 0 | 3,731 | 0.367369 |
d780a8f6fa48d64a0962e1a1c43209f9666ae9dc | 18,108 | py | Python | src/sklearndf/transformation/wrapper/_wrapper.py | mtsokol/sklearndf | 172fb9d5497d6a8f5586d9f4d02e9b48b9bf62c3 | [
"Apache-2.0"
] | 37 | 2021-01-12T08:06:45.000Z | 2022-02-02T02:32:25.000Z | src/sklearndf/transformation/wrapper/_wrapper.py | mtsokol/sklearndf | 172fb9d5497d6a8f5586d9f4d02e9b48b9bf62c3 | [
"Apache-2.0"
] | 13 | 2021-01-20T13:03:13.000Z | 2022-03-04T15:44:58.000Z | src/sklearndf/transformation/wrapper/_wrapper.py | mtsokol/sklearndf | 172fb9d5497d6a8f5586d9f4d02e9b48b9bf62c3 | [
"Apache-2.0"
] | 4 | 2021-01-31T16:14:24.000Z | 2022-03-14T08:20:08.000Z | """
Core implementation of :mod:`sklearndf.transformation.wrapper`
"""
import logging
from abc import ABCMeta, abstractmethod
from typing import Any, Generic, List, Optional, TypeVar, Union
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.impute import MissingIndicator, SimpleImputer
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.manifold import Isomap
from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder, PolynomialFeatures
from pytools.api import AllTracker
from ... import TransformerDF
from ...wrapper import TransformerWrapperDF
log = logging.getLogger(__name__)
__all__ = [
"BaseDimensionalityReductionWrapperDF",
"BaseMultipleInputsPerOutputTransformerWrapperDF",
"ColumnPreservingTransformerWrapperDF",
"ColumnSubsetTransformerWrapperDF",
"ComponentsDimensionalityReductionWrapperDF",
"FeatureSelectionWrapperDF",
"NComponentsDimensionalityReductionWrapperDF",
"NumpyTransformerWrapperDF",
"ColumnTransformerWrapperDF",
"IsomapWrapperDF",
"ImputerWrapperDF",
"MissingIndicatorWrapperDF",
"AdditiveChi2SamplerWrapperDF",
"KBinsDiscretizerWrapperDF",
"PolynomialFeaturesWrapperDF",
"OneHotEncoderWrapperDF",
]
#
# type variables
#
T_Transformer = TypeVar("T_Transformer", bound=TransformerMixin)
# T_Imputer is needed because sklearn's _BaseImputer only exists from v0.22 onwards.
# Once we drop support for sklearn 0.21, _BaseImputer can be used instead.
# The following TypeVar helps to annotate availability of "add_indicator" and
# "missing_values" attributes on an imputer instance for ImputerWrapperDF below
# noinspection PyProtectedMember
from sklearn.impute._iterative import IterativeImputer
T_Imputer = TypeVar("T_Imputer", SimpleImputer, IterativeImputer)
#
# Ensure all symbols introduced below are included in __all__
#
__tracker = AllTracker(globals())
#
# wrapper classes for transformers
#
class NumpyTransformerWrapperDF(
    TransformerWrapperDF[T_Transformer], Generic[T_Transformer], metaclass=ABCMeta
):
    """
    Abstract base class of DF wrappers for transformers that only accept numpy arrays.

    Converts data frames to numpy arrays before handing off to the native transformer.

    Implementations must define :meth:`_get_features_original`.
    """

    # noinspection PyPep8Naming
    def _adjust_X_type_for_delegate(
        self, X: pd.DataFrame, *, to_numpy: Optional[bool] = None
    ) -> np.ndarray:
        # callers must not opt out of the numpy conversion: the wrapped
        # transformer cannot handle data frames
        assert to_numpy is not False, "X must be converted to a numpy array"
        return super()._adjust_X_type_for_delegate(X, to_numpy=True)

    def _adjust_y_type_for_delegate(
        self,
        y: Optional[Union[pd.Series, pd.DataFrame]],
        *,
        to_numpy: Optional[bool] = None,
    ) -> Optional[np.ndarray]:
        # same rationale as for X: force conversion of y to a numpy array
        assert to_numpy is not False, "y must be converted to a numpy array"
        return super()._adjust_y_type_for_delegate(y, to_numpy=True)
class ColumnSubsetTransformerWrapperDF(
    TransformerWrapperDF[T_Transformer], Generic[T_Transformer], metaclass=ABCMeta
):
    """
    Abstract base class of DF wrappers for transformers that do not change column
    names, but that may remove one or more columns.

    Implementations must define :meth:`_get_features_out`.
    """

    @abstractmethod
    def _get_features_out(self) -> pd.Index:
        # column labels of the arrays produced by the fitted native transformer
        pass

    def _get_features_original(self) -> pd.Series:
        # since columns are preserved, each output column maps to itself:
        # build an identity mapping over the output feature index
        features_out = self._get_features_out()
        return pd.Series(data=features_out.values, index=features_out)
class ColumnPreservingTransformerWrapperDF(
    ColumnSubsetTransformerWrapperDF[T_Transformer],
    Generic[T_Transformer],
):
    """
    DF wrapper for transformers whose output columns match the input columns.

    The native transformer must not add, remove, reorder, or rename any of the input
    columns.
    """

    def _get_features_out(self) -> pd.Index:
        # output columns are exactly the input columns
        return self.feature_names_in_
class BaseMultipleInputsPerOutputTransformerWrapperDF(
    TransformerWrapperDF[T_Transformer], Generic[T_Transformer]
):
    """
    DF wrapper for transformers mapping multiple input columns to individual output
    columns.
    """

    @abstractmethod
    def _get_features_out(self) -> pd.Index:
        # make this method abstract to ensure subclasses override the default
        # behaviour, which usually relies on method ``_get_features_original``
        pass

    def _get_features_original(self) -> pd.Series:
        # a many-to-many mapping cannot be represented as a Series of
        # out -> in column names, so this operation is unsupported here
        raise NotImplementedError(
            f"{type(self.native_estimator).__name__} transformers map multiple "
            "inputs to individual output columns; current sklearndf implementation "
            "only supports many-to-1 mappings from output columns to input columns"
        )
class BaseDimensionalityReductionWrapperDF(
    BaseMultipleInputsPerOutputTransformerWrapperDF[T_Transformer],
    Generic[T_Transformer],
    metaclass=ABCMeta,
):
    """
    Base class of DF wrappers for dimensionality-reducing transformers.

    The native transformer is considered to map all input columns to each output column.
    """

    @property
    @abstractmethod
    def _n_components_(self) -> int:
        # number of output dimensions of the fitted transformer
        pass

    def _get_features_out(self) -> pd.Index:
        # output columns carry no original names; generate synthetic labels
        # x_0, x_1, ... for the reduced dimensions
        return pd.Index([f"x_{i}" for i in range(self._n_components_)])
class NComponentsDimensionalityReductionWrapperDF(
    BaseDimensionalityReductionWrapperDF[T_Transformer],
    Generic[T_Transformer],
    metaclass=ABCMeta,
):
    """
    Base class of DF wrappers for dimensionality-reducing transformers supporting the
    :attr:`n_components` attribute.

    Subclasses must implement :meth:`_get_features_original`.
    """

    # name of the native attribute holding the number of output dimensions
    _ATTR_N_COMPONENTS = "n_components"

    def _validate_delegate_estimator(self) -> None:
        # fail fast if the native estimator lacks the expected attribute
        self._validate_delegate_attribute(attribute_name=self._ATTR_N_COMPONENTS)

    @property
    def _n_components_(self) -> int:
        return getattr(self.native_estimator, self._ATTR_N_COMPONENTS)
class ComponentsDimensionalityReductionWrapperDF(
    BaseDimensionalityReductionWrapperDF[T_Transformer],
    Generic[T_Transformer],
    metaclass=ABCMeta,
):
    """
    Base class of DF wrappers for dimensionality-reducing transformers supporting the
    ``components_`` attribute.

    The native transformer must provide a ``components_`` attribute once fitted,
    as an array of shape (n_components, n_features).
    """

    # name of the native fitted attribute holding the components matrix
    _ATTR_COMPONENTS = "components_"

    # noinspection PyPep8Naming
    def _post_fit(
        self, X: pd.DataFrame, y: Optional[pd.Series] = None, **fit_params
    ) -> None:
        # ``components_`` only exists after fitting, so validate here rather
        # than at construction time
        # noinspection PyProtectedMember
        super()._post_fit(X, y, **fit_params)
        self._validate_delegate_attribute(attribute_name=self._ATTR_COMPONENTS)

    @property
    def _n_components_(self) -> int:
        # number of rows of the components matrix = number of output dimensions
        return len(getattr(self.native_estimator, self._ATTR_COMPONENTS))
class FeatureSelectionWrapperDF(
    ColumnSubsetTransformerWrapperDF[T_Transformer],
    Generic[T_Transformer],
    metaclass=ABCMeta,
):
    """
    DF wrapper for feature selection transformers.

    The native transformer must implement a ``get_support`` method, providing the
    indices of the selected input columns
    """

    # name of the native method returning the boolean support mask
    _ATTR_GET_SUPPORT = "get_support"

    def _validate_delegate_estimator(self) -> None:
        self._validate_delegate_attribute(attribute_name=self._ATTR_GET_SUPPORT)

    def _get_features_out(self) -> pd.Index:
        # subset the input columns using the support mask of the fitted selector
        get_support = getattr(self.native_estimator, self._ATTR_GET_SUPPORT)
        return self.feature_names_in_[get_support()]
class ColumnTransformerWrapperDF(
    TransformerWrapperDF[ColumnTransformer], metaclass=ABCMeta
):
    """
    DF wrapper for :class:`sklearn.compose.ColumnTransformer`.

    Requires all transformers passed as the ``transformers`` parameter to implement
    :class:`.TransformerDF`.
    """

    # the two special (non-estimator) transformer values sklearn accepts
    __DROP = "drop"
    __PASSTHROUGH = "passthrough"

    __SPECIAL_TRANSFORMERS = (__DROP, __PASSTHROUGH)

    def _validate_delegate_estimator(self) -> None:
        # only configurations that can be mapped back to data frame columns
        # are supported: remainder must be "drop" or "passthrough", and every
        # transformer must be a TransformerDF or one of the special values
        column_transformer: ColumnTransformer = self.native_estimator

        if (
            column_transformer.remainder
            not in ColumnTransformerWrapperDF.__SPECIAL_TRANSFORMERS
        ):
            raise ValueError(
                f"unsupported value for arg remainder: ({column_transformer.remainder})"
            )

        non_compliant_transformers: List[str] = [
            type(transformer).__name__
            for _, transformer, _ in column_transformer.transformers
            if not (
                isinstance(transformer, TransformerDF)
                or transformer in ColumnTransformerWrapperDF.__SPECIAL_TRANSFORMERS
            )
        ]
        if non_compliant_transformers:
            from .. import ColumnTransformerDF

            raise ValueError(
                f"{ColumnTransformerDF.__name__} only accepts instances of "
                f"{TransformerDF.__name__} or special values "
                f'"{" and ".join(ColumnTransformerWrapperDF.__SPECIAL_TRANSFORMERS)}" '
                "as valid transformers, but "
                f'also got: {", ".join(non_compliant_transformers)}'
            )

    def _get_features_original(self) -> pd.Series:
        """
        Return the series mapping output column names to original columns names.

        :return: the series with index the column names of the output dataframe and
            values the corresponding input column names.
        """

        def _features_original(df_transformer: TransformerDF, columns: List[Any]):
            if df_transformer == ColumnTransformerWrapperDF.__PASSTHROUGH:
                # we may get positional indices for columns selected by the
                # 'passthrough' transformer, and in that case so need to look up the
                # associated column names
                if all(isinstance(column, int) for column in columns):
                    column_names = self._get_features_in()[columns]
                else:
                    column_names = columns
                return pd.Series(index=column_names, data=column_names)
            else:
                return df_transformer.feature_names_original_

        # concatenate the per-transformer mappings, skipping empty column
        # selections and dropped transformers
        return pd.concat(
            [
                _features_original(df_transformer, columns)
                for _, df_transformer, columns in self.native_estimator.transformers_
                if (
                    len(columns) > 0
                    and df_transformer != ColumnTransformerWrapperDF.__DROP
                )
            ]
        )
class ImputerWrapperDF(TransformerWrapperDF[T_Imputer], metaclass=ABCMeta):
    """
    DF wrapper for imputation transformers, e.g., :class:`sklearn.impute.SimpleImputer`.
    """

    def _get_features_original(self) -> pd.Series:
        # get the columns that were dropped during imputation
        delegate_estimator = self.native_estimator

        nan_mask = []

        def _nan_mask_from_statistics(stats: np.ndarray):
            # annotation fixed from ``np.array`` (a function, not a type)
            if issubclass(stats.dtype.type, float):
                na_mask = np.isnan(stats)
            else:
                na_mask = [
                    x is None or (isinstance(x, float) and np.isnan(x)) for x in stats
                ]
            return na_mask

        # implementation for i.e. SimpleImputer
        if hasattr(delegate_estimator, "statistics_"):
            nan_mask = _nan_mask_from_statistics(stats=delegate_estimator.statistics_)

        # implementation for IterativeImputer
        elif hasattr(delegate_estimator, "initial_imputer_"):
            initial_imputer: SimpleImputer = delegate_estimator.initial_imputer_
            nan_mask = _nan_mask_from_statistics(stats=initial_imputer.statistics_)

        # implementation for i.e. KNNImputer
        elif hasattr(delegate_estimator, "_mask_fit_X"):
            # noinspection PyProtectedMember
            nan_mask = np.all(delegate_estimator._mask_fit_X, axis=0)

        # the imputed columns are all ingoing columns, except the ones that were dropped
        imputed_columns = self.feature_names_in_.delete(np.argwhere(nan_mask).tolist())
        features_original = pd.Series(
            index=imputed_columns, data=imputed_columns.values
        )

        # if the add_indicator flag is set, we will get additional "missing" columns
        if delegate_estimator.add_indicator:
            from .. import MissingIndicatorDF

            missing_indicator = MissingIndicatorDF.from_fitted(
                estimator=delegate_estimator.indicator_,
                features_in=self.feature_names_in_,
                n_outputs=self.n_outputs_,
            )
            # use pd.concat instead of Series.append, which was deprecated in
            # pandas 1.4 and removed in pandas 2.0
            return pd.concat(
                [features_original, missing_indicator.feature_names_original_]
            )
        else:
            return features_original
class MissingIndicatorWrapperDF(
    TransformerWrapperDF[MissingIndicator], metaclass=ABCMeta
):
    """
    DF wrapper for :class:`sklearn.impute.MissingIndicator`.
    """

    def _get_features_original(self) -> pd.Series:
        # names of the input columns for which the indicator emits a flag
        original_names: np.ndarray = self.feature_names_in_[
            self.native_estimator.features_
        ].values
        # each output column is named after its source column, with a
        # "__missing" suffix
        indicator_names = [f"{name}__missing" for name in original_names]
        return pd.Series(data=original_names, index=pd.Index(indicator_names))
class IsomapWrapperDF(BaseDimensionalityReductionWrapperDF[Isomap], metaclass=ABCMeta):
    """
    DF wrapper for :class:`sklearn.manifold.Isomap`.
    """

    @property
    def _n_components_(self) -> int:
        # the fitted embedding has shape (n_samples, n_components)
        return self.native_estimator.embedding_.shape[1]
class AdditiveChi2SamplerWrapperDF(
    BaseDimensionalityReductionWrapperDF[AdditiveChi2Sampler], metaclass=ABCMeta
):
    """
    DF wrapper for :class:`sklearn.kernel_approximation.AdditiveChi2Sampler`.
    """

    @property
    def _n_components_(self) -> int:
        # the sampler emits (2 * sample_steps + 1) output columns per input
        # feature
        return len(self._features_in) * (2 * self.native_estimator.sample_steps + 1)
class PolynomialFeaturesWrapperDF(
    BaseMultipleInputsPerOutputTransformerWrapperDF[PolynomialFeatures],
    metaclass=ABCMeta,
):
    """
    DF wrapper for :class:`sklearn.preprocessing.PolynomialFeatures`.
    """

    def _get_features_out(self) -> pd.Index:
        # delegate naming of the generated monomial features to the native
        # transformer; input names are stringified first since get_feature_names
        # expects string input names
        return pd.Index(
            data=self.native_estimator.get_feature_names(
                input_features=self.feature_names_in_.astype(str)
            )
        )
class OneHotEncoderWrapperDF(TransformerWrapperDF[OneHotEncoder], metaclass=ABCMeta):
    """
    DF wrapper for :class:`sklearn.preprocessing.OneHotEncoder`.
    """

    def _validate_delegate_estimator(self) -> None:
        # sparse output cannot be represented as a data frame
        if self.native_estimator.sparse:
            raise NotImplementedError("sparse matrices not supported; use sparse=False")

    def _get_features_original(self) -> pd.Series:
        # Return the series mapping output column names to original column names.
        #
        # The number of output columns per input column depends on the ``drop``
        # setting: the first category column is removed if drop == 'first', and
        # only for binary features if drop == 'if_binary'.
        feature_names_out = pd.Index(
            self.native_estimator.get_feature_names(self.feature_names_in_)
        )

        feature_names_in: List[str] = []
        for column_original, category in zip(
            self.feature_names_in_, self.native_estimator.categories_
        ):
            n_categories = len(category)
            if self.drop == "first":
                n_columns = n_categories - 1
            elif self.drop == "if_binary" and n_categories == 2:
                n_columns = 1
            else:
                n_columns = n_categories
            feature_names_in.extend([column_original] * n_columns)

        return pd.Series(index=feature_names_out, data=feature_names_in)
class KBinsDiscretizerWrapperDF(
    TransformerWrapperDF[KBinsDiscretizer], metaclass=ABCMeta
):
    """
    DF wrapper for :class:`sklearn.preprocessing.KBinsDiscretizer`.
    """

    def _validate_delegate_estimator(self) -> None:
        # "onehot" produces sparse matrices which cannot be represented as a
        # data frame; "onehot-dense" is the supported equivalent
        if self.native_estimator.encode == "onehot":
            raise NotImplementedError(
                'property encode="onehot" is not supported due to sparse matrices;'
                'consider using "onehot-dense" instead'
            )

    def _get_features_original(self) -> pd.Series:
        """
        Return the series mapping output column names to original columns names.

        :return: the series with index the column names of the output dataframe and
            values the corresponding input column names.
        """
        if self.native_estimator.encode == "onehot-dense":
            # one output column per bin per input column, named
            # "<feature>_bin_<i>"
            n_bins_per_feature = self.native_estimator.n_bins_
            features_in, features_out = zip(
                *(
                    (feature_name, f"{feature_name}_bin_{bin_index}")
                    for feature_name, n_bins in zip(
                        self.feature_names_in_, n_bins_per_feature
                    )
                    for bin_index in range(n_bins)
                )
            )

            return pd.Series(index=features_out, data=features_in)

        elif self.native_estimator.encode == "ordinal":
            # one output column per input column, named "<feature>_bin"
            return pd.Series(
                index=self.feature_names_in_.astype(str) + "_bin",
                data=self.feature_names_in_,
            )
        else:
            raise ValueError(
                f"unexpected value for property encode={self.native_estimator.encode}"
            )
#
# validate __all__
#
__tracker.validate()
| 33.783582 | 88 | 0.678264 | 15,985 | 0.882759 | 0 | 0 | 931 | 0.051414 | 0 | 0 | 6,013 | 0.332063 |
d7815d2c4c1816fa3330d39ab973353055d555f9 | 4,073 | py | Python | tests/test_graph.py | nokia/PyBGL | e9868361e5a3870b5247872a8c8c91a1c065fe84 | [
"BSD-3-Clause"
] | 11 | 2019-05-20T16:47:03.000Z | 2021-12-17T10:24:22.000Z | tests/test_graph.py | nokia/PyBGL | e9868361e5a3870b5247872a8c8c91a1c065fe84 | [
"BSD-3-Clause"
] | null | null | null | tests/test_graph.py | nokia/PyBGL | e9868361e5a3870b5247872a8c8c91a1c065fe84 | [
"BSD-3-Clause"
] | 3 | 2019-05-24T02:24:30.000Z | 2020-03-17T09:55:40.000Z | #!/usr/bin/env pytest-3
# -*- coding: utf-8 -*-
__author__ = "Marc-Olivier Buob"
__maintainer__ = "Marc-Olivier Buob"
__email__ = "marc-olivier.buob@nokia-bell-labs.com"
__copyright__ = "Copyright (C) 2020, Nokia"
__license__ = "BSD-3"
from pybgl.graph import *
from pybgl.graphviz import graph_to_html
def test_graph_vertex():
    # Vertex creation and enumeration behave the same for both graph types.
    for graph_class in [DirectedGraph, UndirectedGraph]:
        g = graph_class(2)
        # a freshly built graph with 2 vertices has no edges
        assert num_vertices(g) == 2
        assert set(vertices(g)) == {0, 1}
        assert num_edges(g) == 0
        assert set(edges(g)) == set()
        # adding a vertex grows the vertex set only
        add_vertex(g)
        assert num_vertices(g) == 3
        assert set(vertices(g)) == {0, 1, 2}
        assert num_edges(g) == 0
def test_graph_edge():
    """Tests edge addition/removal for directed and undirected graphs."""
    for G in [DirectedGraph, UndirectedGraph]:
        g = G(3)
        (u, v, w) = (q for q in vertices(g))
        assert set(edges(g)) == set()
        assert num_edges(g) == 0
        assert out_degree(u, g) == 0
        assert out_degree(v, g) == 0
        assert out_degree(w, g) == 0
        (e1, added) = add_edge(u, v, g)
        assert added
        assert source(e1, g) == u
        assert target(e1, g) == v
        assert set(edges(g)) == {e1}
        assert num_edges(g) == 1
        assert set(out_edges(u, g)) == {e1}
        # BUG FIX: `assert x == a if cond else b` parses as
        # `assert (x == a) if cond else b`, so in the undirected case the
        # original asserted a non-empty literal (always true) and checked
        # nothing. The conditional expressions are now parenthesized.
        assert set(out_edges(v, g)) == (set() if is_directed(g) else {e1})
        assert set(out_edges(w, g)) == set()
        assert out_degree(u, g) == 1
        assert out_degree(v, g) == (0 if is_directed(g) else 1)
        assert out_degree(w, g) == 0
        (e2, added) = add_edge(u, v, g)
        assert added
        assert source(e2, g) == u
        assert target(e2, g) == v
        assert set(edges(g)) == {e1, e2}
        assert num_edges(g) == 2
        assert set(out_edges(u, g)) == {e1, e2}
        assert set(out_edges(v, g)) == (set() if is_directed(g) else {e1, e2})
        assert set(out_edges(w, g)) == set()
        assert out_degree(u, g) == 2
        assert out_degree(v, g) == (0 if is_directed(g) else 2)
        assert out_degree(w, g) == 0
        (e3, added) = add_edge(u, w, g)
        assert added
        assert source(e3, g) == u
        assert target(e3, g) == w
        assert set(edges(g)) == {e1, e2, e3}
        assert num_edges(g) == 3
        assert set(out_edges(u, g)) == {e1, e2, e3}
        assert set(out_edges(v, g)) == (set() if is_directed(g) else {e1, e2})
        assert set(out_edges(w, g)) == (set() if is_directed(g) else {e3})
        assert out_degree(u, g) == 3
        assert out_degree(v, g) == (0 if is_directed(g) else 2)
        assert out_degree(w, g) == (0 if is_directed(g) else 1)
        assert num_vertices(g) == 3
        remove_edge(e2, g)
        assert num_edges(g) == 2
        assert set(edges(g)) == {e1, e3}
        assert out_degree(u, g) == 2
def test_graph_remove_vertex():
    # Removing a vertex must also remove every edge incident to it.
    for graph_class in [DirectedGraph, UndirectedGraph]:
        g = graph_class(3)
        e1 = add_edge(0, 1, g)[0]
        e2 = add_edge(0, 1, g)[0]
        e3 = add_edge(0, 2, g)[0]
        e4 = add_edge(0, 2, g)[0]
        e5 = add_edge(1, 2, g)[0]
        e6 = add_edge(2, 2, g)[0]  # self-loop
        assert num_vertices(g) == 3
        assert set(vertices(g)) == {0, 1, 2}
        assert num_edges(g) == 6
        assert set(edges(g)) == {e1, e2, e3, e4, e5, e6}
        # dropping vertex 1 removes the edges touching it (e1, e2, e5)
        remove_vertex(1, g)
        assert num_vertices(g) == 2
        assert set(vertices(g)) == {0, 2}
        assert num_edges(g) == 3
        assert set(edges(g)) == {e3, e4, e6}
        # dropping vertex 2 removes the remaining edges, self-loop included
        remove_vertex(2, g)
        assert num_vertices(g) == 1
        assert set(vertices(g)) == {0}
        assert num_edges(g) == 0
        assert set(edges(g)) == set()
def test_graph_is_directed():
    # is_directed must be True exactly for DirectedGraph instances
    assert is_directed(DirectedGraph()) is True
    assert is_directed(UndirectedGraph()) is False
def test_graph_graphviz():
    """Smoke-tests HTML rendering of both graph types via graphviz."""
    for G in [DirectedGraph, UndirectedGraph]:
        g = G(3)
        (e1, _) = add_edge(0, 1, g)
        (e2, _) = add_edge(0, 1, g)
        (e3, _) = add_edge(0, 2, g)
        (e4, _) = add_edge(0, 2, g)
        (e5, _) = add_edge(1, 2, g)
        # BUG FIX: the rendered output was bound to an unused local and never
        # checked; assert that rendering at least produces a string.
        html = graph_to_html(g)
        assert isinstance(html, str)
| 33.661157 | 76 | 0.532286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 157 | 0.038547 |
d782321c51f6868ecf0aeda657d36d93a32a2794 | 18,087 | py | Python | tests/engine/test_error_handling.py | vanguard/sql_translate | 28ae149e54a300c3337b538691be80d878a7dbf2 | [
"Apache-2.0"
] | 3 | 2021-03-19T21:39:29.000Z | 2021-03-26T14:00:24.000Z | tests/engine/test_error_handling.py | vanguard/sql_translate | 28ae149e54a300c3337b538691be80d878a7dbf2 | [
"Apache-2.0"
] | 1 | 2021-07-07T11:45:04.000Z | 2021-07-07T11:45:04.000Z | tests/engine/test_error_handling.py | vanguard/sql_translate | 28ae149e54a300c3337b538691be80d878a7dbf2 | [
"Apache-2.0"
] | null | null | null | import unittest
import pytest
import sqlparse
from sql_translate.engine import error_handling
from typing import Dict, List
import re
E = error_handling._ErrorHandler() # Just for coverage
@pytest.mark.parametrize(['statement', 'error_message', 'expected'], [
    ("select cast(a as integer)",
     "line 1:8: Cannot cast timestamp to integer (1)",
     "select to_unixtime(a)"),
    ("select cast(a as integer) as a",
     "line 1:8: Cannot cast timestamp to integer (1)",
     "select to_unixtime(a) AS a")
])
def test_cast_timestamp_to_epoch(statement: str, error_message: str, expected: str) -> None:
    handler = error_handling.ErrorHandlerHiveToPresto()
    # Find the regex under which _cast_timestamp_to_epoch is registered.
    pattern = next(
        regex for regex, fix in handler.known_issues.items()
        if fix == handler._cast_timestamp_to_epoch
    )
    match = re.search(pattern, error_message)
    assert handler._cast_timestamp_to_epoch(statement, match) == expected
@pytest.mark.parametrize(['statement', 'error_message', 'expected'], [
    ("select 1 in (select '1')",
     "line 1:10: value and result of subquery must be of the same type for IN expression: integer vs varchar (1)",
     "select cast(1 AS varchar) in (select '1')")
])
def test_cast_in_subquery(statement: str, error_message: str, expected: str) -> None:
    handler = error_handling.ErrorHandlerHiveToPresto()
    # Find the regex under which _cast_in_subquery is registered.
    pattern = next(
        regex for regex, fix in handler.known_issues.items()
        if fix == handler._cast_in_subquery
    )
    match = re.search(pattern, error_message)
    assert handler._cast_in_subquery(statement, match) == expected
@pytest.mark.parametrize(['statement', 'error_message', 'expected'], [
    ("select coalesce('1', 1)",
     "line 1:22: All COALESCE operands must be the same type: varchar (1)",
     "select coalesce('1', cast(1 AS varchar))")
])
def test_coalesce_statements(statement: str, error_message: str, expected: str) -> None:
    handler = error_handling.ErrorHandlerHiveToPresto()
    # Find the regex under which _coalesce_statements is registered.
    pattern = next(
        regex for regex, fix in handler.known_issues.items()
        if fix == handler._coalesce_statements
    )
    match = re.search(pattern, error_message)
    assert handler._coalesce_statements(statement, match) == expected
@pytest.mark.parametrize(['statement', 'error_message', 'expected'], [
    ("select case when true then 'a' else 1 end",
     "line 1:37: All CASE results must be the same type: varchar (1)",
     "select case when true then 'a' else cast(1 AS varchar) end")
])
def test_case_statements(statement: str, error_message: str, expected: str) -> None:
    handler = error_handling.ErrorHandlerHiveToPresto()
    # Find the regex under which _case_statements is registered.
    pattern = next(
        regex for regex, fix in handler.known_issues.items()
        if fix == handler._case_statements
    )
    match = re.search(pattern, error_message)
    assert handler._case_statements(statement, match) == expected
@pytest.mark.parametrize(['statement', 'error_message', 'expected'], [
    ("select a \nfrom cte\nwhere a in ( \n 1, 2, 3)",
     "line 4:2: IN value and list items must be the same type: bigint (1)",
     "select a \nfrom cte\nwhere cast(a AS bigint) in ( \n 1, 2, 3)"),
    ("select a \nfrom cte\nwhere a in (1.1, 2.3, 3.1)",
     "line 3:13: IN value and list items must be the same type: float (1)",
     "select a \nfrom cte\nwhere cast(a AS double) in (1.1, 2.3, 3.1)"),
    ("select a \nfrom cte\nwhere a in ('1', '2', '3')",
     "line 3:13: IN value and list items must be the same type: varchar (1)",
     "select a \nfrom cte\nwhere cast(a AS varchar) in ('1', '2', '3')"),
    ("select a \nfrom cte\nwhere a in ('1')",
     "line 3:13: IN value and list items must be the same type: varchar (1)",
     "select a \nfrom cte\nwhere cast(a AS varchar) in ('1')")
])
def test_cast_in(statement: str, error_message: str, expected: str) -> None:
    """_cast_in should cast the IN value to match the list-item type."""
    handler = error_handling.ErrorHandlerHiveToPresto()
    # Look up the error-message regex registered for this fixer method.
    patterns = [k for k, v in handler.known_issues.items() if v == handler._cast_in]
    match = re.search(patterns[0], error_message)
    assert handler._cast_in(statement, match) == expected
@pytest.mark.parametrize(['statement', 'error_message'], [
    ("select a \nfrom cte\nwhere a in ( \n 1, '2', 3)",
     "line 4:2: IN value and list items must be the same type: bigint (1)")
])
def test_cast_in_ValueError(statement: str, error_message: str) -> None:
    """A mixed-type IN list cannot be fixed and must raise ValueError."""
    handler = error_handling.ErrorHandlerHiveToPresto()
    # Look up the error-message regex registered for this fixer method.
    patterns = [k for k, v in handler.known_issues.items() if v == handler._cast_in]
    with pytest.raises(ValueError):
        handler._cast_in(statement, re.search(patterns[0], error_message))
@pytest.mark.parametrize(['statement', 'error_message', 'expected'], [
    ("select cast(a as integer) from cte",
     "line 1:8: Cannot cast char(10) to integer (1)",
     "select cast(trim(cast(a AS varchar)) AS integer) from cte"),
    ("select a from cte",
     "line 1:8: Cannot cast bigint to integer (1)",
     "select a from cte")
])
def test_cannot_cast_to_type(statement: str, error_message: str, expected: str) -> None:
    """_cannot_cast_to_type should route char casts through trimmed varchar."""
    handler = error_handling.ErrorHandlerHiveToPresto()
    # Look up the error-message regex registered for this fixer method.
    patterns = [k for k, v in handler.known_issues.items() if v == handler._cannot_cast_to_type]
    match = re.search(patterns[0], error_message)
    assert handler._cannot_cast_to_type(statement, match) == expected
@pytest.mark.parametrize(['statement', 'error_message', 'expected'], [
    ("select a from cte where b between c and d",
     "line 1:27: Cannot check if varchar is BETWEEN varchar and date (1)",
     "select a from cte where b between c and cast(d AS varchar)"),
    ("select a from cte where b between c and d",
     "line 1:27: Cannot check if double is BETWEEN double and date (1)",
     "select a from cte where cast(b AS varchar) between cast(c AS varchar) and cast(d AS varchar)")
])
def test_between(statement: str, error_message: str, expected: str) -> None:
    """_between should cast the mismatched BETWEEN operands to varchar."""
    handler = error_handling.ErrorHandlerHiveToPresto()
    # Look up the error-message regex registered for this fixer method.
    patterns = [k for k, v in handler.known_issues.items() if v == handler._between]
    match = re.search(patterns[0], error_message)
    assert handler._between(statement, match) == expected
@pytest.mark.parametrize(['statement', 'error_message', 'expected'], [
    ("select a from db.vcte",  # Table name starts with v --> try with "t" as this could be a view
     "Table 'db.vcte' not found (1)",
     "select a from db.tcte"),
    ("select a from db.cte",
     "Table 'db.cte' not found (1)",
     "select a from db.cte_presto")  # Table name does not start with v
])
def test_table_not_found(statement: str, error_message: str, expected: str) -> None:
    """_table_not_found should rewrite a missing table to a likely alias."""
    handler = error_handling.ErrorHandlerHiveToPresto()
    # Look up the error-message regex registered for this fixer method.
    patterns = [k for k, v in handler.known_issues.items() if v == handler._table_not_found]
    match = re.search(patterns[0], error_message)
    assert handler._table_not_found(statement, match) == expected
@pytest.mark.parametrize(['statement', 'error_message', 'expected'], [
    ("select concat(1, '1') from b inner join c\n ON a.my_col=b.another_col\nwhere d=e",
     "line 1:8: Unexpected parameters (bigint, varchar) for function concat (1)",
     "select concat(cast(1 AS varchar), cast('1' AS varchar)) from b inner join c\n ON a.my_col=b.another_col\nwhere d=e"),
    ("select concat(max(1), '1') from b inner join c\n ON a.my_col=b.another_col\nwhere d=e",
     "line 1:8: Unexpected parameters (bigint, varchar) for function concat (1)",
     "select concat(cast(max(1) AS varchar), cast('1' AS varchar)) from b inner join c\n ON a.my_col=b.another_col\nwhere d=e")
])
def test_unexpected_parameters(statement: str, error_message: str, expected: str) -> None:
    """_unexpected_parameters should cast each concat argument to varchar."""
    handler = error_handling.ErrorHandlerHiveToPresto()
    # Look up the error-message regex registered for this fixer method.
    patterns = [k for k, v in handler.known_issues.items() if v == handler._unexpected_parameters]
    match = re.search(patterns[0], error_message)
    assert handler._unexpected_parameters(statement, match) == expected
@pytest.mark.parametrize(['statement', 'error_message'], [
    ("select something(1, '1') from b inner join c\n ON a.my_col=b.another_col\nwhere d=e",
     "line 1:8: Unexpected parameters (bigint, varchar) for function something (1)"),
    ("select concat(a - b, a or b) from b inner join c\n ON a.my_col=b.another_col\nwhere d=e",
     "line 1:8: Unexpected parameters (bigint, varchar) for function concat (1)")
])
def test_unexpected_parameters_NotImplementedError(statement: str, error_message: str) -> None:
    """Unsupported functions or argument shapes must raise NotImplementedError."""
    handler = error_handling.ErrorHandlerHiveToPresto()
    # Look up the error-message regex registered for this fixer method.
    patterns = [k for k, v in handler.known_issues.items() if v == handler._unexpected_parameters]
    with pytest.raises(NotImplementedError):
        handler._unexpected_parameters(statement, re.search(patterns[0], error_message))
@pytest.mark.parametrize(['statement', 'error_message', 'expected'], [
    ("select 'a' =1",
     "line 1:12: '=' cannot be applied to varchar, bigint (1)",
     "select 'a' =cast(1 AS varchar)"),
    ("select 'a' >1",
     "line 1:12: '>' cannot be applied to varchar, bigint (1)",
     "select 'a' >cast(1 AS varchar)"),
    ("select 'a' <1",
     "line 1:12: '<' cannot be applied to varchar, bigint (1)",
     "select 'a' <cast(1 AS varchar)"),
    ("select 'a' >=1",
     "line 1:12: '>=' cannot be applied to varchar, bigint (1)",
     "select 'a' >=cast(1 AS varchar)"),
    ("select 'a' <=1",
     "line 1:12: '<=' cannot be applied to varchar, bigint (1)",
     "select 'a' <=cast(1 AS varchar)"),
    ("select 'a' !=1",
     "line 1:12: '!=' cannot be applied to varchar, bigint (1)",
     "select 'a' !=cast(1 AS varchar)"),
    ("select a from b inner join c\n ON a.my_col=b.another_col\nwhere d=e",
     "line 2:18: '=' cannot be applied to bigint, varchar (1)",
     "select a from b inner join c\n ON cast(a.my_col AS varchar)=b.another_col\nwhere d=e"),
    ("select a from b inner join c\n ON a.my_col=b.another_col\nwhere d=e",
     "line 2:18: '=' cannot be applied to date, timestamp (1)",
     "select a from b inner join c\n ON cast(a.my_col AS varchar)=cast(b.another_col AS varchar)\nwhere d=e"),
    ("select a\nfrom b\nwhere cast(event_date AS varchar)>='2021-01-21' AND event_date<='2021-01-23'",
     "line 3:63: '<=' cannot be applied to date, varchar(10) (1)",
     "select a\nfrom b\nwhere cast(event_date AS varchar)>='2021-01-21' AND cast(event_date AS varchar)<='2021-01-23'")
])
def test_cast_both_sides(statement: str, error_message: str, expected: str) -> None:
    """_cast_both_sides should cast the comparison operands as needed."""
    handler = error_handling.ErrorHandlerHiveToPresto()
    # Look up the error-message regex registered for this fixer method.
    patterns = [k for k, v in handler.known_issues.items() if v == handler._cast_both_sides]
    match = re.search(patterns[0], error_message)
    assert handler._cast_both_sides(statement, match) == expected
@pytest.mark.parametrize(['statement', 'table_properties', 'expected'], [
    ("with cte AS (select b from cte2) select b from cte", {"columns": {"b": "bigint"}}, "with cte AS (select b from cte2) SELECT\nb\nfrom cte"),  # No wildcard
    ("with cte AS (select b from cte2) select count(*) as b from cte", {"columns": {"b": "bigint"}}, "with cte AS (select b from cte2) SELECT\ncount(*) AS b\nfrom cte"),  # No wildcard
    ("with cte AS (select b from cte2) select * from cte",
     {"columns": {"c": "bigint", "d": "bigint", "e": "bigint", "a": "bigint"}},
     "with cte AS (select b from cte2) SELECT\nc,\nd,\ne,\na\nfrom cte"),  # select *
    ("with cte AS (select b from cte2) select a.*, c as d from cte",  # Wildcard then regular column
     {"columns": {"b": "bigint", "d": "varchar"}},
     "with cte AS (select b from cte2) SELECT\nb,\nc AS d\nfrom cte"),
    ("with cte AS (select b from cte2) select c as d, * from cte",  # regular column then wildcard
     {"columns": {"b": "bigint", "d": "varchar"}},
     "with cte AS (select b from cte2) SELECT\nc AS d,\nb\nfrom cte"),
    ("with cte AS (select b, c from cte2) select foo(a) as d, *, cte.a, `hey yo` from cte",  # Wildcard in the middle bringing 2+ columns in
     {"columns": {"a": "bigint", "c": "varchar", "some thing": "varchar", "d": "varchar", "hey yo": "varchar"}},
     "with cte AS (select b, c from cte2) SELECT\nfoo(a) AS d,\nc,\n`some thing`,\ncte.a,\n`hey yo`\nfrom cte")  # Final column order is not sorted by * replacement is
])
def test_expand_wildcards(statement: str, table_properties: Dict[str, str], expected: str) -> None:
    """_expand_wildcards should replace * with the target table's columns."""
    handler = error_handling.ErrorHandlerHiveToPresto()
    assert handler._expand_wildcards(statement, table_properties) == expected
@pytest.mark.parametrize(['statement', 'table_properties'], [
    ("with cte AS (select b from cte2) select *, cte.* from cte", {"columns": {"b": "bigint"}})  # Double wildcard
])
def test_expand_wildcards_ValueError(statement: str, table_properties: Dict[str, str]) -> None:
    """Two wildcards in one SELECT list cannot be expanded -> ValueError."""
    handler = error_handling.ErrorHandlerHiveToPresto()
    with pytest.raises(ValueError):
        handler._expand_wildcards(statement, table_properties)
@pytest.mark.parametrize(['statement', 'error_message', 'expected'], [
    ("""with abc AS (
    SELECT b
    FROM c
    GROUP BY b)
    SELECT
    d, e, f, g, CURRENT_DATE AS hhh
    FROM abc a
    LEFT JOIN def b
    ON a.b = b.b""",
     "Mismatch at column 2: 'e' is of type bigint but expression is of type double (1)",
     """with abc AS (
    SELECT b
    FROM c
    GROUP BY b)
    SELECT
    d,
    cast(e AS bigint) AS e,
    f,
    g,
    CURRENT_DATE AS hhh
    FROM abc a
    LEFT JOIN def b
    ON a.b = b.b"""
     ),
    ("select name.my_col a from b inner join c\n ON name.my_col=b.another_col\nwhere d=e",
     "Mismatch at column 1: 'my_col' is of type char(1) but expression is of type smallint (1)",
     "SELECT\ncast(cast(name.my_col AS varchar) AS char(1)) AS a\nfrom b inner join c\n ON name.my_col=b.another_col\nwhere d=e"),
    ("select name.my_col a from b inner join c\n ON name.my_col=b.another_col\nwhere d=e",
     "Mismatch at column 1: 'my_col' is of type varchar but expression is of type char(1) (1)",
     "SELECT\ncast(name.my_col AS varchar) AS a\nfrom b inner join c\n ON name.my_col=b.another_col\nwhere d=e"),
    ("select name.my_col from b inner join c\n ON name.my_col=b.another_col\nwhere d=e",
     "Mismatch at column 1: 'my_col' is of type varchar but expression is of type char(1) (1)",
     "SELECT\ncast(name.my_col AS varchar) AS my_col\nfrom b inner join c\n ON name.my_col=b.another_col\nwhere d=e"),
    ("with abc as (select name.my_col from b inner join c\n ON name.my_col=b.another_col\nwhere d=e) select j.my_col, k.my_col from abc",
     "Mismatch at column 2: 'my_col' is of type varchar but expression is of type char(1) (1)",
     "with abc as (select name.my_col from b inner join c\n ON name.my_col=b.another_col\nwhere d=e) SELECT\nj.my_col,\ncast(k.my_col AS varchar) AS my_col\nfrom abc"),
    ("with abc as (select name.my_col from b inner join c\n ON name.my_col=b.another_col\nwhere d=e) select my_col, case when a=1 then 'Y' else 'N' end from abc",
     "Mismatch at column 2: 'unknown_col' is of type varchar but expression is of type char(1) (1)",
     "with abc as (select name.my_col from b inner join c\n ON name.my_col=b.another_col\nwhere d=e) SELECT\nmy_col,\ncast(case when a=1 then 'Y' else 'N' end AS varchar) AS unknown_col\nfrom abc"),
    ("with abc as (select name.my_col from b inner join c\n ON name.my_col=b.another_col\nwhere d=e) select my_col, concat('a', max('b')) as c, k.my_col from abc",
     "Mismatch at column 2: 'c' is of type varchar but expression is of type bigint (1)",
     "with abc as (select name.my_col from b inner join c\n ON name.my_col=b.another_col\nwhere d=e) SELECT\nmy_col,\ncast(concat('a', max('b')) AS varchar) AS c,\nk.my_col\nfrom abc"),
    ("select name.my_col, cte.my_col from name inner join cte\n ON name.my_col=cte.my_col\nwhere d=e",
     "Mismatch at column 1: 'my_col' is of type char(1) but expression is of type smallint (1)",
     "SELECT\ncast(cast(name.my_col AS varchar) AS char(1)) AS my_col,\ncte.my_col\nfrom name inner join cte\n ON name.my_col=cte.my_col\nwhere d=e")
])
def test_column_type_mismatch(statement: str, error_message: str, expected: str) -> None:
    """_column_type_mismatch should cast the offending SELECT column to the declared type."""
    handler = error_handling.ErrorHandlerHiveToPresto()
    # Look up the error-message regex registered for this fixer method.
    patterns = [k for k, v in handler.known_issues.items() if v == handler._column_type_mismatch]
    match = re.search(patterns[0], error_message)
    assert handler._column_type_mismatch(statement, match, temp_tgt_table_properties={"columns": {}}) == expected
@pytest.mark.parametrize(['statement', 'error_message', 'expected'], [
    ("select cast(a AS bigint) from cte", "line 1:8: Cannot cast timestamp to bigint (1)", "select to_unixtime(a) from cte")
])
def test_handle_errors(statement: str, error_message: str, expected: str) -> None:
    """handle_errors should fix both the rendered SQL and the original template."""
    handler = error_handling.ErrorHandlerHiveToPresto()
    fixed_sql, fixed_original = handler.handle_errors(statement, statement, error_message)
    assert (fixed_sql, fixed_original) == (expected, expected)
@pytest.mark.parametrize(['statement', 'original_sql', 'error_message', 'expected'], [
    ("select a, cast(a AS bigint) from cte", "select {a}, cast(a AS bigint) from cte", "line 1:11: Cannot cast timestamp to bigint (1)", "select a, to_unixtime(a) from cte")
])
def test_handle_errors_Exception(statement: str, original_sql: str, error_message: str, expected: str) -> None:
    """When the templated original cannot be patched, an empty string is returned for it."""
    handler = error_handling.ErrorHandlerHiveToPresto()
    fixed_sql, fixed_original = handler.handle_errors(statement, original_sql, error_message)
    assert (fixed_sql, fixed_original) == (expected, "")
| 62.368966 | 203 | 0.697241 | 0 | 0 | 0 | 0 | 17,845 | 0.98662 | 0 | 0 | 9,796 | 0.541604 |
d783237b7b4b1a622bb0239356949acd7f8af40d | 472 | py | Python | day1/debugme.py | autotaker/training-domo | 91ac3f90e1a1e06f51f5c794a3a15ade0ade246c | [
"MIT"
] | null | null | null | day1/debugme.py | autotaker/training-domo | 91ac3f90e1a1e06f51f5c794a3a15ade0ade246c | [
"MIT"
] | null | null | null | day1/debugme.py | autotaker/training-domo | 91ac3f90e1a1e06f51f5c794a3a15ade0ade246c | [
"MIT"
] | null | null | null | def convert_fizzbuzz(n: int) -> str:
s = str(n)
if n % 3 == 0 and n % 5 == 0:
s = "FizzBuzz"
if n % 3 == 0:
s = "Fizz"
if n % 5 == 0:
s = "Buzz"
return s
def fizzbuzz() -> None:
    """Print one line to stdout for each integer n from 1 to 100:

    * "FizzBuzz" when n is a multiple of both 3 and 5
    * "Fizz" when n is a multiple of 3
    * "Buzz" when n is a multiple of 5
    * n itself otherwise
    """
    # Bug fix: the original iterated ``range(100)`` (0..99) although the
    # docstring promises the integers 1 through 100.
    for i in range(1, 101):
        print(convert_fizzbuzz(i))
# Run the FizzBuzz exercise only when executed as a script.
if __name__ == "__main__":
    fizzbuzz()
| 16.857143 | 36 | 0.516949 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 291 | 0.491554 |
d7854c788a36c44c1e1a449591bc078424ed689c | 4,854 | py | Python | src/Selenium2Library/locators/windowmanager.py | tanggai/robotframework_selenium2library | c702cfad4584f54a08a73f8366e769d2304bf1ee | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2015-09-11T03:24:48.000Z | 2018-08-08T11:59:54.000Z | src/Selenium2Library/locators/windowmanager.py | tanggai/robotframework_selenium2library | c702cfad4584f54a08a73f8366e769d2304bf1ee | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/Selenium2Library/locators/windowmanager.py | tanggai/robotframework_selenium2library | c702cfad4584f54a08a73f8366e769d2304bf1ee | [
"ECL-2.0",
"Apache-2.0"
] | 5 | 2016-09-29T03:53:13.000Z | 2021-11-09T02:35:37.000Z | from types import *
from robot import utils
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.common.exceptions import NoSuchWindowException
class WindowManager(object):
    """Locates and switches between browser windows.

    A window can be selected with an explicit strategy prefix
    (``title=``, ``name=`` or ``url=``) or, without a prefix, by one of
    the special values handled by :meth:`_select_by_default`
    (``main``, ``current``, ``new``/``popup``, ``last``) or by matching
    a window's handle, name or title.

    NOTE(review): ``browser.get_current_window_info()`` is assumed to
    return a tuple laid out as (handle, id, name, title, url) — that
    matches how indices 1..4 are used below; confirm against the
    browser wrapper.
    """

    def __init__(self):
        # Maps locator prefixes to their selection strategy; the ``None``
        # key serves prefix-less locators.
        self._strategies = {
            'title': self._select_by_title,
            'name': self._select_by_name,
            'url': self._select_by_url,
            None: self._select_by_default
        }

    def get_window_ids(self, browser):
        """Return the id (info index 1) of every open window."""
        return [window_info[1] for window_info in self._get_window_infos(browser)]

    def get_window_names(self, browser):
        """Return the name (info index 2) of every open window."""
        return [window_info[2] for window_info in self._get_window_infos(browser)]

    def get_window_titles(self, browser):
        """Return the title (info index 3) of every open window."""
        return [window_info[3] for window_info in self._get_window_infos(browser)]

    def select(self, browser, locator):
        """Switch ``browser`` to the window described by ``locator``.

        Raises ValueError when the locator prefix is unknown or no
        matching window can be found.
        """
        assert browser is not None
        (prefix, criteria) = self._parse_locator(locator)
        strategy = self._strategies.get(prefix)
        if strategy is None:
            raise ValueError("Window locator with prefix '" + prefix + "' is not supported")
        return strategy(browser, criteria)

    # Strategy routines, private

    def _select_by_title(self, browser, criteria):
        """Switch to the window whose title equals ``criteria`` (case-insensitive)."""
        self._select_matching(
            browser,
            lambda window_info: window_info[3].strip().lower() == criteria.lower(),
            "Unable to locate window with title '" + criteria + "'")

    def _select_by_name(self, browser, criteria):
        """Switch to the window whose name equals ``criteria`` (case-insensitive)."""
        self._select_matching(
            browser,
            lambda window_info: window_info[2].strip().lower() == criteria.lower(),
            "Unable to locate window with name '" + criteria + "'")

    def _select_by_url(self, browser, criteria):
        """Switch to the window whose URL equals ``criteria`` (case-insensitive)."""
        self._select_matching(
            browser,
            lambda window_info: window_info[4].strip().lower() == criteria.lower(),
            "Unable to locate window with URL '" + criteria + "'")

    def _select_by_default(self, browser, criteria):
        """Handle prefix-less locators: special keywords or free matching."""
        # "current" means: stay on the window we are already on.
        if criteria.lower() == "current":
            return
        handles = browser.get_window_handles()
        # Empty/"null" criteria selects the main (first) window.  The
        # ``criteria is None`` guard is kept for safety although
        # _parse_locator normalises None to '' for this strategy.
        if criteria is None or len(criteria) == 0 or criteria.lower() == "null":
            browser.switch_to_window(handles[0])
            return
        if criteria.lower() == "last" or criteria.lower() == "latest":
            browser.switch_to_window(handles[-1])
            return
        # "new"/"popup": the most recently opened window, which must
        # differ from the window we started on.
        if criteria.lower() == "new" or criteria.lower() == "newest" or criteria.lower() == "popup":
            try:
                start_handle = browser.get_current_window_handle()
            except NoSuchWindowException:
                raise AssertionError("No from window to switch to new window")
            if len(handles) < 2 or handles[-1] == start_handle:
                raise AssertionError("No new window to switch to")
            browser.switch_to_window(handles[-1])
            return
        # Fall back to matching the handle itself, then name/title
        # (info indices 2 and 3) of each window in turn.
        for handle in handles:
            browser.switch_to_window(handle)
            if criteria == handle:
                return
            for item in browser.get_current_window_info()[2:4]:
                if item.strip().lower() == criteria.lower():
                    return
        raise ValueError("Unable to locate window with handle or name or title or URL '" + criteria + "'")

    # Private

    def _parse_locator(self, locator):
        """Split ``locator`` into a ``(prefix, criteria)`` pair.

        ``'title=Foo'`` -> ``('title', 'Foo')``.  A locator without
        ``=`` keeps ``prefix`` as None.  For the default and ``name``
        strategies, a missing or ``'main'`` criteria is normalised to
        the empty string (which selects the main window).
        """
        prefix = None
        criteria = locator
        if locator is not None and len(locator) > 0:
            locator_parts = locator.partition('=')
            if len(locator_parts[1]) > 0:
                prefix = locator_parts[0].strip().lower()
                criteria = locator_parts[2].strip()
        if prefix is None or prefix == 'name':
            if criteria is None or criteria.lower() == 'main':
                criteria = ''
        return (prefix, criteria)

    def _get_window_infos(self, browser):
        """Collect the info tuple of every open window.

        Visits each window in turn and always restores the starting
        window, so the current window is unchanged on return.
        """
        window_infos = []
        starting_handle = browser.get_current_window_handle()
        try:
            for handle in browser.get_window_handles():
                browser.switch_to_window(handle)
                window_infos.append(browser.get_current_window_info())
        finally:
            browser.switch_to_window(starting_handle)
        return window_infos

    def _select_matching(self, browser, matcher, error):
        """Switch to the first window for which ``matcher`` returns true.

        Restores the starting window (when one existed) and raises
        ValueError with ``error`` when no window matches.
        """
        # Bug fix: initialise to None.  Previously, when the current
        # window was already closed (NoSuchWindowException), the name
        # ``starting_handle`` stayed unbound and the failure path below
        # raised NameError instead of the intended ValueError.
        starting_handle = None
        try:
            starting_handle = browser.get_current_window_handle()
        except NoSuchWindowException:
            pass
        for handle in browser.get_window_handles():
            browser.switch_to_window(handle)
            if matcher(browser.get_current_window_info()):
                return
        if starting_handle:
            browser.switch_to_window(starting_handle)
        raise ValueError(error)
| 40.789916 | 107 | 0.598475 | 4,683 | 0.964771 | 0 | 0 | 0 | 0 | 0 | 0 | 427 | 0.087969 |
d7857acd245bcadd0807a8048540079a29f7bb0b | 1,265 | py | Python | localshop/urls.py | rcoup/localshop | b7d0803afd9335862accfc79dee047a6b0e67ad6 | [
"BSD-3-Clause"
] | null | null | null | localshop/urls.py | rcoup/localshop | b7d0803afd9335862accfc79dee047a6b0e67ad6 | [
"BSD-3-Clause"
] | null | null | null | localshop/urls.py | rcoup/localshop | b7d0803afd9335862accfc79dee047a6b0e67ad6 | [
"BSD-3-Clause"
] | null | null | null | import re
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.views.generic.base import RedirectView
from localshop.apps.packages.xmlrpc import handle_request
admin.autodiscover()
static_prefix = re.escape(settings.STATIC_URL.lstrip('/'))
urlpatterns = patterns('',
url(r'^$', 'localshop.views.index', name='index'),
# Default path for xmlrpc calls
url(r'^RPC2$', handle_request),
url(r'^packages/',
include('localshop.apps.packages.urls', namespace='packages')),
url(r'^simple/', include('localshop.apps.packages.urls_simple',
namespace='packages-simple')),
# We add a separate route for simple without the trailing slash so that
# POST requests to /simple/ and /simple both work
url(r'^simple$', 'localshop.apps.packages.views.simple_index'),
url(r'^permissions/',
include('localshop.apps.permissions.urls', namespace='permissions')),
url(r'^accounts/signup/', RedirectView.as_view(url="/")),
url(r'^accounts/', include('userena.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^%s(?P<path>.*)$' % static_prefix,
'django.contrib.staticfiles.views.serve', {'insecure': True}),
)
| 30.119048 | 77 | 0.690909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 564 | 0.44585 |
d7870e58d2f2ce7682797454d2079370c26d1673 | 1,150 | py | Python | test/util_test.py | quiet-oceans/libais | 61ed34683c01662925f4b9ed69b10885dcb3bc79 | [
"Apache-2.0"
] | 161 | 2015-02-10T16:40:25.000Z | 2022-02-11T10:17:28.000Z | test/util_test.py | quiet-oceans/libais | 61ed34683c01662925f4b9ed69b10885dcb3bc79 | [
"Apache-2.0"
] | 179 | 2015-01-14T23:19:25.000Z | 2021-10-15T23:32:14.000Z | test/util_test.py | quiet-oceans/libais | 61ed34683c01662925f4b9ed69b10885dcb3bc79 | [
"Apache-2.0"
] | 88 | 2015-01-19T05:10:39.000Z | 2022-03-09T06:59:27.000Z | #!/usr/bin/env python
"""Tests for ais.util."""
import unittest
from ais import util
import six
class UtilTest(unittest.TestCase):
  """Tests for ais.util.MaybeToNumber."""

  def testMaybeToNumber(self):
    convert = util.MaybeToNumber
    # Values that are not numeric strings come back unchanged.
    for value in (None, [], {}, 'a', 1, -3.12):
      self.assertEqual(convert(value), value)
    # Integer-looking strings become ints.
    self.assertEqual(convert('-1'), -1)
    self.assertIsInstance(convert('-1'), int)
    # Float-looking strings become floats.
    self.assertEqual(convert('42.0'), 42.0)
    self.assertIsInstance(convert('42.0'), float)
    # Arbitrary-precision integers survive the round trip.
    value = 9999999999999999999999999
    value_str = '9999999999999999999999999'
    self.assertEqual(convert(value_str), value)
    self.assertIsInstance(convert(value_str), six.integer_types)
    # Float literals beyond the representable range collapse to +/-inf.
    self.assertEqual(convert('1e99999999999999999999999'), float('inf'))
    self.assertEqual(convert('-1e99999999999999999999999'), float('-inf'))


if __name__ == '__main__':
  unittest.main()
| 30.263158 | 75 | 0.715652 | 1,003 | 0.872174 | 0 | 0 | 0 | 0 | 0 | 0 | 175 | 0.152174 |
d787a69ca7a5c43e3169025b3f9e4ad5662b526b | 8,695 | py | Python | 16_1.py | yunjung-lee/class_python_numpy | 589817c8bbca85d70596e4097c0ece093b5353c3 | [
"MIT"
] | null | null | null | 16_1.py | yunjung-lee/class_python_numpy | 589817c8bbca85d70596e4097c0ece093b5353c3 | [
"MIT"
] | null | null | null | 16_1.py | yunjung-lee/class_python_numpy | 589817c8bbca85d70596e4097c0ece093b5353c3 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import matplotlib.pyplot as plt
num = np.array(['3.14','-2.7','30'], dtype=np.string_) #코드 이해 쉽게 : dtype=np.string_
# num=num.astype(int)
# print(num)
# ValueError: invalid literal for int() with base 10: '3.14'
num=num.astype(float).astype(int)
print(num)
# [ 3 -2 30] : 바로 int형 변형이 안되면 float으로 바꿨다가 바꿀 수 있다.
num=num.astype(float)
print(num)
# [ 3.14 -2.7 30. ]
arr=np.arange(32).reshape((8,4))
print(arr)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]
# [12 13 14 15]
# [16 17 18 19]
# [20 21 22 23]
# [24 25 26 27]
# [28 29 30 31]]
print(arr[[1,5,7,2],[0,3,1,2]]) #지정된 데이터 추출[[행번호],[열번호]]==>(행,열)순서쌍으로 요소 확인
# [ 4 23 29 10]
print(arr[[1,5,7,2]][:,[0,3,1,2]]) #[[행]][:,[열]] : 연속의 의미==>행 1,5,7,2번 index에 해당하는 행
# [[ 4 7 5 6]
# [20 23 21 22]
# [28 31 29 30]
# [ 8 11 9 10]]
print(arr[[1,5,7,2]][:,[3,1]]) #[[행]][:,[열]] : 연속의 의미==>index행에 대한 열 1,3번 index에 해당하는 열
# [[ 7 5]
# [23 21]
# [31 29]
# [11 9]]
import random
walk = []
position =0
steps=1000
for i in range(steps):
step = 1 if random.randint(0,1) else -1 #randint,randn,rannormal
position+=step
walk.append(position)
print("position : ",position)
# position : 18
print("walk : ",walk)
# walk : [-1, 0, 1, 0, -1, -2, -1, -....]
print(min(walk))
# -7
print(max(walk))
# 28
# print(abs(walk)) #abs : 절대값 변환
# --- pandas Series basics ----------------------------------------------
obj = Series([1,2,-3,4])
print(obj)
# 0 1
# 1 2
# 2 -3
# 3 4
# dtype: int64
print(obj.values) # values : extracts just the data values (attribute)
# [ 1 2 -3 4]
print(obj.index) # index : returns the index object
# RangeIndex(start=0, stop=4, step=1)
# assigning a custom index
obj = Series([1,2,-3,4],index=['x','y','z','k']) # give the index labels directly
print(obj)
# printed with the custom index
# x 1
# y 2
# z -3
# k 4
# dtype: int64
print(obj['y'])
# 2
obj['x']=10
print(obj)
# x 10
# y 2
# z -3
# k 4
# dtype: int64
# how to reference several labels at once
# print(obj['x','y'])
# # KeyError: ('x', 'y')
print(obj[['x','y','z']]) # one label -> [], two or more labels -> [[]]
# x 10
# y 2
# z -3
# dtype: int64
print('='*50)
print(obj>0) # comparison expressions are allowed
# x True
# y True
# z False
# k True
# dtype: bool
print(obj[obj>0]) # boolean masks can filter the Series
# x 10
# y 2
# k 4
# dtype: int64
print(obj*2) # elementwise arithmetic works
# x 20
# y 4
# z -6
# k 8
# dtype: int64
print(np.exp(obj)) # elementwise exponential
# x 22026.465795
# y 7.389056
# z 0.049787
# k 54.598150
# dtype: float64
# null (value never initialised) vs na (missing value)
print(obj)
print('a' in obj) # in : checks whether a given label exists
print('x' in obj) # columns: features, rows: observations
print('='*50)
# dict key/value -> Series index/value (key=>index, value=>value)
sdata = {'Ohio': 35000, 'Texas': 71000, "Oregon":16000, "Utah":5000}
obj3=Series(sdata) # a dict can also be converted to a Series
print(obj3)
# Ohio 35000
# Texas 71000
# Oregon 16000
# Utah 5000
# dtype: int64
print(type(obj3))
# <class 'pandas.core.series.Series'>
states = ['California','Ohio','Oregon','Texas']
obj99 = Series(states) # a list converts to a Series too
# print(obj99)
# # 0 California
# # 1 Ohio
# # 2 Oregon
# # 3 Texas
# # dtype: object
obj4 = Series(sdata, index=states) # build a Series from sdata, reindexed by states
print(obj4)
# California NaN
# Ohio 35000.0
# Oregon 16000.0
# Texas 71000.0
# dtype: float64
print(pd.isnull(obj4))
# California True
# Ohio False
# Oregon False
# Texas False
# dtype: bool
# general terminology: nan = not-a-number (e.g. text where a number belongs)
# na = value is missing, null = value was never initialised
# pandas uses the terms interchangeably
# the isnull function reports whether each entry is na (null/nan)
print(obj4+obj3) # only the overlapping labels produce values
obj4.name = 'population'
obj.index.name = 'state'
print(obj4)
# California NaN
# Ohio 35000.0
# Oregon 16000.0
# Texas 71000.0
# Name: population, dtype: float64
obj4.index=['w','x','y','z'] # replace the index in place
print(obj4)
# w NaN
# x 35000.0
# y 16000.0
# z 71000.0
# Name: population, dtype: float64
# --- pandas DataFrame basics -------------------------------------------
data = {
    'state' : ['Ohio','Ohio','Ohio','Nevada','Nevada'],
    'year': [2000,2001,2002,2001,2002],
    'pop': [1.5,1.7,3.6,2.4,2.9]}
frame = DataFrame(data) # behaves like a bundle of Series
print(frame)
# state year pop
# 0 Ohio 2000 1.5
# 1 Ohio 2001 1.7
# 2 Ohio 2002 3.6
# 3 Nevada 2001 2.4
# 4 Nevada 2002 2.9
print(DataFrame(data, columns=['year','state','pop'])) # reorder the columns (temporary view)
# year state pop
# 0 2000 Ohio 1.5
# 1 2001 Ohio 1.7
# 2 2002 Ohio 3.6
# 3 2001 Nevada 2.4
# 4 2002 Nevada 2.9
frame = DataFrame(data, columns=['year','state','pop']) # rebind frame so the new column order sticks
frame2= DataFrame(data, columns=['year','state','pop','debt'], index=['one','two','three','four','five'])
print(frame2)
# year state pop debt
# one 2000 Ohio 1.5 NaN
# two 2001 Ohio 1.7 NaN
# three 2002 Ohio 3.6 NaN
# four 2001 Nevada 2.4 NaN
# five 2002 Nevada 2.9 NaN
print(frame2['state']) # select a single column
# one Ohio
# two Ohio
# three Ohio
# four Nevada
# five Nevada
# Name: state, dtype: object
print(frame2['year'])
# one 2000
# two 2001
# three 2002
# four 2001
# five 2002
# Name: year, dtype: int64
print(frame2.ix['three']) # ix : reference a specific row by index label
# extracting two or more columns or rows => use [[]]
# print(frame2[['year','state']])
#
# print(frame2.ix[['three','five']])
print(frame2)
frame2['debt']=16.5
print(frame2)
# year state pop debt
# one 2000 Ohio 1.5 16.5
# two 2001 Ohio 1.7 16.5
# three 2002 Ohio 3.6 16.5
# four 2001 Nevada 2.4 16.5
# five 2002 Nevada 2.9 16.5
# frame2['debt']=np.arange(3)
# print(frame2)
# # ValueError: Length of values does not match length of index
frame2['debt']=np.arange(5)
print(frame2)
# year state pop debt
# one 2000 Ohio 1.5 0
# two 2001 Ohio 1.7 1
# three 2002 Ohio 3.6 2
# four 2001 Nevada 2.4 3
# five 2002 Nevada 2.9 4
print('='*50)
val = Series([-1.2,-1.5,-1.7],index=['two','three','five'])
print(val)
# two -1.2
# three -1.5
# five -1.7
# dtype: float64
# to add a column of a different length -> build a Series and assign it
frame2['debt']=val # values are aligned by index label (lengths need not match)
print(frame2)
# new column: True for rows in the east (Ohio), False otherwise (condition-based)
frame2['eastern']=frame2.state=='Ohio'
print(frame2)
# year state pop debt eastern
# one 2000 Ohio 1.5 NaN True
# two 2001 Ohio 1.7 -1.2 True
# three 2002 Ohio 3.6 -1.5 True
# four 2001 Nevada 2.4 NaN False
# five 2002 Nevada 2.9 -1.7 False
# drop a column
del frame2['eastern']
print(frame2)
# year state pop debt
# one 2000 Ohio 1.5 NaN
# two 2001 Ohio 1.7 -1.2
# three 2002 Ohio 3.6 -1.5
# four 2001 Nevada 2.4 NaN
# five 2002 Nevada 2.9 -1.7
print(frame2.columns)
# Index(['year', 'state', 'pop', 'debt'], dtype='object')
print(frame2.index)
# Index(['one', 'two', 'three', 'four', 'five'], dtype='object')
pop = {'Nevada' : {2001 : 2.4,2002:2.9},'Ohio' : {2000 : 1.5,2001:1.7,2002:3.6}}
frame3 = DataFrame(pop)
print(frame3)
# Nevada Ohio
# 2000 NaN 1.5
# 2001 2.4 1.7
# 2002 2.9 3.6
# swap rows and columns (transpose)
print(frame3.T)
# 2000 2001 2002
# Nevada NaN 2.4 2.9
# Ohio 1.5 1.7 3.6
# frame4 = DataFrame(pop,index=[2001,2002,2003]) # to pick an index, pass a DataFrame (a plain dict has no index)
# print(frame4)
# # AttributeError: 'list' object has no attribute 'astype'
frame4 = DataFrame(frame3,index=[2001,2002,2003])
print(frame4)
# Nevada Ohio
# 2001 2.4 1.7
# 2002 2.9 3.6
# 2003 NaN NaN
print(frame3)
# Nevada Ohio
# 2000 NaN 1.5
# 2001 2.4 1.7
# 2002 2.9 3.6
pdata = {'Ohio':frame3['Ohio'][:-1],'Nevada':frame3['Nevada'][:2]} # [:-1] : drop the last row, [:2] : keep rows 0 and 1
frame5=DataFrame(pdata)
print(frame5)
# Ohio Nevada
# 2000 1.5 NaN
# 2001 1.7 2.4
pdata = {'Ohio':frame3['Ohio'][:-1],'Nevada':frame3['Nevada']}
# 'Nevada' keeps every row, so 2002 (missing from the truncated 'Ohio') becomes NaN
frame5=DataFrame(pdata)
print(frame5)
# Ohio Nevada
# 2000 1.5 NaN
# 2001 1.7 2.4
# 2002 NaN 2.9
d78849ae509b808adaa2636049aede936282f95e | 1,063 | py | Python | Chapter8/listing8_1.py | hohsieh/osgeopy-code | 932157c748c8fedb67d862b266a983fdd29ead56 | [
"MIT"
] | 160 | 2015-01-11T06:45:11.000Z | 2022-03-07T15:09:57.000Z | Chapter8/listing8_1.py | sthagen/osgeopy-code | bc85f4ec7a630b53502ee491e400057b67cdab22 | [
"MIT"
] | 3 | 2018-09-29T11:34:13.000Z | 2020-07-20T16:45:23.000Z | Chapter8/listing8_1.py | sthagen/osgeopy-code | bc85f4ec7a630b53502ee491e400057b67cdab22 | [
"MIT"
] | 108 | 2015-05-28T11:29:01.000Z | 2022-02-12T12:01:46.000Z | # Script to reproject a shapefile.
from osgeo import ogr, osr
# Build the output spatial reference: a USA Albers equal-area projection
# (NAD83 / GRS80 ellipsoid, units of metres), defined via its PROJ.4 string.
sr = osr.SpatialReference()
sr.ImportFromProj4('''+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23
    +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80
    +datum=NAD83 +units=m +no_defs''')
# Open the data source in update mode (the trailing 1) so a new layer can
# be created inside it. Don't forget to change your directory here.
ds = ogr.Open(r'D:\osgeopy-data\US', 1)
# Get the input layer.
in_lyr = ds.GetLayer('us_volcanos')
# Create the empty output point layer carrying the Albers SRS.
out_lyr = ds.CreateLayer('us_volcanos_aea', sr,
                         ogr.wkbPoint)
out_lyr.CreateFields(in_lyr.schema)
# Loop through the features in the input layer; a single output feature
# object is reused for every input row.
out_feat = ogr.Feature(out_lyr.GetLayerDefn())
for in_feat in in_lyr:
    # Clone the geometry, project it in place, and attach it to the feature.
    geom = in_feat.geometry().Clone()
    geom.TransformTo(sr)
    out_feat.SetGeometry(geom)
    # Copy attributes field-by-field from the source feature.
    for i in range(in_feat.GetFieldCount()):
        out_feat.SetField(i, in_feat.GetField(i))
    # Insert the feature into the output layer.
    out_lyr.CreateFeature(out_feat)
| 28.72973 | 65 | 0.668862 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 514 | 0.483537 |
d788ad2e0e4762dfe1aa6e2559af48c180f16bf4 | 10,962 | py | Python | examples/seq2seq/task_seq2seq_simbert_v2_stage2.py | Tongjilibo/bert4torch | 71d5ffb3698730b16e5a252b06644a136787711e | [
"MIT"
] | 49 | 2022-03-15T07:28:16.000Z | 2022-03-31T07:16:15.000Z | examples/seq2seq/task_seq2seq_simbert_v2_stage2.py | Tongjilibo/bert4torch | 71d5ffb3698730b16e5a252b06644a136787711e | [
"MIT"
] | null | null | null | examples/seq2seq/task_seq2seq_simbert_v2_stage2.py | Tongjilibo/bert4torch | 71d5ffb3698730b16e5a252b06644a136787711e | [
"MIT"
] | null | null | null | #! -*- coding: utf-8 -*-
# SimBERT_v2预训练代码stage2,把simbert的相似度蒸馏到roformer-sim上
# 官方项目:https://github.com/ZhuiyiTechnology/roformer-sim
import json
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
import torch.nn.functional as F
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, AutoRegressiveDecoder, Callback, truncate_sequences
from bert4torch.tokenizers import Tokenizer
import jieba
jieba.initialize()
# 基本信息
maxlen = 64
batch_size = 12
# bert配置,需要加载stage1训练后的权重,这里直接加载官方最终的权重以示例
config_path = 'F:/Projects/pretrain_ckpt/simbert/[sushen_torch_base]--roformer_chinese_sim_char_base/config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/simbert/[sushen_torch_base]--roformer_chinese_sim_char_base/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/simbert/[sushen_torch_base]--roformer_chinese_sim_char_base/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# 建立分词器
tokenizer = Tokenizer(dict_path, do_lower_case=True)
# The corpus here is identical to the one used in stage 1
class MyDataset(ListDataset):
    @staticmethod
    def load_data(filename):
        """Read the corpus: one JSON object per line.

        Example line:
        {"text": "懂英语的来!", "synonyms": ["懂英语的来!!!", "懂英语的来", "一句英语翻译 懂英语的来"]}
        """
        D = []
        with open(filename, encoding='utf-8') as f:
            for l in f:
                D.append(json.loads(l))
        return D
def truncate(text):
    """Cut *text* into segments of at most maxlen - 2 chars and keep the first.

    ``seps`` are the separator characters segments may break on, ``strips``
    the lighter punctuation trimmed from segment edges (see text_segmentate).
    """
    seps, strips = u'\n。!?!?;;,, ', u';;,, '
    return text_segmentate(text, maxlen - 2, seps, strips)[0]
def masked_encode(text):
    """Whole-word-masking (wwm) encoding of *text*.

    Each jieba word is selected for prediction at a 15% rate using BERT's
    80/10/10 split: 80% become [MASK] tokens, 10% are kept as-is but still
    predicted, 10% are replaced by random token ids. Returns the pair
    (source, target); target holds 0 at positions with nothing to predict.
    """
    words = jieba.lcut(text)
    rands = np.random.random(len(words))
    # start with [CLS]; target 0 means "no prediction at this position"
    source, target = [tokenizer._token_start_id], [0]
    for r, w in zip(rands, words):
        ids = tokenizer.encode(w)[0][1:-1]  # drop the [CLS]/[SEP] around the word
        if r < 0.15 * 0.8:
            # masked: model must predict the original ids
            source.extend([tokenizer._token_mask_id] * len(ids))
            target.extend(ids)
        elif r < 0.15 * 0.9:
            # kept unchanged but still predicted
            source.extend(ids)
            target.extend(ids)
        elif r < 0.15:
            # replaced with random non-zero token ids, original still predicted
            source.extend(
                np.random.choice(tokenizer._vocab_size - 1, size=len(ids)) + 1
            )
            target.extend(ids)
        else:
            # untouched and excluded from the loss
            source.extend(ids)
            target.extend([0] * len(ids))
    # truncate to maxlen and close the sequence with [SEP]
    source = source[:maxlen - 1] + [tokenizer._token_end_id]
    target = target[:maxlen - 1] + [0]
    return source, target
# ========== 蒸馏用:开始 ==========
# simbert配置
sim_config_path = 'F:/Projects/pretrain_ckpt/simbert/[sushen_torch_base]--simbert_chinese_base/config.json'
sim_checkpoint_path = 'F:/Projects/pretrain_ckpt/simbert/[sushen_torch_base]--simbert_chinese_base/pytorch_model.bin'
sim_dict_path = 'F:/Projects/pretrain_ckpt/simbert/[sushen_torch_base]--simbert_chinese_base/vocab.txt'
# 建立分词器
sim_tokenizer = Tokenizer(sim_dict_path, do_lower_case=True) # 建立分词器
# 建立加载模型
simbert = build_transformer_model(sim_config_path, sim_checkpoint_path, with_pool='linear', application='unilm').to(device)
# ========== 蒸馏用:结束 ==========
def collate_fn(batch):
batch_token_ids, batch_segment_ids = [], []
batch_sim_token_ids, batch_sim_segment_ids = [], []
for d in batch:
text, synonyms = d['text'], d['synonyms']
synonyms = [text] + synonyms
np.random.shuffle(synonyms)
for _ in range(2):
text, synonym = synonyms[:2]
if np.random.random() < 0.5:
text_ids = masked_encode(text)[0]
else:
text_ids = tokenizer.encode(text)[0]
synonym_ids = tokenizer.encode(synonym)[0][1:]
truncate_sequences(maxlen * 2, -2, text_ids, synonym_ids)
token_ids = text_ids + synonym_ids
segment_ids = [0] * len(text_ids) + [1] * len(synonym_ids)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
# ==== 蒸馏用:开始 ====
token_ids, segment_ids = sim_tokenizer.encode(text, maxlen=maxlen)
batch_sim_token_ids.append(token_ids)
batch_sim_segment_ids.append(segment_ids)
# ==== 蒸馏用:结束 ====
text, synonym = synonym, text
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
# ==== 蒸馏用:开始 ====
batch_sim_token_ids = torch.tensor(sequence_padding(batch_sim_token_ids), dtype=torch.long, device=device)
batch_sim_segment_ids = torch.tensor(sequence_padding(batch_sim_segment_ids), dtype=torch.long, device=device)
sim_vecs = simbert.predict([batch_sim_token_ids, batch_sim_segment_ids])[1]
sim_vecs /= (sim_vecs**2).sum(dim=-1, keepdims=True)**0.5
sims = torch.matmul(sim_vecs, sim_vecs.T)
# ==== 蒸馏用:结束 ====
return [batch_token_ids, batch_segment_ids], [batch_token_ids, batch_segment_ids, sims]
train_dataloader = DataLoader(MyDataset('../datasets/data_similarity.json'), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
# 建立加载模型
class Model(BaseModel):
def __init__(self, pool_method='cls'):
super().__init__()
self.bert = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, model='roformer',
with_pool='linear', with_mlm='linear', dropout_rate=0.2, application='unilm')
self.pool_method = pool_method
def get_pool_emb(self, hidden_state, pool_cls, attention_mask):
if self.pool_method == 'cls':
return pool_cls
elif self.pool_method == 'mean':
hidden_state = torch.sum(hidden_state * attention_mask[:, :, None], dim=1)
attention_mask = torch.sum(attention_mask, dim=1)[:, None]
return hidden_state / attention_mask
elif self.pool_method == 'max':
seq_state = hidden_state * attention_mask[:, :, None]
return torch.max(seq_state, dim=1)
else:
raise ValueError('pool_method illegal')
def forward(self, token_ids, segment_ids):
hidden_state, pool_cls, seq_logit = self.bert([token_ids, segment_ids])
sen_emb = self.get_pool_emb(hidden_state, pool_cls, attention_mask=token_ids.gt(0).long())
return seq_logit, sen_emb
model = Model(pool_method='cls').to(device)
class TotalLoss(nn.Module):
"""loss分两部分,一是seq2seq的交叉熵,二是相似度的交叉熵。
"""
def forward(self, outputs, target):
seq_logit, sen_emb = outputs
seq_label, seq_mask, sims = target
seq2seq_loss = self.compute_loss_of_seq2seq(seq_logit, seq_label, seq_mask)
similarity_loss = self.compute_loss_of_similarity(sen_emb, sims)
return {'loss': seq2seq_loss + similarity_loss, 'seq2seq_loss': seq2seq_loss, 'similarity_loss': similarity_loss}
def compute_loss_of_seq2seq(self, y_pred, y_true, y_mask):
'''
y_pred: [btz, seq_len, hdsz]
y_true: [btz, seq_len]
y_mask: [btz, seq_len]
'''
y_true = y_true[:, 1:] # 目标token_ids
y_mask = y_mask[:, 1:] # 指示了要预测的部分
y_pred = y_pred[:, :-1, :] # 预测序列,错开一位
y_pred = y_pred.reshape(-1, y_pred.shape[-1])
y_true = (y_true*y_mask).flatten()
return F.cross_entropy(y_pred, y_true, ignore_index=0)
def compute_loss_of_similarity(self, y_pred, y_true):
y_pred = F.normalize(y_pred, p=2, dim=-1) # 句向量归一化
similarities = torch.matmul(y_pred, y_pred.T) # 相似度矩阵
loss = 100 * torch.mean((similarities - y_true) ** 2)
return loss
model.compile(loss=TotalLoss(), optimizer=optim.Adam(model.parameters(), 1e-5), metrics=['seq2seq_loss', 'similarity_loss'])
class SynonymsGenerator(AutoRegressiveDecoder):
"""seq2seq解码器
"""
@AutoRegressiveDecoder.wraps('logits')
def predict(self, inputs, output_ids, states):
token_ids, segment_ids = inputs
token_ids = torch.cat([token_ids, output_ids], 1)
segment_ids = torch.cat([segment_ids, torch.ones_like(output_ids, device=device)], 1)
seq_logit, _ = model.predict([token_ids, segment_ids])
return seq_logit[:, -1, :]
def generate(self, text, n=1, topk=5):
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
output_ids = self.random_sample([token_ids, segment_ids], n, topk) # 基于随机采样
return [tokenizer.decode(ids.cpu().numpy()) for ids in output_ids]
synonyms_generator = SynonymsGenerator(start_id=None, end_id=tokenizer._token_end_id, maxlen=maxlen, device=device)
def cal_sen_emb(text_list):
'''输入text的list,计算sentence的embedding
'''
X, S = [], []
for t in text_list:
x, s = tokenizer.encode(t)
X.append(x)
S.append(s)
X = torch.tensor(sequence_padding(X), dtype=torch.long, device=device)
S = torch.tensor(sequence_padding(S), dtype=torch.long, device=device)
_, Z = model.predict([X, S])
return Z
def gen_synonyms(text, n=100, k=20):
""""含义: 产生sent的n个相似句,然后返回最相似的k个。
做法:用seq2seq生成,并用encoder算相似度并排序。
效果:
>>> gen_synonyms(u'微信和支付宝哪个好?')
[
u'微信和支付宝,哪个好?',
u'微信和支付宝哪个好',
u'支付宝和微信哪个好',
u'支付宝和微信哪个好啊',
u'微信和支付宝那个好用?',
u'微信和支付宝哪个好用',
u'支付宝和微信那个更好',
u'支付宝和微信哪个好用',
u'微信和支付宝用起来哪个好?',
u'微信和支付宝选哪个好',
]
"""
r = synonyms_generator.generate(text, n)
r = [i for i in set(r) if i != text] # 不和原文相同
r = [text] + r
Z = cal_sen_emb(r)
Z /= (Z**2).sum(dim=1, keepdims=True)**0.5
argsort = torch.matmul(Z[1:], -Z[0]).argsort()
return [r[i + 1] for i in argsort[:k]]
def just_show(some_samples):
"""随机观察一些样本的效果
"""
S = [np.random.choice(some_samples) for _ in range(3)]
for s in S:
try:
print(u'原句子:%s' % s)
print(u'同义句子:', gen_synonyms(s, 10, 10))
print()
except:
pass
class Evaluator(Callback):
"""评估模型
"""
def __init__(self):
self.lowest = 1e10
def on_epoch_end(self, global_step, epoch, logs=None):
# 保存最优
if logs['loss'] <= self.lowest:
self.lowest = logs['loss']
# model.save_weights('./best_model.pt')
# 演示效果
just_show(['微信和支付宝拿个好用?',
'微信和支付宝,哪个好?',
'微信和支付宝哪个好',
'支付宝和微信哪个好',
'支付宝和微信哪个好啊',
'微信和支付宝那个好用?',
'微信和支付宝哪个好用',
'支付宝和微信那个更好',
'支付宝和微信哪个好用',
'微信和支付宝用起来哪个好?',
'微信和支付宝选哪个好'
])
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=50, steps_per_epoch=200, callbacks=[evaluator])
else:
model.load_weights('./best_model.pt')
| 36.54 | 137 | 0.628352 | 4,711 | 0.390177 | 0 | 0 | 772 | 0.063939 | 0 | 0 | 3,452 | 0.285904 |
d78a10d27ddc86c99b26b8eff9416d8403b7dcfc | 5,872 | py | Python | Validation/RecoTrack/python/customiseMTVForBPix123Holes.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Validation/RecoTrack/python/customiseMTVForBPix123Holes.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Validation/RecoTrack/python/customiseMTVForBPix123Holes.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | from __future__ import print_function
# This customise file provides an example (in the form of holes in
# BPix L1-L2 and L3-L3) on how to select a subset of generalTracks
# (e.g. by phi and eta) and setup various MTV instances for those
# (selected tracks, built tracks, and seeds in this case). The naming
# of DQM folders is consistent with an example in trackingCompare.py
import FWCore.ParameterSet.Config as cms
def customiseMTVForBPix123Holes(process):
from Validation.RecoTrack.cutsRecoTracks_cfi import cutsRecoTracks as _cutsRecoTracks
import math
_minPhi = process.trackValidatorTrackingOnly.histoProducerAlgoBlock.minPhi.value()
_maxPhi = process.trackValidatorTrackingOnly.histoProducerAlgoBlock.maxPhi.value()
_nPhi = process.trackValidatorTrackingOnly.histoProducerAlgoBlock.nintPhi.value()
_binPhi = (_maxPhi - _minPhi) / _nPhi
process.generalTracksL1L2 = _cutsRecoTracks.clone(
minLayer = 0,
quality = [],
minRapidity = -1.0, # also eta < -1 is affected, but let's start with this
minPhi=_minPhi+_binPhi*14, maxPhi=_minPhi+_binPhi*19) # ~0.7 .. ~0.2
process.generalTracksL2L3 = process.generalTracksL1L2.clone(
minRapidity = -0.9, maxRapidity = 2,
minPhi=_minPhi+_binPhi*33, maxPhi=_minPhi+_binPhi + 2*math.pi) # ~2.6 .. ~3.3
print("L1L2 %f %f" % (process.generalTracksL1L2.minPhi.value(), process.generalTracksL1L2.maxPhi.value()))
print("L2L3 %f %f" % (process.generalTracksL2L3.minPhi.value(), process.generalTracksL2L3.maxPhi.value()))
from CommonTools.RecoAlgos.trackingParticleRefSelector_cfi import trackingParticleRefSelector as _trackingParticleRefSelector
process.trackingParticlesL1L2 = _trackingParticleRefSelector.clone(
signalOnly = False,
chargedOnly = False,
tip = 1e5,
lip = 1e5,
minRapidity = process.generalTracksL1L2.minRapidity.value(),
maxRapidity = process.generalTracksL1L2.maxRapidity.value(),
ptMin = 0,
minPhi = process.generalTracksL1L2.minPhi.value(),
maxPhi = process.generalTracksL1L2.maxPhi.value(),
)
process.trackingParticlesL2L3 = process.trackingParticlesL1L2.clone(
minRapidity = process.generalTracksL2L3.minRapidity.value(),
maxRapidity = process.generalTracksL2L3.maxRapidity.value(),
minPhi = process.generalTracksL2L3.minPhi.value(),
maxPhi = process.generalTracksL2L3.maxPhi.value(),
)
process.tracksPreValidationTrackingOnly += (
process.trackingParticlesL1L2 +
process.trackingParticlesL2L3 +
process.generalTracksL1L2 +
process.generalTracksL2L3
)
process.trackValidatorTrackingOnlyL1L2 = process.trackValidatorTrackingOnly.clone(
dirName = process.trackValidatorTrackingOnly.dirName.value().replace("Track/", "TrackL1L2/"),
label_tp_effic = "trackingParticlesL1L2",
label_tp_effic_refvector = True,
label = ["generalTracksL1L2"],
)
process.trackValidatorTrackingOnlyL2L3 = process.trackValidatorTrackingOnlyL1L2.clone(
dirName = process.trackValidatorTrackingOnlyL1L2.dirName.value().replace("L1L2", "L2L3"),
label_tp_effic = "trackingParticlesL2L3",
label = ["generalTracksL2L3"],
)
process.trackValidatorsTrackingOnly += (
process.trackValidatorTrackingOnlyL1L2 +
process.trackValidatorTrackingOnlyL2L3
)
for trkColl in process.trackValidatorTrackingOnly.label:
if "ByAlgoMask" in trkColl: continue
if "Pt09" in trkColl and not trkColl in ["generalTracksPt09", "cutsRecoTracksPt09Hp"]: continue
if trkColl != "generalTracks":
selL1L2 = getattr(process, trkColl).clone(src="generalTracksL1L2")
selL2L3 = getattr(process, trkColl).clone(src="generalTracksL2L3")
if "Pt09" in trkColl:
selL1L2Name = trkColl.replace("Pt09", "Pt09L1L2")
selL2L3Name = trkColl.replace("Pt09", "Pt09L2L3")
else:
selL1L2Name = trkColl.replace("cutsRecoTracks", "cutsRecoTracksL1L2")
selL2L3Name = trkColl.replace("cutsRecoTracks", "cutsRecoTracksL2L3")
setattr(process, selL1L2Name, selL1L2)
setattr(process, selL2L3Name, selL2L3)
process.tracksPreValidationTrackingOnly += (selL1L2+selL2L3)
process.trackValidatorTrackingOnlyL1L2.label.append(selL1L2Name)
process.trackValidatorTrackingOnlyL2L3.label.append(selL2L3Name)
for midfix in ["Building", "Seeding"]:
label = "trackValidator%sTrackingOnly" % midfix
mtv = getattr(process, label)
mtvL1L2 = mtv.clone(
dirName = mtv.dirName.value()[:-1] + "L1L2/",
label_tp_effic = "trackingParticlesL1L2",
label_tp_effic_refvector = True,
label = [],
mvaLabels = cms.PSet(),
doMVAPlots = False,
)
mtvL2L3 = mtvL1L2.clone(
dirName = mtvL1L2.dirName.value().replace("L1L2", "L2L3"),
label_tp_effic = "trackingParticlesL2L3",
)
setattr(process, label+"L1L2", mtvL1L2)
setattr(process, label+"L2L3", mtvL2L3)
process.trackValidatorsTrackingOnly += (
mtvL1L2 +
mtvL2L3
)
for trkColl in mtv.label:
selL1L2 = process.generalTracksL1L2.clone(src=trkColl)
selL2L3 = process.generalTracksL2L3.clone(src=trkColl)
selL1L2Name = trkColl+"L1L2"
selL2L3Name = trkColl+"L2L3"
setattr(process, selL1L2Name, selL1L2)
setattr(process, selL2L3Name, selL2L3)
process.tracksPreValidationTrackingOnly += (selL1L2+selL2L3)
mtvL1L2.label.append(selL1L2Name)
mtvL2L3.label.append(selL2L3Name)
return process
| 48.933333 | 129 | 0.679666 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 916 | 0.155995 |
d78adda6c383319ee2452a20f3e7494d5bd7a171 | 145 | py | Python | helloworld/api/v1.py | ElyasSantana/example-api | f99cff55a152e3ab4e1b3490d2632e8f06b7f7fb | [
"MIT"
] | null | null | null | helloworld/api/v1.py | ElyasSantana/example-api | f99cff55a152e3ab4e1b3490d2632e8f06b7f7fb | [
"MIT"
] | null | null | null | helloworld/api/v1.py | ElyasSantana/example-api | f99cff55a152e3ab4e1b3490d2632e8f06b7f7fb | [
"MIT"
] | null | null | null | from fastapi import APIRouter
# FastAPI router holding the hello-world endpoint
router_helloworld = APIRouter()
@router_helloworld.get("/")
def get_helloworld():
    """Handle GET "/" on this router; returns a static JSON payload."""
    return {"Hello": "World"}
| 16.111111 | 31 | 0.724138 | 0 | 0 | 0 | 0 | 79 | 0.544828 | 0 | 0 | 17 | 0.117241 |
d78be78c3b064c64ae4256d80473c5ab6ad70fcf | 791 | py | Python | projects/slots/activities/activity_randomizer.py | only-romano/junkyard | b60a25b2643f429cdafee438d20f9966178d6f36 | [
"MIT"
] | null | null | null | projects/slots/activities/activity_randomizer.py | only-romano/junkyard | b60a25b2643f429cdafee438d20f9966178d6f36 | [
"MIT"
] | null | null | null | projects/slots/activities/activity_randomizer.py | only-romano/junkyard | b60a25b2643f429cdafee438d20f9966178d6f36 | [
"MIT"
] | null | null | null | from random import sample, randint
"""
Randomizer for available lists
plus radio broadcasting randomizer
"""
# Available lists randomizer class
class Randomize_and_pop_on_call:
    """
    Shuffle the given array once; each call removes and returns one element.
    When the pool is exhausted (or the array was empty), calls return None.
    """
    def __init__(self, array):
        # sample(seq, len(seq)) yields a shuffled copy, leaving the input intact
        self.array = sample(array, len(array))
    def __call__(self):
        if not self.array:
            return None
        return self.array.pop()
# alias: short public name for the randomizer class
randomize = Randomize_and_pop_on_call
# random radio: 1-in-4 chance of the first station, otherwise the second
broadcast = "Евгеника" if randint(1, 4) == 1 else "Маяк"
__all__ = ['randomize', 'broadcast']
if __name__ == '__main__':
    # randomizer check: six draws from a five-item pool, so the last prints None
    ar = randomize([1,2,3,4,5])
    print(ar(), ar(), ar(), ar(), ar(), ar())
| 25.516129 | 65 | 0.672566 | 358 | 0.445828 | 0 | 0 | 0 | 0 | 0 | 0 | 359 | 0.447073 |
d78d46855b1e8af013795bcd9ce42f63ccd57ab7 | 7,716 | py | Python | tests/test_contract.py | iwob/pysv | 6fdfb93d66cce84cceacabd3806f3f51f0cbbe17 | [
"MIT"
] | 2 | 2017-06-21T04:00:11.000Z | 2018-06-11T17:28:55.000Z | tests/test_contract.py | iwob/pysv | 6fdfb93d66cce84cceacabd3806f3f51f0cbbe17 | [
"MIT"
] | null | null | null | tests/test_contract.py | iwob/pysv | 6fdfb93d66cce84cceacabd3806f3f51f0cbbe17 | [
"MIT"
] | 1 | 2018-06-11T17:28:56.000Z | 2018-06-11T17:28:56.000Z | import unittest
from pysv.contract import *
class TestsContract(unittest.TestCase):
def test_program_vars_input_and_local(self):
vars = ProgramVars({'x': 'Int'}, {'y': 'Int'})
vars.add_marked_variables(["|x|'", "|y|'", "|y|''"])
self.assertEquals({'x': 'Int', "|x|'": 'Int'}, vars.input_vars)
self.assertEquals({'y': 'Int', "|y|'": 'Int', "|y|''": 'Int'}, vars.local_vars)
self.assertEquals({'x', "|x|'"}, set(vars.get_names_input()))
self.assertEquals({'y', "|y|'", "|y|''"}, set(vars.get_names_local()))
self.assertEquals({'x', "|x|'", 'y', "|y|'", "|y|''"}, set(vars.get_names_all()))
self.assertEquals({'x': 'Int', "|x|'": 'Int', 'y': 'Int', "|y|'": 'Int', "|y|''": 'Int'}, vars.all())
vars.add_input_variables(['a', 'b'], 'Bool')
self.assertEquals({'x': 'Int', "|x|'": 'Int', 'a': 'Bool', 'b': 'Bool'}, vars.input_vars)
vars.add_local_variables(['c'], 'Bool')
self.assertEquals({'y': 'Int', "|y|'": 'Int', "|y|''": 'Int', 'c': 'Bool'}, vars.local_vars)
vars.rename_var('c', 'c_T1')
self.assertEquals({'y': 'Int', "|y|'": 'Int', "|y|''": 'Int', 'c_T1': 'Bool'}, vars.local_vars)
def test_program_vars_markers(self):
vars = ProgramVars({'x':'Int'}, {"y":"Int", "|y''|":"Int", "|asd'''''|":"Double", "|y'|":"Int"})
self.assertEquals("|y''|", vars.get_latest_var_name('y'))
def test_formula_test_case_border_cases(self):
self.assertRaises(Exception, formula_test_case_py, [], [])
self.assertRaises(Exception, formula_test_case_py, ['A'], [])
self.assertRaises(Exception, formula_test_case_py, [], ['B'])
def test_formula_test_case(self):
formula = formula_test_case_py(['A'], ['C'])
expected = "((not (A)) or (C))"
self.assertEquals(expected, formula)
formula = formula_test_case_py(['A', 'B'], ['C', 'D'])
expected = "(((not (A)) or (not (B))) or ((C) and (D)))"
self.assertEquals(expected, formula)
def test_formula_test_cases_1(self):
p1 = (['x>0'], ['res==5 and y<0'])
formula = formula_test_cases_py([p1])
expected = "((not (x>0)) or (res==5 and y<0))"
self.assertEquals(expected, formula)
def test_formula_test_cases_2(self):
p1 = (['A', 'B'], ['x == 8', 'y == 0'])
p2 = (['A', 'C'], ['x == 5', 'y == 1'])
p3 = (['D', 'B'], ['x == 8', 'y == 2'])
formula = formula_test_cases_py([p1, p2, p3])
expected = "(((not (A)) or (not (B))) or ((x == 8) and (y == 0))) and (((not (A)) or (not (C))) or ((x == 5) and (y == 1))) and (((not (D)) or (not (B))) or ((x == 8) and (y == 2)))"
self.assertEquals(expected, formula)
def test_program_vars_static_methods(self):
vars = {'x': 'Int', 'y': 'Int', 'z': 'Bool', 'a': 'Real'}
self.assertEquals({'Int', 'Bool', 'Real'}, ProgramVars.get_types(vars))
self.assertEquals({'x': 'Int', 'y': 'Int'}, ProgramVars.get_vars_of_type(vars, 'Int'))
self.assertEquals({'z': 'Bool'}, ProgramVars.get_vars_of_type(vars, 'Bool'))
self.assertEquals({'a': 'Real'}, ProgramVars.get_vars_of_type(vars, 'Real'))
self.assertEquals({}, ProgramVars.get_vars_of_type(vars, 'String'))
def test_Test_class(self):
test = Test([1, 2], [3, -1], ['x', 'y'], ['add', 'sub'])
self.assertEquals([1, 2], test.inputs)
self.assertEquals([3, -1], test.outputs)
self.assertEquals("(x == 1) and (y == 2)", test.code_inputs(lang=utils.LANG_PYTHON))
self.assertEquals("(add == 3) and (sub == -1)", test.code_outputs(lang=utils.LANG_PYTHON))
self.assertEquals("(and (= x 1) (= y 2))", test.code_inputs(lang=utils.LANG_SMT2))
self.assertEquals("(and (= add 3) (= sub -1))", test.code_outputs(lang=utils.LANG_SMT2))
def test_Test_formulaic_form_py(self):
t = Test([1, 2], [3, -1], ['x', 'y'], ['add', 'sub'])
self.assertEquals(['x == 1', 'y == 2'], Test.formulaic_form(t.inputs, t.in_vars, lang=utils.LANG_PYTHON))
self.assertEquals(['add == 3', 'sub == -1'], Test.formulaic_form(t.outputs, t.out_vars, lang=utils.LANG_PYTHON))
self.assertEquals(['(= x 1)', '(= y 2)'], Test.formulaic_form(t.inputs, t.in_vars, lang=utils.LANG_SMT2))
self.assertEquals(['(= add 3)', '(= sub -1)'], Test.formulaic_form(t.outputs, t.out_vars, lang=utils.LANG_SMT2))
def test_TestF_class(self):
t = TestF([1, 2], ['add < sub', 'sub >= 0'], ['x', 'y'], ['add', 'sub'])
self.assertEquals([1, 2], t.inputs)
self.assertEquals(['add < sub', 'sub >= 0'], t.outputs)
self.assertEquals("(x == 1) and (y == 2)", t.code_inputs(lang=utils.LANG_PYTHON))
self.assertEquals("(add < sub) and (sub >= 0)", t.code_outputs(lang=utils.LANG_PYTHON))
self.assertEquals("(and (= x 1) (= y 2))", t.code_inputs(lang=utils.LANG_SMT2))
self.assertEquals("(and add < sub sub >= 0)", t.code_outputs(lang=utils.LANG_SMT2))
def test_TestFF_class(self):
t = TestFF(['x == 1', 'y == 2'], ['add < sub', 'sub >= 0'], ['x', 'y'], ['add', 'sub'])
self.assertEquals(['x == 1', 'y == 2'], t.inputs)
self.assertEquals(['add < sub', 'sub >= 0'], t.outputs)
self.assertEquals("(x == 1) and (y == 2)", t.code_inputs(lang=utils.LANG_PYTHON))
self.assertEquals("(add < sub) and (sub >= 0)", t.code_outputs(lang=utils.LANG_PYTHON))
self.assertEquals("(and x == 1 y == 2)", t.code_inputs(lang=utils.LANG_SMT2))
self.assertEquals("(and add < sub sub >= 0)", t.code_outputs(lang=utils.LANG_SMT2))
def test_TestCases_class_py(self):
tests = [Test([0, 2], [2]),
Test([1, 2], [3]),
Test([1, 3], [4])]
tc = TestCases(tests, in_vars=['x', 'y'], out_vars=['res'], lang=utils.LANG_PYTHON)
self.assertEquals([0, 2], tc.tests[0].inputs)
self.assertEquals([1, 2], tc.tests[1].inputs)
self.assertEquals([2], tc.tests[0].outputs)
self.assertEquals([3], tc.tests[1].outputs)
self.assertEquals('(not ((x == 0) and (y == 2)) or (res == 2)) and ' +\
'(not ((x == 1) and (y == 2)) or (res == 3)) and ' +\
'(not ((x == 1) and (y == 3)) or (res == 4))',
tc.code_postcond())
tests = [TestFF(['A', 'B'], ['C'])]
tc = TestCases(tests, in_vars=['x', 'y'], out_vars=['res'], lang=utils.LANG_PYTHON)
self.assertEquals('(not ((A) and (B)) or (C))',
tc.code_postcond())
tests = []
tc = TestCases(tests, in_vars=['x', 'y'], out_vars=['res'], lang=utils.LANG_PYTHON)
self.assertEquals('', tc.code_postcond())
def test_TestCases_class_smt2(self):
tests = [Test([0, 2], [2]),
Test([1, 2], [3]),
Test([1, 3], [4])]
test_cases = TestCases(tests, in_vars=['x', 'y'], out_vars=['res'], lang=utils.LANG_SMT2)
self.assertEquals('(and (=> (and (= x 0) (= y 2)) (= res 2)) ' +\
'(=> (and (= x 1) (= y 2)) (= res 3)) ' +\
'(=> (and (= x 1) (= y 3)) (= res 4))' +\
')',
test_cases.code_postcond())
tests = [TestFF(['A', 'B'], ['C'])]
tc = TestCases(tests, in_vars=['x', 'y'], out_vars=['res'], lang=utils.LANG_SMT2)
self.assertEquals('(=> (and A B) C)',
tc.code_postcond())
tests = []
tc = TestCases(tests, in_vars=['x', 'y'], out_vars=['res'], lang=utils.LANG_SMT2)
self.assertEquals('', tc.code_postcond()) | 49.780645 | 194 | 0.525531 | 7,670 | 0.994038 | 0 | 0 | 0 | 0 | 0 | 0 | 1,861 | 0.241187 |
d791857131ffb45651a77bad5f5ede0b6842e7f5 | 3,774 | py | Python | unittests/unintary_tests.py | OneCricketeer/pysqoop | 616199a8441d886ffcc4111445da3d0351401454 | [
"MIT"
] | 9 | 2019-06-17T19:21:22.000Z | 2021-07-12T05:14:03.000Z | unittests/unintary_tests.py | OneCricketeer/pysqoop | 616199a8441d886ffcc4111445da3d0351401454 | [
"MIT"
] | 5 | 2019-07-19T14:42:43.000Z | 2020-08-06T16:55:05.000Z | unittests/unintary_tests.py | OneCricketeer/pysqoop | 616199a8441d886ffcc4111445da3d0351401454 | [
"MIT"
] | 14 | 2019-06-05T16:50:27.000Z | 2021-08-05T16:36:15.000Z | import unittest
from pysqoop.SqoopImport import Sqoop
class TestStringMethods(unittest.TestCase):
def test_empty_sqoop(self):
try:
Sqoop()
except Exception as e:
self.assertEqual(str(e), 'all parameters are empty')
def test_properties_not_empty(self):
try:
Sqoop(fields_terminated_by='\"')
except Exception as e:
self.assertEqual(str(e), Sqoop._EMPTY_TABLE_AND_QUERY_PARAMETERS_EXCEPTION)
def test_parameters_order(self):
for iteration in range(0, 10000):
sqoop = Sqoop(null_string='\'\'', fields_terminated_by='\"', table='prova')
self.assertEqual(sqoop.command(), 'sqoop import --fields-terminated-by \" --null-string \'\' --table prova')
def test_real_case(self):
for iteration in range(0, 10000):
expected = 'sqoop import -fs hdfs://remote-cluster:8020 --hive-drop-import-delims --fields-terminated-by \; --enclosed-by \'\"\' --escaped-by \\\\ --null-string \'\' --null-non-string \'\' --table sample_table --target-dir hdfs://remote-cluster/user/hive/warehouse/db/sample_table --delete-target-dir --connect jdbc:oracle:thin:@//your_ip:your_port/your_schema --username user --password pwd --num-mappers 2 --bindir /path/to/bindir/folder'
sqoop = Sqoop(fs='hdfs://remote-cluster:8020', hive_drop_import_delims=True, fields_terminated_by='\;',
enclosed_by='\'"\'', escaped_by='\\\\', null_string='\'\'', null_non_string='\'\'',
table='sample_table',
target_dir='hdfs://remote-cluster/user/hive/warehouse/db/sample_table',
delete_target_dir=True, connect='jdbc:oracle:thin:@//your_ip:your_port/your_schema',
username='user', password='pwd', num_mappers=2,
bindir='/path/to/bindir/folder')
self.assertEqual(expected, sqoop.command())
def test_hbase_basic_import(self):
expected = "sqoop import --table Rutas " \
"--connect 'jdbc:sqlserver://127.0.0.1:1433;DatabaseName=SQLDB;user=root;password=password' " \
"--incremental lastmodified --hbase-table Rutas --column-family Id_Ruta " \
"--hbase-row-key Id_Ruta -m 1"
sqoop = Sqoop(
connect="'jdbc:sqlserver://127.0.0.1:1433;DatabaseName=SQLDB;user=root;password=password'",
table="Rutas",
incremental="lastmodified",
hbase_table="Rutas",
hbase_row_key="Id_Ruta",
column_family="Id_Ruta",
m=1
)
self.assertEqual(expected, sqoop.command())
def test_hbase_lazy_contruction(self):
expected = "sqoop import --table Rutas " \
"--connect 'jdbc:sqlserver://127.0.0.1:1433;DatabaseName=SQLDB;user=root;password=password' " \
"--incremental lastmodified --hbase-table Rutas --column-family Id_Ruta " \
"--hbase-row-key Id_Ruta -m 1"
sqoop = Sqoop()
sqoop.set_param(param="--connect",
value="'jdbc:sqlserver://127.0.0.1:1433;DatabaseName=SQLDB;user=root;password=password'")
sqoop.set_param(param="--table", value="Rutas")
sqoop.set_param(param="--incremental", value="lastmodified")
# sqoop.unset_param(param="--connect")
sqoop.command()
sqoop.set_param(param="--hbase-table", value="Rutas")
sqoop.set_param(param="--column-family", value="Id_Ruta")
sqoop.set_param(param="--hbase-row-key", value="Id_Ruta")
sqoop.set_param(param="-m", value="1")
self.assertEqual(expected, sqoop.command())
if __name__ == '__main__':
unittest.main()
| 51.69863 | 454 | 0.607578 | 3,668 | 0.971913 | 0 | 0 | 0 | 0 | 0 | 0 | 1,612 | 0.427133 |
d793148e8a5d44297963077f150757b903cf3e64 | 1,084 | py | Python | tests/base.py | strukovsv/PyHAML | 75d7774f30809f755dad2867e9ab55cea3019046 | [
"BSD-3-Clause"
] | 21 | 2015-01-27T13:32:46.000Z | 2022-03-12T21:45:12.000Z | tests/base.py | strukovsv/PyHAML | 75d7774f30809f755dad2867e9ab55cea3019046 | [
"BSD-3-Clause"
] | 2 | 2017-05-23T11:30:01.000Z | 2019-07-29T01:21:27.000Z | tests/base.py | strukovsv/PyHAML | 75d7774f30809f755dad2867e9ab55cea3019046 | [
"BSD-3-Clause"
] | 8 | 2015-07-13T17:46:24.000Z | 2021-12-08T18:13:22.000Z | from unittest import TestCase, main, SkipTest
import os
from mako.template import Template
import haml
def skip(func):
    """Decorator that unconditionally skips the decorated test.

    The replacement raises ``unittest.SkipTest`` when invoked, so the
    runner reports the test as skipped instead of executing it.
    """
    import functools  # local import keeps the module's import block untouched

    @functools.wraps(func)  # preserve the test's name/docstring for reporting
    def test(*args, **kwargs):
        raise SkipTest()
    return test
def skip_on_travis(func):
    """Skip the decorated test when running under Travis CI.

    The TRAVIS environment variable is read once, at decoration time;
    off Travis the function is returned unchanged.
    """
    if os.environ.get('TRAVIS') != 'true':
        return func
    import functools  # local import keeps the module's import block untouched

    @functools.wraps(func)  # preserve the test's name/docstring for reporting
    def test(*args, **kwargs):
        raise SkipTest()
    return test
class Base(TestCase):
    """Common assertions for the HAML test suite.

    Both helpers compile HAML source through the project-local ``haml``
    package. Spaces in the compared strings are rewritten to tabs before
    comparison — presumably to make whitespace differences visible in
    failure output (NOTE(review): confirm intent).
    """
    def assertMako(self, source, expected, *args):
        """Assert that *source* (HAML) compiles to the Mako text *expected*.

        The auto-inserted runtime-import preamble is stripped from the
        generated Mako before comparison.
        """
        node = haml.parse_string(source)
        mako = haml.generate_mako(node).replace('<%! from haml import runtime as __HAML %>\\\n', '')
        self.assertEqual(
            mako.replace(' ', '\t'),
            expected.replace(' ', '\t'),
            *args
        )
    def assertHTML(self, source, expected, *args, **kwargs):
        """Assert that *source* (HAML) renders, via Mako, to *expected* HTML.

        Keyword arguments are forwarded to the Mako template as render
        context.
        """
        node = haml.parse_string(source)
        mako = haml.generate_mako(node)
        html = Template(mako).render_unicode(**kwargs)
        self.assertEqual(
            html.replace(' ', '\t'),
            expected.replace(' ', '\t'),
            *args
        )
| 24.636364 | 100 | 0.551661 | 701 | 0.646679 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.095018 |
d793b12d0e01c44da57d39ebb7878a010a633a7a | 5,787 | py | Python | playground/basis_set.py | not-matt/QuantumPlayground | ddd832efb73563cf80c1090b817fa11ff05fc535 | [
"MIT"
] | null | null | null | playground/basis_set.py | not-matt/QuantumPlayground | ddd832efb73563cf80c1090b817fa11ff05fc535 | [
"MIT"
] | null | null | null | playground/basis_set.py | not-matt/QuantumPlayground | ddd832efb73563cf80c1090b817fa11ff05fc535 | [
"MIT"
] | null | null | null | import requests
import logging
import numpy as np
from playground.utils import elements, angular_quanta
_LOGGER = logging.getLogger(__name__)
class AO(object):
    """
    Contracted Gaussian-type atomic orbital.

    Evaluating an instance at a point (x, y, z) sums coefficient-weighted
    Gaussian primitives, each factorised as g(x) * g(y) * g(z).
    """
    def __init__(self,
                 orbital_type: str,
                 contract_num: int,
                 exponents: list,
                 coeffs: list,
                 centre: tuple = (0, 0, 0)):
        """
        orbital_type - eg. s, px, dx2
        contract_num - G contraction factor
        exponents - G exponents list
        coeffs - G coefficients list
        centre - tuple, (float * 3) - x,y,z coordinates
        """
        self.orbital_type = orbital_type
        self.contract_num = contract_num
        self.exponents = exponents
        self.coeffs = coeffs
        self.centre = centre
        # (l, m, n) angular-momentum quanta looked up from the orbital label
        self.angular = angular_quanta[orbital_type]
    def __repr__(self):
        return f"<Atomic Orbital, type {self.orbital_type}>"
    def __call__(self, x, y, z):
        x0, y0, z0 = self.centre
        ang_x, ang_y, ang_z = self.angular
        total = 0
        for idx, coeff in enumerate(self.coeffs):
            alpha = self.exponents[idx]
            gx = Gprimitive(ang_x, x0, alpha)
            gy = Gprimitive(ang_y, y0, alpha)
            gz = Gprimitive(ang_z, z0, alpha)
            total += coeff * gx(x) * gy(y) * gz(z)
        return total
class Gprimitive:
    """One-dimensional Gaussian primitive.

    The full 3-D primitive is the product
    gprimitive(x) * gprimitive(y) * gprimitive(z).
    """
    def __init__(self, angular, centre, exponent):
        self.angular = angular    # power of the polynomial prefactor
        self.centre = centre      # centre of the Gaussian
        self.exponent = exponent  # Gaussian decay exponent
    def __call__(self, x):
        """Evaluate (x - centre)**angular * exp(-exponent * (x - centre)**2)."""
        dx = x - self.centre
        return dx ** self.angular * np.exp(-self.exponent * dx ** 2)
def _read_primitives(lines, contract_num, ncols):
    """Read `contract_num` primitive rows of `ncols` numeric columns.

    Returns `ncols` lists (exponents first, then one list per coefficient
    column).  Fortran-style 'D' exponents are converted to 'e' first.
    Raises ValueError if a row does not have the expected column count.
    """
    columns = [[] for _ in range(ncols)]
    for _ in range(contract_num):
        values = next(lines).replace("D", "e").split()
        if len(values) != ncols:
            raise ValueError(
                "expected %d columns in primitive row, got %d" % (ncols, len(values)))
        for column, value in zip(columns, values):
            column.append(float(value))
    return columns


def parse_basis_lines(basis_lines: list):
    """Create the atomic orbitals for one atom of the basis set.

    `basis_lines` is the list of lines describing a single atom in
    "gaussian94" format.  Returns (atom_symbol, orbitals).
    Raises ValueError for unsupported orbital types (F).
    """
    orbitals = []
    lines = iter(basis_lines)
    atom_symbol, _ = next(lines).split()
    while True:
        try:
            orbital_type, contract_num, _ = next(lines).split()
            contract_num = int(contract_num)
            if orbital_type == "F":
                msg = "F orbitals are not yet supported. Please choose a simpler basis set"
                raise ValueError(msg)
            # SP rows carry an extra coefficient column for the p orbitals.
            if orbital_type == "SP":
                exponents, coeffs, coeffps = _read_primitives(lines, contract_num, 3)
            else:
                exponents, coeffs = _read_primitives(lines, contract_num, 2)
            if orbital_type == "S":
                orbitals.append(AO("S", contract_num, exponents, coeffs))
            elif orbital_type == "P":
                for angular in ["Px", "Py", "Pz"]:
                    orbitals.append(AO(angular, contract_num, exponents, coeffs))
            elif orbital_type == "D":
                for angular in ["Dx2", "Dy2", "Dz2", "Dxy", "Dyz", "Dzx"]:
                    orbitals.append(AO(angular, contract_num, exponents, coeffs))
            elif orbital_type == "SP":
                # One s orbital from the first coefficient column ...
                orbitals.append(AO("S", contract_num, exponents, coeffs))
                # ... and three p orbitals from the second.
                for angular in ["Px", "Py", "Pz"]:
                    orbitals.append(AO(angular, contract_num, exponents, coeffps))
        except StopIteration:
            # No more shells for this atom.
            break
    return atom_symbol, orbitals
def get_basis_set(basis_set: str, atomic_nos: tuple):
    """
    Performs an API GET to basissetexchange.org for basis set of the specified
    atomic numbers.  Returns a dict mapping atom symbol -> list of AO objects,
    or None when the basis set contains an unsupported orbital type.
    """
    atomic_nos_string = ','.join(map(str, atomic_nos))
    response = requests.get(f"https://www.basissetexchange.org/api/basis/{basis_set}/format/gaussian94/?version=1&elements={atomic_nos_string}")
    if not response.ok:
        raise Exception(response.json()["message"])
    basis_set = {}
    text = response.text.split("\n")
    # print out header (the "!" comment lines that precede the data)
    for line in text:
        if line.startswith("!"):
            _LOGGER.info(line.lstrip("!"))
        else:
            break
    # Remove header and blank lines
    text = [line for line in text if (
        line
        and not line.startswith("!")
    )]
    # iterate through the lines, make a new basis for each section of the text
    # new section denoted by "****"
    basis_lines = []
    for line in text:
        if line.startswith("*"):
            # Guard against empty sections: the payload may open with a "****"
            # separator before the first atom, which previously crashed
            # parse_basis_lines on an empty line list.
            if basis_lines:
                try:
                    atom, orbitals = parse_basis_lines(basis_lines)
                except ValueError as e:
                    _LOGGER.error(e)
                    return
                basis_set[atom] = orbitals
            basis_lines = []
        else:
            basis_lines.append(line)
    return basis_set
d797ff101f30669e899d5350f2889ccccfcd1a17 | 849 | py | Python | controls.py | juandigomez/me366j | 9f82d0ea2b6e4b422be0add0ceb1a842e0dd6b21 | [
"MIT"
] | null | null | null | controls.py | juandigomez/me366j | 9f82d0ea2b6e4b422be0add0ceb1a842e0dd6b21 | [
"MIT"
] | null | null | null | controls.py | juandigomez/me366j | 9f82d0ea2b6e4b422be0add0ceb1a842e0dd6b21 | [
"MIT"
] | null | null | null | import pygame
import sys

# Initialise pygame; the second set_mode call below resizes the window,
# so only the 640x280 surface is ever shown.
pygame.init()
screen = pygame.display.set_mode((640, 480))
clock = pygame.time.Clock()

# Current position, updated by the arrow keys.
x = 0
y = 0

# use a (r, g, b) tuple for color (currently unused)
yellow = (255, 255, 0)

# create the basic window/screen and a title/caption
# default is a black background
screen = pygame.display.set_mode((640, 280))
pygame.display.set_caption("Text adventures with Pygame")

# pick a font you have and set its size
myfont = pygame.font.SysFont(None, 30)
pygame.display.set_caption('Animation')

while True:
    clock.tick(30)  # cap the loop at 30 frames per second
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
    pressed = pygame.key.get_pressed()
    # Only one direction is handled per frame (if/elif chain).
    if pressed[pygame.K_UP]:
        y += 1
        print(y)
    elif pressed[pygame.K_DOWN]:
        y -= 1
        print(y)
    elif pressed[pygame.K_RIGHT]:
        x += 1
        print(x)
    elif pressed[pygame.K_LEFT]:
        x -= 1
        print(x)
    pygame.display.flip()

pygame.quit()  # unreachable: the loop above only exits via sys.exit()
d798c9e9d158d810636d91b75d39f308e04e9254 | 1,439 | py | Python | scripts/conversion/rename_associations.py | xapple/libcbm_runner | d042bc45e0bb9bcf2c59330b67e9a836d237ccbf | [
"MIT"
] | 2 | 2019-10-18T15:39:53.000Z | 2022-02-22T17:54:56.000Z | scripts/conversion/rename_associations.py | xapple/libcbm_runner | d042bc45e0bb9bcf2c59330b67e9a836d237ccbf | [
"MIT"
] | null | null | null | scripts/conversion/rename_associations.py | xapple/libcbm_runner | d042bc45e0bb9bcf2c59330b67e9a836d237ccbf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by Lucas Sinclair and Paul Rougieux.
JRC Biomass Project.
Unit D1 Bioeconomy.
This script will rename the header column of the file:
* /common/associations.csv
Before running this script the headers are simply "A", "B", "C".
After running this script, the new headers will be:
* "category"
* "name_input"
* "name_aidb"
"""
# Built-in modules #
# Third party modules #
import pandas
from tqdm import tqdm
# First party modules #
# Internal modules #
from libcbm_runner.core.continent import continent
###############################################################################
class RenameAssociations(object):
    """Rewrite the header row of a country's `associations.csv` file.

    Replaces the generic "A", "B", "C" column names with
    "category", "name_input", "name_aidb".
    """

    def __init__(self, country):
        # The country whose associations file will be renamed #
        self.country = country

    def __call__(self, verbose=False):
        # Locate the CSV on disk #
        csv_path = str(self.country.orig_data.paths.associations)
        # Read, relabel the three columns, and write back in place #
        frame = pandas.read_csv(csv_path)
        frame.columns = ["category", "name_input", "name_aidb"]
        frame.to_csv(csv_path, index=False, float_format='%g')
###############################################################################
if __name__ == '__main__':
    # Build one renamer per country and run them all with a progress bar #
    for renamer in tqdm([RenameAssociations(country) for country in continent]):
        renamer()
| 24.810345 | 79 | 0.589993 | 503 | 0.349548 | 0 | 0 | 0 | 0 | 0 | 0 | 834 | 0.579569 |
d798da1a3531801a3799d983990b94ffd796a480 | 342 | py | Python | _Training_/RegEx - HackerRank/1. Introduction/Matching Anything But a Newline.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | _Training_/RegEx - HackerRank/1. Introduction/Matching Anything But a Newline.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | _Training_/RegEx - HackerRank/1. Introduction/Matching Anything But a Newline.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | # https://www.hackerrank.com/challenges/matching-anything-but-new-line/problem
import re
# Inputs
standard_input = """123.456.abc.def"""
regex_pattern = r".{3}\..{3}\..{3}\..{3}$" # Do not delete 'r'.
test_string = input()
# 123.456.abc.def
match = re.match(regex_pattern, test_string) is not None
print(str(match).lower())
# true
| 16.285714 | 78 | 0.669591 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 176 | 0.51462 |
ad02704fb18873433d885fc6641f661f8839081f | 3,004 | py | Python | test1.py | czyczyyzc/MyForElise | dcbf5924d3d63f441d3247741828804f74a29345 | [
"MIT"
] | null | null | null | test1.py | czyczyyzc/MyForElise | dcbf5924d3d63f441d3247741828804f74a29345 | [
"MIT"
] | null | null | null | test1.py | czyczyyzc/MyForElise | dcbf5924d3d63f441d3247741828804f74a29345 | [
"MIT"
] | null | null | null | import time
import numpy as np
import tensorflow as tf
from yalenet import YaleNet
from Mybase.solver import Solver
"""
def test():
mdl = YaleNet(cls_num=1000, reg=1e-4, typ=tf.float32)
sov = Solver(mdl,
opm_cfg={
'lr_base': 0.005,
'decay_rule': 'fixed',
#'decay_rule': 'exponential',
'decay_rate': 0.5,
'decay_step': 50,
'staircase': False,
#'optim_rule': 'adam',
'optim_rule': 'momentum',
'momentum': 0.9,
'use_nesterov': True
},
gpu_lst = '0',
bat_siz = 50,
tra_num = 2000,
val_num = 100,
epc_num = 200000,
min_que_tra = 10000,
min_que_val = 1000,
prt_ena = True,
itr_per_prt = 20,
tst_num = None,
tst_shw = True,
tst_sav = True,
mdl_nam = 'model.ckpt',
mdl_dir = 'Mybase/Model',
log_dir = 'Mybase/logdata',
dat_dir = 'Mybase/datasets',
mov_ave_dca = 0.99)
print('TRAINING...')
sov.train()
'''
print('TESTING...')
sov.test()
sov.display_detections()
#sov.show_loss_acc()
'''
"""
def test():
    """Build the 21-class YaleNet and train it with the configured Solver."""
    model = YaleNet(cls_num=21, reg=1e-4, typ=tf.float32)
    # Optimizer configuration: fixed-rate SGD with Nesterov momentum.
    optimizer_config = {
        'lr_base': 1e-5,
        'decay_rule': 'fixed',
        #'decay_rule': 'exponential',
        'decay_rate': 0.5,
        'decay_step': 50,
        'staircase': False,
        #'optim_rule': 'adam',
        'optim_rule': 'momentum',
        'momentum': 0.9,
        'use_nesterov': True
    }
    solver = Solver(model,
                    opm_cfg=optimizer_config,
                    gpu_lst='0,1,2,3',
                    bat_siz=4,
                    tra_num=2000,
                    val_num=100,
                    epc_num=200000,
                    min_que_tra=4000,
                    min_que_val=200,
                    prt_ena=True,
                    itr_per_prt=20,
                    tst_num=None,
                    tst_shw=True,
                    tst_sav=True,
                    mdl_nam='model.ckpt',
                    mdl_dir='Mybase/Model',
                    log_dir='Mybase/logdata',
                    dat_dir='Mybase/datasets',
                    mov_ave_dca=0.99)
    print('TRAINING...')
    solver.train()
    # Testing / visualisation path (disabled):
    # print('TESTING...')
    # solver.test()
    # solver.display_detections()
    # solver.show_loss_acc()

test()
| 32.301075 | 57 | 0.374168 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,790 | 0.595872 |
ad032b910fb71a08f9b40c52e3ef58efd6aac044 | 368 | py | Python | a4/decrypt/elliptic.py | fultonms/crypto | a3819e3e81b9f93b818a63382183c1804d2edacc | [
"MIT"
] | null | null | null | a4/decrypt/elliptic.py | fultonms/crypto | a3819e3e81b9f93b818a63382183c1804d2edacc | [
"MIT"
] | null | null | null | a4/decrypt/elliptic.py | fultonms/crypto | a3819e3e81b9f93b818a63382183c1804d2edacc | [
"MIT"
] | null | null | null | import argparse
# Positional arguments: order matters (encrypted text first, then key file).
parser = argparse.ArgumentParser(description="Decrpyt a selection of text from a substitution cypher, with the provided key")
parser.add_argument('cryptFile', metavar='encrypted', type=str, help='Path to the encrpyted text')
parser.add_argument('keyFile', metavar='key', type=str, help='Path to the key file')
# NOTE: parsing happens at import time, so running this module requires both
# paths on argv.
args = parser.parse_args()
# Substitution mapping (cyphertext char -> plaintext); presumably filled in
# later in the file — TODO confirm.
key = dict()
| 40.888889 | 125 | 0.766304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 165 | 0.44837 |
ad049c2108d387b7a21cb9771949419fab0bb4c8 | 251 | py | Python | examples/hist.py | RyanAugust/geoplotlib | 97ae83fc05d19237db79be66eb577906c35e8db5 | [
"MIT"
] | 1,021 | 2015-02-26T12:08:01.000Z | 2022-03-15T10:04:29.000Z | examples/hist.py | RyanAugust/geoplotlib | 97ae83fc05d19237db79be66eb577906c35e8db5 | [
"MIT"
] | 51 | 2015-03-27T20:46:44.000Z | 2022-02-03T09:58:35.000Z | examples/hist.py | RyanAugust/geoplotlib | 97ae83fc05d19237db79be66eb577906c35e8db5 | [
"MIT"
] | 196 | 2015-03-25T02:32:28.000Z | 2022-03-25T23:07:22.000Z | """
Example of 2D histogram
"""
import geoplotlib
from geoplotlib.utils import read_csv, BoundingBox
# Load the OpenCellID sample points for Denmark.
data = read_csv('data/opencellid_dk.csv')
# 2D histogram layer: square-root color scale, 8-pixel bins.
geoplotlib.hist(data, colorscale='sqrt', binsize=8)
# Frame the map on Denmark before rendering.
geoplotlib.set_bbox(BoundingBox.DK)
geoplotlib.show()
| 20.916667 | 51 | 0.784861 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.243028 |
ad051771273652e3931222dfcf867589f136b709 | 20,814 | py | Python | gui/StaffScreen.py | Harsh0294/carrentsystem | c94f8cddd02b0057bac2c8813ec90460c9496f3b | [
"MIT"
] | null | null | null | gui/StaffScreen.py | Harsh0294/carrentsystem | c94f8cddd02b0057bac2c8813ec90460c9496f3b | [
"MIT"
] | null | null | null | gui/StaffScreen.py | Harsh0294/carrentsystem | c94f8cddd02b0057bac2c8813ec90460c9496f3b | [
"MIT"
] | null | null | null | from PyQt4 import QtCore, QtGui
from Vehicles import *
class StaffScreen(QtGui.QMainWindow):
combo_box_items = ["Car", "Van", "Camper Van"]
# Class constructor parent represents login screen
    def __init__(self, parent, staff_user, vehicles):
        """Build and show the staff screen.

        parent     -- the login-screen window (Qt parent)
        staff_user -- the logged-in staff member; used for all vehicle operations
        vehicles   -- dict mapping registration number -> vehicle object
        """
        super(StaffScreen, self).__init__(parent)
        self.staff_user = staff_user   # staff account performing the operations
        self.vehicles = vehicles       # registration number -> vehicle
        self.setupUi()                 # build the widgets
        self.load_vehicles_to_list()   # populate the vehicle list widget
        self.show()
'''
this method loads available vehicles into list so that staff user can
make operations such as delete,update on the vehicle
'''
def load_vehicles_to_list(self):
self.available_cars_list_widget.clear()
for key in self.vehicles:
self.available_cars_list_widget.addItem(str(key))
# initialize GUI elements (this method created with QT designer)
    def setupUi(self):
        """Create and lay out all widgets (generated with Qt Designer)."""
        # Main window and layout skeleton.
        self.resize(608, 494)
        self.centralwidget = QtGui.QWidget(self)
        self.verticalLayout_3 = QtGui.QVBoxLayout(self.centralwidget)
        self.verticalLayout_2 = QtGui.QVBoxLayout()
        # Top row: welcome label, company label, logout button.
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.welcome_label = QtGui.QLabel(self.centralwidget)
        self.horizontalLayout.addWidget(self.welcome_label)
        self.company_name_label = QtGui.QLabel(self.centralwidget)
        self.horizontalLayout.addWidget(self.company_name_label)
        self.logout_button = QtGui.QPushButton(self.centralwidget)
        self.horizontalLayout.addWidget(self.logout_button)
        self.verticalLayout_2.addLayout(self.horizontalLayout)
        # Middle: vehicle list on the left, type list + detail form on the right.
        self.horizontalLayout_2 = QtGui.QHBoxLayout()
        self.available_cars_list_widget = QtGui.QListWidget(self.centralwidget)
        self.horizontalLayout_2.addWidget(self.available_cars_list_widget)
        self.verticalLayout = QtGui.QVBoxLayout()
        self.label_12 = QtGui.QLabel(self.centralwidget)
        self.verticalLayout.addWidget(self.label_12)
        self.type_list_widget = QtGui.QListWidget(self.centralwidget)
        self.type_list_widget.setEnabled(True)
        self.verticalLayout.addWidget(self.type_list_widget)
        # Detail form grid: one label + one line edit per vehicle attribute.
        self.gridLayout = QtGui.QGridLayout()
        self.daily_cost_text_box = QtGui.QLineEdit(self.centralwidget)
        self.gridLayout.addWidget(self.daily_cost_text_box, 4, 1, 1, 1)
        self.weekend_cost_text_box = QtGui.QLineEdit(self.centralwidget)
        self.gridLayout.addWidget(self.weekend_cost_text_box, 6, 1, 1, 1)
        self.number_of_doors_text_box = QtGui.QLineEdit(self.centralwidget)
        self.gridLayout.addWidget(self.number_of_doors_text_box, 9, 1, 1, 1)
        self.number_of_passenger_text_box = QtGui.QLineEdit(self.centralwidget)
        self.gridLayout.addWidget(self.number_of_passenger_text_box, 8, 1, 1, 1)
        self.label_3 = QtGui.QLabel(self.centralwidget)
        self.gridLayout.addWidget(self.label_3, 1, 0, 1, 1)
        self.label_7 = QtGui.QLabel(self.centralwidget)
        self.gridLayout.addWidget(self.label_7, 5, 0, 1, 1)
        self.label_6 = QtGui.QLabel(self.centralwidget)
        self.gridLayout.addWidget(self.label_6, 4, 0, 1, 1)
        self.label_4 = QtGui.QLabel(self.centralwidget)
        self.gridLayout.addWidget(self.label_4, 2, 0, 1, 1)
        self.number_of_bed_text_box = QtGui.QLineEdit(self.centralwidget)
        self.gridLayout.addWidget(self.number_of_bed_text_box, 7, 1, 1, 1)
        self.label_11 = QtGui.QLabel(self.centralwidget)
        self.gridLayout.addWidget(self.label_11, 9, 0, 1, 1)
        self.label_5 = QtGui.QLabel(self.centralwidget)
        self.gridLayout.addWidget(self.label_5, 3, 0, 1, 1)
        self.label_10 = QtGui.QLabel(self.centralwidget)
        self.gridLayout.addWidget(self.label_10, 8, 0, 1, 1)
        self.label_8 = QtGui.QLabel(self.centralwidget)
        self.gridLayout.addWidget(self.label_8, 6, 0, 1, 1)
        self.label_9 = QtGui.QLabel(self.centralwidget)
        self.gridLayout.addWidget(self.label_9, 7, 0, 1, 1)
        self.weekly_cost_text_box = QtGui.QLineEdit(self.centralwidget)
        self.gridLayout.addWidget(self.weekly_cost_text_box, 5, 1, 1, 1)
        self.fuel_consumption_text_box = QtGui.QLineEdit(self.centralwidget)
        self.gridLayout.addWidget(self.fuel_consumption_text_box, 3, 1, 1, 1)
        self.model_text_box = QtGui.QLineEdit(self.centralwidget)
        self.gridLayout.addWidget(self.model_text_box, 2, 1, 1, 1)
        self.label_14 = QtGui.QLabel(self.centralwidget)
        self.gridLayout.addWidget(self.label_14, 0, 0, 1, 1)
        self.registration_text_box = QtGui.QLineEdit(self.centralwidget)
        self.gridLayout.addWidget(self.registration_text_box, 0, 1, 1, 1)
        self.make_text_box = QtGui.QLineEdit(self.centralwidget)
        self.gridLayout.addWidget(self.make_text_box, 1, 1, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout)
        self.horizontalLayout_2.addLayout(self.verticalLayout)
        self.verticalLayout_2.addLayout(self.horizontalLayout_2)
        # Bottom row: delete / insert / update buttons.
        self.horizontalLayout_3 = QtGui.QHBoxLayout()
        self.delete_button = QtGui.QPushButton(self.centralwidget)
        self.horizontalLayout_3.addWidget(self.delete_button)
        self.insert_button = QtGui.QPushButton(self.centralwidget)
        self.horizontalLayout_3.addWidget(self.insert_button)
        self.update_button = QtGui.QPushButton(self.centralwidget)
        self.horizontalLayout_3.addWidget(self.update_button)
        self.verticalLayout_2.addLayout(self.horizontalLayout_3)
        self.verticalLayout_3.addLayout(self.verticalLayout_2)
        # Raise widgets to the front (Designer-generated stacking order).
        self.welcome_label.raise_()
        self.logout_button.raise_()
        self.company_name_label.raise_()
        self.available_cars_list_widget.raise_()
        self.label_3.raise_()
        self.label_4.raise_()
        self.label_5.raise_()
        self.label_6.raise_()
        self.label_7.raise_()
        self.label_8.raise_()
        self.label_9.raise_()
        self.label_10.raise_()
        self.label_11.raise_()
        self.label_12.raise_()
        self.delete_button.raise_()
        self.update_button.raise_()
        self.insert_button.raise_()
        self.setCentralWidget(self.centralwidget)
        # Static texts for the window, labels and buttons.
        self.setWindowTitle("Staff Screen")
        self.welcome_label.setText("Welcome:" + self.staff_user.user_name)
        self.company_name_label.setText("Company:" + self.staff_user.company.company_name)
        self.logout_button.setText("Logout")
        self.label_14.setText("Registration Number:")
        self.label_12.setText("Type:")
        self.label_3.setText("Make:")
        self.label_7.setText("Weekly Cost:")
        self.label_6.setText("Daily Cost:")
        self.label_4.setText("Model:")
        self.label_11.setText("Number of Doors:")
        self.label_5.setText("Fuel Consumption:")
        self.label_10.setText("Number of Passenger:")
        self.label_8.setText("Weekend Cost:")
        self.label_9.setText("Number of Bed:")
        self.delete_button.setText("Delete ")
        self.insert_button.setText("Insert")
        self.update_button.setText("Update")
        self.type_list_widget.addItems(self.combo_box_items)
        # adding events to buttons and lists
        self.available_cars_list_widget.itemClicked.connect(self.display_vehicle_info)
        self.logout_button.clicked.connect(self.logout_button_action)
        self.delete_button.clicked.connect(self.delete_button_action)
        self.update_button.clicked.connect(self.update_button_action)
        self.insert_button.clicked.connect(self.insert_button_action)
def insert_button_action(self):
if not self.type_list_widget.currentItem():
QtGui.QMessageBox.warning(self, "Warning", "Please choose a vehicle type from the list")
else:
vehicle_type = self.type_list_widget.currentItem().text()
vehicle = None
if self.validate_essential_fields(vehicle_type):
if vehicle_type == "Car":
try:
vehicle = Car(self.make_text_box.text().strip(), self.model_text_box.text().strip(),
int(self.fuel_consumption_text_box.text().strip()),
self.registration_text_box.text().strip(),
int(self.number_of_passenger_text_box.text().strip()),
int(self.number_of_passenger_text_box.text().strip()),
int(self.daily_cost_text_box.text().strip()),
int(self.weekly_cost_text_box.text().strip()),
int(self.weekend_cost_text_box.text().strip()))
except ValueError:
QtGui.QMessageBox.warning(self, "Warning",
"Vehicle could not created make sure you have correct format of data")
elif vehicle_type:
try:
vehicle = Van(self.make_text_box.text(), self.model_text_box.text().strip(),
int(self.fuel_consumption_text_box.text().strip()),
self.registration_text_box.text().strip(),
int(self.number_of_passenger_text_box.text().strip()),
int(self.daily_cost_text_box.text().strip()),
int(self.weekly_cost_text_box.text().strip()),
int(self.weekend_cost_text_box.text().strip()))
except ValueError:
QtGui.QMessageBox.warning(self, "Warning",
"Vehicle could not created make sure you have correct format of data")
elif vehicle_type == "Camper Van":
try:
vehicle = CamperVan(self.make_text_box.text().strip(), self.model_text_box.text().strip(),
int(self.fuel_consumption_text_box.text().strip()),
int(self.number_of_bed_text_box.text().strip()),
self.registration_text_box.text().strip(),
int(self.daily_cost_text_box.text().strip()),
int(self.weekly_cost_text_box.text().strip()),
int(self.weekend_cost_text_box.text().strip()))
except ValueError:
QtGui.QMessageBox.warning(self, "Warning",
"Vehicle could not created make sure you have correct format of data")
if vehicle is not None and self.staff_user.insert_vehicle(vehicle):
QtGui.QMessageBox.information(self, "Information", "Vehicle has inserted")
self.clear_all_text_fields()
self.load_vehicles_to_list()
else:
QtGui.QMessageBox.warning(self, "Warning",
"This registration number is exist please update the vehicle")
'''
this method deletes selected evehicle from the dictionary if it has not booked
'''
def delete_button_action(self):
if not self.available_cars_list_widget.currentItem().text():
QtGui.QMessageBox.warning(self, "Warning", "Please choose a vehicle from the list")
else:
registration_number = self.available_cars_list_widget.currentItem().text()
reply = QtGui.QMessageBox.question(self, 'Message',
"Are you sure to you want to delete " + registration_number + " ?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
if self.staff_user.delete_vehicle(registration_number):
self.clear_all_text_fields()
self.load_vehicles_to_list()
QtGui.QMessageBox.information(self, "Information", "vehicle deleted")
else:
QtGui.QMessageBox.warning(self, "Warning",
"Vehicle has booking request please try to update entity")
'''
This method checks whether necessary fields are empty or not. Extra fields to be checked depends
on the param parameter. i.e regardless of type this method checks whether model of vehicle has entered. However, if param equals to 'Car'
it will check number of passenger and number of doors
'''
def validate_essential_fields(self, param):
if not self.make_text_box.text() or not self.model_text_box.text() or not self.fuel_consumption_text_box.text() or \
not self.daily_cost_text_box.text() or not self.weekly_cost_text_box.text() or not self.weekend_cost_text_box.text() or \
len(self.make_text_box.text().strip()) == 0 or len(
self.model_text_box.text().strip()) == 0 or len(
self.fuel_consumption_text_box.text().strip()) == 0 or \
len(self.daily_cost_text_box.text().strip()) == 0 or len(
self.weekly_cost_text_box.text().strip()) == 0 or len(
self.weekend_cost_text_box.text().strip()) == 0:
return False
elif param.lower() == "Car":
if not self.number_of_passenger_text_box.text() or not self.number_of_doors_text_box.text() or len(
self.number_of_passenger_text_box.text().strip()) == 0 or len(
self.number_of_doors_text_box.text().strip() == 0):
return False
elif param.lower() == "Camper Van":
if not self.number_of_bed_text_box.text() or len(self.number_of_bed_text_box.text().strip()) == 0:
return False
else:
return True
'''
this method will update selected vehicle from the list
'''
def update_button_action(self):
if not self.available_cars_list_widget.currentItem() or (self.type_list_widget.currentItem() is None):
QtGui.QMessageBox.warning(self, "Warning", "Please choose a vehicle from the list")
else:
registration_number = self.available_cars_list_widget.currentItem().text().strip()
vehicle = self.vehicles[registration_number]
vehicle_type = self.type_list_widget.currentItem().text()
if self.validate_essential_fields(vehicle_type):
updated_vehicle = None
if isinstance(vehicle, Car):
try:
updated_vehicle = Car(self.make_text_box.text().strip(), self.model_text_box.text().strip(),
int(self.fuel_consumption_text_box.text().strip()), registration_number,
int(self.number_of_passenger_text_box.text().strip()),
int(self.number_of_doors_text_box.text().strip()),
int(self.daily_cost_text_box.text().strip()),
int(self.weekly_cost_text_box.text().strip()),
int(self.weekend_cost_text_box.text().strip()))
except ValueError:
QtGui.QMessageBox.warning(self, "Warning",
"Vehicle could not created make sure you have correct format of data")
elif isinstance(vehicle, Van):
try:
updated_vehicle = Van(self.make_text_box.text(), self.model_text_box.text().strip(),
int(self.fuel_consumption_text_box.text().strip()), registration_number,
int(self.number_of_passenger_text_box.text().strip()),
int(self.daily_cost_text_box.text().strip()),
int(self.weekly_cost_text_box.text().strip()),
int(self.weekend_cost_text_box.text().strip()))
except ValueError:
QtGui.QMessageBox.warning(self, "Warning",
"Vehicle could not created make sure you have correct format of data")
elif isinstance(vehicle, CamperVan):
try:
updated_vehicle = CamperVan(self.make_text_box.text().strip(),
self.model_text_box.text().strip(),
int(self.fuel_consumption_text_box.text().strip()),
int(self.number_of_bed_text_box.text().strip(), registration_number,
int(self.daily_cost_text_box.text().strip()),
int(self.weekly_cost_text_box.text().strip()),
int(self.weekend_cost_text_box.text().strip())))
except ValueError:
QtGui.QMessageBox.warning(self, "Warning",
"Vehicle could not created make sure you have correct format of data")
if updated_vehicle is not None:
self.staff_user.update_vehicle(registration_number, updated_vehicle)
QtGui.QMessageBox.information(self, "Information", "Update process has complete")
else:
QtGui.QMessageBox.warning(self, "Warning",
"You can not leave necessary fields blank")
'''
code sample for dialog box taken from http://zetcode.com/gui/pyqt4/firstprograms/
this method logs user out from the system. basically it loads login screen again while hiding current screen
'''
    def logout_button_action(self):
        """Ask for confirmation and, if confirmed, return to the login screen."""
        reply = QtGui.QMessageBox.question(self, 'Message', "Are you sure to logout?",
                                           QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
        if reply == QtGui.QMessageBox.Yes:
            # The parent window is the login screen: reset it and hide ourselves.
            self.parent().reload()
            self.hide()
'''
clears all text fields
'''
def clear_all_text_fields(self):
self.registration_text_box.clear()
self.daily_cost_text_box.clear()
self.weekend_cost_text_box.clear()
self.number_of_doors_text_box.clear()
self.number_of_passenger_text_box.clear()
self.number_of_bed_text_box.clear()
self.weekly_cost_text_box.clear()
self.model_text_box.clear()
self.make_text_box.clear()
self.fuel_consumption_text_box.clear()
'''
when user selects a vehicle from the list this method sets details into the text field using
set_information_to_text_fields method
'''
def display_vehicle_info(self):
self.clear_all_text_fields()
if not self.available_cars_list_widget.currentItem():
QtGui.QMessageBox.warning(self, "Warning", "Please choose a vehicle from the list")
else:
registration_number = self.available_cars_list_widget.currentItem().text()
self.set_information_to_text_fields(registration_number)
    def set_information_to_text_fields(self, registration_number):
        """Populate the form fields from the vehicle with the given registration number."""
        vehicle = self.vehicles[registration_number]
        self.registration_text_box.setText(registration_number)
        self.make_text_box.setText(vehicle.make)
        self.model_text_box.setText(vehicle.model)
        self.fuel_consumption_text_box.setText(str(vehicle.fuel_consumption))
        self.daily_cost_text_box.setText(str(vehicle.cost.daily_cost))
        self.weekly_cost_text_box.setText(str(vehicle.cost.weekly_cost))
        self.weekend_cost_text_box.setText(str(vehicle.cost.weekend_cost))
        # Select the matching row in the type list (0=Car, 1=Van, 2=Camper Van)
        # and fill the type-specific fields.  NOTE(review): isinstance order
        # matters if these classes are related by inheritance — confirm.
        if isinstance(vehicle, Car):
            self.type_list_widget.setCurrentRow(0)
            self.number_of_passenger_text_box.setText(str(vehicle.number_of_passenger))
            self.number_of_doors_text_box.setText(str(vehicle.number_of_doors))
        elif isinstance(vehicle, Van):
            self.type_list_widget.setCurrentRow(1)
            self.number_of_passenger_text_box.setText(str(vehicle.number_of_passenger))
        elif isinstance(vehicle, CamperVan):
            self.type_list_widget.setCurrentRow(2)
            self.number_of_bed_text_box.setText(str(vehicle.number_of_bed))
| 55.504 | 141 | 0.611608 | 20,756 | 0.997213 | 0 | 0 | 0 | 0 | 0 | 0 | 2,494 | 0.119823 |
ad052c19680261e01fda678f8a14469fccd45f3c | 10,083 | py | Python | southwestalerts/southwest.py | hoopsbwc34/southwest-alerts | 39a9e13cb045cf3601b02518fc4e13753cce9ca6 | [
"MIT"
] | null | null | null | southwestalerts/southwest.py | hoopsbwc34/southwest-alerts | 39a9e13cb045cf3601b02518fc4e13753cce9ca6 | [
"MIT"
] | null | null | null | southwestalerts/southwest.py | hoopsbwc34/southwest-alerts | 39a9e13cb045cf3601b02518fc4e13753cce9ca6 | [
"MIT"
] | null | null | null | import json
import time
import requests
BASE_URL = 'https://mobile.southwest.com'
class Southwest(object):
def __init__(self, username, password, headers, cookies, account):
self._session = _SouthwestSession(username, password, headers, cookies, account)
def get_upcoming_trips(self):
# return self._session.get(
# '/api/mobile-air-booking/v1/mobile-air-booking/page/view-reservation/{record_locator}?{first_name}&last-name={last_name}'.format(
# record_locator=record_locator,
# first_name=first_name,
# last_name=last_name
return self._session.get(
'/api/mobile-misc/v1/mobile-misc/page/upcoming-trips'
)
def start_change_flight(self, record_locator, first_name, last_name):
"""Start the flight change process.
This returns the flight including itinerary."""
resp = self._session.get(
'/api/extensions/v1/mobile/reservations/record-locator/{record_locator}?first-name={first_name}&last-name={last_name}&action=CHANGE'.format(
record_locator=record_locator,
first_name=first_name,
last_name=last_name
))
return resp
def get_available_change_flights(self, record_locator, first_name, last_name, departure_date, origin_airport,
destination_airport):
"""Select a specific flight and continue the checkout process."""
url = '/api/extensions/v1/mobile/reservations/record-locator/{record_locator}/products?first-name={first_name}&last-name={last_name}&is-senior-passenger=false&trip%5B%5D%5Borigination%5D={origin_airport}&trip%5B%5D%5Bdestination%5D={destination_airport}&trip%5B%5D%5Bdeparture-date%5D={departure_date}'.format(
record_locator=record_locator,
first_name=first_name,
last_name=last_name,
origin_airport=origin_airport,
destination_airport=destination_airport,
departure_date=departure_date
)
return self._session.get(url)
def get_price_change_flight(self, record_locator, first_name, last_name, product_id):
url = '/api/reservations-api/v1/air-reservations/reservations/record-locator/{record_locator}/prices?first-name={first_name}&last-name={last_name}&product-id%5B%5D={product_id}'.format(
record_locator=record_locator,
first_name=first_name,
last_name=last_name,
product_id=product_id
)
return self._session.get(url)
def get_cancellation_details(self, record_locator, first_name, last_name):
# url = '/api/reservations-api/v1/air-reservations/reservations/record-locator/{record_locator}?first-name={first_name}&last-name={last_name}&action=CANCEL'.format(
url = '/api/mobile-air-booking/v1/mobile-air-booking/page/view-reservation/{record_locator}?first-name={first_name}&last-name={last_name}'.format(
record_locator=record_locator,
first_name=first_name,
last_name=last_name
)
temp = self._session.get(url)
if not (temp['viewReservationViewPage']['greyBoxMessage'] is None):
return None
url = '/api/mobile-air-booking/v1/mobile-air-booking/page/flights/cancel-bound/{record_locator}?passenger-search-token={token}'.format(
record_locator=record_locator,
token=temp['viewReservationViewPage']['_links']['cancelBound']['query']['passenger-search-token']
)
temp = self._session.get(url)
url = '/api/mobile-air-booking/v1/mobile-air-booking/page/flights/cancel/refund-quote/{record_locator}'.format(
record_locator=record_locator
)
payload = temp['viewForCancelBoundPage']['_links']['refundQuote']['body']
return self._session.post(url, payload)
def get_available_flights(self, departure_date, origin_airport, destination_airport, currency='Points'):
    """Search bookable flights for one adult, priced in Rapid Rewards points.

    NOTE: ``currency`` is accepted for interface compatibility but unused;
    points pricing ('PTS') is hard-coded in the query string.

    Fix: the query string previously contained '¤cy=PTS' — an HTML-entity
    mojibake ('&curren' rendered as '¤') of the intended '&currency=PTS' —
    so the currency parameter was never sent correctly.
    """
    url = ('/api/mobile-air-shopping/v1/mobile-air-shopping/page/flights/products'
           '?origination-airport={origin_airport}'
           '&destination-airport={destination_airport}'
           '&departure-date={departure_date}'
           '&number-adult-passengers=1&currency=PTS').format(
        origin_airport=origin_airport,
        destination_airport=destination_airport,
        departure_date=departure_date
    )
    return self._session.get(url)
def get_available_flights_dollars(self, departure_date, origin_airport, destination_airport):
    """Search bookable flights for one adult, priced in US dollars.

    Fix: the query string previously contained '¤cy=USD' — an HTML-entity
    mojibake of the intended '&currency=USD' — so the currency parameter
    was never sent correctly.
    """
    url = ('/api/mobile-air-shopping/v1/mobile-air-shopping/page/flights/products'
           '?origination-airport={origin_airport}'
           '&destination-airport={destination_airport}'
           '&departure-date={departure_date}'
           '&number-adult-passengers=1&currency=USD').format(
        origin_airport=origin_airport,
        destination_airport=destination_airport,
        departure_date=departure_date
    )
    return self._session.get(url)
class _SouthwestSession():
    """Thin wrapper around ``requests.Session`` for the Southwest mobile API.

    Fixes applied: removed dead ``break`` after ``return`` in ``get``;
    replaced mutable list default arguments with tuples; documented that
    ``get`` returns None when every retry fails.
    """

    # Number of GET attempts before giving up (rate-limit dodging).
    _MAX_GET_ATTEMPTS = 7

    def __init__(self, username, password, headers, cookies, account):
        self._session = requests.Session()
        self._login(username, password, headers, cookies, account)

    def _login(self, username, password, headers, cookies, account):
        # Credentials are no longer posted here; the caller supplies an
        # already-authenticated account payload containing the token.
        self.account_number = account['customers.userInformation.accountNumber']
        self.access_token = account['access_token']
        self.headers = headers
        self.cookies = cookies

    def get(self, path, success_codes=(200,)):
        """GET ``path`` with the full header set, retrying up to
        ``_MAX_GET_ATTEMPTS`` times (5s pause per attempt).

        Returns the parsed JSON body, or None if no attempt returned 200.
        """
        for _attempt in range(self._MAX_GET_ATTEMPTS):
            print('.', end='', flush=True)
            time.sleep(5)
            resp = self._session.get(self._get_url(path), headers=self._get_headers_all(self.headers))
            if resp.status_code == 200:
                return self._parsed_response(resp, success_codes=success_codes)
        return None

    def getb(self, path, success_codes=(200,)):
        """GET ``path`` once with the brief (mostly-nulled) header set."""
        time.sleep(5)
        resp = self._session.get(self._get_url(path), headers=self._get_headers_brief(self.headers))
        return self._parsed_response(resp, success_codes=success_codes)

    def post(self, path, payload, success_codes=(200,)):
        """POST ``payload`` as JSON to ``path`` and return the parsed body."""
        tempheaders = self._get_headers_all(self.headers)
        tempheaders['content-type'] = 'application/json'
        resp = self._session.post(self._get_url(path), data=json.dumps(payload),
                                  headers=tempheaders)
        return self._parsed_response(resp, success_codes=success_codes)

    @staticmethod
    def _get_url(path):
        # BASE_URL is a module-level constant defined elsewhere in this file.
        return '{}{}'.format(BASE_URL, path)

    def _get_cookies(self, cookies):
        """Install browser-exported cookie dicts into the session's cookie jar."""
        for cookie in cookies:
            self._session.cookies.set(cookie['name'], cookie['value'],
                                      domain=cookie['domain'], path=cookie['path'])
        return self._session.cookies

    def _get_headers_brief(self, headers):
        """Return ``headers`` overlaid with the brief header set.

        Most request headers are nulled out; only the API key, token and a
        desktop user-agent are kept.
        """
        default = {
            'token': (self.access_token if hasattr(self, 'access_token') else None),
            'x-api-key': headers['x-api-key'],
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36',
            'origin': None,
            'content-type': None,
            'accept': None,
            'x-requested-with': None,
            'referer': None
        }
        return {**headers, **default}

    def _get_headers_all(self, headers):
        """Return ``headers`` with a desktop user-agent forced on top."""
        default = {
            'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36",
        }
        return {**headers, **default}

    @staticmethod
    def _parsed_response(response, success_codes=(200,)):
        """Parse a response as JSON; warn on 429, raise on other unexpected codes."""
        if response.status_code == 429:
            # Rate-limited: report it but still attempt to parse the body.
            print(response.text)
            print(
                'Invalid status code received. Expected {}. Received {}. '
                'This error usually indicates a rate limiting has kicked in from southwest. '
                'Wait and try again later.'.format(
                    success_codes, response.status_code))
        elif response.status_code not in success_codes:
            print(response.text)
            raise Exception(
                'Invalid status code received. Expected {}. Received {}.'.format(success_codes, response.status_code))
        return response.json()
| 47.561321 | 318 | 0.648517 | 9,988 | 0.990578 | 0 | 0 | 823 | 0.081623 | 0 | 0 | 4,369 | 0.433304 |
ad05edc84a23e5d2226eaa6c195a89e43c5ab6c0 | 17,502 | py | Python | lear-db/test_data/data_loader.py | jachurchill/lear | 1abeadfa8a68fe84eae28957fcd762d45712b931 | [
"Apache-2.0"
] | 1 | 2019-11-07T20:32:59.000Z | 2019-11-07T20:32:59.000Z | lear-db/test_data/data_loader.py | jachurchill/lear | 1abeadfa8a68fe84eae28957fcd762d45712b931 | [
"Apache-2.0"
] | null | null | null | lear-db/test_data/data_loader.py | jachurchill/lear | 1abeadfa8a68fe84eae28957fcd762d45712b931 | [
"Apache-2.0"
] | null | null | null | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loads the businesses from the COLIN_API, as provided in a csv file."""
import copy
import csv
import datetime
import os
from http import HTTPStatus
import pycountry
import requests
from colin_api.models import CorpName
from dotenv import find_dotenv, load_dotenv
from flask import Flask
from legal_api import db
from legal_api.config import get_named_config
from legal_api.models import (
Address,
Alias,
Business,
Filing,
Office,
Party,
PartyRole,
Resolution,
ShareClass,
ShareSeries,
User,
)
from legal_api.models.colin_event_id import ColinEventId
from pytz import timezone
from sqlalchemy_continuum import versioning_manager
# Load environment overrides from a local .env file, if present.
load_dotenv(find_dotenv())

# Minimal Flask app so the legal_api SQLAlchemy models can be used from a
# standalone script (an app context is pushed inside load_corps()).
FLASK_APP = Flask(__name__)
FLASK_APP.config.from_object(get_named_config('production'))
db.init_app(FLASK_APP)

# External service endpoint and filing-submitter username (from environment).
COLIN_API = os.getenv('COLIN_API', None)
UPDATER_USERNAME = os.getenv('UPDATER_USERNAME')

ROWCOUNT = 0                 # CSV rows processed so far
TIMEOUT = 15                 # seconds allowed per COLIN API request
FAILED_CORPS = []            # corp numbers that failed to load
NEW_CORPS = []               # corp numbers successfully loaded this run
LOADED_FILING_HISTORY = []   # corps whose historic filings were loaded
FAILED_FILING_HISTORY = []   # corps whose historic filings failed to load

# Which COLIN info endpoints must be fetched for each legal type.
BUSINESS_MODEL_INFO_TYPES = {
    Business.LegalTypes.BCOMP.value: [
        'business',
        'office',
        'parties',
        'sharestructure',
        'resolutions',
        'aliases'
    ],
    Business.LegalTypes.COOP.value: [
        'business',
        'office',
        'parties'
    ]
}
def get_oracle_info(corp_num: str, legal_type: str, info_type: str) -> dict:
    """Fetch one category of current business info from the COLIN API.

    'aliases' is rewritten to the name-translation endpoint; 'resolutions'
    and 'business' use their own URL shapes. On a non-200 response or an
    empty body the corp is recorded in FAILED_CORPS and ``{'failed': True}``
    is returned instead of raising.
    """
    if info_type == 'aliases':
        info_type = f'names/{CorpName.TypeCodes.TRANSLATION.value}'
    if info_type == 'resolutions':
        url = f'{COLIN_API}/api/v1/businesses/internal/{legal_type}/{corp_num}/{info_type}'
    elif info_type == 'business':
        url = f'{COLIN_API}/api/v1/businesses/{legal_type}/{corp_num}'
    else:
        url = f'{COLIN_API}/api/v1/businesses/{legal_type}/{corp_num}/{info_type}'
    response = requests.get(url, timeout=TIMEOUT)
    if response.status_code != HTTPStatus.OK or not response.json():
        FAILED_CORPS.append(corp_num)
        print(f'skipping {corp_num} business {info_type} not found')
        return {'failed': True}
    return response.json()
def convert_to_datetime(datetime_str: str) -> datetime.datetime:
    """Convert a COLIN timestamp string into a timezone-aware UTC datetime.

    The incoming format is ``%Y-%m-%dT%H:%M:%S-00:00``. Uses the standard
    library's ``datetime.timezone.utc`` instead of ``pytz.timezone('UTC')``
    — they are equivalent for a fixed UTC offset, and pytz is unnecessary
    here (stdlib UTC support is the recommended replacement).
    """
    naive = datetime.datetime.strptime(datetime_str, '%Y-%m-%dT%H:%M:%S-00:00')
    return naive.replace(tzinfo=datetime.timezone.utc)
def create_business(business_json: dict) -> Business:
    """Build an (unsaved) Business model from the COLIN 'business' payload.

    AR/AGM dates are optional; a missing AGM date falls back to the AR date.
    The business number, when present, becomes the tax id.
    """
    info = business_json['business']
    business = Business(
        identifier=info['identifier'],
        founding_date=convert_to_datetime(info['foundingDate']),
        last_ledger_timestamp=convert_to_datetime(info['lastLedgerTimestamp']),
        legal_name=info['legalName'],
        legal_type=info['legalType'],
        last_modified=datetime.datetime.utcnow()
    )
    last_ar = info['lastArDate']
    business.last_ar_date = datetime.datetime.fromisoformat(last_ar) if last_ar else None
    last_agm = info['lastAgmDate']
    business.last_agm_date = datetime.datetime.fromisoformat(last_agm) if last_agm \
        else business.last_ar_date
    if info.get('businessNumber', None):
        business.tax_id = info.get('businessNumber')
    return business
def create_address(address_json: dict, address_type: Address.ADDRESS_TYPES) -> Address:
    """Build an Address model from a COLIN address dict.

    The country string is resolved to an ISO alpha-2 code via pycountry's
    fuzzy search (first match wins).
    """
    address = Address()
    address.address_type = address_type
    field_map = {
        'city': 'addressCity',
        'delivery_instructions': 'deliveryInstructions',
        'postal_code': 'postalCode',
        'region': 'addressRegion',
        'street': 'streetAddress',
        'street_additional': 'streetAddressAdditional',
    }
    for attr, json_key in field_map.items():
        setattr(address, attr, address_json[json_key])
    address.country = pycountry.countries.search_fuzzy(address_json['addressCountry'])[0].alpha_2
    return address
def create_office(business: Business, addresses: list, office_type: str):
    """Attach a new Office of ``office_type`` holding ``addresses`` to ``business``."""
    if business.offices is None:
        business.offices = []
    office = Office()
    office.office_type = office_type
    office.addresses = addresses
    business.offices.append(office)
def create_share_class(share_class_info: dict) -> ShareClass:
    """Build a ShareClass, with its nested ShareSeries, from COLIN JSON.

    Optional numeric fields (max shares, par value, currency) default to None.
    """
    new_class = ShareClass(
        name=share_class_info['name'],
        priority=share_class_info['priority'],
        max_share_flag=share_class_info['hasMaximumShares'],
        max_shares=share_class_info.get('maxNumberOfShares', None),
        par_value_flag=share_class_info['hasParValue'],
        par_value=share_class_info.get('parValue', None),
        currency=share_class_info.get('currency', None),
        special_rights_flag=share_class_info['hasRightsOrRestrictions'],
    )
    for series_info in share_class_info['series']:
        new_class.series.append(
            ShareSeries(
                name=series_info['name'],
                priority=series_info['priority'],
                max_share_flag=series_info['hasMaximumShares'],
                max_shares=series_info.get('maxNumberOfShares', None),
                special_rights_flag=series_info['hasRightsOrRestrictions']
            )
        )
    return new_class
def add_business_offices(business: Business, offices_json: dict):
    """Create one Office per office type on ``business``.

    When a mailing address is absent, a deep copy of the delivery address is
    used in its place (retyped as MAILING).
    """
    for office_type, office_info in offices_json.items():
        delivery = create_address(office_info['deliveryAddress'], Address.DELIVERY)
        if office_info.get('mailingAddress', None):
            mailing = create_address(office_info['mailingAddress'], Address.MAILING)
        else:
            mailing = copy.deepcopy(delivery)
            mailing.address_type = Address.MAILING
        create_office(business, [mailing, delivery], office_type)
def add_business_directors(business: Business, directors_json: dict):
    """Attach a DIRECTOR PartyRole (and its Party, if new) for each director.

    An existing Party on the business is reused when one matches by name;
    otherwise a new Party is created. Addresses are (re)assigned either way.
    """
    for director in directors_json['directors']:
        officer = director['officer']
        first = officer.get('firstName', '').upper()
        last = officer.get('lastName', '').upper()
        middle = officer.get('middleInitial', '').upper()
        org = director.get('organization_name', '').upper()
        # Reuse an existing person/organization on this business if present.
        party = PartyRole.find_party_by_name(
            business_id=business.id,
            first_name=first,
            last_name=last,
            middle_initial=middle,
            org_name=org
        )
        if not party:
            party = Party(
                first_name=first,
                last_name=last,
                middle_initial=middle,
                title=director.get('title', '').upper(),
                organization_name=org
            )
        party.delivery_address = create_address(director['deliveryAddress'], Address.DELIVERY)
        party.mailing_address = create_address(director['mailingAddress'], Address.MAILING)
        business.party_roles.append(
            PartyRole(
                role=PartyRole.RoleTypes.DIRECTOR.value,
                appointment_date=director.get('appointmentDate'),
                cessation_date=director.get('cessationDate'),
                party=party
            )
        )
def add_business_shares(business: Business, shares_json: dict):
    """Append a ShareClass (with its series) for each COLIN share class."""
    for class_info in shares_json['shareClasses']:
        business.share_classes.append(create_share_class(class_info))
def add_business_resolutions(business: Business, resolutions_json: dict):
    """Append a SPECIAL-type Resolution for each resolution date."""
    for res_date in resolutions_json['resolutionDates']:
        business.resolutions.append(
            Resolution(
                resolution_date=res_date,
                resolution_type=Resolution.ResolutionType.SPECIAL.value
            )
        )
def add_business_aliases(business: Business, aliases_json: dict):
    """Append a TRANSLATION-type Alias for each name translation."""
    for name_info in aliases_json['names']:
        business.aliases.append(
            Alias(alias=name_info['legalName'], type=Alias.AliasType.TRANSLATION.value)
        )
def history_needed(business: Business):
    """Return True when historic filings should be loaded for this business.

    Only COOPs qualify, and only when none of their completed filings are
    dated before 2019-03-08 (earlier filings already exist elsewhere).
    """
    if business.legal_type != Business.LegalTypes.COOP.value:
        return False
    completed = Filing.get_filings_by_status(business.id, [Filing.Status.COMPLETED.value])
    return all(
        filing.json['filing']['header']['date'] >= '2019-03-08'
        for filing in completed
    )
def load_historic_filings(corp_num: str, business: Business, legal_type: str):
    """Load historic filings for a business.

    Fetches the corp's historic filings from the COLIN API and creates one
    paper-only Filing row per entry, each under its own versioning
    transaction. The session is committed only once all filings were added;
    any failure rolls back the whole batch and records the corp in
    FAILED_FILING_HISTORY.
    """
    try:
        # get historic filings
        r = requests.get(f'{COLIN_API}/api/v1/businesses/{legal_type}/{corp_num}/filings/historic', timeout=TIMEOUT)
        if r.status_code != HTTPStatus.OK or not r.json():
            print(f'skipping history for {corp_num} historic filings not found')
        else:
            for historic_filing in r.json():
                # New sqlalchemy_continuum transaction per filing.
                uow = versioning_manager.unit_of_work(db.session)
                transaction = uow.create_transaction(db.session)
                filing = Filing()
                filing_date = historic_filing['filing']['header']['date']
                filing.filing_date = datetime.datetime.strptime(filing_date, '%Y-%m-%d')
                filing.business_id = business.id
                filing.filing_json = historic_filing
                # Link every COLIN event id carried in the header to this filing.
                for colin_id in filing.filing_json['filing']['header']['colinIds']:
                    colin_event_id = ColinEventId()
                    colin_event_id.colin_event_id = colin_id
                    filing.colin_event_ids.append(colin_event_id)
                filing.transaction_id = transaction.id
                filing._filing_type = historic_filing['filing']['header']['name']
                # Historic filings were paper submissions, not electronic ones.
                filing.paper_only = True
                filing.effective_date = datetime.datetime.strptime(
                    historic_filing['filing']['header']['effectiveDate'], '%Y-%m-%d')
                updater_user = User.find_by_username(UPDATER_USERNAME)
                filing.submitter_id = updater_user.id
                filing.source = Filing.Source.COLIN.value
                db.session.add(filing)
            # only commit after all historic filings were added successfully
            db.session.commit()
            LOADED_FILING_HISTORY.append(corp_num)
    except requests.exceptions.Timeout:
        print('rolling back partial changes...')
        db.session.rollback()
        FAILED_FILING_HISTORY.append(corp_num)
        print('colin_api request timed out getting historic filings.')
    except Exception as err:
        print('rolling back partial changes...')
        db.session.rollback()
        FAILED_FILING_HISTORY.append(corp_num)
        raise err
def load_corps(csv_filepath: str = 'corp_nums/corps_to_load.csv'):
    """Load corps in given csv file from oracle into postgres.

    For each CORP_NUM row: skip corps already in lear; otherwise fetch the
    current company info from COLIN, build and save the models under one
    versioning transaction, and (when history_needed) load historic
    filings. Progress is tracked in the module-level counters/lists.
    """
    global ROWCOUNT
    with open(csv_filepath, 'r') as csvfile:
        reader = csv.DictReader(csvfile)
        with FLASK_APP.app_context():
            for row in reader:
                corp_num = row['CORP_NUM']
                print('loading: ', corp_num)
                added = False
                ROWCOUNT += 1
                try:
                    # Anything not prefixed 'CP' is treated as a BCOMP and
                    # normalized to 'BC' + the last 7 digits.
                    legal_type = Business.LegalTypes.COOP.value
                    if corp_num[:2] != Business.LegalTypes.COOP.value:
                        legal_type = Business.LegalTypes.BCOMP.value
                        corp_num = 'BC' + corp_num[-7:]
                    business = Business.find_by_identifier(corp_num)
                    if business:
                        added = True
                        print('-> business info already exists -- skipping corp load')
                    else:
                        try:
                            # get current company info
                            business_current_info = {}
                            for info_type in BUSINESS_MODEL_INFO_TYPES[legal_type]:
                                business_current_info[info_type] = get_oracle_info(
                                    corp_num=corp_num,
                                    legal_type=legal_type,
                                    info_type=info_type
                                )
                                if business_current_info[info_type].get('failed', False):
                                    raise Exception(f'could not load {info_type}')
                        except requests.exceptions.Timeout:
                            FAILED_CORPS.append(corp_num)
                            print('colin_api request timed out getting corporation details.')
                            continue
                        except Exception as err:
                            print(f'exception: {err}')
                            print(f'skipping load for {corp_num}, exception occurred getting company info')
                            continue
                        # One versioning transaction per corp; the epoch
                        # filing below records its id.
                        uow = versioning_manager.unit_of_work(db.session)
                        transaction = uow.create_transaction(db.session)
                        try:
                            # add BC prefix to non coop identifiers
                            if legal_type != Business.LegalTypes.COOP.value:
                                business_current_info['business']['business']['identifier'] = 'BC' + \
                                    business_current_info['business']['business']['identifier']
                            # add company to postgres db
                            business = create_business(business_current_info['business'])
                            add_business_offices(business, business_current_info['office'])
                            add_business_directors(business, business_current_info['parties'])
                            if legal_type == Business.LegalTypes.BCOMP.value:
                                add_business_shares(business, business_current_info['sharestructure'])
                                add_business_resolutions(business, business_current_info['resolutions'])
                                add_business_aliases(business, business_current_info['aliases'])
                            # Marker filing recording when this business entered lear.
                            filing = Filing()
                            filing.filing_json = {
                                'filing': {
                                    'header': {
                                        'name': 'lear_epoch'
                                    },
                                    'business': business.json()
                                }
                            }
                            filing._filing_type = 'lear_epoch'
                            filing.source = Filing.Source.COLIN.value
                            filing.transaction_id = transaction.id
                            business.filings.append(filing)
                            business.save()
                            added = True
                            NEW_CORPS.append(corp_num)
                        except Exception as err:
                            print(err)
                            print(f'skipping {corp_num} missing info')
                            FAILED_CORPS.append(corp_num)
                    if added and history_needed(business=business):
                        load_historic_filings(corp_num=corp_num, business=business, legal_type=legal_type)
                    else:
                        print('-> historic filings not needed - skipping history load')
                except Exception as err:
                    # Unexpected failure: abort the whole run.
                    print(err)
                    exit(-1)
if __name__ == '__main__':
    # Entry point: load every corp listed in the CSV, then report totals.
    load_corps(csv_filepath='corp_nums/corps_to_load.csv')
    print(f'processed: {ROWCOUNT} rows')
    print(f'Successfully loaded {len(NEW_CORPS)}')
    print(f'Failed to load {len(FAILED_CORPS)}')
    print(f'Histories loaded for {len(LOADED_FILING_HISTORY)}')
    print(f'Histories failed for {len(FAILED_FILING_HISTORY)}')
| 43.214815 | 116 | 0.620958 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,401 | 0.251443 |
ad07b147f4f90acb57865d0385c6621563004a6f | 2,115 | py | Python | compiler/extensions/python/runtime/src/zserio/bitfield.py | PeachOS/zserio | ea01f6906c125a6baab7e8ed865eeb08cd46c37c | [
"BSD-3-Clause"
] | 2 | 2019-02-06T17:50:24.000Z | 2019-11-20T16:51:34.000Z | compiler/extensions/python/runtime/src/zserio/bitfield.py | PeachOS/zserio | ea01f6906c125a6baab7e8ed865eeb08cd46c37c | [
"BSD-3-Clause"
] | 1 | 2019-11-25T16:25:51.000Z | 2019-11-25T18:09:39.000Z | compiler/extensions/python/runtime/src/zserio/bitfield.py | PeachOS/zserio | ea01f6906c125a6baab7e8ed865eeb08cd46c37c | [
"BSD-3-Clause"
] | null | null | null | """
The module provides help methods for bit fields calculation.
"""
from zserio.exception import PythonRuntimeException
def getBitFieldLowerBound(length):
    """
    Gets the lower bound of an unsigned bitfield type with the given length.

    An unsigned bitfield can never hold a negative value, so after the
    length is validated the lower bound is always zero.

    :param length: Length of the unsigned bitfield in bits.
    :returns: The lowest value the unsigned bitfield can hold (always 0).
    :raises PythonRuntimeException: If unsigned bitfield with wrong length has been specified.
    """
    _checkBitFieldLength(length, MAX_UNSIGNED_BITFIELD_BITS)
    return 0
def getBitFieldUpperBound(length):
    """
    Gets the upper bound of an unsigned bitfield type with the given length.

    :param length: Length of the unsigned bitfield in bits.
    :returns: The largest value the unsigned bitfield can hold (2**length - 1).
    :raises PythonRuntimeException: If unsigned bitfield with wrong length has been specified.
    """
    _checkBitFieldLength(length, MAX_UNSIGNED_BITFIELD_BITS)
    return 2 ** length - 1
def getSignedBitFieldLowerBound(length):
    """
    Gets the lower bound of a signed bitfield type with the given length.

    :param length: Length of the signed bitfield in bits.
    :returns: The lowest value the signed bitfield can hold (-(2**(length-1))).
    :raises PythonRuntimeException: If signed bitfield with wrong length has been specified.
    """
    _checkBitFieldLength(length, MAX_SIGNED_BITFIELD_BITS)
    return -(2 ** (length - 1))
def getSignedBitFieldUpperBound(length):
    """
    Gets the upper bound of a signed bitfield type with the given length.

    :param length: Length of the signed bitfield in bits.
    :returns: The largest value the signed bitfield can hold (2**(length-1) - 1).
    :raises PythonRuntimeException: If signed bitfield with wrong length has been specified.
    """
    _checkBitFieldLength(length, MAX_SIGNED_BITFIELD_BITS)
    return 2 ** (length - 1) - 1
def _checkBitFieldLength(length, maxBitFieldLength):
    """Validate that a bitfield length lies in (0, maxBitFieldLength]."""
    if not (length > 0 and length <= maxBitFieldLength):
        raise PythonRuntimeException("Asking for bound of bitfield with invalid length %d!" % length)
# Maximum supported widths: signed bitfields may use all 64 bits, unsigned
# ones are capped at 63 bits.
MAX_SIGNED_BITFIELD_BITS = 64
MAX_UNSIGNED_BITFIELD_BITS = 63
| 34.112903 | 101 | 0.74279 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,316 | 0.622222 |
ad08cf47ba42bae95ff25fda628ffbf2136e4ecb | 59 | py | Python | pacote-download/Mundo1/ex002.py | ariadne-pereira/cev-python | b2c6bbebb5106bb0152c9127c04c83f23e9d7757 | [
"MIT"
] | null | null | null | pacote-download/Mundo1/ex002.py | ariadne-pereira/cev-python | b2c6bbebb5106bb0152c9127c04c83f23e9d7757 | [
"MIT"
] | null | null | null | pacote-download/Mundo1/ex002.py | ariadne-pereira/cev-python | b2c6bbebb5106bb0152c9127c04c83f23e9d7757 | [
"MIT"
] | null | null | null | nome = input('Qual o seu nome?')
print('Bem vindo ' , nome) | 29.5 | 32 | 0.644068 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.508475 |
ad099a3f7a3f39b1c81dfbd2b6b67a25e14da906 | 25 | py | Python | eqparse/spaceloads/__init__.py | TfedUD/eqparse | ab1fba5b4995bed3f5fa2f77cdf505bb613c7e71 | [
"MIT"
] | 3 | 2021-01-26T18:48:39.000Z | 2021-07-14T23:22:09.000Z | eqparse/spaceloads/__init__.py | TfedUD/eqparse | ab1fba5b4995bed3f5fa2f77cdf505bb613c7e71 | [
"MIT"
] | null | null | null | eqparse/spaceloads/__init__.py | TfedUD/eqparse | ab1fba5b4995bed3f5fa2f77cdf505bb613c7e71 | [
"MIT"
] | 3 | 2020-11-18T20:22:00.000Z | 2021-07-14T18:55:31.000Z | from .spaceloads import * | 25 | 25 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ad0a91316f80ba2f9a71f515c6569203c3867373 | 11,388 | py | Python | demos/HFL/example/pytorch/hugging_face/local_bert_text_classifier/dataset.py | monadyn/fedlearn-algo | c4459d421139b0bb765527d636fff123bf17bda4 | [
"Apache-2.0"
] | 86 | 2021-07-20T01:54:21.000Z | 2021-10-06T04:02:40.000Z | demos/HFL/example/pytorch/hugging_face/local_bert_text_classifier/dataset.py | fedlearnAI/fedlearnalgo | 63d9ceb64d331ff2b5103ae49e54229cad7e2095 | [
"Apache-2.0"
] | 5 | 2021-07-23T21:22:16.000Z | 2021-09-12T15:48:35.000Z | demos/HFL/example/pytorch/hugging_face/local_bert_text_classifier/dataset.py | fedlearnAI/fedlearnalgo | 63d9ceb64d331ff2b5103ae49e54229cad7e2095 | [
"Apache-2.0"
] | 28 | 2021-07-20T07:15:33.000Z | 2021-08-22T20:04:57.000Z | # Copyright 2021 Fedlearn authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
# Behaviour toggles used by the partitioning helpers below.
is_visual = True   # print per-label distribution summaries while partitioning
is_to_csv = True   # persist each client's partition to a CSV file
def visulize_distribution(df):
    """Print the per-label sample counts of ``df``.

    Expects a DataFrame with a 'target' column. (Function name keeps the
    original spelling because callers throughout this module use it.)

    Fix: the previous body guarded this print with a constant ``if 1:``,
    leaving an alternative matplotlib-histogram branch permanently
    unreachable; the dead branch has been removed and the always-taken
    textual summary kept.
    """
    print(df.target.value_counts())
def read_20newsgroups(data_file=None, test_file=None, dataset=None, test_size=0.2):
    """Load train/validation texts and labels for the 20newsgroups task.

    Three mutually exclusive sources:
      * ``test_file`` given: the validation split is read from ``test_file``
        and the training split from ``data_file`` (both CSVs with
        text/target/title columns).
      * ``data_file`` given (or a DataFrame passed via ``dataset``): the data
        is cleaned and randomly split with sklearn's train_test_split.
      * nothing given: the full corpus is downloaded via sklearn.

    Returns ((train_texts, valid_texts, train_labels, valid_labels), class_names).
    """
    if test_file is not None:
        testset = pd.read_csv(test_file)
        # dropna: NaN text rows break downstream tokenization, see
        # https://stackoverflow.com/questions/63517293/valueerror-textencodeinput-must-be-uniontextinputsequence-tupleinputsequence
        testset = testset.dropna()
        if is_visual:
            visulize_distribution(testset)
        valid_texts = list(testset['text'])
        valid_labels = np.array(testset['target'])
        classifier_types = list(testset['title'].unique())
        # NOTE(review): this branch assumes data_file is also provided — a
        # None data_file makes pd.read_csv fail here; confirm with callers.
        dataset = pd.read_csv(data_file)
        dataset = dataset.dropna()
        train_texts = list(dataset['text'])
        train_labels = np.array(dataset['target'])
        classifier_types = list(dataset['title'].unique())
        if is_visual:
            visulize_distribution(dataset)
        return (train_texts, valid_texts, train_labels, valid_labels), classifier_types
    else:
        if data_file is not None:
            print(data_file)
            dataset = pd.read_csv(data_file)
            dataset = dataset.dropna()
        if dataset is not None:
            documents = list(dataset['text'])
            labels = np.array(dataset['target'])
            classifier_types = list(dataset['title'].unique())
        else:
            # download & load 20newsgroups dataset from sklearn's repos
            dataset = fetch_20newsgroups(subset="all", shuffle=True, remove=("headers", "footers", "quotes"))
            print(type(dataset))
            documents = dataset.data
            labels = dataset.target
            classifier_types = dataset.target_names
        # split into training & testing a return data as well as label names
        print(type(documents), len(documents))
        print('>>', documents[0])
        print('>>', documents[1])
        return train_test_split(documents, labels, test_size=test_size), classifier_types
def twenty_newsgroup_to_csv(subset=None):
    """Fetch the 20newsgroups ``subset`` ('train'/'test') as a DataFrame.

    Returns a DataFrame with columns text/target/title, where 'title' is the
    label name joined onto the numeric target. When ``subset`` is None the
    function does nothing and implicitly returns None.
    """
    if subset is not None:
        newsgroups = fetch_20newsgroups(subset=subset, remove=("headers", "footers", "quotes"))
        df = pd.DataFrame([newsgroups.data, newsgroups.target.tolist()]).T
        df.columns = ['text', 'target']
        targets = pd.DataFrame( newsgroups.target_names)
        targets.columns=['title']
        # Join the label names onto the numeric targets by index.
        out = pd.merge(df, targets, left_on='target', right_index=True)
        print(out.shape, out.columns)
        if is_visual:
            visulize_distribution(out)
        return out
def test_20newsgroups(dataset):
    """Persist the held-out split to 'test_20newsgroups.csv' (when enabled)."""
    if not is_to_csv:
        return
    dataset.to_csv('test_20newsgroups.csv', index=False)
def iid_20newsgroups(dataset, num_users):
    """
    Sample I.I.D. client data from the 20newsgroups dataset.

    Each client receives an equal-sized, uniformly random, disjoint slice of
    the DataFrame; partitions are optionally summarised and written to CSV.

    :param dataset: pandas DataFrame of samples.
    :param num_users: number of clients to split across.
    :return: dict mapping client id -> DataFrame partition
    """
    samples_per_user = int(len(dataset)/num_users)
    dict_users, remaining = {}, [i for i in range(len(dataset))]
    print(dict_users, samples_per_user)
    for user in range(num_users):
        picked = np.random.choice(remaining, samples_per_user, replace=False)
        dict_users[user] = dataset.iloc[picked]
        # Remove this client's indices so partitions stay disjoint.
        remaining = list(set(remaining) - set(picked))
        if is_visual:
            print(dict_users[user].head(), dict_users[user].shape)
            visulize_distribution(dict_users[user])
        if is_to_csv:
            dict_users[user].to_csv('iid_20newsgroups_'+str(user)+'.csv', index=False)
    return dict_users
def noniid_label_20newsgroups(dataset, num_users, alpha=None):
    """
    Sample non-I.I.D client data from 20newsgroups dataset: label imbalance, quantity uniform.

    Samples are assigned to one of two clients by label parity
    (target % 2), while a small 'safe' set (up to 3 samples of every label)
    is shared by all clients so each client still sees every class.

    :param dataset: pandas DataFrame with a 'target' column.
    :param num_users: number of clients (the parity split only supports 2).
    :param alpha: label ratio tag used only in the output CSV filename.
    :return: dict mapping client id -> DataFrame partition
    """
    if is_visual:
        visulize_distribution(dataset)
    dict_users = {i: np.array([]) for i in range(num_users)}
    labels = np.array(dataset['target'])
    num_samples = len(dataset)
    num_labels = 20
    num_shards = int(len(dataset)/num_labels)
    idxs = np.arange(num_samples)
    print(dict_users)
    print(labels, len(labels))
    print(idxs, len(idxs))
    # Row 0: sample indices, row 1: their labels.
    idxs_labels = np.vstack((idxs, labels))
    # Collect up to 3 sample indices per label as a shared 'safe' pool.
    safe_idxs = []
    seed_idxs = {}
    for i in range(len(dataset)): #only two users
        key = idxs_labels[1][i]
        if key in seed_idxs:
            if seed_idxs[key] < 3:
                safe_idxs.append(idxs_labels[0][i])
                seed_idxs[key] += 1
        else:
            safe_idxs.append(idxs_labels[0][i])
            seed_idxs[key] = 1
    print('seed_idxs', seed_idxs)
    chosen_idxs = {i:[] for i in range(num_users)}
    # Assign each sample to client (label % 2).
    # NOTE(review): both branches below are identical; presumably left over
    # from an experiment — kept as-is to preserve behaviour.
    for i in range(len(dataset)): #only two users
        user_id = idxs_labels[1][i] % 2
        if user_id == 0:
            chosen_idxs[user_id].append(idxs_labels[0][i])
        else:
            chosen_idxs[user_id].append(idxs_labels[0][i])
    for i in range(num_users):
        # Each client gets its own indices plus the shared safe pool.
        dict_users[i] = dataset.iloc[chosen_idxs[i] + safe_idxs]
        if is_visual:
            print(dict_users[i].head(), dict_users[i].shape)
            visulize_distribution(dict_users[i])
        if is_to_csv:
            dict_users[i].to_csv('noniid_label_20newsgroups_alpha'+ str(alpha)+ '_'+str(i)+'.csv', index=False)
    return dict_users
def noniid_quantity_20newsgroups(dataset, num_users=2, beta=None):
    """
    Sample non-I.I.D client data from 20newsgroups dataset: quantity imbalance, label uniform.

    :param dataset: pandas DataFrame of samples (with a 'target' column).
    :param num_users: number of clients to partition across.
    :param beta: per-client fraction of the dataset (len(beta) == num_users).
    :return: dict mapping client id -> DataFrame partition

    Fix: the previous default ``beta=None`` made a no-argument call crash on
    ``len(None)``; it now falls back to a uniform split, which is backward
    compatible for every caller that passed an explicit ``beta``.
    """
    if beta is None:
        beta = [1.0 / num_users] * num_users
    if is_visual:
        visulize_distribution(dataset)
    # Per-client sample counts derived from the requested fractions.
    num_items = {}
    for i in range(len(beta)):
        num_items[i] = int(len(dataset) * beta[i])
    dict_users, all_idxs = {}, [i for i in range(len(dataset))]
    print(dict_users, num_items)
    for i in range(num_users):
        chosen_idxs = np.random.choice(all_idxs, num_items[i], replace=False)
        dict_users[i] = dataset.iloc[chosen_idxs]
        # Keep partitions disjoint.
        all_idxs = list(set(all_idxs) - set(chosen_idxs))
        if is_visual:
            print(dict_users[i].head(), dict_users[i].shape)
            visulize_distribution(dict_users[i])
        if is_to_csv:
            dict_users[i].to_csv('noniid_quantity_20newsgroups_beta'+ str(beta[i])+ '_'+str(i)+'.csv', index=False)
    return dict_users
if __name__ == '__main__':
    # Ad-hoc driver: each `if 0/1:` section toggles one workflow; only the
    # `if 1:` branch below runs as committed.
    if 0:
        (train_texts, valid_texts, train_labels, valid_labels), target_names = read_20newsgroups()
        print(type(train_texts), len(train_texts))
        print(type(train_labels), len(train_labels))
        if 0:
            # Optionally shrink the splits for a quick smoke test.
            start=0
            valid_sample_n = 2
            sample_n = valid_sample_n*5
            train_texts = train_texts[start:sample_n]
            train_labels = train_labels[start:sample_n]
            valid_texts = valid_texts[start:valid_sample_n]
            valid_labels = valid_labels[start:valid_sample_n]
            print(len(train_texts), len(train_labels))
            print(len(valid_texts), len(valid_labels))
            #print(valid_texts, valid_labels)
        print(target_names)
    if 0: #generate iid-dataset
        dataset = twenty_newsgroup_to_csv()
        #print(dataset.head(10))
        #dataset = fetch_20newsgroups(subset="all", shuffle=True, remove=("headers", "footers", "quotes"))
        dict_user = iid_20newsgroups(dataset, 2)
        read_20newsgroups(dict_user[0])
        read_20newsgroups()
    if 0: #load dataset via read_20newsgroups
        #(train_texts, valid_texts, train_labels, valid_labels), target_names = read_20newsgroups(data_file=None)
        #(train_texts, valid_texts, train_labels, valid_labels), target_names = read_20newsgroups(data_file='iid_20newsgroups_1.csv')
        (train_texts, valid_texts, train_labels, valid_labels), target_names = read_20newsgroups(data_file='noniid_label_20newsgroups_alpha0.5_0.csv', test_file='test_20newsgroups.csv')
        print(type(train_texts), len(train_texts))
        print(type(train_labels), len(train_labels))
        print(train_labels[:2])
    if 1:
        # Active path: build the train split and produce the label-skewed
        # non-IID partition (alpha=0.5) for two users.
        dataset = twenty_newsgroup_to_csv(subset='train')
        #print(dataset.head(10))
        #dataset = fetch_20newsgroups(subset="all", shuffle=True, remove=("headers", "footers", "quotes"))
        #dict_user = noniid_20newsgroups(dataset, 2)
        noniid_label_20newsgroups(dataset, 2, alpha=0.5)
        num_users = 2
        #noniid_quantity_20newsgroups(dataset, beta=[0.1, 0.9])
    if 0:
        dataset = twenty_newsgroup_to_csv(subset='test')
        test_20newsgroups(dataset)
| 40.671429 | 185 | 0.648402 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,163 | 0.36556 |
ad0e9389830044b275eaeda53fb94fe0bd3d6df6 | 55 | py | Python | egtaonline/__init__.py | egtaonline/egtaonline-api | a450aad43f5828ab1bc74def7237018b2de9647e | [
"Apache-2.0"
] | null | null | null | egtaonline/__init__.py | egtaonline/egtaonline-api | a450aad43f5828ab1bc74def7237018b2de9647e | [
"Apache-2.0"
] | null | null | null | egtaonline/__init__.py | egtaonline/egtaonline-api | a450aad43f5828ab1bc74def7237018b2de9647e | [
"Apache-2.0"
] | 1 | 2019-03-09T11:45:55.000Z | 2019-03-09T11:45:55.000Z | """Module for egta online api"""
__version__ = '0.8.7'
| 18.333333 | 32 | 0.654545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.709091 |
ad11d47033d4af835763923edb8fa478546cbbc5 | 1,897 | py | Python | views.py | wbellman/Python-Fate-Example | a764d3d386b60d4ecfbb59837321e6c30f1c4249 | [
"MIT"
] | null | null | null | views.py | wbellman/Python-Fate-Example | a764d3d386b60d4ecfbb59837321e6c30f1c4249 | [
"MIT"
] | null | null | null | views.py | wbellman/Python-Fate-Example | a764d3d386b60d4ecfbb59837321e6c30f1c4249 | [
"MIT"
] | null | null | null | import time
import settings
from printLibs import printl, printc
from inputLibs import get_number
def print_character(character):
    """Pretty-print a single character record (name, role, pole, goal, notes)."""
    print()
    printc(character["realname"],"-",40)
    print()
    print( character["name"] + " (" + character["role"] + ") -- " + character["pole"].title() + ":" + str(character["number"]) )
    print()
    print( "Goal: " + character["goal"])
    print()
    if len(character["notes"]) > 0:
        print("Notes:")
        # Bug fix: the original initialized n = 1 but never incremented it, so
        # every note was numbered "1.". enumerate() gives a proper counter.
        for n, note in enumerate(character["notes"], start=1):
            print(" " + str(n) + ". " + note )
        print()
    printc("","-",40)
    print()
    print()
def print_characters(characters, short=True):
    """Print the roster: one numbered summary line per character when `short`,
    otherwise the full sheet via print_character()."""
    if not characters:
        print("No characters defined.")
    if short:
        for index, character in enumerate(characters, start=1):
            print(str(index) + ". " + character["name"].ljust(20) + " (" + character["realname"] + ")")
    else:
        for character in characters:
            print_character(character)
def do_character_list(characters):
    """Render the short character roster under a 'Characters' banner."""
    printc("Characters", "-")
    print_characters(characters)
    print()
def do_view_characters(characters):
    """Show every character's full sheet, then wait for Enter."""
    printc("Characters", "-")
    print_characters(characters,False)
    input("Enter to continue: ")
def do_select_character(characters):
    """Prompt for a roster index (1-based); return the chosen character,
    or None when the user enters 0 to abort. Re-prompts on invalid input."""
    while True:
        print()
        printl("0. Abort")
        do_character_list(characters)
        selection = get_number("Character #")
        if selection == 0:
            return None
        index = selection - 1
        if index < len(characters):
            return characters[index]
        print("Invalid character!")
def do_set_multiplier():
    """Prompt the user for a numeric multiplier and return it."""
    print()
    return get_number("Multiplier")
def do_set_pole():
    """Prompt for a pole selection; return settings.high_pole / settings.low_pole,
    or None when the user enters 0 to abort. Re-prompts on invalid input."""
    while True:
        print()
        print("0. Abort")
        print("1. " + settings.high_pole)
        print("2. " + settings.low_pole)
        selection = get_number("Pole")
        if selection == 0:
            return None
        if selection == 1:
            return settings.high_pole
        if selection == 2:
            return settings.low_pole
        print("Invalid pole.")
| 22.05814 | 126 | 0.634686 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 300 | 0.158144 |
ad12130ce9f4ea80edd96982e4874aa4efd37547 | 4,473 | py | Python | recipes/Python/578871_Simple_Tkinter_strip_chart/recipe-578871.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/578871_Simple_Tkinter_strip_chart/recipe-578871.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/578871_Simple_Tkinter_strip_chart/recipe-578871.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | # (c) MIT License Copyright 2014 Ronald H Longo
# Please reuse, modify or distribute freely.
from collections import OrderedDict
import tkinter as tk
class StripChart( tk.Frame ):
    """Scrolling strip-chart widget: one colored polyline per named track.

    The plotting area is `scale + 1` pixels tall and `historySize * 2` pixels
    wide; every plotValues() call scrolls all existing points two pixels left.
    """

    def __init__( self, parent, scale, historySize, trackColors, *args, **opts ):
        """Build the canvas, the baseline and the per-track history buffers.

        Args:
            parent: containing Tk widget.
            scale: full-scale data value (chart height in pixels is scale+1).
            historySize: number of samples retained per track.
            trackColors: map of track name -> Tk color string.
        """
        # Initialize
        super().__init__( parent, *args, **opts )
        self._trackHist = OrderedDict() # Map: TrackName -> list of canvas objID
        self._trackColor = trackColors # Map: Track Name -> color

        self._chartHeight = scale + 1
        self._chartLength = historySize * 2 # Stretch for readability

        self._canvas = tk.Canvas( self, height=self._chartHeight + 17,
                                  width=self._chartLength, background='black' )
        self._canvas.grid( sticky=tk.N+tk.S+tk.E+tk.W )

        # Draw horizontal to divide plot from tick labels
        x, y = 0, self._chartHeight + 2
        x2, y2 = self._chartLength, y
        self._baseLine = self._canvas.create_line( x, y, x2, y2, fill='white' )

        # Init track def and histories lists
        # ('tick*' are pseudo-tracks holding tick marks/labels so they scroll too)
        self._trackColor.update( { 'tick':'white', 'tickline':'white',
                                   'ticklabel':'white' } )
        for trackName in self._trackColor.keys():
            self._trackHist[ trackName ] = [ None for x in range(historySize) ]

    def plotValues( self, **vals ):
        """Scroll the chart left by 2px and plot one new sample per track.

        Tracks missing from `vals` get a None placeholder (a gap in the line).
        """
        for trackName, trackHistory in self._trackHist.items():
            # Scroll left-wards
            self._canvas.delete( trackHistory.pop(0) )
            # Remove left-most canvas objs
            self._canvas.move( trackName, -2, 0 )
            # Scroll canvas objs 2 pixels left

            # Plot the new values
            try:
                val = vals[ trackName ]
                x = self._chartLength
                y = self._chartHeight - val
                color = self._trackColor[ trackName ]
                objId = self._canvas.create_line( x, y, x+1, y, fill=color,
                                                  width=3, tags=trackName )
                trackHistory.append( objId )
            except:
                trackHistory.append( None )

    def drawTick( self, text=None, **lineOpts ):
        """Draw a vertical tick line at the right edge, optionally labelled.

        `lineOpts` are forwarded to canvas.create_line (e.g. dash=(1,4)).
        """
        # draw vertical tick line
        x = self._chartLength
        y = 1
        x2 = x
        y2 = self._chartHeight
        color = self._trackColor[ 'tickline' ]
        objId = self._canvas.create_line( x, y, x2, y2, fill=color,
                                          tags='tick', **lineOpts )
        self._trackHist[ 'tickline' ].append( objId )

        # draw tick label
        if text is not None:
            x = self._chartLength
            y = self._chartHeight + 10
            color = self._trackColor[ 'ticklabel' ]
            objId = self._canvas.create_text( x, y, text=text,
                                              fill=color, tags='tick' )
            self._trackHist[ 'ticklabel' ].append( objId )

    def configTrackColors( self, **trackColors ):
        """Recolor already-plotted lines and all future samples for the tracks."""
        # Change plotted data color
        for trackName, colorName in trackColors.items( ):
            self._canvas.itemconfigure( trackName, fill=colorName )

        # Change settings so future data has the new color
        self._trackColor.update( trackColors )
if __name__ == '__main__':
    # Demo: three bounded random-walk tracks plotted live on one StripChart.
    top = tk.Tk( )
    graph = StripChart( top, 100, 300, { 'A':'blue', 'B':'green', 'C':'red' } )
    graph.grid( )

    val_A = 0
    val_B = 0
    val_C = 0
    delta = [ -3, -2, -1, 0, 1, 2, 3 ] # randomly vary the values by one of these
    tickCount = 0

    def nextVal( current, lowerBound, upperBound ):
        """Take one random step from `current`, clamped to [lowerBound, upperBound]."""
        from random import choice
        current += choice( delta )
        if current < lowerBound:
            return lowerBound
        elif current > upperBound:
            return upperBound
        else:
            return current

    def plotNextVals( ):
        """Advance every track one sample, then reschedule itself via after()."""
        global val_A, val_B, val_C, tickCount
        # Draw a dashed, labelled tick every 50 samples.
        if tickCount % 50 == 0:
            graph.drawTick( text=str(tickCount), dash=(1,4) )
        tickCount += 1

        val_A = nextVal( val_A, 0, 99 )
        val_B = nextVal( val_B, 0, 99 )
        val_C = nextVal( val_C, 0, 99 )
        graph.plotValues( A=val_A, B=val_B, C=val_C )

        #changeColor = { 800: 'black',
        #1200: 'yellow',
        #1600: 'orange',
        #2000: 'white',
        #2400: 'brown',
        #2800: 'blue' }

        #if tickCount in changeColor:
        #graph.configTrackColors( A=changeColor[tickCount] )

        top.after( 1, plotNextVals )

    top.after( 1, plotNextVals )
    top.mainloop( )
| 33.886364 | 81 | 0.56338 | 3,030 | 0.677398 | 0 | 0 | 0 | 0 | 0 | 0 | 886 | 0.198077 |
ad142cceab8899fa59076896998cb49029523f11 | 2,793 | py | Python | sendmail.py | jvadair/simpleforum | d1e602841e64130c0059c7390ac2fbe7950feb89 | [
"MIT"
] | null | null | null | sendmail.py | jvadair/simpleforum | d1e602841e64130c0059c7390ac2fbe7950feb89 | [
"MIT"
] | null | null | null | sendmail.py | jvadair/simpleforum | d1e602841e64130c0059c7390ac2fbe7950feb89 | [
"MIT"
] | null | null | null | import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
SMTP_URL = "example.com"
def send_verification_code(recipient, recipient_name, verification_code):
    """Email a verification code to *recipient* using the HTML template."""
    sender_email = "simpleforum@jvadair.com"

    with open('.smtp_passwd') as password_file:
        password = password_file.read()

    message = MIMEMultipart("alternative")
    message["Subject"] = "Email Verification"
    message["From"] = sender_email
    message["To"] = recipient

    # Fill the template's $$-placeholders with the recipient's data.
    with open('verification_template.html', 'r') as templateobj:
        html = templateobj.read()
    html = html.replace('$$name', recipient_name)
    html = html.replace('$$verification_code', verification_code)

    # The client renders the last attached part first.
    message.attach(MIMEText(html, "html"))

    # Deliver over implicit TLS (port 465).
    context = ssl.create_default_context()
    with smtplib.SMTP_SSL(SMTP_URL, 465, context=context) as server:
        server.login(sender_email, password)
        server.sendmail(sender_email, recipient, message.as_string())
def send_thread_notif(recipient, recipient_name, forum, author, content):
    """Notify *recipient* by email about a new message posted on *forum*."""
    sender_email = "simpleforum@jvadair.com"

    with open('.smtp_passwd') as password_file:
        password = password_file.read()

    message = MIMEMultipart("alternative")
    message["Subject"] = f"New message on {forum}"
    message["From"] = sender_email
    message["To"] = recipient

    # Fill the notification template's $$-placeholders.
    with open('forum_notif_template.html', 'r') as templateobj:
        html = templateobj.read()
    for placeholder, value in (('$$name', recipient_name),
                               ('$$forum', forum),
                               ('$$author', author),
                               ('$$content', content)):
        html = html.replace(placeholder, value)

    # The client renders the last attached part first.
    message.attach(MIMEText(html, "html"))

    # Deliver over implicit TLS (port 465).
    context = ssl.create_default_context()
    with smtplib.SMTP_SSL(SMTP_URL, 465, context=context) as server:
        server.login(sender_email, password)
        server.sendmail(sender_email, recipient, message.as_string())
| 36.75 | 74 | 0.668815 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 992 | 0.355174 |
ad14b5c195d0bd8131b0b4d5f2f280f2ab66ece5 | 3,474 | py | Python | research/compression/entropy_coder/lib/block_util.py | Dzinushi/models_1_4 | d7e72793a68c1667d403b1542c205d1cd9b1d17c | [
"Apache-2.0"
] | null | null | null | research/compression/entropy_coder/lib/block_util.py | Dzinushi/models_1_4 | d7e72793a68c1667d403b1542c205d1cd9b1d17c | [
"Apache-2.0"
] | null | null | null | research/compression/entropy_coder/lib/block_util.py | Dzinushi/models_1_4 | d7e72793a68c1667d403b1542c205d1cd9b1d17c | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for blocks."""
from __future__ import division
from __future__ import unicode_literals
import math
import numpy as np
import six
import tensorflow as tf
class RsqrtInitializer(object):
  """Gaussian initializer with standard deviation 1/sqrt(n).

  Note that tf.truncated_normal is used internally. Therefore any random sample
  outside two-sigma will be discarded and re-sampled.
  """

  def __init__(self, dims=(0,), **kwargs):
    """Creates an initializer.

    Args:
      dims: Dimension(s) index to compute standard deviation:
        1.0 / sqrt(product(shape[dims]))
      **kwargs: Extra keyword arguments to pass to tf.truncated_normal.
    """
    # `six.integer_types` is (int,) under Python 3, so a plain isinstance
    # check keeps behavior while dropping the Python 2 compatibility shim.
    if isinstance(dims, int):
      self._dims = [dims]
    else:
      self._dims = dims
    self._kwargs = kwargs

  def __call__(self, shape, dtype):
    stddev = 1.0 / np.sqrt(np.prod([shape[x] for x in self._dims]))
    return tf.truncated_normal(
        shape=shape, dtype=dtype, stddev=stddev, **self._kwargs)
class RectifierInitializer(object):
  """Gaussian initializer with standard deviation sqrt(2/fan_in).

  Note that tf.random_normal is used internally to ensure the expected weight
  distribution. This is intended to be used with ReLU activations, specially
  in ResNets.

  For details please refer to:
  Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet
  Classification
  """

  def __init__(self, dims=(0,), scale=2.0, **kwargs):
    """Creates an initializer.

    Args:
      dims: Dimension(s) index to compute standard deviation:
        sqrt(scale / product(shape[dims]))
      scale: A constant scaling for the initialization used as
        sqrt(scale / product(shape[dims])).
      **kwargs: Extra keyword arguments to pass to tf.random_normal.
    """
    # `six.integer_types` is (int,) under Python 3, so a plain isinstance
    # check keeps behavior while dropping the Python 2 compatibility shim.
    if isinstance(dims, int):
      self._dims = [dims]
    else:
      self._dims = dims
    self._kwargs = kwargs
    self._scale = scale

  def __call__(self, shape, dtype):
    stddev = np.sqrt(self._scale / np.prod([shape[x] for x in self._dims]))
    return tf.random_normal(
        shape=shape, dtype=dtype, stddev=stddev, **self._kwargs)
class GaussianInitializer(object):
  """Gaussian initializer with a given standard deviation.

  Note that tf.truncated_normal is used internally. Therefore any random sample
  outside two-sigma will be discarded and re-sampled.
  """

  def __init__(self, stddev=1.0):
    # Standard deviation applied to every tensor this initializer produces.
    self._stddev = stddev

  def __call__(self, shape, dtype):
    """Return a truncated-normal tensor of `shape`/`dtype` with the stored stddev."""
    return tf.truncated_normal(shape=shape, dtype=dtype, stddev=self._stddev)
| 34.058824 | 81 | 0.658031 | 2,599 | 0.748129 | 0 | 0 | 0 | 0 | 0 | 0 | 2,111 | 0.607657 |
ad159cd804674520f14ca7bf3672f76b7911e56a | 10,068 | py | Python | core/models.py | ditttu/gymkhana-Nominations | 2a0e993c1b8362c456a9369b0b549d1c809a21df | [
"MIT"
] | 3 | 2018-02-27T13:48:28.000Z | 2018-03-03T21:57:50.000Z | core/models.py | ditttu/gymkhana-Nominations | 2a0e993c1b8362c456a9369b0b549d1c809a21df | [
"MIT"
] | 6 | 2020-02-12T00:07:46.000Z | 2022-03-11T23:25:59.000Z | core/models.py | ditttu/gymkhana-Nominations | 2a0e993c1b8362c456a9369b0b549d1c809a21df | [
"MIT"
] | 1 | 2019-03-26T20:19:57.000Z | 2019-03-26T20:19:57.000Z | from django.db import models
from django.contrib.auth.models import User
from .choices import *
from datetime import datetime,date
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.utils import timezone
def default_end_date():
    """Return March 31st of the current session: this year's March 31st if it
    is still in the future, otherwise next year's."""
    now = datetime.now()
    session_end = now.replace(day=31, month=3, year=now.year)
    if session_end > now:
        return session_end
    return session_end.replace(year=now.year + 1)
def session_end_date(session):
    """Return March 31st of the year after `session` (the session's end date)."""
    return date.today().replace(day=31, month=3, year=session + 1)
class Session(models.Model):
    """An academic session, identified by its (unique) start year, e.g. 2018."""
    start_year = models.IntegerField(unique=True)

    def __str__(self):
        return str(self.start_year)
class Club(models.Model):
    """A club/body; may be nested under a parent club."""
    club_name = models.CharField(max_length=100, null=True)
    club_parent = models.ForeignKey('self', null=True, blank=True)

    def __str__(self):
        return self.club_name
class ClubCreate(models.Model):
    """A request to create a new Club, awaiting approval by a Post."""
    club_name = models.CharField(max_length=100, null=True)
    club_parent = models.ForeignKey(Club, null=True, blank=True)
    # Post whose approval is required for the club to be created.
    take_approval = models.ForeignKey('Post', related_name="give_club_approval", on_delete=models.SET_NULL, null=True,blank=True)
    # Post that raised this creation request.
    requested_by = models.ForeignKey('Post', related_name="club_request", on_delete=models.SET_NULL, null=True,blank=True)

    def __str__(self):
        return self.club_name
class Post(models.Model):
    """A position inside a Club, with its holders and approval chain."""
    post_name = models.CharField(max_length=500, null=True)
    club = models.ForeignKey(Club, on_delete=models.CASCADE, null=True, blank=True)
    tags = models.ManyToManyField(Club, related_name='club_posts', symmetrical=False, blank=True)
    parent = models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True)
    elder_brother = models.ForeignKey('self', related_name="little_bro", on_delete=models.CASCADE, null=True,blank=True)
    post_holders = models.ManyToManyField(User, related_name='posts', blank=True)
    post_approvals = models.ManyToManyField('self', related_name='approvals', symmetrical=False, blank=True)
    take_approval = models.ForeignKey('self', related_name="give_approval", on_delete=models.SET_NULL, null=True,blank=True)
    status = models.CharField(max_length=50, choices=POST_STATUS, default='Post created')
    perms = models.CharField(max_length=200, choices=POST_PERMS, default='normal')

    def __str__(self):
        return self.post_name

    def remove_holders(self):
        """Drop holders whose PostHistory tenure has ended; return the manager."""
        for holder in self.post_holders.all():
            history = PostHistory.objects.get(post=self, user=holder)
            # Bug fix: `end` is a DateField (a datetime.date); comparing it
            # against datetime.now() (a datetime) raises TypeError on Python 3.
            # Compare date against date instead.
            if date.today() > history.end:
                self.post_holders.remove(holder)
        return self.post_holders
class PostHistory(models.Model):
    """Tenure record: which user held which post, over which period/session."""
    post = models.ForeignKey(Post, on_delete=models.CASCADE, null=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    start = models.DateField(auto_now_add=True)
    end = models.DateField(null=True, blank=True, editable=True)
    post_session = models.ForeignKey(Session, on_delete=models.CASCADE, null=True)
class Nomination(models.Model):
    """A call for applications to fill a Post for a given session."""
    name = models.CharField(max_length=200)
    description = models.TextField(max_length=20000, null=True, blank=True)
    nomi_post = models.ForeignKey(Post, null=True)
    nomi_form = models.OneToOneField('forms.Questionnaire', null=True)
    nomi_session = models.IntegerField(null=True)
    status = models.CharField(max_length=50, choices=STATUS, default='Nomination created')
    result_approvals = models.ManyToManyField(Post, related_name='result_approvals', symmetrical=False, blank=True)
    nomi_approvals = models.ManyToManyField(Post, related_name='nomi_approvals', symmetrical=False, blank=True)
    group_status = models.CharField(max_length=50, choices=GROUP_STATUS, default='normal')
    tags = models.ManyToManyField(Club, related_name='club_nomi', symmetrical=False, blank=True)
    opening_date = models.DateField(null=True, blank=True)
    re_opening_date = models.DateField(null=True, blank=True, editable=True)
    deadline = models.DateField(null=True, blank=True, editable=True)
    interview_panel = models.ManyToManyField(User, related_name='panel', symmetrical=False, blank=True)

    def __str__(self):
        return self.name

    def append(self):
        """Add all accepted, submitted applicants as holders of nomi_post.

        Creates a PostHistory row per winner, ending at the session's end date,
        marks the nomination 'Work done', and returns the holders manager.
        """
        selected = NominationInstance.objects.filter(submission_status = True).filter(nomination=self, status='Accepted')
        st_year = self.nomi_session
        session = Session.objects.filter(start_year=st_year).first()
        # Lazily create the Session row the first time this start year is used.
        if session is None:
            session = Session.objects.create(start_year = st_year)
        self.status = 'Work done'
        self.save()
        for each in selected:
            PostHistory.objects.create(post=self.nomi_post, user=each.user, end=session_end_date(session.start_year),
                                       post_session=session)
            self.nomi_post.post_holders.add(each.user)
        return self.nomi_post.post_holders

    def replace(self):
        """Close out current holders (stamp their history end), clear them,
        then add the new winners via append()."""
        for holder in self.nomi_post.post_holders.all():
            history = PostHistory.objects.get(post=self.nomi_post, user=holder)
            history.end = default_end_date()
            history.save()
        self.nomi_post.post_holders.clear()
        self.append()
        return self.nomi_post.post_holders

    def open_to_users(self):
        """Mark the nomination as open to applicants and stamp the opening date."""
        self.status = 'Nomination out'
        self.opening_date = datetime.now()
        self.save()
        return self.status
class ReopenNomination(models.Model):
    """Record of a request to reopen an existing Nomination."""
    nomi = models.OneToOneField(Nomination, on_delete=models.CASCADE)
    approvals = models.ManyToManyField(Post,symmetrical=False)
    reopening_date = models.DateField(null=True, blank=True)

    def re_open_to_users(self):
        """Flag the underlying nomination as reopened and stamp its date."""
        self.nomi.status = 'Interview period and Nomination reopened'
        self.nomi.re_opening_date = datetime.now()
        self.nomi.save()
        return self.nomi
class GroupNomination(models.Model):
    """A bundle of Nominations opened and managed together."""
    name = models.CharField(max_length=2000, null=True)
    description = models.TextField(max_length=5000, null=True, blank=True)
    nominations = models.ManyToManyField(Nomination, symmetrical=False, blank=True)
    status = models.CharField(max_length=50, choices=G_STATUS, default='created')
    opening_date = models.DateField(null=True, blank=True, default=timezone.now)
    deadline = models.DateField(null=True, blank=True)
    approvals = models.ManyToManyField(Post, related_name='group_approvals', symmetrical=False, blank=True)
    tags = models.ManyToManyField(Club, related_name='club_group', symmetrical=False, blank=True)

    def __str__(self):
        return str(self.name)
class NominationInstance(models.Model):
    """One user's application to a Nomination (form, status, timestamps)."""
    nomination = models.ForeignKey('Nomination', on_delete=models.CASCADE, null=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True, blank=True)
    status = models.CharField(max_length=20, choices=NOMI_STATUS, null=True, blank=True, default=None)
    interview_status = models.CharField(max_length=20, choices=INTERVIEW_STATUS, null=True, blank=True,
                                        default='Interview Not Done')
    filled_form = models.OneToOneField('forms.FilledForm', null=True, blank=True)
    # True once the applicant has submitted the filled form.
    submission_status = models.BooleanField(default= False)
    timestamp = models.DateField(default=timezone.now)
    edit_time = models.DateField(null=True, default=timezone.now)

    def __str__(self):
        return str(self.user) + ' ' + str(self.id)
class Deratification(models.Model):
    """Request to remove (deratify) a user from a Post, pending approval."""
    name = models.ForeignKey(User, max_length=30, null=True)
    post = models.ForeignKey(Post, on_delete=models.CASCADE, null=True)
    status = models.CharField(max_length=10, choices=DERATIFICATION, default='safe')
    # Post whose approval is required to complete the deratification.
    deratify_approval = models.ForeignKey(Post, related_name='to_deratify',on_delete=models.CASCADE,null = True)
class Commment(models.Model):
    """A reviewer comment attached to a NominationInstance.

    NOTE(review): class name has a typo ("Commment"); renaming would need a
    migration plus call-site updates, so it is kept as-is.
    """
    comments = models.TextField(max_length=1000, null=True, blank=True)
    nomi_instance = models.ForeignKey(NominationInstance, on_delete=models.CASCADE, null=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
def user_directory_path(instance, filename):
    """Upload destination: MEDIA_ROOT/user_<id>/<filename>."""
    owner_id = instance.user.id
    return "user_%s/%s" % (owner_id, filename)
class UserProfile(models.Model):
    """Extra per-user profile data (one-to-one with auth.User)."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    user_img = models.ImageField(upload_to=user_directory_path, null=True, blank=True)
    name = models.CharField(max_length=100, blank=True)
    roll_no = models.IntegerField(null=True)
    programme = models.CharField(max_length=100, choices=PROGRAMME, default='B.Tech')
    department = models.CharField(max_length=200, default='AE')
    hall = models.CharField(max_length=10,default=1)
    room_no = models.CharField(max_length=10, null=True, blank=True)
    contact = models.CharField(max_length=10, null=True, blank=True)

    def __str__(self):
        return str(self.name)

    def image_url(self):
        """Institute photo URL built from the roll number, or a banner fallback."""
        if self.roll_no:
            return 'http://oa.cc.iitk.ac.in/Oa/Jsp/Photo/' + str(self.roll_no) + '_0.jpg'
        else:
            return '/static/nomi/img/banner.png'
@receiver(post_save, sender=Nomination)
def ensure_parent_in_approvals(sender, **kwargs):
    """post_save hook: make a saved Nomination's parent post an approver and
    tag the involved clubs."""
    nomi = kwargs.get('instance')
    post = nomi.nomi_post
    if post:
        parent = post.parent
        # NOTE(review): if `parent` is None these add() calls will fail —
        # confirm every post that gets a nomination has a parent.
        nomi.nomi_approvals.add(parent)
        nomi.result_approvals.add(parent)
        nomi.tags.add(post.club)
        nomi.tags.add(parent.club)
@receiver(post_save, sender=Post)
def ensure_parent_in_post_approvals(sender, **kwargs):
    """post_save hook: mirror a saved Post's parent into its approvals/tags.

    Adds the parent to post_approvals and tags the parent's, elder brother's
    and own club. Failures (e.g. missing parent/elder_brother) are logged and
    ignored so saving a Post never breaks.
    """
    post = kwargs.get('instance')
    if post:
        try:
            parent = post.parent
            post.post_approvals.add(parent)
            post.tags.add(parent.club)
        except Exception:
            # Was a bare `except:`, which would also swallow SystemExit and
            # KeyboardInterrupt; narrowed while keeping best-effort semantics.
            print('error parent')
        try:
            big_bro = post.elder_brother
            post.tags.add(big_bro.club)
        except Exception:
            print('error')
        post.tags.add(post.club)
| 38.723077 | 129 | 0.708482 | 8,394 | 0.833731 | 0 | 0 | 844 | 0.08383 | 0 | 0 | 646 | 0.064164 |
ad185864b0257450aa7c1d7f4d336d5631a276f2 | 1,232 | py | Python | tests/test/search/test_references_searcher_db_files.py | watermelonwolverine/fvttmv | 8689d47d1f904dd2bf0a083de515fda65713c460 | [
"MIT"
] | 1 | 2022-03-30T19:12:14.000Z | 2022-03-30T19:12:14.000Z | tests/test/search/test_references_searcher_db_files.py | watermelonwolverine/fvttmv | 8689d47d1f904dd2bf0a083de515fda65713c460 | [
"MIT"
] | null | null | null | tests/test/search/test_references_searcher_db_files.py | watermelonwolverine/fvttmv | 8689d47d1f904dd2bf0a083de515fda65713c460 | [
"MIT"
] | null | null | null | from fvttmv.search.__references_searcher_db_files import ReferencesSearcherDbFiles
from test.common import TestCase, AbsPaths, References
class ReferencesSearcherDbFilesTest(TestCase):
    """Tests for ReferencesSearcherDbFiles.search_for_references_in_db_files."""

    def test_search_for_references_in_db_files1(self):
        # A path that is referenced nowhere should yield no db files.
        print("test_search_for_references_in_db_files1")

        expected = []

        result = ReferencesSearcherDbFiles.search_for_references_in_db_files(AbsPaths.Data,
                                                                             [],  # TODO test: additional targets
                                                                             "does/not/exist")

        self.assertEqual(result, expected)

    def test_search_for_references_in_db_files2(self):
        # file1_original is referenced by exactly these two db files.
        print("test_search_for_references_in_db_files2")

        expected = [AbsPaths.contains_1_db,
                    AbsPaths.contains_1_and_2_db]

        result = ReferencesSearcherDbFiles.search_for_references_in_db_files(AbsPaths.Data,
                                                                             [],  # TODO test: additional targets
                                                                             References.file1_original)

        self.assertEqual(expected, result)
| 42.482759 | 113 | 0.57224 | 1,091 | 0.885552 | 0 | 0 | 0 | 0 | 0 | 0 | 160 | 0.12987 |
ad1ab51f8499f1d4ded5f9bd2c0db3404d94ac2b | 8,956 | py | Python | apps/quiver/views.py | OpenAdaptronik/Rattler | c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4 | [
"MIT"
] | 2 | 2018-05-18T08:38:29.000Z | 2018-05-22T08:26:09.000Z | apps/quiver/views.py | IT-PM-OpenAdaptronik/Webapp | c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4 | [
"MIT"
] | 118 | 2017-10-31T13:45:09.000Z | 2018-02-24T20:51:42.000Z | apps/quiver/views.py | OpenAdaptronik/Rattler | c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4 | [
"MIT"
] | null | null | null | from apps.quiver.models import AnalyticsService, AnalyticsServiceExecution
from django.shortcuts import render, HttpResponseRedirect
from django.core.exceptions import PermissionDenied
from django.views.generic import FormView, CreateView, ListView, DetailView, UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from .forms import AnalyticsServiceForm
from django.core import serializers
from django.utils.encoding import uri_to_iri
from django.shortcuts import render, HttpResponseRedirect
from apps.calc.measurement import measurement_obj
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
import json
from apps.analysis.json import NumPyArangeEncoder
from apps.projects.models import Experiment, Project, Datarow, Value
from apps.projects.serializer import project_serialize
from django.conf import settings
from django.core.exceptions import PermissionDenied
import numpy as np
import random
from apps.quiver import service_executor
# Create your views here.
class NewAnalyticsService(LoginRequiredMixin, CreateView):
    """Create a new AnalyticsService owned by the logged-in user."""
    form_class = AnalyticsServiceForm
    template_name = 'quiver/analyticsservice_create.html'

    def get_context_data(self, **kwargs):
        data = super(NewAnalyticsService, self).get_context_data(**kwargs)
        return data

    def form_valid(self, form):
        # Attach ownership before saving so the service belongs to its creator.
        user = self.request.user
        form.instance.user = user
        context = self.get_context_data()
        self.object = form.save()
        return super(NewAnalyticsService, self).form_valid(form)
class UpdateAnalyticsService(LoginRequiredMixin, UpdateView):
    """Edit an existing AnalyticsService (looked up via the `id` URL kwarg)."""
    model = AnalyticsService
    form_class = AnalyticsServiceForm
    pk_url_kwarg = 'id'

    def get(self, request, *args, **kwargs):
        self.object = self.get_object()
        # NOTE(review): non-owners pass this check whenever `visibility` is
        # set — confirm visible services are really meant to be *editable*
        # (not just viewable) by anyone.
        if not self.object.user == self.request.user and not self.object.visibility:
            raise PermissionDenied()
        return super(UpdateAnalyticsService, self).get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        data = super(UpdateAnalyticsService, self).get_context_data(**kwargs)
        return data

    def form_valid(self, form):
        context = self.get_context_data()
        return super(UpdateAnalyticsService, self).form_valid(form)
class MyAnalyticsService(LoginRequiredMixin, ListView):
    """Paginated list of the logged-in user's analytics services."""
    model = AnalyticsService
    allow_empty = True
    paginate_by = 10

    def get_queryset(self):
        # Only the requesting user's services, ordered by last update.
        user = self.request.user
        return AnalyticsService.objects.filter(user=user).order_by('updated')
class AnalyticsServiceDetail(DetailView):
    """Detail page for an AnalyticsService, plus the user's projects for the
    service-execution picker."""
    model = AnalyticsService
    pk_url_kwarg = 'id'

    def get_context_data(self, **kwargs):
        user = self.request.user
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        # Add in a QuerySet of all the projects
        context['project_list'] = Project.objects.filter(user=user).order_by('updated')
        return context

    # NOTE(review): the ownership/visibility check below is disabled, so any
    # user can view any service's detail page — confirm this is intended.
    #def get(self, request, *args, **kwargs):
    #    self.object = self.get_object()
    #    if self.object.user != self.request.user and not self.object.visibility:
    #        raise PermissionDenied()
    #    return super(AnalyticsServiceDetail, self).get(request, *args, **kwargs)
@login_required
def delete_analytics_service(request, analytics_service_id):
    """Delete an analytics service; only its owner may do so.

    Security fix: the original view performed no authentication or ownership
    check, so any visitor could delete arbitrary services by guessing ids.
    """
    service = AnalyticsService.objects.get(id=analytics_service_id)
    if service.user != request.user:
        raise PermissionDenied()
    service.delete()
    return HttpResponseRedirect('/quiver/')
@login_required
def analytics_service_detail(request, experimentId):
    """Load an experiment's datarows/values, wrap them in a Measurement and
    render the quiver index page. POST only; GET is redirected to /dashboard/."""
    if request.method != 'POST':
        return HttpResponseRedirect('/dashboard/')

    # current user
    curruser_id = request.user.id
    projectId = Experiment.objects.get(id=experimentId).project_id
    # owner of experiment
    expowner_id = Project.objects.get(id=projectId).user_id

    # read graph visibility from post
    graph_visibility = request.POST.get("graphVisibilities", "").split(',')

    # Read Data from DB
    # NOTE(review): Experiment is fetched from the DB multiple times below; a
    # single .get() could be reused to cut redundant queries.
    header_list = np.asarray(Datarow.objects.filter(experiment_id=experimentId).values_list('name', flat=True))
    einheiten_list = np.asarray(Datarow.objects.filter(experiment_id=experimentId).values_list('unit', flat=True))
    mInstruments_list = np.asarray(
        Datarow.objects.filter(experiment_id=experimentId).values_list('measuring_instrument', flat=True))
    experimentName = Experiment.objects.get(id=experimentId).name
    dateCreated = Experiment.objects.get(id=experimentId).created
    timerow = Experiment.objects.get(id=experimentId).timerow
    datarow_id = Datarow.objects.filter(experiment_id=experimentId).values_list('id', flat=True)
    value_amount = len(Value.objects.filter(datarow_id=datarow_id[0]))
    datarow_amount = len(datarow_id)

    # values in the right order will be put in here, but for now initialize with 0
    values_wo = [0] * datarow_amount

    #fill values_wo with only datarow_amount-times of database fetches
    i = 0
    while i < datarow_amount:
        values_wo[i] = Value.objects.filter(datarow_id=datarow_id[i]).values_list('value', flat=True)
        i += 1

    # order the values in values_wo, so that they can be used without database fetching
    data = np.transpose(values_wo).astype(float)

    # Create/Initialize the measurement object
    measurement = measurement_obj.Measurement(json.dumps(data, cls=NumPyArangeEncoder),json.dumps(header_list, cls=NumPyArangeEncoder),
                                              json.dumps(einheiten_list, cls=NumPyArangeEncoder),timerow)

    # Prepare the Data for Rendering
    dataForRender = {
        'jsonData': json.dumps(measurement.data, cls=NumPyArangeEncoder),
        'jsonHeader': json.dumps(measurement.colNames, cls=NumPyArangeEncoder),
        'jsonEinheiten': json.dumps(measurement.colUnits, cls=NumPyArangeEncoder),
        'jsonZeitreihenSpalte': json.dumps(measurement.timeIndex, cls=NumPyArangeEncoder),
        'jsonMeasurementInstruments': json.dumps(mInstruments_list, cls=NumPyArangeEncoder),
        'experimentId': experimentId,
        'experimentName': experimentName,
        'projectId': projectId,
        'dateCreated': dateCreated,
        'current_user_id': curruser_id,
        'experiment_owner_id': expowner_id,
        'graphVisibility': json.dumps(graph_visibility, cls=NumPyArangeEncoder),
    }

    # save experimentId to get it in ajax call when refreshing graph
    request.session['experimentId'] = experimentId

    return render(request, "quiver/index.html", dataForRender)
#def analyticsService(request):
#
# if request.method == 'POST':
# form = AnalyticsServiceForm(request.POST)
# if form.is_valid():
# print('hi')
#
# form = AnalyticsServiceForm()
#
# return render(request, 'analytics_service_detail.html', {'form': form})
def execute_service(request, analytics_service_id):
    """Serialize a project, send it to an analytics service, and return the result.

    Expects a POST carrying ``project_id``, ``rowcounter`` and one
    ``parameter_name_<i>`` / ``parameter_value_<i>`` / ``type_select_<i>``
    triple per parameter row.  The project is serialized and handed to the
    service executor state machine; the service result is returned as JSON.

    NOTE(review): for non-POST requests this falls through to a bare
    ``return`` (i.e. ``None``), which Django rejects as a view response --
    confirm whether a redirect or 405 is wanted here.
    """
    if request.method == 'POST':
        project_id = request.POST.get("project_id", )
        rowcounter = int(request.POST.get("rowcounter", ))
        # Collect the parameter rows posted by the form
        # (replaces the manual while-counter loop; also drops an unused
        # ``input = []`` initializer and an unused random ``task_id``).
        parameter = []
        for i in range(rowcounter):
            parameter.append({
                'name': request.POST.get('parameter_name_' + str(i), ),
                'value': request.POST.get('parameter_value_' + str(i), ),
                'type': request.POST.get('type_select_' + str(i), )
            })
        # Serialize the project as the service input payload.
        input = project_serialize(project_id)
        service = AnalyticsService.objects.get(id=analytics_service_id)
        status = service_executor.get_status_for_service(service)
        if status == service_executor.ServiceState.READY:
            user = request.user
            service_execution = AnalyticsServiceExecution(service=service, last_state=1, user=user)
            service_execution.save()
            if service_execution.last_state == service_executor.ServiceState.READY:
                task_url = service_executor.execute_next_state(service_execution, None, input, parameter)
            if service_execution.last_state == service_executor.ServiceState.RUNNING:
                result = service_executor.execute_next_state(service_execution, task_url, None, None).decode('ascii')
            # NOTE(review): ``result`` (and ``task_url``) are only bound if the
            # state machine advanced through READY/RUNNING as expected;
            # otherwise this raises NameError -- confirm the transitions
            # guarantee it.
            return JsonResponse(result, safe=False)
        else:
            raise ValueError('Service does not exist right now.')
    return
| 41.082569 | 135 | 0.704891 | 2,262 | 0.252568 | 0 | 0 | 3,028 | 0.338097 | 0 | 0 | 2,062 | 0.230237 |
ad1aeb9442720992cb51bbedc547de7f9083c3fa | 1,102 | py | Python | boml/load_data/experiment.py | LongMa319/BOML | 8cbb5a557e93dabd858438efd67c0685402efa9e | [
"MIT"
] | 2 | 2021-12-20T03:24:27.000Z | 2022-01-10T14:16:21.000Z | boml/load_data/experiment.py | perseveranceLX/BOML | 8cbb5a557e93dabd858438efd67c0685402efa9e | [
"MIT"
] | null | null | null | boml/load_data/experiment.py | perseveranceLX/BOML | 8cbb5a557e93dabd858438efd67c0685402efa9e | [
"MIT"
] | 1 | 2022-03-29T13:21:20.000Z | 2022-03-29T13:21:20.000Z | """
Simple container for useful quantities for a supervised learning experiment, where data is managed
with feed dictionary
"""
import tensorflow as tf
class BOMLExperiment:
    """Container bundling a dataset with the TF placeholders of one experiment.

    Holds ``x``/``y`` placeholders for training batches and ``x_``/``y_`` for
    validation batches, plus empty slots for the model, error/score tensors
    and optimizers to be filled in later.
    """

    def __init__(self, datasets, dtype=tf.float32):
        self.datasets = datasets
        # Compute each shape once; both the train and validation placeholders
        # share the same input/output shapes.
        in_shape = self._compute_input_shape()
        out_shape = self._compute_output_shape()
        self.x = tf.placeholder(dtype, name="x", shape=in_shape)
        self.y = tf.placeholder(dtype, name="y", shape=out_shape)
        self.x_ = tf.placeholder(dtype, name="x_", shape=in_shape)
        self.y_ = tf.placeholder(dtype, name="y_", shape=out_shape)
        self.dtype = dtype
        self.model = None
        self.errors = {}
        self.scores = {}
        self.optimizers = {}

    # noinspection PyBroadException
    def _compute_input_shape(self):
        """Return (None, *data_dims): batch dimension left open."""
        dims = self.datasets.train.dim_data
        return (None, dims) if isinstance(dims, int) else (None,) + dims

    # noinspection PyBroadException
    def _compute_output_shape(self):
        """Return (None, *target_dims): batch dimension left open."""
        dims = self.datasets.train.dim_target
        return (None, dims) if isinstance(dims, int) else (None,) + dims
ad1c4914c79a24918776134b469b340712c87fc6 | 11,873 | py | Python | pypy/translator/jvm/opcodes.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 12 | 2016-01-06T07:10:28.000Z | 2021-05-13T23:02:02.000Z | pypy/translator/jvm/opcodes.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | null | null | null | pypy/translator/jvm/opcodes.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 2 | 2016-07-29T07:09:50.000Z | 2016-10-16T08:50:26.000Z | """
Mapping from OOType opcodes to JVM MicroInstructions. Most of these
come from the oosupport directory.
"""
from pypy.translator.oosupport.metavm import \
PushArg, PushAllArgs, StoreResult, InstructionList, New, DoNothing, Call,\
SetField, GetField, DownCast, RuntimeNew, OOString, OOUnicode, \
CastTo, PushPrimitive
from pypy.translator.jvm.metavm import \
IndirectCall, JvmCallMethod, NewCustomDict, \
CastPrimitive, PushPyPy
from pypy.rpython.ootypesystem import ootype
import pypy.translator.jvm.generator as jvmgen
import pypy.translator.jvm.typesystem as jvmtype
def _proc(val):
    """Normalize one opcode-table entry into an InstructionList.

    Lists are wrapped as-is; non-static jvmgen.Method entries (only allowed
    on the PyPy helper class) get the PyPy instance pushed first; anything
    else (static methods, strings, ...) becomes push-args / emit / store.
    """
    if isinstance(val, list):
        # Lists of instructions we leave alone:
        return InstructionList(val)
    if isinstance(val, jvmgen.Method) and not val.is_static():
        # Virtual methods may only live on pre-designated classes; guard
        # first, then push the PyPy instance, the arguments, and invoke.
        if val.class_name != jvmtype.jPyPy.name:
            raise Exception("Unknown class for non-static method")
        return InstructionList((PushPyPy, PushAllArgs, val, StoreResult))
    # Default: push all arguments, emit the operation, store the result.
    return InstructionList((PushAllArgs, val, StoreResult))
def _proc_dict(original):
    """Return a copy of *original* with every value post-processed by _proc."""
    return {key: _proc(val) for key, val in original.items()}
def _check_zer(op):
# Note: we convert from Java's ArithmeticException to RPython's
# ZeroDivisionError in the *catch* code, not here where the
# exception is generated. See introduce_exception_conversions()
# in node.py for details.
return op
def _check_ovf(op):
return op
# Marker for OOType opcodes this backend deliberately ignores (no-ops).
Ignore = []

# This table maps the opcodes to micro-ops for processing them.
# It is post-processed by _proc: plain lists are wrapped as InstructionLists,
# strings name micro-instructions, and anything else becomes
# (PushAllArgs, <op>, StoreResult).
opcodes = _proc_dict({
    # __________ object oriented operations __________
    'new':                      [New, StoreResult],
    'runtimenew':               [RuntimeNew, StoreResult],
    'oosetfield':               [SetField],
    'oogetfield':               [GetField, StoreResult],
    'oosend':                   [JvmCallMethod, StoreResult],
    'ooupcast':                 DoNothing,
    'oodowncast':               [DownCast, StoreResult],
    'oois':                     'ref_is_eq',
    'oononnull':                'is_not_null',
    'instanceof':               [CastTo, StoreResult],
    'subclassof':               [PushAllArgs, jvmgen.SWAP, jvmgen.CLASSISASSIGNABLEFROM, StoreResult],
    'ooidentityhash':           [PushAllArgs, jvmgen.OBJHASHCODE, StoreResult],
    'oohash':                   [PushAllArgs, jvmgen.OBJHASHCODE, StoreResult],
    'oostring':                 [OOString, StoreResult],
    'oounicode':                [OOUnicode, StoreResult],
    'ooparse_float':            jvmgen.PYPYOOPARSEFLOAT,
    'oonewcustomdict':          [NewCustomDict, StoreResult],

    'same_as':                  DoNothing,
    'hint':                     [PushArg(0), StoreResult],
    'direct_call':              [Call, StoreResult],
    'indirect_call':            [PushAllArgs, IndirectCall, StoreResult],

    'gc__collect':              jvmgen.SYSTEMGC,
    'gc_set_max_heap_size':     Ignore,
    'resume_point':             Ignore,
    'debug_assert':             [],  # TODO: implement?

    # __________ numeric operations __________

    'bool_not':                 'logical_not',

    'char_lt':                  'less_than',
    'char_le':                  'less_equals',
    'char_eq':                  'equals',
    'char_ne':                  'not_equals',
    'char_gt':                  'greater_than',
    'char_ge':                  'greater_equals',

    'unichar_eq':               'equals',
    'unichar_ne':               'not_equals',

    'int_is_true':              'not_equals_zero',
    'int_neg':                  jvmgen.INEG,
    'int_neg_ovf':              jvmgen.INEGOVF,
    'int_abs':                  'iabs',
    'int_abs_ovf':              jvmgen.IABSOVF,
    'int_invert':               'bitwise_negate',

    'int_add':                  jvmgen.IADD,
    'int_sub':                  jvmgen.ISUB,
    'int_mul':                  jvmgen.IMUL,
    'int_floordiv':             jvmgen.IDIV,
    'int_floordiv_zer':         _check_zer(jvmgen.IDIV),
    'int_mod':                  jvmgen.IREM,
    'int_lt':                   'less_than',
    'int_le':                   'less_equals',
    'int_eq':                   'equals',
    'int_ne':                   'not_equals',
    'int_gt':                   'greater_than',
    'int_ge':                   'greater_equals',
    'int_and':                  jvmgen.IAND,
    'int_or':                   jvmgen.IOR,
    'int_lshift':               jvmgen.ISHL,
    'int_rshift':               jvmgen.ISHR,
    'int_xor':                  jvmgen.IXOR,
    'int_add_ovf':              jvmgen.IADDOVF,
    'int_add_nonneg_ovf':       jvmgen.IADDOVF,
    'int_sub_ovf':              jvmgen.ISUBOVF,
    'int_mul_ovf':              jvmgen.IMULOVF,
    'int_floordiv_ovf':         jvmgen.IDIV,  # these can't overflow!
    'int_mod_zer':              _check_zer(jvmgen.IREM),
    'int_mod_ovf':              jvmgen.IREMOVF,
    'int_lt_ovf':               'less_than',
    'int_le_ovf':               'less_equals',
    'int_eq_ovf':               'equals',
    'int_ne_ovf':               'not_equals',
    'int_gt_ovf':               'greater_than',
    'int_ge_ovf':               'greater_equals',
    'int_and_ovf':              jvmgen.IAND,
    'int_or_ovf':               jvmgen.IOR,
    'int_lshift_ovf':           jvmgen.ISHLOVF,
    'int_lshift_ovf_val':       jvmgen.ISHLOVF,  # VAL... what is val used for??
    'int_rshift_ovf':           jvmgen.ISHR,  # these can't overflow!
    'int_xor_ovf':              jvmgen.IXOR,
    'int_floordiv_ovf_zer':     _check_zer(jvmgen.IDIV),
    'int_mod_ovf_zer':          _check_zer(jvmgen.IREMOVF),

    'uint_is_true':             'not_equals_zero',
    'uint_invert':              'bitwise_negate',

    'uint_add':                 jvmgen.IADD,
    'uint_sub':                 jvmgen.ISUB,
    'uint_mul':                 jvmgen.PYPYUINTMUL,
    'uint_div':                 jvmgen.PYPYUINTDIV,
    'uint_truediv':             None,  # TODO
    'uint_floordiv':            jvmgen.PYPYUINTDIV,
    'uint_mod':                 jvmgen.PYPYUINTMOD,
    'uint_lt':                  'u_less_than',
    'uint_le':                  'u_less_equals',
    'uint_eq':                  'u_equals',
    'uint_ne':                  'u_not_equals',
    'uint_gt':                  'u_greater_than',
    'uint_ge':                  'u_greater_equals',
    'uint_and':                 jvmgen.IAND,
    'uint_or':                  jvmgen.IOR,
    'uint_lshift':              jvmgen.ISHL,
    'uint_rshift':              jvmgen.IUSHR,
    'uint_xor':                 jvmgen.IXOR,

    'float_is_true':            [PushAllArgs, jvmgen.DCONST_0, 'dbl_not_equals', StoreResult],
    'float_neg':                jvmgen.DNEG,
    'float_abs':                'dbl_abs',

    'float_add':                jvmgen.DADD,
    'float_sub':                jvmgen.DSUB,
    'float_mul':                jvmgen.DMUL,
    'float_truediv':            jvmgen.DDIV,
    'float_lt':                 'dbl_less_than',
    'float_le':                 'dbl_less_equals',
    'float_eq':                 'dbl_equals',
    'float_ne':                 'dbl_not_equals',
    'float_gt':                 'dbl_greater_than',
    'float_ge':                 'dbl_greater_equals',

    'llong_is_true':            [PushAllArgs, jvmgen.LCONST_0, 'long_not_equals', StoreResult],
    'llong_neg':                jvmgen.LNEG,
    'llong_neg_ovf':            jvmgen.LNEGOVF,
    'llong_abs':                jvmgen.MATHLABS,
    'llong_abs_ovf':            jvmgen.LABSOVF,
    'llong_invert':             jvmgen.PYPYLONGBITWISENEGATE,

    'llong_add':                jvmgen.LADD,
    'llong_sub':                jvmgen.LSUB,
    'llong_mul':                jvmgen.LMUL,
    'llong_div':                jvmgen.LDIV,
    'llong_truediv':            None,  # TODO
    'llong_floordiv':           jvmgen.LDIV,
    'llong_floordiv_zer':       _check_zer(jvmgen.LDIV),
    'llong_mod':                jvmgen.LREM,
    'llong_mod_zer':            _check_zer(jvmgen.LREM),
    'llong_lt':                 'long_less_than',
    'llong_le':                 'long_less_equals',
    'llong_eq':                 'long_equals',
    'llong_ne':                 'long_not_equals',
    'llong_gt':                 'long_greater_than',
    'llong_ge':                 'long_greater_equals',
    'llong_and':                jvmgen.LAND,
    'llong_or':                 jvmgen.LOR,
    'llong_lshift':             [PushAllArgs, jvmgen.L2I, jvmgen.LSHL, StoreResult],  # XXX - do we care about shifts of >(1<<32) bits??
    'llong_rshift':             [PushAllArgs, jvmgen.L2I, jvmgen.LSHR, StoreResult],
    'llong_xor':                jvmgen.LXOR,
    'llong_floordiv_ovf':       jvmgen.LDIV,  # these can't overflow!
    'llong_mod_ovf':            jvmgen.LREMOVF,
    'llong_lshift_ovf':         jvmgen.LSHLOVF,

    'ullong_is_true':           [PushAllArgs, jvmgen.LCONST_0, 'long_not_equals', StoreResult],
    'ullong_invert':            jvmgen.PYPYLONGBITWISENEGATE,

    'ullong_add':               jvmgen.LADD,
    'ullong_sub':               jvmgen.LSUB,
    'ullong_mul':               jvmgen.LMUL,
    'ullong_div':               jvmgen.LDIV,  # valid?
    'ullong_truediv':           None,  # TODO
    'ullong_floordiv':          jvmgen.LDIV,  # valid?
    'ullong_mod':               jvmgen.PYPYULONGMOD,
    'ullong_lt':                'ulong_less_than',
    'ullong_le':                'ulong_less_equals',
    'ullong_eq':                'ulong_equals',
    'ullong_ne':                'ulong_not_equals',
    'ullong_gt':                'ulong_greater_than',
    'ullong_ge':                'ulong_greater_equals',
    'ullong_lshift':            [PushAllArgs, jvmgen.L2I, jvmgen.LSHL, StoreResult],
    'ullong_rshift':            [PushAllArgs, jvmgen.L2I, jvmgen.LUSHR, StoreResult],
    'ullong_mod_zer':           jvmgen.PYPYULONGMOD,

    # When casting from bool the result should always be exactly 0 or 1.
    # NOTE(review): the original comment here was copied from the CLI backend
    # ("the CLI stack could contain a truth value not equal to 1, so use the
    # !=0 trick") and was itself marked as needing validation -- confirm
    # whether the JVM backend can also see non-canonical truth values, since
    # the entries below currently just DoNothing.
    'cast_bool_to_int':         DoNothing,
    'cast_bool_to_uint':        DoNothing,
    'cast_bool_to_float':       jvmgen.PYPYBOOLTODOUBLE,  # PAUL, inefficient
    'cast_char_to_int':         DoNothing,
    'cast_unichar_to_int':      DoNothing,
    'cast_int_to_char':         DoNothing,
    'cast_int_to_unichar':      DoNothing,
    'cast_int_to_uint':         DoNothing,
    'cast_int_to_float':        jvmgen.I2D,
    'cast_int_to_longlong':     jvmgen.I2L,
    'cast_uint_to_int':         DoNothing,
    'cast_uint_to_float':       jvmgen.PYPYUINTTODOUBLE,
    'cast_float_to_int':        jvmgen.D2I,
    'cast_float_to_longlong':   jvmgen.PYPYDOUBLETOLONG,  # PAUL
    'cast_float_to_uint':       jvmgen.PYPYDOUBLETOUINT,
    'truncate_longlong_to_int': jvmgen.L2I,
    'cast_longlong_to_float':   jvmgen.L2D,
    'cast_primitive':           [PushAllArgs, CastPrimitive, StoreResult],
    'is_early_constant':        [PushPrimitive(ootype.Bool, False), StoreResult]
})
| 44.137546 | 135 | 0.544176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,587 | 0.386339 |
ad1c7e0f78e361f8b94e7d3cccfcbd1e73831978 | 459 | py | Python | graph_explorer/structured_metrics/plugins/vmstat.py | farheenkaifee/dashboard_3 | bc557a6190a99182ec7a1c96dfdd33208a8575cd | [
"Apache-2.0"
] | 284 | 2015-01-03T05:35:18.000Z | 2022-01-19T08:30:31.000Z | graph_explorer/structured_metrics/plugins/vmstat.py | farheenkaifee/dashboard_3 | bc557a6190a99182ec7a1c96dfdd33208a8575cd | [
"Apache-2.0"
] | 9 | 2015-01-20T16:41:01.000Z | 2017-02-03T08:02:39.000Z | graph_explorer/structured_metrics/plugins/vmstat.py | isabella232/graph-explorer | bc557a6190a99182ec7a1c96dfdd33208a8575cd | [
"Apache-2.0"
] | 35 | 2015-02-05T13:03:51.000Z | 2022-01-19T08:31:15.000Z | from . import Plugin
class VmstatPlugin(Plugin):
    """Graph-explorer plugin for vmstat metrics (paging counters, unit: pages)."""

    targets = [
        {
            'match': '^servers\.(?P<server>[^\.]+)\.vmstat\.(?P<type>.*)$',
            'target_type': 'rate',
            'tags': {'unit': 'Page'}
        }
    ]

    def sanitize(self, target):
        """Rewrite raw vmstat counter prefixes into readable type tags."""
        readable = target['tags']['type']
        readable = readable.replace('pgpg', 'paging_')
        readable = readable.replace('pswp', 'swap_')
        target['tags']['type'] = readable
# vim: ts=4 et sw=4:
| 27 | 82 | 0.490196 | 414 | 0.901961 | 0 | 0 | 0 | 0 | 0 | 0 | 193 | 0.420479 |
ad1cabe254e2aa9697b539f3226adbf97155e405 | 819 | py | Python | 2018/day02.py | iKevinY/advent | d160fb711a0a4d671f53cbd61088117e7ff0276a | [
"MIT"
] | 11 | 2019-12-03T06:32:37.000Z | 2021-12-24T12:23:57.000Z | 2018/day02.py | iKevinY/advent | d160fb711a0a4d671f53cbd61088117e7ff0276a | [
"MIT"
] | null | null | null | 2018/day02.py | iKevinY/advent | d160fb711a0a4d671f53cbd61088117e7ff0276a | [
"MIT"
] | 1 | 2019-12-07T06:21:31.000Z | 2019-12-07T06:21:31.000Z | import fileinput
from collections import Counter
BOXES = [line.strip() for line in fileinput.input()]
DOUBLES = 0
TRIPLES = 0
COMMON = None
for box_1 in BOXES:
doubles = 0
triples = 0
for char, count in Counter(box_1).items():
if count == 2:
doubles += 1
elif count == 3:
triples += 1
if doubles > 0:
DOUBLES += 1
if triples > 0:
TRIPLES += 1
for box_2 in BOXES:
if box_1 == box_2:
continue
diffs = 0
for i in range(len(box_1)):
if box_1[i] != box_2[i]:
diffs += 1
if diffs == 1:
COMMON = ''.join(a for a, b in zip(box_1, box_2) if a == b)
print "Checksum for list of box IDs:", DOUBLES * TRIPLES
print "Common letters for right IDs:", COMMON
| 19.5 | 71 | 0.534799 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0.078144 |
ad1e497bcf39064afc3262311487df49eca70a14 | 3,324 | py | Python | pyreindexer/tests/tests/test_sql.py | Restream/reindexer-py | 9a5925f167ac676f07ba39e32985cc6f6a0abebf | [
"Apache-2.0"
] | 2 | 2020-08-07T16:44:33.000Z | 2020-08-07T20:57:18.000Z | pyreindexer/tests/tests/test_sql.py | Restream/reindexer-py | 9a5925f167ac676f07ba39e32985cc6f6a0abebf | [
"Apache-2.0"
] | null | null | null | pyreindexer/tests/tests/test_sql.py | Restream/reindexer-py | 9a5925f167ac676f07ba39e32985cc6f6a0abebf | [
"Apache-2.0"
] | 3 | 2020-08-07T20:57:24.000Z | 2021-09-07T14:52:14.000Z | from hamcrest import *
from tests.helpers.sql import sql_query
class TestSqlQueries:
    """End-to-end checks for the Reindexer SQL interface.

    Relies on the ``namespace``/``index``/``item(s)`` fixtures to pre-populate
    a namespace before each test.
    """

    def test_sql_select(self, namespace, index, item):
        """A plain SELECT must return the previously inserted item."""
        # Given("Create namespace with item")
        db, namespace_name = namespace
        item_definition = item
        # When ("Execute SQL query SELECT")
        query = f'SELECT * FROM {namespace_name}'
        item_list = sql_query(namespace, query)
        # Then ("Check that selected item is in result")
        assert_that(item_list, has_item(equal_to(item_definition)), "Can't SQL select data")

    def test_sql_select_with_join(self, namespace, second_namespace_for_join, index, items):
        """SELECT with INNER JOIN must attach joined rows under 'joined_<ns>'."""
        # Given("Create two namespaces")
        db, namespace_name = namespace
        second_namespace_name, second_ns_item_definition_join = second_namespace_for_join
        # When ("Execute SQL query SELECT with JOIN")
        query = f'SELECT id FROM {namespace_name} INNER JOIN {second_namespace_name} ON {namespace_name}.id = {second_namespace_name}.id'
        item_list = sql_query(namespace, query)
        # Then ("Check that selected item is in result")
        assert_that(item_list,
                    has_item(equal_to({'id': 1, f'joined_{second_namespace_name}': [second_ns_item_definition_join]})),
                    "Can't SQL select data with JOIN")

    def test_sql_select_with_condition(self, namespace, index, items):
        """SELECT with a WHERE clause must return only the matching item."""
        # Given("Create namespace with item")
        db, namespace_name = namespace
        # When ("Execute SQL query SELECT")
        query = f'SELECT * FROM {namespace_name} WHERE id=3'
        item_list = sql_query(namespace, query)
        # Then ("Check that selected item is in result")
        assert_that(item_list, has_item(equal_to({'id': 3, 'val': 'testval3'})), "Can't SQL select data with condition")

    def test_sql_update(self, namespace, index, item):
        """UPDATE must change the stored value and return the updated item."""
        # Given("Create namespace with item")
        db, namespace_name = namespace
        # When ("Execute SQL query UPDATE")
        query = f"UPDATE {namespace_name} SET \"val\" = 'new_val' WHERE id = 100"
        item_list = sql_query(namespace, query)
        # Then ("Check that item is updated")
        assert_that(item_list, has_item(equal_to({'id': 100, 'val': 'new_val'})), "Can't SQL update data")

    def test_sql_delete(self, namespace, index, item):
        """DELETE must remove the item so a following SELECT returns nothing."""
        # Given("Create namespace with item")
        db, namespace_name = namespace
        # When ("Execute SQL query DELETE")
        query_delete = f"DELETE FROM {namespace_name} WHERE id = 100"
        sql_query(namespace, query_delete)
        # Then ("Check that item is deleted")
        query_select = f"SELECT * FROM {namespace_name}"
        item_list = sql_query(namespace, query_select)
        assert_that(item_list, equal_to([]), "Can't SQL delete data")

    def test_sql_select_with_syntax_error(self, namespace, index, item):
        """An invalid query must raise with a parser-style error message."""
        # Given("Create namespace with item")
        # When ("Execute SQL query SELECT with incorrect syntax")
        query = f'SELECT *'
        # Then ("Check that selected item is in result")
        assert_that(calling(sql_query).with_args(namespace, query),
                    raises(Exception, matching=has_string(string_contains_in_order(
                        "Expected", "but found"))), "Error wasn't raised when syntax was incorrect")
| 50.363636 | 137 | 0.659446 | 3,257 | 0.979844 | 0 | 0 | 0 | 0 | 0 | 0 | 1,360 | 0.409146 |
ad1e54977e7558a8f0c8a31c237e57a940caccfa | 184 | py | Python | app/auth/__init__.py | Muxi-Studio/ccnu-network-culture-festival | 3ff62b2a3052d1c0fcbc62df53f8985ea8bfd9d3 | [
"MIT"
] | 3 | 2016-12-01T07:38:17.000Z | 2016-12-17T14:37:24.000Z | examples/HelloAPI/app/auth/__init__.py | misakar/rest | 8bf7369aaa9da5cc4a300c625e4d7fea21f52681 | [
"MIT"
] | 7 | 2020-03-24T16:05:11.000Z | 2022-01-13T00:51:53.000Z | examples/HelloAPI/app/auth/__init__.py | misakar/rest | 8bf7369aaa9da5cc4a300c625e4d7fea21f52681 | [
"MIT"
] | 4 | 2015-12-11T03:20:27.000Z | 2016-02-03T04:47:52.000Z | # coding: utf-8
from flask import Blueprint
# Authentication blueprint; views and forms are attached via the trailing
# ``from . import views, forms`` import below.
auth = Blueprint('auth', __name__, template_folder='templates', static_folder='static')
from . import views, forms
| 14.153846 | 34 | 0.663043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.217391 |
ad1e748da5fe246fe028cfff71db337937c5eff0 | 4,800 | py | Python | generator/paperplane.py | isikdogan/paperplane | 4f1e3510ef88ede0d0c6b5d3fc19e91ad48b66df | [
"MIT"
] | 3 | 2019-03-23T03:26:15.000Z | 2021-05-09T01:20:52.000Z | generator/paperplane.py | isikdogan/paperplane | 4f1e3510ef88ede0d0c6b5d3fc19e91ad48b66df | [
"MIT"
] | 1 | 2019-03-24T05:22:42.000Z | 2019-03-24T17:42:04.000Z | generator/paperplane.py | isikdogan/paperplane | 4f1e3510ef88ede0d0c6b5d3fc19e91ad48b66df | [
"MIT"
] | 4 | 2015-12-07T11:51:17.000Z | 2019-03-24T04:26:28.000Z | # -*- coding: utf-8 -*-
""" PaperPlane: a very simple, flat-file, static blog generator.
Created on Sat Feb 21 2015
Author: Leo Isikdogan
"""
import codecs, unicodedata
import dateutil.parser
import os, re, glob
import markdown
import jinja2
class Page:
    """A static page built from a markdown file.

    File format: first line is the title, second line is skipped, the rest
    is the markdown body.  On construction the body is converted to HTML
    and ``[vid]...[/vid]`` markers are replaced with YouTube embeds.
    """

    def __init__(self, markdown_file):
        self._read_markdown(markdown_file)
        self._parse_markdown_content()
        self._embed_videos()

    def _read_markdown(self, markdown_file):
        """Load title and raw markdown body from *markdown_file* (UTF-8)."""
        with codecs.open(markdown_file, "r", "utf-8") as f:
            self.title = f.readline()
            f.readline()  # skip the separator line
            self.content = f.read()

    def _parse_markdown_content(self):
        """Convert ``self.content`` from markdown to HTML in place."""
        extensions = ['markdown.extensions.extra']
        self.content = markdown.markdown(self.content, extensions=extensions)

    def get_content_text(self):
        """Return the page content with all HTML tags stripped."""
        # strips html, returns raw text
        p = re.compile(r'<.*?>')
        return p.sub('', self.content)

    def get_slugified_title(self):
        """Return a URL-safe ASCII slug derived from the page title."""
        slugs = self.title
        if not isinstance(slugs, str):
            # BUG FIX: the original called the Python 2 builtin ``unicode``,
            # which raises NameError on Python 3 (the rest of this file uses
            # Python 3 syntax, e.g. argument-less ``super()`` in BlogPost).
            slugs = str(slugs)
        # Dotless i has no NFKD ASCII decomposition, so map it explicitly.
        slugs = slugs.replace(u'\u0131', 'i')
        slugs = unicodedata.normalize('NFKD', slugs).encode('ascii', 'ignore').decode('ascii')
        slugs = re.sub('[^\w\s-]', '', slugs).strip().lower()
        return re.sub('[-\s]+', '-', slugs)

    @staticmethod
    def parse_youtube_url(url):
        """Return the 11-char YouTube video id, or None for non-YouTube URLs."""
        youtube_regex = (r'(https?://)?(www\.)?'
                         '(youtube|youtu|youtube-nocookie)\.(com|be)/'
                         '(watch\?v=|embed/|v/|.+\?v=)?([^&=%\?]{11})')
        youtube_regex_match = re.match(youtube_regex, url)
        if youtube_regex_match:
            return youtube_regex_match.group(6)
        return youtube_regex_match

    def _embed_videos(self):
        """Replace ``[vid]url[/vid]`` markers with responsive YouTube embeds."""
        matches = re.finditer("\[vid\](.*?)\[/vid\]", self.content)
        for match in matches:
            vidcode = self.parse_youtube_url(match.group(1))
            if vidcode is not None:
                embed_code = ('<div class="embed-responsive embed-responsive-16by9">'
                              '<iframe class="embed-responsive-item" src="https://www.youtube.com/embed/{}?'
                              'wmode=transparent&fs=1&hl=en&showinfo=0&iv_load_policy=3&'
                              'showsearch=0&rel=0&theme=light"></iframe></div>').format(vidcode)
                self.content = self.content.replace(match.group(0), embed_code)

    def get_dictionary(self):
        """Expose the page's attributes as a dict for template rendering."""
        return self.__dict__
class BlogPost(Page):
    """A dated blog entry; extends Page with date, tags, description, thumbnail.

    File format (one field per line, order matters): title, date, tags,
    description, thumbnail, blank separator line, then the markdown body.
    """

    def __init__(self, markdown_file):
        super().__init__(markdown_file)
        self._parse_date()
        # Output filename is derived from the slugified title.
        self.filename = self.get_slugified_title() + ".html"

    def _read_markdown(self, markdown_file):
        # Overrides Page._read_markdown: blog posts carry extra header lines.
        # The readline() order below IS the file-format contract.
        with codecs.open(markdown_file, "r", "utf-8") as f:
            self.title = f.readline()
            self.date = f.readline()
            self.tags = f.readline().rstrip(os.linesep)
            self.description = f.readline()
            self.thumbnail = f.readline()
            f.readline()  # skip a line
            self.content = f.read()

    def _parse_date(self):
        """Parse the raw date line and derive a human-readable form."""
        self.date = dateutil.parser.parse(self.date)
        # replace(" 0", " ") drops the leading zero from the day number,
        # e.g. "February 05, 2015" -> "February 5, 2015".
        self.formatted_date = self.date.strftime('%B %d, %Y').replace(" 0", " ")
class Blog:
    """Collection of blog posts discovered from a glob pattern of markdown files."""

    def __init__(self, markdown_dir):
        self.files = glob.glob(markdown_dir)
        self._create_posts()

    def _create_posts(self):
        """Build a BlogPost per source file, ordered newest-first by date."""
        loaded = [BlogPost(markdown_file) for markdown_file in self.files]
        self.posts = sorted(loaded, key=lambda post: post.date, reverse=True)

    def create_html_pages(self, blog_dir, blog_template, index_template):
        """Render one HTML page per post plus the blog index page."""
        for post in self.posts:
            TemplateRenderer.create_html(blog_dir + post.filename, blog_template,
                                         post=post.get_dictionary(), subdir='../')
        TemplateRenderer.create_html(blog_dir + "index.html", index_template,
                                     posts=self.posts, subdir='../')
class TemplateRenderer:
    """Thin wrapper around one shared Jinja2 environment ('templates' directory)."""

    env = jinja2.Environment(loader=jinja2.FileSystemLoader('templates'))

    @classmethod
    def create_html(cls, filename, template, **kwargs):
        """Render *template* with *kwargs* and write it to *filename* as UTF-8."""
        jinja_template = cls.env.get_template(template)
        rendered = jinja_template.render(kwargs)
        with open(filename, 'wb') as f:
            f.write(rendered.encode('utf8'))
class Homepage(Page):
    """The site landing page, built from a single markdown file.

    Construction is inherited unchanged from Page.
    """

    def create_html_page(self):
        """Render the homepage template into the site root."""
        TemplateRenderer.create_html('../index.html', 'homepage_template.html',
                                     post=self.get_dictionary(), subdir='')
| 38.4 | 107 | 0.606875 | 4,547 | 0.947292 | 0 | 0 | 662 | 0.137917 | 0 | 0 | 897 | 0.186875 |
ad1e941439ad470245712f61db2a8e49fea80a56 | 9,723 | py | Python | app/recepie/tests/test_recepie_api.py | TheMysteryPuzzles/recepie-app-api | a62f3104ead34f40b310b12e7cecfde4c248c2fc | [
"Apache-2.0"
] | null | null | null | app/recepie/tests/test_recepie_api.py | TheMysteryPuzzles/recepie-app-api | a62f3104ead34f40b310b12e7cecfde4c248c2fc | [
"Apache-2.0"
] | null | null | null | app/recepie/tests/test_recepie_api.py | TheMysteryPuzzles/recepie-app-api | a62f3104ead34f40b310b12e7cecfde4c248c2fc | [
"Apache-2.0"
] | null | null | null | import tempfile
import os
from PIL import Image
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recepie, Tag, Ingredient
from recepie.serializers import RecepieSerializer, RecepieDetailSerializer
# Endpoint for the recipe list/create view.
RECEPIE_URLS = reverse('recepie:recepie-list')


def image_upload_url(recipe_id):
    """Return URL for recipe image upload"""
    return reverse('recepie:recepie-upload-image', args=[recipe_id])


def sample_tag(user, name='Main course'):
    """Create and return a sample tag"""
    return Tag.objects.create(name=name, user=user)


def sample_ingredient(user, name='Cinnamon'):
    """Create and return a sample ingredient"""
    return Ingredient.objects.create(name=name, user=user)


def detail_url(recipe_id):
    """Return recipe detail URL"""
    return reverse('recepie:recepie-detail', args=[recipe_id])


def sample_recepie(user, **params):
    """Create and return a sample recipe, overriding defaults with *params*."""
    recipe_fields = {
        'title': 'sample recepie',
        'time_minutes': 10,
        'price': 5.00,
    }
    recipe_fields.update(params)
    return Recepie.objects.create(user=user, **recipe_fields)
class PublicRecepieApiTest(TestCase):
    """Tests for recipe API endpoints that require no authentication."""

    def setUp(self):
        self.client = APIClient()

    def test_auth_required(self):
        """An unauthenticated request to the recipe list must be rejected."""
        response = self.client.get(RECEPIE_URLS)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecepieApiTest(TestCase):
    """Tests for the recipe API as an authenticated user (CRUD + tags/ingredients)."""

    def setUp(self):
        # Authenticate every request in this class as a fresh test user.
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            'email@gmail.com',
            '12345678'
        )
        self.client.force_authenticate(self.user)

    def test_retrieve_recepies(self):
        """Test retrieving the list of the user's recipes"""
        sample_recepie(self.user)
        sample_recepie(self.user)
        res = self.client.get(RECEPIE_URLS)
        recepie = Recepie.objects.all().order_by('-id')
        serializer = RecepieSerializer(recepie, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_recepies_limited_to_user(self):
        """Test that only the authenticated user's recipes are returned"""
        user2 = get_user_model().objects.create_user(
            'email2@gmail.com',
            '1234567'
        )
        sample_recepie(user=user2)
        sample_recepie(user=self.user)
        res = self.client.get(RECEPIE_URLS)
        recepie = Recepie.objects.filter(user=self.user)
        serializer = RecepieSerializer(recepie, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data, serializer.data)

    def test_view_recepie_detail(self):
        """Test viewing a recipe detail"""
        recepie = sample_recepie(user=self.user)
        recepie.tags.add(sample_tag(user=self.user))
        recepie.ingredients.add(sample_ingredient(user=self.user))
        url = detail_url(recepie.id)
        res = self.client.get(url)
        serializer = RecepieDetailSerializer(recepie)
        self.assertEqual(res.data, serializer.data)

    def test_create_basic_recipe(self):
        """Test creating recipe"""
        payload = {
            'title': 'Test recipe',
            'time_minutes': 30,
            'price': 10.00,
        }
        res = self.client.post(RECEPIE_URLS, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recepie.objects.get(id=res.data['id'])
        # Every posted field must round-trip onto the created model.
        for key in payload.keys():
            self.assertEqual(payload[key], getattr(recipe, key))

    def test_create_recipe_with_tags(self):
        """Test creating a recipe with tags attached"""
        tag1 = sample_tag(user=self.user, name='Tag 1')
        tag2 = sample_tag(user=self.user, name='Tag 2')
        payload = {
            'title': 'Test recipe with two tags',
            'tags': [tag1.id, tag2.id],
            'time_minutes': 30,
            'price': 10.00
        }
        res = self.client.post(RECEPIE_URLS, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recepie.objects.get(id=res.data['id'])
        tags = recipe.tags.all()
        self.assertEqual(tags.count(), 2)
        self.assertIn(tag1, tags)
        self.assertIn(tag2, tags)

    def test_create_recipe_with_ingredients(self):
        """Test creating recipe with ingredients"""
        ingredient1 = sample_ingredient(user=self.user, name='Ingredient 1')
        ingredient2 = sample_ingredient(user=self.user, name='Ingredient 2')
        payload = {
            'title': 'Test recipe with ingredients',
            'ingredients': [ingredient1.id, ingredient2.id],
            'time_minutes': 45,
            'price': 15.00
        }
        res = self.client.post(RECEPIE_URLS, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recepie.objects.get(id=res.data['id'])
        ingredients = recipe.ingredients.all()
        self.assertEqual(ingredients.count(), 2)
        self.assertIn(ingredient1, ingredients)
        self.assertIn(ingredient2, ingredients)

    def test_partial_update_recepie(self):
        """Test updating a recipe with patch (only given fields change)"""
        recepie = sample_recepie(user=self.user)
        recepie.tags.add(sample_tag(user=self.user))
        new_tag = sample_tag(user=self.user, name='Curry')
        payload = {
            'title': 'New Recepie Changed',
            'tags': [new_tag.id]
        }
        url = detail_url(recepie.id)
        self.client.patch(url, payload)
        recepie.refresh_from_db()
        self.assertEqual(recepie.title, payload['title'])
        # PATCHing tags replaces the old tag set with the new one.
        tags = recepie.tags.all()
        self.assertEqual(len(tags), 1)
        self.assertIn(new_tag, tags)

    def test_full_update_recipe(self):
        """Test updating a recipe with put"""
        recipe = sample_recepie(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        payload = {
            'title': 'Spaghetti carbonara',
            'time_minutes': 25,
            'price': 5.00
        }
        url = detail_url(recipe.id)
        self.client.put(url, payload)
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        self.assertEqual(recipe.time_minutes, payload['time_minutes'])
        self.assertEqual(recipe.price, payload['price'])
        # PUT omitted 'tags', so the tag set must be cleared.
        tags = recipe.tags.all()
        self.assertEqual(len(tags), 0)
class RecipeImageUploadTests(TestCase):
    """Tests for the recipe image upload endpoint and list filtering.

    NOTE(review): the two filter tests at the bottom exercise list filtering
    rather than image upload -- they may belong in a separate class.
    """

    def setUp(self):
        # Authenticated client plus one recipe to attach images to.
        self.client = APIClient()
        self.user = get_user_model().objects.create_user('user', 'testpass')
        self.client.force_authenticate(self.user)
        self.recepie = sample_recepie(user=self.user)

    def tearDown(self):
        # Remove the uploaded image file so test runs don't accumulate files.
        self.recepie.image.delete()

    def test_upload_image_to_recipe(self):
        """Test uploading an image to recipe"""
        url = image_upload_url(self.recepie.id)
        with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
            img = Image.new('RGB', (10, 10))
            img.save(ntf, format='JPEG')
            ntf.seek(0)  # rewind so the upload reads the file from the start
            res = self.client.post(url, {'image': ntf}, format='multipart')
        self.recepie.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertIn('image', res.data)
        self.assertTrue(os.path.exists(self.recepie.image.path))

    def test_upload_image_bad_request(self):
        """Test uploading an invalid image"""
        url = image_upload_url(self.recepie.id)
        res = self.client.post(url, {'image': 'notimage'}, format='multipart')
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_filter_recipes_by_tags(self):
        """Test returning recipes with specific tags"""
        recipe1 = sample_recepie(user=self.user, title='Thai vegetable curry')
        recipe2 = sample_recepie(user=self.user, title='Aubergine with tahini')
        tag1 = sample_tag(user=self.user, name='Vegan')
        tag2 = sample_tag(user=self.user, name='Vegetarian')
        recipe1.tags.add(tag1)
        recipe2.tags.add(tag2)
        recipe3 = sample_recepie(user=self.user, title='Fish and chips')
        res = self.client.get(
            RECEPIE_URLS,
            {'tags': '{},{}'.format(tag1.id, tag2.id)}
        )
        serializer1 = RecepieSerializer(recipe1)
        serializer2 = RecepieSerializer(recipe2)
        serializer3 = RecepieSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        # recipe3 has neither tag, so it must be filtered out.
        self.assertNotIn(serializer3.data, res.data)

    def test_filter_recipes_by_ingredients(self):
        """Test returning recipes with specific ingredients"""
        recipe1 = sample_recepie(user=self.user, title='Posh beans on toast')
        recipe2 = sample_recepie(user=self.user, title='Chicken cacciatore')
        ingredient1 = sample_ingredient(user=self.user, name='Feta cheese')
        ingredient2 = sample_ingredient(user=self.user, name='Chicken')
        recipe1.ingredients.add(ingredient1)
        recipe2.ingredients.add(ingredient2)
        recipe3 = sample_recepie(user=self.user, title='Steak and mushrooms')
        res = self.client.get(
            RECEPIE_URLS,
            {'ingredients': '{},{}'.format(ingredient1.id, ingredient2.id)}
        )
        serializer1 = RecepieSerializer(recipe1)
        serializer2 = RecepieSerializer(recipe2)
        serializer3 = RecepieSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        # recipe3 has neither ingredient, so it must be filtered out.
        self.assertNotIn(serializer3.data, res.data)
ad1eb8418e4c93dbcc4f80cc958dc638df52a380 | 809 | py | Python | scrap_single_news.py | pralhad88/Web_scraping | c40c2dcf0549cb8a0b18a981a583db3caaec5213 | [
"MIT"
] | 1 | 2020-04-14T08:31:35.000Z | 2020-04-14T08:31:35.000Z | scrap_single_news.py | pralhad88/Web_scraping | c40c2dcf0549cb8a0b18a981a583db3caaec5213 | [
"MIT"
] | null | null | null | scrap_single_news.py | pralhad88/Web_scraping | c40c2dcf0549cb8a0b18a981a583db3caaec5213 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import urllib.request
article = []
data_storage = {}
source = urllib.request.urlopen("https://www.ndtv.com/india-news/pm-modi-in-telangana-says-seek-your-support-blessings-for-bjp-in-coming-polls-1953954").read()
soup = BeautifulSoup(source,'lxml')
data_storage['Title'] = soup.h1.string
data_storage["PublishDate"] = (soup.find('span', {"itemprop":"dateModified"}).string)
data_storage["Publisher/Author"] = (soup.find('span', {"itemprop":"author"}).string)
for paragraph in soup.find_all('p'):
if "Advertisement" in paragraph.text:
break
article.append(paragraph.text)
connector = ' '*(len(data_storage["Publisher/Author"])-1)
for i in article[1:]:
connector = connector + i + ' '
data_storage['Article'] = connector
for data in data_storage.values():
print(data)
| 31.115385 | 159 | 0.729295 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 268 | 0.331273 |
ad1ec9e6f76fa6eefed47245dd47853b28775217 | 435 | py | Python | localstack/services/awslambda/multivalue_transformer.py | zonywhoop/localstack | 673e1a23374362c64606fb36c0746ee29cbf5553 | [
"Apache-2.0"
] | 1 | 2021-02-19T19:28:30.000Z | 2021-02-19T19:28:30.000Z | localstack/services/awslambda/multivalue_transformer.py | zonywhoop/localstack | 673e1a23374362c64606fb36c0746ee29cbf5553 | [
"Apache-2.0"
] | null | null | null | localstack/services/awslambda/multivalue_transformer.py | zonywhoop/localstack | 673e1a23374362c64606fb36c0746ee29cbf5553 | [
"Apache-2.0"
] | 1 | 2021-01-10T03:21:47.000Z | 2021-01-10T03:21:47.000Z | from collections import defaultdict
from localstack.utils.common import to_str
def multi_value_dict_for_list(elements):
temp_mv_dict = defaultdict(list)
for key in elements:
if isinstance(key, (list, tuple)):
key, value = key
else:
value = elements[key]
key = to_str(key)
temp_mv_dict[key].append(value)
return dict((k, tuple(v)) for k, v in temp_mv_dict.items())
| 27.1875 | 63 | 0.648276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ad1f043741d903cb1b322256803ad59d9dd73fb5 | 3,601 | py | Python | code/dgp/dgp_sorf_optim.py | GiaLacTRAN/convolutional_deep_gp_random_features | 93330f3171ab4e9539f6bae0d4a68ae1f6a1e104 | [
"Apache-2.0"
] | 5 | 2019-09-16T10:51:49.000Z | 2020-10-13T14:44:29.000Z | code/dgp/dgp_sorf_optim.py | GiaLacTRAN/convolutional_deep_gp_random_features | 93330f3171ab4e9539f6bae0d4a68ae1f6a1e104 | [
"Apache-2.0"
] | 1 | 2020-08-09T06:33:46.000Z | 2020-08-20T03:11:50.000Z | code/dgp/dgp_sorf_optim.py | GiaLacTRAN/convolutional_deep_gp_random_features | 93330f3171ab4e9539f6bae0d4a68ae1f6a1e104 | [
"Apache-2.0"
] | 4 | 2019-05-06T03:57:13.000Z | 2020-04-24T13:37:40.000Z | ## Copyright 2019 Gia-Lac TRAN, Edwin V. Bonilla, John P. Cunningham, Pietro Michiardi, and Maurizio Filippone
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import tensorflow as tf
import dgp.dgp as dgp
import dgp.sorf_transform as sorf_transform
class Dgp_Sorf_Optim(dgp.Dgp):
def __init__(self, feature_dim, d_out, nb_gp_blocks=1, ratio_nrf_df=1, keep_prob=0.5, p_sigma2_d=0.01):
# Initialize for superclass
super(Dgp_Sorf_Optim, self).__init__(feature_dim=feature_dim, d_out=d_out, nb_gp_blocks=nb_gp_blocks, ratio_nrf_df=ratio_nrf_df, keep_prob=keep_prob)
# Set p_sigma2_d
self.p_sigma2_d = p_sigma2_d
# Define the initialized value d1_init, d2_init and d3_init
self.d1_init, self.d2_init, self.d3_init = self.create_init_value_d()
# Define variable d1, d2, d3
self.d1, self.d2, self.d3 = self.get_variable_d()
self.omegas = self.d1 + self.d2 + self.d3 + self.d1_init + self.d2_init + self.d3_init
def create_binary_scaling_vector(self, d):
r_u = tf.random_uniform([1, d], minval=0, maxval=1.0, dtype=tf.float32)
ones = tf.ones([1, d])
means = tf.multiply(0.5, ones)
B = tf.cast(tf.where(r_u > means, ones, tf.multiply(-1.0, ones)), tf.float32)
return B
# Define initialized value for variable d1, d2 and d3
def create_init_value_d(self):
d1 = [tf.Variable(self.create_binary_scaling_vector(self.d_omegas_out[i]), dtype=tf.float32, trainable=False) for i in range(self.nb_gp_blocks)]
d2 = [tf.Variable(self.create_binary_scaling_vector(self.d_omegas_out[i]), dtype=tf.float32, trainable=False) for i in range(self.nb_gp_blocks)]
d3 = [tf.Variable(self.create_binary_scaling_vector(self.d_omegas_out[i]), dtype=tf.float32, trainable=False) for i in range(self.nb_gp_blocks)]
return d1, d2, d3
# Define variable d1, d2 and d3
def get_variable_d(self):
d1 = [tf.Variable(self.d1_init[i], dtype=tf.float32) for i in range(self.nb_gp_blocks)]
d2 = [tf.Variable(self.d2_init[i], dtype=tf.float32) for i in range(self.nb_gp_blocks)]
d3 = [tf.Variable(self.d3_init[i], dtype=tf.float32) for i in range(self.nb_gp_blocks)]
return d1, d2, d3
def get_name(self):
return "dgpsorfoptimrelu" + str(self.nb_gp_blocks) + "nb_gp_blocks"
def get_omegas(self):
return self.omegas
def compute_layer_times_omega(self, x, id_nb_gp_blocks):
layer_times_omega = 1 / (tf.exp(self.log_theta_lengthscales[id_nb_gp_blocks]) * self.d_omegas_in[id_nb_gp_blocks]) \
* sorf_transform.sorf_transform(self.layers[id_nb_gp_blocks], self.d1[id_nb_gp_blocks], self.d2[id_nb_gp_blocks], self.d3[id_nb_gp_blocks])
return layer_times_omega
def get_regu_loss(self):
regu_loss = 0.0
for i in range(self.nb_gp_blocks):
regu_loss = regu_loss + tf.nn.l2_loss(tf.subtract(self.d1[i], self.d1_init[i])) / self.p_sigma2_d
regu_loss = regu_loss + tf.nn.l2_loss(tf.subtract(self.d2[i], self.d2_init[i])) / self.p_sigma2_d
regu_loss = regu_loss + tf.nn.l2_loss(tf.subtract(self.d3[i], self.d3_init[i])) / self.p_sigma2_d
regu_loss = regu_loss + self.keep_prob * tf.nn.l2_loss(self.w[i])
return regu_loss
| 46.766234 | 162 | 0.741461 | 2,835 | 0.787281 | 0 | 0 | 0 | 0 | 0 | 0 | 903 | 0.250764 |
ad2017893d41afb16c0e80e4020d3f6bd20849ba | 109 | py | Python | tests/__init__.py | antoinebourayne/sd2c | c76a0c56d5836caba9e6b90cdf7235516e2dd694 | [
"Apache-2.0"
] | null | null | null | tests/__init__.py | antoinebourayne/sd2c | c76a0c56d5836caba9e6b90cdf7235516e2dd694 | [
"Apache-2.0"
] | null | null | null | tests/__init__.py | antoinebourayne/sd2c | c76a0c56d5836caba9e6b90cdf7235516e2dd694 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Ceci est un module avec les tests unitaires et les tests d'intégrations.
"""
| 21.8 | 76 | 0.633028 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.981818 |
ad201c7e5400ff477533d2ab2495459d41d30028 | 7,534 | py | Python | Tutorial 2 - Data Navigation/PlugIns/experimental/scripts/MultiEELS.py | paradimdata/Cornell_EM_SummerSchool_2021 | 9f3583e1b85a9cdd86e1b91800027966d501ce96 | [
"MIT"
] | 8 | 2021-06-13T20:02:12.000Z | 2022-03-24T09:19:23.000Z | Tutorial 2 - Data Navigation/PlugIns/experimental/scripts/MultiEELS.py | paradimdata/Cornell_EM_SummerSchool_2021 | 9f3583e1b85a9cdd86e1b91800027966d501ce96 | [
"MIT"
] | null | null | null | Tutorial 2 - Data Navigation/PlugIns/experimental/scripts/MultiEELS.py | paradimdata/Cornell_EM_SummerSchool_2021 | 9f3583e1b85a9cdd86e1b91800027966d501ce96 | [
"MIT"
] | 1 | 2021-07-16T20:12:28.000Z | 2021-07-16T20:12:28.000Z | import numpy
import uuid
from nion.data import Calibration
from nion.data import DataAndMetadata
from nion.data import xdata_1_0 as xd
from nion.utils import Registry
def acquire_multi_eels(interactive, api):
# first grab the stem controller object by asking the Registry
stem_controller = Registry.get_component("stem_controller")
# establish the EELS camera object and stop it if it is playing
eels_camera = stem_controller.eels_camera
eels_camera.stop_playing()
print(eels_camera.hardware_source_id)
# this table represents the acquisitions to be performed
# each entry is energy offset, exposure (milliseconds), and the number of frames to integrate
table = [
# energy offset, exposure(ms), N frames
(0, 100, 2),
(10, 100, 2),
#(250, 1000, 10),
#(0, 100, 5),
]
# this is the list of integrated spectra that will be the result of this script
spectra = list()
# this algorithm handles dark subtraction specially - so dark subtraction and gain normalization should
# be disabled in the camera settings; this algorithm will handle dark subtraction itself.
do_dark = True
do_gain = False
print("start taking data")
energy_offset_control = "EELS_MagneticShift_Offset" # for hardware EELS
# energy_offset_control = "EELS_MagneticShift_Offset" # for simulator
tolerance_factor_from_nominal = 1.0
timeout_for_confirmation_ms = 3000
for energy_offset_ev, exposure_ms, frame_count in table:
# for each table entry, set the drift tube loss to the energy offset
stem_controller.SetValAndConfirm(energy_offset_control, energy_offset_ev, tolerance_factor_from_nominal, timeout_for_confirmation_ms)
# configure the camera to have the desired exposure
frame_parameters = eels_camera.get_current_frame_parameters()
frame_parameters["exposure_ms"] = exposure_ms
eels_camera.set_current_frame_parameters(frame_parameters)
# disable blanker
stem_controller.SetValAndConfirm("C_Blank", 0, tolerance_factor_from_nominal, timeout_for_confirmation_ms)
# acquire a sequence of images and discard it; this ensures a steady state
eels_camera.grab_sequence_prepare(frame_count)
eels_camera.grab_sequence(frame_count)
# acquire a sequence of images again, but now integrate the acquired images into a single image
eels_camera.grab_sequence_prepare(frame_count)
xdata = eels_camera.grab_sequence(frame_count)[0]
print(f"grabbed data of shape {xdata.data_shape}")
# extract the calibration info
counts_per_electron = xdata.metadata.get("hardware_source", dict()).get("counts_per_electron", 1)
exposure_ms = xdata.metadata.get("hardware_source", dict()).get("exposure", 1)
intensity_scale = xdata.intensity_calibration.scale / counts_per_electron / xdata.dimensional_calibrations[-1].scale / exposure_ms / frame_count
# now sum the data in the sequence/time dimension. use xd.sum to automatically handle metadata such as calibration.
xdata = xd.sum(xdata, 0)
# if dark subtraction is enabled, perform another similar acquisition with blanker enabled and subtract it
if do_dark:
# enable blanker
stem_controller.SetValAndConfirm("C_Blank", 1, tolerance_factor_from_nominal, timeout_for_confirmation_ms)
# acquire a sequence of images and discard it; this ensures a steady state
eels_camera.grab_sequence_prepare(frame_count)
eels_camera.grab_sequence(frame_count)
# acquire a sequence of images again, but now integrate the acquired images into a single image
eels_camera.grab_sequence_prepare(frame_count)
dark_xdata = eels_camera.grab_sequence(frame_count)[0]
# sum it and subtract it from xdata
dark_xdata = xd.sum(dark_xdata, 0)
xdata = xdata - dark_xdata
print(f"subtracted dark data of shape {dark_xdata.data_shape}")
if do_gain:
# divide out the gain
gain_uuid = uuid.uuid4() # fill this in with the actual gain image uuid
gain = interactive.document_controller.document_model.get_data_item_by_uuid(gain_uuid)
if gain is not None:
xdata = xdata / gain.xdata
# next sum the 2d data into a 1d spectrum by collapsing the y-axis (0th dimension)
# also configure the intensity calibration and title.
spectrum = xd.sum(xdata, 0)
spectrum.data_metadata._set_intensity_calibration(Calibration.Calibration(scale=intensity_scale, units="e/eV/s"))
spectrum.data_metadata._set_metadata({"title": f"{energy_offset_ev}eV {int(exposure_ms*1000)}ms [x{frame_count}]"})
# add it to the list of spectra
spectra.append(spectrum)
# disable blanking and return drift tube loss to 0.0eV
stem_controller.SetValAndConfirm("C_Blank", 0, tolerance_factor_from_nominal, timeout_for_confirmation_ms)
stem_controller.SetValAndConfirm(energy_offset_control, 0, tolerance_factor_from_nominal, timeout_for_confirmation_ms)
print("finished taking data")
# when multi display is available, we can combine the spectra into a single line plot display without
# padding the data; but for now, we need to use a single master data item where each row is the same length.
if len(spectra) > 0:
# define the padded spectra list
padded_spectra = list()
# extract calibration info
ev_per_channel = spectra[0].dimensional_calibrations[-1].scale
units = spectra[0].dimensional_calibrations[-1].units
min_ev = min([spectrum.dimensional_calibrations[-1].convert_to_calibrated_value(0) for spectrum in spectra])
max_ev = max([spectrum.dimensional_calibrations[-1].convert_to_calibrated_value(spectrum.data_shape[-1]) for spectrum in spectra])
# calculate what the length of the padded data will be
data_length = int((max_ev - min_ev) / ev_per_channel)
# for each spectra, pad it out to the appropriate length, putting the actual data in the proper range
for spectrum in spectra:
energy_offset_ev = int((spectrum.dimensional_calibrations[-1].convert_to_calibrated_value(0) - min_ev) / ev_per_channel)
calibration_factor = spectrum.intensity_calibration.scale / spectra[0].intensity_calibration.scale
data = numpy.zeros((data_length, ))
data[energy_offset_ev:energy_offset_ev + spectrum.data_shape[-1]] = spectrum.data * calibration_factor
padded_spectrum = DataAndMetadata.new_data_and_metadata(data, spectrum.intensity_calibration, [Calibration.Calibration(min_ev, ev_per_channel, units)])
padded_spectra.append(padded_spectrum)
# stack all of the padded data together for display
master_xdata = xd.vstack(padded_spectra)
# show the data
window = api.application.document_windows[0]
data_item = api.library.create_data_item_from_data_and_metadata(master_xdata)
legends = [s.metadata["title"] for s in spectra]
data_item.title = f"MultiEELS ({', '.join(legends)})"
window.display_data_item(data_item)
print("finished")
def script_main(api_broker):
interactive = api_broker.get_interactive(version="1")
interactive.print_debug = interactive.print
api = api_broker.get_api(version="~1.0")
acquire_multi_eels(interactive, api)
| 47.987261 | 163 | 0.719273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,685 | 0.356384 |
ad21ba75d05a89c78aac426e67b6209c152b8f74 | 3,571 | py | Python | torch_connectomics/data/augmentation/rotation.py | al093/pytorch_connectomics | 52821951233b061102380fc0d2521843652c580a | [
"MIT"
] | 2 | 2019-11-16T23:14:00.000Z | 2020-09-25T09:51:46.000Z | torch_connectomics/data/augmentation/rotation.py | al093/pytorch_connectomics | 52821951233b061102380fc0d2521843652c580a | [
"MIT"
] | 1 | 2020-09-22T08:49:04.000Z | 2020-09-22T08:49:04.000Z | torch_connectomics/data/augmentation/rotation.py | al093/pytorch_connectomics | 52821951233b061102380fc0d2521843652c580a | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from .augmentor import DataAugment
import math
class Rotate(DataAugment):
"""
Continuous rotatation.
The sample size for x- and y-axes should be at least sqrt(2) times larger
than the input size to make sure there is no non-valid region after center-crop.
Args:
p (float): probability of applying the augmentation
"""
def __init__(self, p=0.5):
super(Rotate, self).__init__(p=p)
self.image_interpolation = cv2.INTER_LINEAR
self.label_interpolation = cv2.INTER_NEAREST
self.border_mode = cv2.BORDER_CONSTANT
self.set_params()
def set_params(self):
self.sample_params['ratio'] = [1.0, 1.42, 1.42]
def rotate(self, imgs, M, interpolation):
height, width = imgs.shape[-2:]
if imgs.ndim == 4:
channels = imgs.shape[-4]
slices = imgs.shape[-3]
if imgs.ndim == 3:
channels = 1
slices = imgs.shape[-3]
transformedimgs = np.copy(imgs)
for z in range(slices):
if channels == 1:
img = transformedimgs[z, :, :]
dst = cv2.warpAffine(img, M, (height, width), 1.0, flags=interpolation, borderMode=self.border_mode)
transformedimgs[z, :, :] = dst
elif channels == 3:
img = transformedimgs[:, z, :, :]
img = np.moveaxis(img, 0, -1)
dst = cv2.warpAffine(img, M, (height, width), 1.0, flags=interpolation, borderMode=self.border_mode)
transformedimgs[:, z, :, :] = np.moveaxis(dst, -1, 0)
else:
raise Exception('Unknown number of channels in 2d slice')
return transformedimgs
def rotation_matrix(self, axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta degrees.
"""
axis = np.asarray(axis)
axis = axis / math.sqrt(np.dot(axis, axis))
theta = float(theta) * np.pi / 180.0
a = math.cos(theta / 2.0)
b, c, d = -axis * math.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
def __call__(self, data, random_state=None):
if random_state is None:
random_state = np.random.RandomState()
image = data['image']
height, width = image.shape[-2:]
angle = random_state.rand()*360.0
M = cv2.getRotationMatrix2D((height/2, width/2), angle, 1)
output = {}
for key, val in data.items():
if key in ['label', 'skeleton', 'weight', 'context', 'skeleton_probability']:
output[key] = self.rotate(val, M, self.label_interpolation)
elif key == 'flux':
r_img = self.rotate(val, M, self.image_interpolation)
r_mat = self.rotation_matrix((1, 0, 0), angle)
r_field = np.matmul(r_mat, r_img.reshape((3, -1)))
output[key] = r_field.reshape(val.shape)
elif key == 'image':
output[key] = self.rotate(val, M, self.image_interpolation)
else:
raise TypeError('Input data key not identified, Key was: ' + key)
return output | 39.241758 | 116 | 0.541865 | 3,493 | 0.978157 | 0 | 0 | 0 | 0 | 0 | 0 | 581 | 0.1627 |
ad21e1a931641e8b434612db21482b74d41ff9af | 1,073 | py | Python | constants.py | LuisHernandez96/Pichon | 7c7a1da6a404eae216b919dc2140ee4ca6624901 | [
"MIT"
] | 1 | 2018-03-12T00:23:37.000Z | 2018-03-12T00:23:37.000Z | constants.py | LuisHernandez96/Pichon | 7c7a1da6a404eae216b919dc2140ee4ca6624901 | [
"MIT"
] | null | null | null | constants.py | LuisHernandez96/Pichon | 7c7a1da6a404eae216b919dc2140ee4ca6624901 | [
"MIT"
] | null | null | null | import re
# Used to access the DATA_TYPES dictionary
INT = "INT"
FLOAT = "FLOAT"
BOOLEAN = "BOOLEAN"
INT_LIST = "INT_LIST"
FLOAT_LIST = "FLOAT_LIST"
BOOLEAN_LIST = "BOOLEAN_LIST"
VOID = "VOID"
OBJECT = "OBJECT"
SEMANTIC_ERROR = 99
# Regular expressiones to match data types
REGEX_BOOLEAN = r'true|false'
regex_boolean = re.compile(REGEX_BOOLEAN)
REGEX_INT = r'[0-9][0-9]*'
regex_int = re.compile(REGEX_INT)
REGEX_FLOAT = r'[0-9]*[\.][0-9]+'
regex_float = re.compile(REGEX_FLOAT)
REGEX_OBJECT = r'cube|sphere'
regex_object = re.compile(REGEX_OBJECT)
# Data types as integers used during compilation
DATA_TYPES = {
INT : 0,
FLOAT : 1,
BOOLEAN : 3,
INT_LIST : 4,
FLOAT_LIST : 5,
BOOLEAN_LIST : 6,
VOID : 8,
OBJECT : 9
}
# Operators as integers used during compilation
OPERATORS = {
# Arithmetic
"+" : 0,
"-" : 1,
"/" : 2,
"*" : 3,
"=" : 4,
# Relational
"==" : 5,
"<" : 6,
">" : 7,
"<=" : 8,
">=" : 9,
"!=" : 10,
"||" : 11,
"&&" : 12,
# Unary
"!" : 13,
"~" : 14,
} | 17.590164 | 48 | 0.571295 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 392 | 0.365331 |
ad229f171cc9684921e7ee20ce7549cff09359f6 | 204 | py | Python | server/app/__init__.py | mrchipzhou/simple-android-demo | 69b7f40924b8e62fab6cc2fcccb89f3e728e6ef4 | [
"MIT"
] | null | null | null | server/app/__init__.py | mrchipzhou/simple-android-demo | 69b7f40924b8e62fab6cc2fcccb89f3e728e6ef4 | [
"MIT"
] | null | null | null | server/app/__init__.py | mrchipzhou/simple-android-demo | 69b7f40924b8e62fab6cc2fcccb89f3e728e6ef4 | [
"MIT"
] | null | null | null | from flask import Flask
from . import user
from . import attendance
app = Flask(__name__)
app.register_blueprint(user.bp, url_prefix='/User')
app.register_blueprint(attendance.bp, url_prefix='/Attend')
| 22.666667 | 59 | 0.784314 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.078431 |
ad2372f4683a8e3edf11b3a342a6152ebfa81c44 | 3,877 | py | Python | SimCalorimetry/EcalSelectiveReadoutProducers/python/ecalDigis_craft_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | SimCalorimetry/EcalSelectiveReadoutProducers/python/ecalDigis_craft_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | SimCalorimetry/EcalSelectiveReadoutProducers/python/ecalDigis_craft_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
simEcalDigis = cms.EDProducer("EcalSelectiveReadoutProducer",
# Label of input EB and EE digi collections
digiProducer = cms.string('simEcalUnsuppressedDigis'),
# Instance name of input EB digi collections
EBdigiCollection = cms.string(''),
# Instance name of input EB digi collections
EEdigiCollection = cms.string(''),
# Instance name of output EB SR flags collection
EBSrFlagCollection = cms.string('ebSrFlags'),
# Instance name of output EE SR flags collection
EESrFlagCollection = cms.string('eeSrFlags'),
# Instance name of output EB digis collection
EBSRPdigiCollection = cms.string('ebDigis'),
# Instance name of output EE digis collection
EESRPdigiCollection = cms.string('eeDigis'),
# Label name of input ECAL trigger primitive collection
trigPrimProducer = cms.string('simEcalTriggerPrimitiveDigis'),
# Instance name of ECAL trigger primitive collection
trigPrimCollection = cms.string(''),
# Neighbour eta range, neighborhood: (2*deltaEta+1)*(2*deltaPhi+1)
deltaEta = cms.int32(1),
# Neighbouring eta range, neighborhood: (2*deltaEta+1)*(2*deltaPhi+1)
deltaPhi = cms.int32(1),
# Index of time sample (staring from 1) the first DCC weights is implied
ecalDccZs1stSample = cms.int32(3),
# ADC to GeV conversion factor used in ZS filter for EB
ebDccAdcToGeV = cms.double(0.035),
# ADC to GeV conversion factor used in ZS filter for EE
eeDccAdcToGeV = cms.double(0.06),
#DCC ZS FIR weights.
#d-efault value set of DCC firmware used in CRUZET and CRAFT
dccNormalizedWeights = cms.vdouble(-1.1865, 0.0195, 0.2900, 0.3477, 0.3008,
0.2266),
# Switch to use a symetric zero suppression (cut on absolute value). For
# studies only, for time being it is not supported by the hardware.
symetricZS = cms.bool(False),
# ZS energy threshold in GeV to apply to low interest channels of barrel
srpBarrelLowInterestChannelZS = cms.double(3*.035),
# ZS energy threshold in GeV to apply to low interest channels of endcap
srpEndcapLowInterestChannelZS = cms.double(3*0.06),
# ZS energy threshold in GeV to apply to high interest channels of barrel
srpBarrelHighInterestChannelZS = cms.double(-1.e9),
# ZS energy threshold in GeV to apply to high interest channels of endcap
srpEndcapHighInterestChannelZS = cms.double(-1.e9),
#switch to run w/o trigger primitive. For debug use only
trigPrimBypass = cms.bool(False),
#for debug mode only:
trigPrimBypassLTH = cms.double(1.0),
#for debug mode only:
trigPrimBypassHTH = cms.double(1.0),
#for debug mode only
trigPrimBypassWithPeakFinder = cms.bool(True),
# Mode selection for "Trig bypass" mode
# 0: TT thresholds applied on sum of crystal Et's
# 1: TT thresholds applies on compressed Et from Trigger primitive
# @ee trigPrimByPass_ switch
trigPrimBypassMode = cms.int32(0),
#number of events whose TT and SR flags must be dumped (for debug purpose):
dumpFlags = cms.untracked.int32(0),
#logical flag to write out SrFlags
writeSrFlags = cms.untracked.bool(True),
#switch to apply selective readout decision on the digis and produce
#the "suppressed" digis
produceDigis = cms.untracked.bool(True),
#Trigger Tower Flag to use when a flag is not found from the input
#Trigger Primitive collection. Must be one of the following values:
# 0: low interest, 1: mid interest, 3: high interest
# 4: forced low interest, 5: forced mid interest, 7: forced high interest
defaultTtf_ = cms.int32(4),
# SR->action flag map
actions = cms.vint32(1, 3, 3, 3, 5, 7, 7, 7)
)
| 36.233645 | 79 | 0.685324 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,161 | 0.55739 |
ad248d2fae3558935a3c28c5b44d8c20fef9db65 | 33,264 | py | Python | HDXer/methods.py | TMB-CSB/HDXer | f1e860427a0db2caccb37d630bc85de4a247c0cc | [
"BSD-3-Clause"
] | 3 | 2022-01-28T03:50:00.000Z | 2022-02-01T11:04:55.000Z | HDXer/methods.py | TMB-CSB/HDXer | f1e860427a0db2caccb37d630bc85de4a247c0cc | [
"BSD-3-Clause"
] | 1 | 2022-03-31T20:41:31.000Z | 2022-03-31T22:07:50.000Z | HDXer/methods.py | Lucy-Forrest-Lab/HDXer | 6ad1d73931f6a53922c3c960e6c3f67ebcbd7161 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Class for HDX trajectories, inherited from MDTraj
#
import mdtraj as md
import numpy as np
import os, glob, copy
from .dfpred import DfPredictor
from .errors import HDX_Error
from . import functions
class BV(DfPredictor):
"""Class for Best/Vendruscolo-style analysis. Subclass of DfPredictor.
Initialises with a dictionary of default parameters for analysis,
accessible as BV.params
Default parameters can either be updated directly in the BV.params
dictionary or by supplying a extra parameters as kwargs during
initialisation, e.g.: BV(cut_nc=1.0) or BV(**param_dict)
Additional method parameters (type, default value) that can be defined here:
hbond_method (str, 'contacts') : Method to calculate H-bonds (see help(BV.calc_hbonds) for options)
contact_method (str, 'cutoff') : Method to calculate contacts (see help(BV.calc_contacts) for options)
switch_method (str, 'rational_6_12') : Method for switching function if contact_method == 'switch'
switch_scale_Nc (float, 1.0) : scale (gradient) for contacts switching function if contact_method == 'switch'
switch_scale_Nh (float, 1.0) : scale (gradient) for H-bonds switching function if hbond_method == 'contacts' and contact_method == 'switch'
switch_width (float, 0.25) : Width in nm over which switching function is applied
cut_Nc (float, 0.65) : Cutoff in nm for calculating contacts
cut_Nh (float, 0.24 : Cutoff in nm for calculating H-bonds if hbond_method == 'contacts'
bh_dist (float, 0.25) : Cutoff in nm for calculating Baker-Hubbard H-bonds if hbond_method == 'bh'
bh_ang (float, 120.0) : Cutoff in degrees for calculating Baker-Hubbard H-bonds if hbond_method == 'bh'
betac (float, 0.35) : Value of beta_C for protection factor prediction
betah (float, 2.0) : Value of beta_H for protection factor prediction
Run a by-residue deuterated fraction prediction with these parameters
using the BV.run method."""
def __init__(self, **extra_params):
"""Initialises parameters for Best/Vendruscolo-style analysis.
See self.params for default values"""
# Initialise main parameters with defaults
bvparams = { 'hbond_method' : 'contacts',
'contact_method' : 'cutoff',
'switch_method' : 'rational_6_12',
'switch_scale_Nc' : 1.0,
'switch_scale_Nh' : 1.0,
'switch_width' : 0.25,
'cut_Nc' : 0.65,
'cut_Nh' : 0.24,
'bh_dist' : 0.25,
'bh_ang' : 120.0,
'betac' : 0.35,
'betah' : 2.0 }
bvparams.update(extra_params) # Update main parameter set from kwargs
super(BV, self).__init__(**bvparams)
def __str__(self):
"""Print the method name"""
return 'BestVendruscolo'
def __add__(self, other):
"""Sum results in other method object to this one, weighted by number of frames in each"""
if isinstance(other, BV):
new = copy.deepcopy(self)
try:
if np.array_equal(new.rates, other.rates):
new.pfs[:,0] = (self.n_frames * self.pfs[:,0]) + (other.n_frames * other.pfs[:,0])
# SD = sqrt((a^2 * var(A)) + (b^2 * var(B)))
new.pfs[:,1] = np.sqrt((self.n_frames**2 * self.pfs[:,1]**2) + (other.n_frames**2 * other.pfs[:,1]**2))
new.n_frames += other.n_frames
new.pfs[:,0] /= self.n_frames
# SD = sd(A)/a
new.pfs[:,1] /= self.n_frames
new.pf_byframe = np.concatenate((self.pf_byframe, other.pf_byframe), axis=1)
new.contacts = np.concatenate((self.contacts, other.contacts), axis=1)
new.hbonds = np.concatenate((self.hbonds, other.hbonds), axis=1)
# Same for log(protection factors)
new.lnpf_byframe = np.concatenate((self.lnpf_byframe, other.lnpf_byframe), axis=1)
new.lnpfs[:,0] = np.mean(new.lnpf_byframe, axis=1)
new.lnpfs[:,1] = np.std(new.lnpf_byframe, axis=1, ddof=1)
new.resfracs = new.dfrac(write=False)
return new
else:
raise HDX_Error("Cannot sum two method objects with different intrinsic rates.")
except AttributeError:
return self
else:
return self
def _calc_contacts_cutoff(self, qidx, cidx, cutoff):
"""Calculate contacts between 'query' and 'contact' atom selections
within a specified hard cutoff (in nm).
Periodicity is included in MDtraj function by default.
Usage: _calc_contacts_cutoff(qidx, cidx, cutoff).
Qidx and cidx are the atom index lists to search for contacts from
and to respectively (e.g. from amide NH to all heavy atoms).
Returns count of contacts for each frame in trajectory BV.t."""
try:
byframe_ctacts = md.compute_neighbors(self.t, cutoff, qidx, haystack_indices=cidx)
except TypeError:
# print("Now calculating contacts to single atom, idx %d" % qidx)
qidx = np.array([qidx])
byframe_ctacts = md.compute_neighbors(self.t, cutoff, qidx, haystack_indices=cidx)
return list(map(lambda x: len(x), byframe_ctacts))
def _calc_contacts_switch(self, qidx, cidx, cutoff, scale):
"""Calculate contacts between 'query' and 'contact' atom selections
within a specified cutoff (in nm), with contacts scaled by a
switching function beyond that cutoff.
Periodicity is included in MDtraj function by default.
Usage: _calc_contacts_switch(qidx, cidx, cutoff, scale).
Qidx and cidx are the atom index lists to search for contacts from
and to respectively (e.g. from amide NH to all heavy atoms).
Options for switching function are defined in BV.params. Current options:
'switch_method' [rational_6_12, sigmoid, exponential, gaussian] : form of switching function
'switch_scale_Nc' : Scaling of switching function for contacts (default 1.0), see functions.py for equations
'switch_scale_Nh' : Scaling of switching function for Hbonds (default 1.0), see functions.py for equations
'cut_Nc' : Center of contacts switching
'cut_Nh' : Center of Hbond switching
'switch_width' : Width of switching. r > cut_Nc + switch_width, count == 0.0 (not used in this version)
Returns count of contacts for each frame in trajectory BV.t."""
smethods = {
'rational_6_12' : functions.rational_6_12,
'sigmoid' : functions.sigmoid,
'exponential' : functions.exponential,
'gaussian' : functions.gaussian
}
do_switch = lambda x: smethods[self.params['switch_method']](x, scale, cutoff)
# Contacts will be the same for every frame - all heavys
highcut_ctacts = np.broadcast_to(cidx, (self.t.n_frames, len(cidx)))
pairs = np.insert(np.reshape(cidx,(len(cidx),1)), 0, qidx, axis=1)
totdists = md.compute_distances(self.t, pairs)
contact_count = np.sum(np.array(list((map(do_switch, totdists)))), axis=1)
return contact_count
### Old, does switching only between a low_cut and a high_cut, not everywhere
# smethods = {
# 'rational_6_12' : functions.rational_6_12,
# 'sigmoid' : functions.sigmoid,
# 'exponential' : functions.exponential,
# 'gaussian' : functions.gaussian
# }
# do_switch = lambda x: smethods[self.params['switch_method']](x, self.params['switch_scale'], self.params['cut_Nc'])
#
# # Get count within lowcut
# try:
# lowcut_ctacts = md.compute_neighbors(self.t, cutoff, qidx, haystack_indices=cidx)
# except TypeError:
# qidx = np.array([qidx])
# lowcut_ctacts = md.compute_neighbors(self.t, cutoff, qidx, haystack_indices=cidx)
#
# highcut_ctacts = md.compute_neighbors(self.t, cutoff + self.params['switch_width'], qidx, haystack_indices=cidx)
#
# # Calculate & add switching function value for contacts between lowcut and highcut.
# contact_count = np.asarray(map(lambda y: len(y), lowcut_ctacts))
# for frameidx, count, lowidxs, highidxs in zip(range(0, self.t.n_frames), contact_count, lowcut_ctacts, highcut_ctacts):
# betweenidxs = highidxs[np.in1d(highidxs, lowidxs)==False]
# pairs = np.insert(np.reshape(betweenidxs,(len(betweenidxs),1)), 0, qidx, axis=1) # Insert qidx before each contact to create 2D array of atom pairs
# currdists = md.compute_distances(self.t[frameidx], pairs)[0] ### TODO expensive because of multiple calls to compute_distances?
# count += np.sum(map(do_switch, currdists))
#
# return contact_count
def calc_contacts(self, qidx, cidx, cutoff, scale=None):
"""Calculate contacts between 'query' and 'contact' atom selections
using a given method defined in BV.params['contact_method'].
Current options:
'cutoff' : Use a hard cutoff for counting contacts, defined as BV.params['cut_Nc']
'switch' : Use a switching function for counting contacts.
r <= BV.params['cut_Nc'], count = 1
r > BV.params['cut_Nc'], 0 < switched_count < 1
Options for the switching function should be defined in the 'BV.params'
dictionary.
Qidx and cidx are the atom index lists to search for contacts from
and to respectively (e.g. from amide NH to all heavy atoms).
Returns count of contacts for each frame in trajectory BV.t."""
# Switch for contacts methods
cmethods = {
'cutoff' : self._calc_contacts_cutoff,
'switch' : self._calc_contacts_switch
}
if scale is not None:
return cmethods[self.params['contact_method']](qidx, cidx, cutoff, scale)
else:
return cmethods[self.params['contact_method']](qidx, cidx, cutoff)
def _calc_hbonds_contacts(self, HN):
"""Calculates number of protein H-bonds for a particular atom index
using the 'contacts' method. Bonds to all protein O* evaluated
by default, optionally all non-protein too (including waters) if
BV.params['protonly'] is False.
Usage: _calc_hbonds_contacts(atom)"""
if self.params['protonly']:
c = self.top.select("protein and symbol O")
else:
c = self.top.select("symbol O")
if self.params['contact_method'] == 'switch':
hbond_counts = self.calc_contacts(HN, c, self.params['cut_Nh'], self.params['switch_scale_Nh'])
else:
hbond_counts = self.calc_contacts(HN, c, self.params['cut_Nh'])
return hbond_counts
    def _calc_hbonds_bh(self, HN, minfreq=0.0):
        """Calculates number of protein H-bonds for a particular atom index
        using the 'Baker-Hubbard' method. Default donor-acceptor distance < 0.25 nm
        + angle > 120 degrees in BV.params.

        Reports all H-bonds (minimum freq=0.0) by default. Bonds to all protein
        O* evaluated by default, optionally all non-protein too
        (including waters) if BV.params['protonly'] is False.

        Usage: _calc_hbonds_bh(atom, [minfreq])
        Returns: n_frames length array of H-bond counts for desired atom"""
        # Atoms for H-bonds includes protein or all O* and single HN hydrogen
        if self.params['protonly']:
            c = self.t.atom_slice(self.top.select("(protein and symbol O) or index %s" % HN))
        else:
            c = self.t.atom_slice(self.top.select("symbol O or index %s" % HN))
        # Call internal functions of md.baker_hubbard directly to return
        # distances & angles, otherwise only bond_triplets averaged across
        # a trajectory are returned
        # NOTE(review): these are *private* mdtraj APIs - the triplet column
        # indices [1, 2] (distance atoms) and [0, 1, 2] (angle atoms) follow
        # mdtraj's (donor, H, acceptor) ordering; verify against the installed
        # mdtraj version if it changes.
        bond_triplets = md.geometry.hbond._get_bond_triplets(c.topology, exclude_water=self.params['protonly'])
        mask, distances, angles = md.geometry.hbond._compute_bounded_geometry(c, bond_triplets,
                                  self.params['bh_dist'], [1, 2], [0, 1, 2], freq=minfreq, periodic=True)
        # can select distance/angle criteria here
        # Convert the angle cutoff from degrees to radians; a cutoff of 0
        # degrees would divide by zero, so treat it as 360 (i.e. no H-bonds
        # can ever satisfy the angle criterion)
        try:
            ang_rad = 2.0*np.pi / (360./self.params['bh_ang'])
        except ZeroDivisionError:
            self.params['bh_ang'] = 360.0
            ang_rad = 2.0*np.pi / (360./self.params['bh_ang'])
        # Count triplets satisfying both criteria, per frame
        hbond_counts = np.sum(np.logical_and(distances < self.params['bh_dist'], angles > ang_rad), axis=1)
        return hbond_counts
    def calc_hbonds(self, donors):
        """Calculates H-bond counts per frame for each atom in 'donors' array
        to each acceptor atom in the system. H-bonds can be defined using
        any one of the methods below, selected with BV.params['hbond_method']

        Available methods:
           'contacts' : Distance-based cutoff of 0.24 nm
           'bh' : Baker-Hubbard distance ( < 0.25 nm) and angle ( > 120 deg) cutoff

        Default cutoff/angle can be adjusted with entries 'cut_Nh'/'bh_dist'/
        'bh_ang' in BV.params.

        Usage: calc_hbonds(donors)
        Returns: n_donors * n_frames 2D array of H-bond counts per frame for all donors"""
        # Switch for H-bond methods
        hmethods = {
            'contacts' : self._calc_hbonds_contacts,
            'bh' : self._calc_hbonds_bh
        }
        if self.params['skip_first']:
            # Drop the N-terminal amide H of every chain from the donor list
            for firstres in [ c.residue(0) for c in self.top.chains ]:
                seltxt = "(name H or name HN) and resid %s" % firstres.index
                hn_idx = self.top.select(seltxt)
                if hn_idx.shape == (0,): # Empty array, no HN in first residue
                    pass
                else:
                    # hn_idx is a length-1 array; the broadcast comparison
                    # keeps every donor except the matching atom index
                    donors = donors[donors != hn_idx] # Remove matching atom from list
        try:
            total_counts = np.zeros((len(donors), self.t.n_frames))
        except TypeError:
            # A single scalar donor index has no len() - treat as one donor
            total_counts = np.zeros((1, self.t.n_frames))
        for i, v in enumerate(donors):
            total_counts[i] = hmethods[self.params['hbond_method']](v)
        reslist = [ self.top.atom(a).residue.index for a in donors ]
#        hbonds = np.concatenate((np.asarray([reslist]).reshape(len(reslist),1), total_counts), axis=1) # Array of [[ Res idx, Contact count ]]
        return np.asarray(reslist), total_counts
def calc_nh_contacts(self, reslist):
"""Calculates contacts between each NH atom and the surrounding heavy atoms,
excluding those in residues n-2 to n+2.
By BV.params default contacts < 0.65 nm are calculated, and only
protein-heavys, are included, but can include all heavys if desired.
Also skips first residue (N-terminus) in a residue list by default too
- see BV.params['protonly'] and BV.params['skip_first']
Usage: calc_nh_contacts(reslist)
Returns: (reslist, n_res x n_frames 2D array of contacts per frame for each residue)"""
# Check if current atom is a heavy atom
is_heavy = lambda _: self.top.atom(_).element.symbol is not 'H'
if self.params['skip_first']:
for firstres in [ c.residue(0) for c in self.top.chains ]:
try:
reslist.remove(firstres.index) # Remove matching residue from list
except ValueError: # Empty array, no matching resid of first residue
pass
contact_count = np.zeros((len(reslist), self.t.n_frames))
for idx, res in enumerate(reslist):
robj = self.top.residue(res)
excl_idxs = range(robj.index - 2, robj.index + 3, 1) # Exclude n-2 to n+2 residues
inv_atms = functions.select_residxs(self.t, excl_idxs, protonly=self.params['protonly'], invert=True) # At this stage includes H + heavys
heavys = inv_atms[ np.array( [ is_heavy(i) for i in inv_atms ] ) ] # Filter out non-heavys
if self.params['contact_method'] == 'switch':
contact_count[idx] = self.calc_contacts(robj.atom('N').index, heavys, cutoff=self.params['cut_Nc'], scale=self.params['switch_scale_Nc'])
else:
contact_count[idx] = self.calc_contacts(robj.atom('N').index, heavys, cutoff=self.params['cut_Nc'])
# contacts = np.concatenate((np.asarray([reslist]).reshape(len(reslist),1), contact_count), axis=1) # Array of [[ Res idx, Contact count ]]
return np.asarray(reslist), contact_count
    def PF(self):
        """Calculates Best & Vendruscolo protection factors for a provided trajectory.

        Empirical scaling factors of Nh * betah and Nc * betac taken from
        BV.params (2.0 & 0.35 respectively by default).
        H-bonds can be calculated using either the 'contacts' definition or
        the Baker-Hubbard distance + angle definition. Printout of temporary
        files containing by-residue contacts can be enabled/disabled with
        BV.params['save_detailed'].
        All proline residues and the N-terminal residue are skipped. See
        calc_hbonds and calc_nh_contacts for optional kwargs.

        Also writes (log)Protection_factors[.dat|_chunk_N.dat] output files.

        Usage: PF()
        Returns: (residue indices, contact counts, H-bond counts,
                  mean PF & SD, by-frame PFs,
                  mean ln(PF) & SD, by-frame ln(PF)s)"""
        # Setup residue/atom lists
        all_hn_atms = functions.extract_HN(self.t, log=self.params['logfile'])
        prolines = functions.list_prolines(self.t, log=self.params['logfile'])
        # Check all hn_atoms are from protein residues except prolines
        if prolines is not None:
            reslist = [ self.top.atom(a).residue.index for a in all_hn_atms if self.top.atom(a).residue.is_protein and self.top.atom(a).residue.index not in prolines[:,1] ]
            prot_hn_atms = np.array([ a for a in all_hn_atms if self.top.atom(a).residue.is_protein and self.top.atom(a).residue.index not in prolines[:,1] ])
        else:
            reslist = [ self.top.atom(a).residue.index for a in all_hn_atms if self.top.atom(a).residue.is_protein ]
            prot_hn_atms = np.array([ a for a in all_hn_atms if self.top.atom(a).residue.is_protein ])
        # Calc Nc/Nh
        hres, n_hbonds = self.calc_hbonds(prot_hn_atms)
        cres, n_contacts = self.calc_nh_contacts(reslist)
        if not np.array_equal(hres, cres):
            raise HDX_Error("The residues analysed for Nc and Nh appear to be different. Check your inputs!")
        # Option to save outputs; switched contacts are fractional, so need
        # a float format instead of integer counts
        if self.params['contact_method'] == 'switch':
            outfmt = '%10.8e'
        else:
            outfmt = '%d'
        if self.params['save_detailed']:
            for i, residx in enumerate(hres):
                with open("Hbonds_chain_%d_res_%d.tmp" % (self.top.residue(residx).chain.index, self.top.residue(residx).resSeq), 'ab') as hbond_f:
                    np.savetxt(hbond_f, n_hbonds[i], fmt=outfmt) # Use residue indices internally, print out IDs
            for i, residx in enumerate(cres):
                with open("Contacts_chain_%d_res_%d.tmp" % (self.top.residue(residx).chain.index, self.top.residue(residx).resSeq), 'ab') as contacts_f:
                    np.savetxt(contacts_f, n_contacts[i], fmt=outfmt) # Use residue indices internally, print out IDs
        # Calc PF with phenomenological equation: ln(PF) = betah*Nh + betac*Nc
        hbonds = n_hbonds * self.params['betah'] # Beta parameter 1
        contacts = n_contacts * self.params['betac'] # Beta parameter 2
        pf_byframe = np.exp(hbonds + contacts)
        pf_bar = np.mean(pf_byframe, axis=1)
        pf_bar = np.stack((pf_bar, np.std(pf_byframe, axis=1, ddof=1)), axis=1)
        rids = np.asarray([ self.top.residue(i).resSeq for i in hres ])
        rids = np.reshape(rids, (len(rids), 1))
        # Save PFs to separate log file, appending filenames for trajectories read as chunks
        if os.path.exists(self.params['outprefix']+"Protection_factors.dat"):
            filenum = len(glob.glob(self.params['outprefix']+"Protection_factors*"))
            np.savetxt(self.params['outprefix']+"Protection_factors_chunk_%d.dat" % (filenum+1),
                       np.concatenate((rids, pf_bar), axis=1), fmt=['%7d', '%18.8f', '%18.8f'],
                       header="ResID Protection factor Std. Dev.") # Use residue indices internally, print out IDs
        else:
            np.savetxt(self.params['outprefix']+"Protection_factors.dat", np.concatenate((rids, pf_bar), axis=1),
                       fmt=['%7d', '%18.8f', '%18.8f'], header="ResID Protection factor Std. Dev.") # Use residue indices internally, print out IDs
        # Do same for ln(Pf)
        lnpf_byframe = hbonds + contacts
        lnpf_bar = np.mean(lnpf_byframe, axis=1)
        lnpf_bar = np.stack((lnpf_bar, np.std(lnpf_byframe, axis=1, ddof=1)), axis=1)
        # Save PFs to separate log file, appending filenames for trajectories read as chunks
        if os.path.exists(self.params['outprefix']+"logProtection_factors.dat"):
            filenum = len(glob.glob(self.params['outprefix']+"logProtection_factors*"))
            np.savetxt(self.params['outprefix']+"logProtection_factors_chunk_%d.dat" % (filenum+1),
                       np.concatenate((rids, lnpf_bar), axis=1), fmt=['%7d', '%18.8f', '%18.8f'],
                       header="ResID ln(Protection factor) Std. Dev.") # Use residue indices internally, print out IDs
        else:
            np.savetxt(self.params['outprefix']+"logProtection_factors.dat", np.concatenate((rids, lnpf_bar), axis=1),
                       fmt=['%7d', '%18.8f', '%18.8f'], header="ResID ln(Protection factor) Std. Dev.") # Use residue indices internally, print out IDs
        return hres, n_contacts, n_hbonds, pf_bar, pf_byframe, lnpf_bar, lnpf_byframe
    @functions.cacheobj()
    def run(self, trajectory):
        """Runs a by-residue HDX prediction for the provided MDTraj trajectory.

        Assigns special residue states (cis-proline, disulfides, His
        protonation, termini), calculates protection factors, intrinsic
        rates and deuterated fractions in that order.

        Usage: run(traj)
        Returns: self, with results stored as attributes (e.g. BV.resfracs)"""
        self.t = trajectory # Note this will add attributes to the original trajectory, not a copy
        self.n_frames = self.t.n_frames
        self.top = trajectory.topology.copy() # This does not add attributes to the original topology
        self.assign_cis_proline()
        self.assign_disulfide()
        self.assign_his_protonation()
        self.assign_termini()
        self.reslist, self.contacts, self.hbonds, self.pfs, self.pf_byframe, self.lnpfs, self.lnpf_byframe = self.PF()
        self.rates = self.kint()
        self.resfracs = self.dfrac()
        print("Residue predictions complete")
        return self # Required for consistency with pickle
### Add further classes for methods below here
class PH(DfPredictor):
    """Class for Persson-Halle style analysis. PNAS, 2015, 112(33), 10383-10388.

    Subclass of DfPredictor. Initialises with a dictionary of default
    parameters for analysis, accessible as PH.params

    Default parameters can either be updated directly in the PH.params
    dictionary or by supplying extra parameters as kwargs during
    initialisation, e.g.: PH() or PH(**param_dict)

    Additional method parameters (type, default value) that can be defined here:
    cut_O (float, 0.26) : Cutoff in nm for calculating protein-water contacts
    contact_method (str, 'cutoff') : Method for calculating protein-water contacts
    switch_method (str, 'rational_6_12') : Method for switching function if contact_method == 'switch'
    switch_scale (float, 1.0) : scale (gradient) for switching function if contact_method == 'switch'
    switch_width (float, 0.25) : Width in nm over which switching function is applied

    Run a by-residue deuterated fraction prediction with these parameters
    using the PH.run method."""

    def __init__(self, **extra_params):
        """Initialise parameters for Persson-Halle-style analysis.
        See self.params for default values"""
        # Initialise main parameters with defaults
        phparams = { 'cut_O' : 0.26,
                     'contact_method' : 'cutoff',
                     'switch_method' : 'rational_6_12',
                     'switch_scale' : 1.0,
                     'switch_width' : 0.25, }
        phparams.update(extra_params) # Update main parameter set from kwargs
        super(PH, self).__init__(**phparams)

    def __str__(self):
        """Print the method name"""
        return 'Persson-Halle'

    def __add__(self, other):
        """Sum results in other method object to this one, weighted by number of frames in each.

        Returns self unchanged if *other* is not a PH object or either object
        has no results yet; raises HDX_Error for mismatched intrinsic rates."""
        if isinstance(other, PH):
            new = copy.deepcopy(self)
            try:
                if np.array_equal(new.rates, other.rates):
                    new.n_frames += other.n_frames
                    new.watcontacts = np.concatenate((self.watcontacts, other.watcontacts), axis=1)
                    new.pf_byframe = np.concatenate((self.pf_byframe, other.pf_byframe), axis=1)
                    # Recompute open/closed ratios from the combined contacts
                    new.PF(update_only=True)
                    new.resfracs = new.dfrac(write=False)
                    return new
                else:
                    raise HDX_Error("Cannot sum two method objects with different intrinsic rates.")
            except AttributeError:
                return self
        else:
            return self

    def calc_wat_contacts(self, hn_atms):
        """Calculate contacts for each amide and frame in the trajectory
        using a given method defined in PH.params['contact_method'].

        Current options:
           'cutoff' : Use a hard cutoff for counting contacts, defined as PH.params['cut_O']
           'switch' : Use a switching function for counting contacts.
                      r <= PH.params['cut_O'], count = 1
                      r > PH.params['cut_O'], 0 < switched_count < 1

        Options for the switching function should be defined in the 'PH.params'
        dictionary. hn_atms are the amide H atoms to search for contacts.
        Returns count of contacts for each frame in trajectory PH.t."""
        # Switch for contacts methods
        cmethods = {
            'cutoff' : self._calc_wat_contacts_cutoff,
            'switch' : self._calc_wat_contacts_switch
        }
        return cmethods[self.params['contact_method']](hn_atms)

    def _calc_wat_contacts_cutoff(self, hn_atms):
        """Calculate water contacts for each frame and residue in the trajectory
        using a hard distance cutoff"""
        solidxs = self.top.select("water and element O")
        if self.params['skip_first']:
            hn_atms = hn_atms[1:]
        reslist = [ self.top.atom(i).residue.index for i in hn_atms ]
        contacts = np.zeros((len(reslist), self.t.n_frames))
        for idx, hn in enumerate(hn_atms):
            contacts[idx] = np.array(list(map(len, md.compute_neighbors(self.t, self.params['cut_O'],
                                              np.asarray([hn]), haystack_indices=solidxs))))
            if self.params['save_detailed']:
                with open("Waters_chain_%d_res_%d.tmp" % (self.top.atom(hn).residue.chain.index, self.top.atom(hn).residue.resSeq), 'ab') as wat_f:
                    np.savetxt(wat_f, contacts[idx], fmt='%d')
        return np.asarray(reslist), contacts

    def _calc_wat_contacts_switch(self, hn_atms):
        """Calculate water contacts for each frame and residue in the trajectory
        using a switching function with parameters defined in the PH.params dictionary"""
        smethods = {
            'rational_6_12' : functions.rational_6_12,
            'sigmoid' : functions.sigmoid,
            'exponential' : functions.exponential,
            'gaussian' : functions.gaussian
        }
        do_switch = lambda x: smethods[self.params['switch_method']](x, self.params['switch_scale'], self.params['cut_O'])
        solidxs = self.top.select("water and element O")
        if self.params['skip_first']:
            hn_atms = hn_atms[1:]
        reslist = [ self.top.atom(i).residue.index for i in hn_atms ]
        contacts = np.zeros((len(reslist), self.t.n_frames))
        for idx, hn in enumerate(hn_atms):
            # Get count within lowcut
            lowcut_ctacts = md.compute_neighbors(self.t, self.params['cut_O'], np.asarray([hn]), haystack_indices=solidxs)
            highcut_ctacts = md.compute_neighbors(self.t, self.params['cut_O'] + self.params['switch_width'], np.asarray([hn]), haystack_indices=solidxs)
            # BUGFIX: materialise the counts in a list - under Python 3,
            # np.asarray(map(...)) wraps the map iterator in a useless
            # 0-d object array instead of the per-frame counts
            contact_count = np.asarray([len(y) for y in lowcut_ctacts])
            pairs = self.t.top.select_pairs(np.array([hn]), solidxs)
            fulldists = md.compute_distances(self.t, pairs)
            new_contact_count = []
            for frameidx, count, lowidxs, highidxs in zip(range(0, self.t.n_frames), contact_count, lowcut_ctacts, highcut_ctacts):
                # Waters between the low and high cutoffs get a fractional,
                # switched contribution based on their distances
                betweenidxs = highidxs[np.in1d(highidxs, lowidxs) == False]
                currdists = fulldists[frameidx][np.where(np.in1d(pairs[:,1], betweenidxs))[0]]
                # BUGFIX: np.sum(map(...)) does not iterate the map object
                # under Python 3 - evaluate the switch values in a list first
                count += np.sum([do_switch(d) for d in currdists])
                new_contact_count.append(count)
            contacts[idx] = np.array(new_contact_count)
            if self.params['save_detailed']:
                # NOTE(review): filename lacks the chain index used by the
                # 'cutoff' variant ("Waters_chain_%d_res_%d.tmp") - kept
                # as-is to avoid breaking downstream file parsing
                with open("Waters_%d.tmp" % self.top.atom(hn).residue.resSeq, 'ab') as wat_f:
                    np.savetxt(wat_f, contacts[idx], fmt='%10.8f')
        return np.asarray(reslist), contacts

    def PF(self, update_only=False):
        """Calculate Persson-Halle 'protection factors' as the ratio of
        closed (< 2 water contacts) to open (>= 2 water contacts) frames
        for each amide.

        With update_only=True only the open/closed ratio is recomputed from
        the existing PH.watcontacts (used when summing chunked results);
        otherwise water contacts are calculated first and results stored as
        PH.reslist, PH.watcontacts, PH.pfs and PH.pf_byframe."""
        if not update_only:
            hn_atms = functions.extract_HN(self.t, log=self.params['logfile'])
            prolines = functions.list_prolines(self.t, log=self.params['logfile'])
            # Check all hn_atoms are from protein residues except prolines
            if prolines is not None:
                protlist = np.asarray([ self.top.atom(a).residue.index for a in hn_atms if self.top.atom(a).residue.is_protein and self.top.atom(a).residue.index not in prolines[:,1] ])
            else:
                protlist = np.asarray([ self.top.atom(a).residue.index for a in hn_atms if self.top.atom(a).residue.is_protein ])
            self.reslist, self.watcontacts = self.calc_wat_contacts(hn_atms)
            if self.params['skip_first']:
                if not np.array_equal(protlist[1:], self.reslist):
                    raise HDX_Error("One or more residues analysed for water contacts is either proline or a non-protein residue. Check your inputs!")
            else:
                if not np.array_equal(protlist, self.reslist):
                    raise HDX_Error("One or more residues analysed for water contacts is either proline or a non-protein residue. Check your inputs!")
        # Update/calculation of PF
        opencount, closedcount = np.sum(self.watcontacts >= 2, axis=1), np.sum(self.watcontacts < 2, axis=1)
        with np.errstate(divide='ignore'):
            self.pfs = closedcount/opencount # Ignores divide by zero
#        self.pfs[np.isinf(self.pfs)] = self.n_frames
        if not update_only:
            self.pf_byframe = np.repeat(np.atleast_2d(self.pfs).T, self.n_frames, axis=1)
            self.pfs = np.stack((self.pfs, np.zeros(len(self.watcontacts))), axis=1)

    @functions.cacheobj()
    def run(self, trajectory):
        """Runs a by-residue HDX prediction for the provided MDTraj trajectory

        Usage: run(traj)
        Returns: self, with results stored as attributes (e.g. PH.resfracs)"""
        self.t = trajectory # Note this will add attributes to the original trajectory, not a copy
        self.n_frames = self.t.n_frames
        self.top = trajectory.topology.copy() # This does not add attributes to the original topology
        self.assign_cis_proline()
        self.assign_disulfide()
        self.assign_his_protonation()
        self.assign_termini()
        self.PF()
        self.rates = self.kint()
        self.resfracs = self.dfrac()
        print("Residue predictions complete")
        return self # Required for consistency with pickle
| 52.21978 | 185 | 0.616432 | 32,987 | 0.991673 | 0 | 0 | 1,722 | 0.051768 | 0 | 0 | 16,909 | 0.508327 |
ad24ff76711efc4ad761466ca54751008e0bd6ce | 1,176 | py | Python | datasource/interface.py | YAmikep/datasource | 6c8d72bd299aa0a9e2880228f0f39d2b8721b146 | [
"MIT"
] | 1 | 2018-06-16T11:33:56.000Z | 2018-06-16T11:33:56.000Z | datasource/interface.py | YAmikep/datasource | 6c8d72bd299aa0a9e2880228f0f39d2b8721b146 | [
"MIT"
] | 1 | 2020-03-24T17:32:45.000Z | 2020-03-24T17:32:45.000Z | datasource/interface.py | YAmikep/datasource | 6c8d72bd299aa0a9e2880228f0f39d2b8721b146 | [
"MIT"
] | 2 | 2018-06-16T11:37:34.000Z | 2020-07-30T17:56:54.000Z | MAX_MEMORY = 5 * 1024 * 2 ** 10 # 5 MB
BUFFER_SIZE = 1 * 512 * 2 ** 10 # 512 KB
class DataSourceInterface(object):
"""Provides a uniform API regardless of how the data should be fetched."""
def __init__(self, target, preload=False, **kwargs):
raise NotImplementedError()
@property
def is_loaded(self):
raise NotImplementedError()
def load(self):
"""
Loads the data if not already loaded.
"""
raise NotImplementedError()
def size(self, force_load=False):
"""
Returns the size of the data.
If the datasource has not loaded the data yet (see preload argument in constructor), the size is by default equal to 0.
Set force_load to True if you want to trigger data loading if not done yet.
:param boolean force_load: if set to True will force data loading if not done yet.
"""
raise NotImplementedError()
def get_reader(self):
"""
Returns an independent reader (with the read and seek methods).
The data will be automatically loaded if not done yet.
"""
raise NotImplementedError()
| 30.153846 | 127 | 0.62415 | 1,092 | 0.928571 | 0 | 0 | 70 | 0.059524 | 0 | 0 | 667 | 0.567177 |
ad2588d5d21fa5a0ab0624f48b923896aa205c21 | 214 | py | Python | python/mock_patch/test_topathch.py | amitsaha/playground | 82cb5ac02ac90d3fa858a5153b0a5705187c14ce | [
"Unlicense"
] | 4 | 2018-04-14T16:28:39.000Z | 2021-11-14T12:08:02.000Z | python/mock_patch/test_topathch.py | amitsaha/playground | 82cb5ac02ac90d3fa858a5153b0a5705187c14ce | [
"Unlicense"
] | 3 | 2022-02-14T10:38:51.000Z | 2022-02-27T16:01:16.000Z | python/mock_patch/test_topathch.py | amitsaha/playground | 82cb5ac02ac90d3fa858a5153b0a5705187c14ce | [
"Unlicense"
] | 4 | 2015-07-07T01:01:27.000Z | 2019-04-12T05:38:26.000Z | from mock import patch
@patch('topatch.afunction')
class TestToPatch():
def test_afunction(self, mock_afunction):
mock_afunction('foo', 'bar')
mock_afunction.assert_any_call('foo', 'bar')
| 23.777778 | 52 | 0.682243 | 161 | 0.752336 | 0 | 0 | 189 | 0.883178 | 0 | 0 | 39 | 0.182243 |
ad276315f893a288238d66dae8d8e08290828c3c | 85,143 | py | Python | pyjsg/parser/jsgParser.py | hsolbrig/pyjsg | 5ef46d9af6a94a0cd0e91ebf8b22f61c17e78429 | [
"CC0-1.0"
] | 3 | 2017-07-23T11:11:23.000Z | 2020-11-30T15:36:51.000Z | pyjsg/parser/jsgParser.py | hsolbrig/pyjsg | 5ef46d9af6a94a0cd0e91ebf8b22f61c17e78429 | [
"CC0-1.0"
] | 15 | 2018-01-05T17:18:34.000Z | 2021-12-13T17:40:25.000Z | try2/lib/python3.9/site-packages/pyjsg/parser/jsgParser.py | diatomsRcool/eco-kg | 4251f42ca2ab353838a39b640cb97593db76d4f4 | [
"BSD-3-Clause"
] | 1 | 2021-01-18T10:32:56.000Z | 2021-01-18T10:32:56.000Z | # Generated from jsgParser.g4 by ANTLR 4.9
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\'")
buf.write("\u0143\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
buf.write("\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36")
buf.write("\4\37\t\37\4 \t \4!\t!\4\"\t\"\3\2\5\2F\n\2\3\2\7\2I\n")
buf.write("\2\f\2\16\2L\13\2\3\2\7\2O\n\2\f\2\16\2R\13\2\3\2\5\2")
buf.write("U\n\2\3\2\3\2\3\3\3\3\3\3\5\3\\\n\3\3\3\3\3\3\4\3\4\6")
buf.write("\4b\n\4\r\4\16\4c\3\5\3\5\7\5h\n\5\f\5\16\5k\13\5\3\5")
buf.write("\3\5\3\6\3\6\3\6\3\6\5\6s\n\6\3\7\3\7\3\7\3\b\3\b\5\b")
buf.write("z\n\b\3\b\3\b\3\b\5\b\177\n\b\3\b\3\b\3\b\5\b\u0084\n")
buf.write("\b\3\b\3\b\5\b\u0088\n\b\3\t\3\t\6\t\u008c\n\t\r\t\16")
buf.write("\t\u008d\3\t\3\t\7\t\u0092\n\t\f\t\16\t\u0095\13\t\3\t")
buf.write("\3\t\5\t\u0099\n\t\5\t\u009b\n\t\3\n\7\n\u009e\n\n\f\n")
buf.write("\16\n\u00a1\13\n\3\13\3\13\5\13\u00a5\n\13\3\f\3\f\3\r")
buf.write("\3\r\3\r\3\r\5\r\u00ad\n\r\3\r\3\r\5\r\u00b1\n\r\3\r\3")
buf.write("\r\6\r\u00b5\n\r\r\r\16\r\u00b6\3\r\3\r\3\r\3\r\5\r\u00bd")
buf.write("\n\r\5\r\u00bf\n\r\3\16\3\16\3\17\3\17\3\17\3\20\3\20")
buf.write("\3\20\3\20\7\20\u00ca\n\20\f\20\16\20\u00cd\13\20\3\20")
buf.write("\5\20\u00d0\n\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21\3")
buf.write("\22\3\22\3\22\3\22\3\22\7\22\u00de\n\22\f\22\16\22\u00e1")
buf.write("\13\22\3\22\3\22\3\23\3\23\3\24\3\24\5\24\u00e9\n\24\3")
buf.write("\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\5\25\u00f4")
buf.write("\n\25\3\26\3\26\3\26\6\26\u00f9\n\26\r\26\16\26\u00fa")
buf.write("\3\27\3\27\3\30\3\30\3\30\3\30\3\30\3\30\3\30\5\30\u0106")
buf.write("\n\30\5\30\u0108\n\30\3\30\5\30\u010b\n\30\3\31\3\31\7")
buf.write("\31\u010f\n\31\f\31\16\31\u0112\13\31\3\32\3\32\3\32\3")
buf.write("\32\3\32\3\33\3\33\5\33\u011b\n\33\3\34\3\34\3\34\7\34")
buf.write("\u0120\n\34\f\34\16\34\u0123\13\34\3\35\3\35\5\35\u0127")
buf.write("\n\35\3\36\6\36\u012a\n\36\r\36\16\36\u012b\3\37\3\37")
buf.write("\5\37\u0130\n\37\3\37\3\37\5\37\u0134\n\37\5\37\u0136")
buf.write("\n\37\3 \3 \3 \3 \3!\3!\3!\5!\u013f\n!\3\"\3\"\3\"\2\2")
buf.write("#\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\62")
buf.write("\64\668:<>@B\2\7\4\2\3\3\7\7\3\2\4\5\4\2\7\7\f\22\4\2")
buf.write("\6\6\32\32\4\2\5\5$$\2\u0154\2E\3\2\2\2\4X\3\2\2\2\6_")
buf.write("\3\2\2\2\be\3\2\2\2\nr\3\2\2\2\ft\3\2\2\2\16\u0087\3\2")
buf.write("\2\2\20\u009a\3\2\2\2\22\u009f\3\2\2\2\24\u00a2\3\2\2")
buf.write("\2\26\u00a6\3\2\2\2\30\u00be\3\2\2\2\32\u00c0\3\2\2\2")
buf.write("\34\u00c2\3\2\2\2\36\u00c5\3\2\2\2 \u00d3\3\2\2\2\"\u00d8")
buf.write("\3\2\2\2$\u00e4\3\2\2\2&\u00e8\3\2\2\2(\u00f3\3\2\2\2")
buf.write("*\u00f5\3\2\2\2,\u00fc\3\2\2\2.\u010a\3\2\2\2\60\u010c")
buf.write("\3\2\2\2\62\u0113\3\2\2\2\64\u0118\3\2\2\2\66\u011c\3")
buf.write("\2\2\28\u0126\3\2\2\2:\u0129\3\2\2\2<\u0135\3\2\2\2>\u0137")
buf.write("\3\2\2\2@\u013e\3\2\2\2B\u0140\3\2\2\2DF\5\4\3\2ED\3\2")
buf.write("\2\2EF\3\2\2\2FJ\3\2\2\2GI\5\b\5\2HG\3\2\2\2IL\3\2\2\2")
buf.write("JH\3\2\2\2JK\3\2\2\2KP\3\2\2\2LJ\3\2\2\2MO\5\n\6\2NM\3")
buf.write("\2\2\2OR\3\2\2\2PN\3\2\2\2PQ\3\2\2\2QT\3\2\2\2RP\3\2\2")
buf.write("\2SU\5\60\31\2TS\3\2\2\2TU\3\2\2\2UV\3\2\2\2VW\7\2\2\3")
buf.write("W\3\3\2\2\2XY\7\t\2\2Y[\5\32\16\2Z\\\5\6\4\2[Z\3\2\2\2")
buf.write("[\\\3\2\2\2\\]\3\2\2\2]^\7\25\2\2^\5\3\2\2\2_a\7\26\2")
buf.write("\2`b\5,\27\2a`\3\2\2\2bc\3\2\2\2ca\3\2\2\2cd\3\2\2\2d")
buf.write("\7\3\2\2\2ei\7\n\2\2fh\5\32\16\2gf\3\2\2\2hk\3\2\2\2i")
buf.write("g\3\2\2\2ij\3\2\2\2jl\3\2\2\2ki\3\2\2\2lm\7\25\2\2m\t")
buf.write("\3\2\2\2ns\5\f\7\2os\5\34\17\2ps\5 \21\2qs\5\"\22\2rn")
buf.write("\3\2\2\2ro\3\2\2\2rp\3\2\2\2rq\3\2\2\2s\13\3\2\2\2tu\7")
buf.write("\4\2\2uv\5\16\b\2v\r\3\2\2\2wy\7\27\2\2xz\5\20\t\2yx\3")
buf.write("\2\2\2yz\3\2\2\2z{\3\2\2\2{\u0088\7\30\2\2|~\7\27\2\2")
buf.write("}\177\t\2\2\2~}\3\2\2\2~\177\3\2\2\2\177\u0080\3\2\2\2")
buf.write("\u0080\u0081\7\13\2\2\u0081\u0083\5&\24\2\u0082\u0084")
buf.write("\5.\30\2\u0083\u0082\3\2\2\2\u0083\u0084\3\2\2\2\u0084")
buf.write("\u0085\3\2\2\2\u0085\u0086\7\30\2\2\u0086\u0088\3\2\2")
buf.write("\2\u0087w\3\2\2\2\u0087|\3\2\2\2\u0088\17\3\2\2\2\u0089")
buf.write("\u009b\7\31\2\2\u008a\u008c\5\24\13\2\u008b\u008a\3\2")
buf.write("\2\2\u008c\u008d\3\2\2\2\u008d\u008b\3\2\2\2\u008d\u008e")
buf.write("\3\2\2\2\u008e\u0093\3\2\2\2\u008f\u0090\7\37\2\2\u0090")
buf.write("\u0092\5\22\n\2\u0091\u008f\3\2\2\2\u0092\u0095\3\2\2")
buf.write("\2\u0093\u0091\3\2\2\2\u0093\u0094\3\2\2\2\u0094\u0098")
buf.write("\3\2\2\2\u0095\u0093\3\2\2\2\u0096\u0097\7\37\2\2\u0097")
buf.write("\u0099\5\26\f\2\u0098\u0096\3\2\2\2\u0098\u0099\3\2\2")
buf.write("\2\u0099\u009b\3\2\2\2\u009a\u0089\3\2\2\2\u009a\u008b")
buf.write("\3\2\2\2\u009b\21\3\2\2\2\u009c\u009e\5\24\13\2\u009d")
buf.write("\u009c\3\2\2\2\u009e\u00a1\3\2\2\2\u009f\u009d\3\2\2\2")
buf.write("\u009f\u00a0\3\2\2\2\u00a0\23\3\2\2\2\u00a1\u009f\3\2")
buf.write("\2\2\u00a2\u00a4\5\30\r\2\u00a3\u00a5\7\31\2\2\u00a4\u00a3")
buf.write("\3\2\2\2\u00a4\u00a5\3\2\2\2\u00a5\25\3\2\2\2\u00a6\u00a7")
buf.write("\7\31\2\2\u00a7\27\3\2\2\2\u00a8\u00a9\5\32\16\2\u00a9")
buf.write("\u00aa\7 \2\2\u00aa\u00ac\5&\24\2\u00ab\u00ad\5.\30\2")
buf.write("\u00ac\u00ab\3\2\2\2\u00ac\u00ad\3\2\2\2\u00ad\u00bf\3")
buf.write("\2\2\2\u00ae\u00b0\5,\27\2\u00af\u00b1\5.\30\2\u00b0\u00af")
buf.write("\3\2\2\2\u00b0\u00b1\3\2\2\2\u00b1\u00bf\3\2\2\2\u00b2")
buf.write("\u00b4\7\35\2\2\u00b3\u00b5\5\32\16\2\u00b4\u00b3\3\2")
buf.write("\2\2\u00b5\u00b6\3\2\2\2\u00b6\u00b4\3\2\2\2\u00b6\u00b7")
buf.write("\3\2\2\2\u00b7\u00b8\3\2\2\2\u00b8\u00b9\7\36\2\2\u00b9")
buf.write("\u00ba\7 \2\2\u00ba\u00bc\5&\24\2\u00bb\u00bd\5.\30\2")
buf.write("\u00bc\u00bb\3\2\2\2\u00bc\u00bd\3\2\2\2\u00bd\u00bf\3")
buf.write("\2\2\2\u00be\u00a8\3\2\2\2\u00be\u00ae\3\2\2\2\u00be\u00b2")
buf.write("\3\2\2\2\u00bf\31\3\2\2\2\u00c0\u00c1\t\3\2\2\u00c1\33")
buf.write("\3\2\2\2\u00c2\u00c3\7\4\2\2\u00c3\u00c4\5\36\20\2\u00c4")
buf.write("\35\3\2\2\2\u00c5\u00c6\7\23\2\2\u00c6\u00cb\5&\24\2\u00c7")
buf.write("\u00c8\7\37\2\2\u00c8\u00ca\5&\24\2\u00c9\u00c7\3\2\2")
buf.write("\2\u00ca\u00cd\3\2\2\2\u00cb\u00c9\3\2\2\2\u00cb\u00cc")
buf.write("\3\2\2\2\u00cc\u00cf\3\2\2\2\u00cd\u00cb\3\2\2\2\u00ce")
buf.write("\u00d0\5.\30\2\u00cf\u00ce\3\2\2\2\u00cf\u00d0\3\2\2\2")
buf.write("\u00d0\u00d1\3\2\2\2\u00d1\u00d2\7\24\2\2\u00d2\37\3\2")
buf.write("\2\2\u00d3\u00d4\7\4\2\2\u00d4\u00d5\7!\2\2\u00d5\u00d6")
buf.write("\5\20\t\2\u00d6\u00d7\7\25\2\2\u00d7!\3\2\2\2\u00d8\u00d9")
buf.write("\7\4\2\2\u00d9\u00da\7!\2\2\u00da\u00df\5(\25\2\u00db")
buf.write("\u00dc\7\37\2\2\u00dc\u00de\5(\25\2\u00dd\u00db\3\2\2")
buf.write("\2\u00de\u00e1\3\2\2\2\u00df\u00dd\3\2\2\2\u00df\u00e0")
buf.write("\3\2\2\2\u00e0\u00e2\3\2\2\2\u00e1\u00df\3\2\2\2\u00e2")
buf.write("\u00e3\7\25\2\2\u00e3#\3\2\2\2\u00e4\u00e5\t\4\2\2\u00e5")
buf.write("%\3\2\2\2\u00e6\u00e9\5,\27\2\u00e7\u00e9\5(\25\2\u00e8")
buf.write("\u00e6\3\2\2\2\u00e8\u00e7\3\2\2\2\u00e9\'\3\2\2\2\u00ea")
buf.write("\u00f4\7\3\2\2\u00eb\u00f4\7\5\2\2\u00ec\u00f4\5$\23\2")
buf.write("\u00ed\u00f4\5\16\b\2\u00ee\u00f4\5\36\20\2\u00ef\u00f0")
buf.write("\7\35\2\2\u00f0\u00f1\5*\26\2\u00f1\u00f2\7\36\2\2\u00f2")
buf.write("\u00f4\3\2\2\2\u00f3\u00ea\3\2\2\2\u00f3\u00eb\3\2\2\2")
buf.write("\u00f3\u00ec\3\2\2\2\u00f3\u00ed\3\2\2\2\u00f3\u00ee\3")
buf.write("\2\2\2\u00f3\u00ef\3\2\2\2\u00f4)\3\2\2\2\u00f5\u00f8")
buf.write("\5&\24\2\u00f6\u00f7\7\37\2\2\u00f7\u00f9\5&\24\2\u00f8")
buf.write("\u00f6\3\2\2\2\u00f9\u00fa\3\2\2\2\u00fa\u00f8\3\2\2\2")
buf.write("\u00fa\u00fb\3\2\2\2\u00fb+\3\2\2\2\u00fc\u00fd\7\4\2")
buf.write("\2\u00fd-\3\2\2\2\u00fe\u010b\7\33\2\2\u00ff\u010b\7\32")
buf.write("\2\2\u0100\u010b\7\34\2\2\u0101\u0102\7\27\2\2\u0102\u0107")
buf.write("\7\6\2\2\u0103\u0105\7\31\2\2\u0104\u0106\t\5\2\2\u0105")
buf.write("\u0104\3\2\2\2\u0105\u0106\3\2\2\2\u0106\u0108\3\2\2\2")
buf.write("\u0107\u0103\3\2\2\2\u0107\u0108\3\2\2\2\u0108\u0109\3")
buf.write("\2\2\2\u0109\u010b\7\30\2\2\u010a\u00fe\3\2\2\2\u010a")
buf.write("\u00ff\3\2\2\2\u010a\u0100\3\2\2\2\u010a\u0101\3\2\2\2")
buf.write("\u010b/\3\2\2\2\u010c\u0110\7\b\2\2\u010d\u010f\5\62\32")
buf.write("\2\u010e\u010d\3\2\2\2\u010f\u0112\3\2\2\2\u0110\u010e")
buf.write("\3\2\2\2\u0110\u0111\3\2\2\2\u0111\61\3\2\2\2\u0112\u0110")
buf.write("\3\2\2\2\u0113\u0114\7$\2\2\u0114\u0115\7 \2\2\u0115\u0116")
buf.write("\5\64\33\2\u0116\u0117\7\25\2\2\u0117\63\3\2\2\2\u0118")
buf.write("\u011a\5\66\34\2\u0119\u011b\5$\23\2\u011a\u0119\3\2\2")
buf.write("\2\u011a\u011b\3\2\2\2\u011b\65\3\2\2\2\u011c\u0121\5")
buf.write("8\35\2\u011d\u011e\7\37\2\2\u011e\u0120\58\35\2\u011f")
buf.write("\u011d\3\2\2\2\u0120\u0123\3\2\2\2\u0121\u011f\3\2\2\2")
buf.write("\u0121\u0122\3\2\2\2\u0122\67\3\2\2\2\u0123\u0121\3\2")
buf.write("\2\2\u0124\u0127\5:\36\2\u0125\u0127\3\2\2\2\u0126\u0124")
buf.write("\3\2\2\2\u0126\u0125\3\2\2\2\u01279\3\2\2\2\u0128\u012a")
buf.write("\5<\37\2\u0129\u0128\3\2\2\2\u012a\u012b\3\2\2\2\u012b")
buf.write("\u0129\3\2\2\2\u012b\u012c\3\2\2\2\u012c;\3\2\2\2\u012d")
buf.write("\u012f\5@!\2\u012e\u0130\5.\30\2\u012f\u012e\3\2\2\2\u012f")
buf.write("\u0130\3\2\2\2\u0130\u0136\3\2\2\2\u0131\u0133\5> \2\u0132")
buf.write("\u0134\5.\30\2\u0133\u0132\3\2\2\2\u0133\u0134\3\2\2\2")
buf.write("\u0134\u0136\3\2\2\2\u0135\u012d\3\2\2\2\u0135\u0131\3")
buf.write("\2\2\2\u0136=\3\2\2\2\u0137\u0138\7\35\2\2\u0138\u0139")
buf.write("\5\66\34\2\u0139\u013a\7\36\2\2\u013a?\3\2\2\2\u013b\u013f")
buf.write("\5B\"\2\u013c\u013f\7%\2\2\u013d\u013f\7\7\2\2\u013e\u013b")
buf.write("\3\2\2\2\u013e\u013c\3\2\2\2\u013e\u013d\3\2\2\2\u013f")
buf.write("A\3\2\2\2\u0140\u0141\t\6\2\2\u0141C\3\2\2\2+EJPT[cir")
buf.write("y~\u0083\u0087\u008d\u0093\u0098\u009a\u009f\u00a4\u00ac")
buf.write("\u00b0\u00b6\u00bc\u00be\u00cb\u00cf\u00df\u00e8\u00f3")
buf.write("\u00fa\u0105\u0107\u010a\u0110\u011a\u0121\u0126\u012b")
buf.write("\u012f\u0133\u0135\u013e")
return buf.getvalue()
class jsgParser ( Parser ):
grammarFileName = "jsgParser.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "'@terminals'", "'.TYPE'",
"'.IGNORE'", "'->'", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"'['", "']'", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "'='" ]
symbolicNames = [ "<INVALID>", "LEXER_ID_REF", "ID", "STRING", "INT",
"ANY", "TERMINALS", "TYPE", "IGNORE", "MAPSTO", "JSON_STRING",
"JSON_NUMBER", "JSON_INT", "JSON_BOOL", "JSON_NULL",
"JSON_ARRAY", "JSON_OBJECT", "OBRACKET", "CBRACKET",
"SEMI", "DASH", "OBRACE", "CBRACE", "COMMA", "STAR",
"QMARK", "PLUS", "OPREN", "CPREN", "BAR", "COLON",
"EQUALS", "PASS", "COMMENT", "LEXER_ID", "LEXER_CHAR_SET",
"LEXER_PASS", "LEXER_COMMENT" ]
RULE_doc = 0
RULE_typeDirective = 1
RULE_typeExceptions = 2
RULE_ignoreDirective = 3
RULE_grammarElt = 4
RULE_objectDef = 5
RULE_objectExpr = 6
RULE_membersDef = 7
RULE_altMemberDef = 8
RULE_member = 9
RULE_lastComma = 10
RULE_pairDef = 11
RULE_name = 12
RULE_arrayDef = 13
RULE_arrayExpr = 14
RULE_objectMacro = 15
RULE_valueTypeMacro = 16
RULE_builtinValueType = 17
RULE_valueType = 18
RULE_nonRefValueType = 19
RULE_typeAlternatives = 20
RULE_idref = 21
RULE_ebnfSuffix = 22
RULE_lexerRules = 23
RULE_lexerRuleSpec = 24
RULE_lexerRuleBlock = 25
RULE_lexerAltList = 26
RULE_lexerAlt = 27
RULE_lexerElements = 28
RULE_lexerElement = 29
RULE_lexerBlock = 30
RULE_lexerAtom = 31
RULE_lexerTerminal = 32
ruleNames = [ "doc", "typeDirective", "typeExceptions", "ignoreDirective",
"grammarElt", "objectDef", "objectExpr", "membersDef",
"altMemberDef", "member", "lastComma", "pairDef", "name",
"arrayDef", "arrayExpr", "objectMacro", "valueTypeMacro",
"builtinValueType", "valueType", "nonRefValueType", "typeAlternatives",
"idref", "ebnfSuffix", "lexerRules", "lexerRuleSpec",
"lexerRuleBlock", "lexerAltList", "lexerAlt", "lexerElements",
"lexerElement", "lexerBlock", "lexerAtom", "lexerTerminal" ]
EOF = Token.EOF
LEXER_ID_REF=1
ID=2
STRING=3
INT=4
ANY=5
TERMINALS=6
TYPE=7
IGNORE=8
MAPSTO=9
JSON_STRING=10
JSON_NUMBER=11
JSON_INT=12
JSON_BOOL=13
JSON_NULL=14
JSON_ARRAY=15
JSON_OBJECT=16
OBRACKET=17
CBRACKET=18
SEMI=19
DASH=20
OBRACE=21
CBRACE=22
COMMA=23
STAR=24
QMARK=25
PLUS=26
OPREN=27
CPREN=28
BAR=29
COLON=30
EQUALS=31
PASS=32
COMMENT=33
LEXER_ID=34
LEXER_CHAR_SET=35
LEXER_PASS=36
LEXER_COMMENT=37
    def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
        """Create a parser over *input*, wiring up the class-shared ATN simulator."""
        super().__init__(input, output)
        self.checkVersion("4.9")  # generated against ANTLR runtime 4.9
        self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
        self._predicates = None
    class DocContext(ParserRuleContext):
        """Parse-tree node for the `doc` rule (the grammar entry point)."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def EOF(self):
            return self.getToken(jsgParser.EOF, 0)

        def typeDirective(self):
            return self.getTypedRuleContext(jsgParser.TypeDirectiveContext,0)

        def ignoreDirective(self, i:int=None):
            # i is None -> all child contexts; otherwise the i-th one.
            if i is None:
                return self.getTypedRuleContexts(jsgParser.IgnoreDirectiveContext)
            else:
                return self.getTypedRuleContext(jsgParser.IgnoreDirectiveContext,i)

        def grammarElt(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(jsgParser.GrammarEltContext)
            else:
                return self.getTypedRuleContext(jsgParser.GrammarEltContext,i)

        def lexerRules(self):
            return self.getTypedRuleContext(jsgParser.LexerRulesContext,0)

        def getRuleIndex(self):
            return jsgParser.RULE_doc

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDoc" ):
                return visitor.visitDoc(self)
            else:
                return visitor.visitChildren(self)

    def doc(self):
        """Parse: optional typeDirective, ignoreDirective*, grammarElt*,
        optional lexerRules, then EOF."""
        localctx = jsgParser.DocContext(self, self._ctx, self.state)
        self.enterRule(localctx, 0, self.RULE_doc)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            # Optional leading .TYPE directive.
            self.state = 67
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==jsgParser.TYPE:
                self.state = 66
                self.typeDirective()

            # Zero or more .IGNORE directives.
            self.state = 72
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==jsgParser.IGNORE:
                self.state = 69
                self.ignoreDirective()
                self.state = 74
                self._errHandler.sync(self)
                _la = self._input.LA(1)

            # Zero or more grammar elements (each starts with an ID).
            self.state = 78
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==jsgParser.ID:
                self.state = 75
                self.grammarElt()
                self.state = 80
                self._errHandler.sync(self)
                _la = self._input.LA(1)

            # Optional trailing @terminals section.
            self.state = 82
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==jsgParser.TERMINALS:
                self.state = 81
                self.lexerRules()

            self.state = 84
            self.match(jsgParser.EOF)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class TypeDirectiveContext(ParserRuleContext):
        """Parse-tree node for the `typeDirective` rule."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def TYPE(self):
            return self.getToken(jsgParser.TYPE, 0)

        def name(self):
            return self.getTypedRuleContext(jsgParser.NameContext,0)

        def SEMI(self):
            return self.getToken(jsgParser.SEMI, 0)

        def typeExceptions(self):
            return self.getTypedRuleContext(jsgParser.TypeExceptionsContext,0)

        def getRuleIndex(self):
            return jsgParser.RULE_typeDirective

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitTypeDirective" ):
                return visitor.visitTypeDirective(self)
            else:
                return visitor.visitChildren(self)

    def typeDirective(self):
        """Parse: TYPE name, optional typeExceptions, SEMI."""
        localctx = jsgParser.TypeDirectiveContext(self, self._ctx, self.state)
        self.enterRule(localctx, 2, self.RULE_typeDirective)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 86
            self.match(jsgParser.TYPE)
            self.state = 87
            self.name()
            # Optional "- idref+" exception list, introduced by DASH.
            self.state = 89
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==jsgParser.DASH:
                self.state = 88
                self.typeExceptions()

            self.state = 91
            self.match(jsgParser.SEMI)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class TypeExceptionsContext(ParserRuleContext):
        """Parse-tree node for the `typeExceptions` rule."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def DASH(self):
            return self.getToken(jsgParser.DASH, 0)

        def idref(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(jsgParser.IdrefContext)
            else:
                return self.getTypedRuleContext(jsgParser.IdrefContext,i)

        def getRuleIndex(self):
            return jsgParser.RULE_typeExceptions

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitTypeExceptions" ):
                return visitor.visitTypeExceptions(self)
            else:
                return visitor.visitChildren(self)

    def typeExceptions(self):
        """Parse: DASH followed by one or more idref."""
        localctx = jsgParser.TypeExceptionsContext(self, self._ctx, self.state)
        self.enterRule(localctx, 4, self.RULE_typeExceptions)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 93
            self.match(jsgParser.DASH)
            # One-or-more loop: keep consuming idrefs while the lookahead is ID.
            self.state = 95
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while True:
                self.state = 94
                self.idref()
                self.state = 97
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if not (_la==jsgParser.ID):
                    break

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class IgnoreDirectiveContext(ParserRuleContext):
        """Parse-tree node for the `ignoreDirective` rule."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def IGNORE(self):
            return self.getToken(jsgParser.IGNORE, 0)

        def SEMI(self):
            return self.getToken(jsgParser.SEMI, 0)

        def name(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(jsgParser.NameContext)
            else:
                return self.getTypedRuleContext(jsgParser.NameContext,i)

        def getRuleIndex(self):
            return jsgParser.RULE_ignoreDirective

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitIgnoreDirective" ):
                return visitor.visitIgnoreDirective(self)
            else:
                return visitor.visitChildren(self)

    def ignoreDirective(self):
        """Parse: IGNORE, zero or more names, SEMI."""
        localctx = jsgParser.IgnoreDirectiveContext(self, self._ctx, self.state)
        self.enterRule(localctx, 6, self.RULE_ignoreDirective)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 99
            self.match(jsgParser.IGNORE)
            # Zero or more names (a name is an ID or a STRING).
            self.state = 103
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==jsgParser.ID or _la==jsgParser.STRING:
                self.state = 100
                self.name()
                self.state = 105
                self._errHandler.sync(self)
                _la = self._input.LA(1)

            self.state = 106
            self.match(jsgParser.SEMI)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class GrammarEltContext(ParserRuleContext):
        """Parse-tree node for the `grammarElt` rule (one of four element kinds)."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def objectDef(self):
            return self.getTypedRuleContext(jsgParser.ObjectDefContext,0)

        def arrayDef(self):
            return self.getTypedRuleContext(jsgParser.ArrayDefContext,0)

        def objectMacro(self):
            return self.getTypedRuleContext(jsgParser.ObjectMacroContext,0)

        def valueTypeMacro(self):
            return self.getTypedRuleContext(jsgParser.ValueTypeMacroContext,0)

        def getRuleIndex(self):
            return jsgParser.RULE_grammarElt

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitGrammarElt" ):
                return visitor.visitGrammarElt(self)
            else:
                return visitor.visitChildren(self)

    def grammarElt(self):
        """Parse one of: objectDef | arrayDef | objectMacro | valueTypeMacro.

        All four alternatives start with ID, so adaptive (LL(*)) prediction
        over decision 7 selects the alternative.
        """
        localctx = jsgParser.GrammarEltContext(self, self._ctx, self.state)
        self.enterRule(localctx, 8, self.RULE_grammarElt)
        try:
            self.state = 112
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,7,self._ctx)
            if la_ == 1:
                self.enterOuterAlt(localctx, 1)
                self.state = 108
                self.objectDef()
                pass

            elif la_ == 2:
                self.enterOuterAlt(localctx, 2)
                self.state = 109
                self.arrayDef()
                pass

            elif la_ == 3:
                self.enterOuterAlt(localctx, 3)
                self.state = 110
                self.objectMacro()
                pass

            elif la_ == 4:
                self.enterOuterAlt(localctx, 4)
                self.state = 111
                self.valueTypeMacro()
                pass

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class ObjectDefContext(ParserRuleContext):
        """Parse-tree node for the `objectDef` rule."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def ID(self):
            return self.getToken(jsgParser.ID, 0)

        def objectExpr(self):
            return self.getTypedRuleContext(jsgParser.ObjectExprContext,0)

        def getRuleIndex(self):
            return jsgParser.RULE_objectDef

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitObjectDef" ):
                return visitor.visitObjectDef(self)
            else:
                return visitor.visitChildren(self)

    def objectDef(self):
        """Parse: ID followed by an objectExpr."""
        localctx = jsgParser.ObjectDefContext(self, self._ctx, self.state)
        self.enterRule(localctx, 10, self.RULE_objectDef)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 114
            self.match(jsgParser.ID)
            self.state = 115
            self.objectExpr()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class ObjectExprContext(ParserRuleContext):
        """Parse-tree node for the `objectExpr` rule (braced object body)."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def OBRACE(self):
            return self.getToken(jsgParser.OBRACE, 0)

        def CBRACE(self):
            return self.getToken(jsgParser.CBRACE, 0)

        def membersDef(self):
            return self.getTypedRuleContext(jsgParser.MembersDefContext,0)

        def MAPSTO(self):
            return self.getToken(jsgParser.MAPSTO, 0)

        def valueType(self):
            return self.getTypedRuleContext(jsgParser.ValueTypeContext,0)

        def ebnfSuffix(self):
            return self.getTypedRuleContext(jsgParser.EbnfSuffixContext,0)

        def LEXER_ID_REF(self):
            return self.getToken(jsgParser.LEXER_ID_REF, 0)

        def ANY(self):
            return self.getToken(jsgParser.ANY, 0)

        def getRuleIndex(self):
            return jsgParser.RULE_objectExpr

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitObjectExpr" ):
                return visitor.visitObjectExpr(self)
            else:
                return visitor.visitChildren(self)

    def objectExpr(self):
        """Parse one of two braced forms:

        1. OBRACE membersDef? CBRACE
        2. OBRACE (LEXER_ID_REF | ANY)? MAPSTO valueType ebnfSuffix? CBRACE

        Both start with OBRACE, so adaptive prediction (decision 11) picks
        the alternative.
        """
        localctx = jsgParser.ObjectExprContext(self, self._ctx, self.state)
        self.enterRule(localctx, 12, self.RULE_objectExpr)
        self._la = 0 # Token type
        try:
            self.state = 133
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,11,self._ctx)
            if la_ == 1:
                self.enterOuterAlt(localctx, 1)
                self.state = 117
                self.match(jsgParser.OBRACE)
                # Optional member list; the bitset below is the FIRST set of
                # membersDef (ID, STRING, COMMA, OPREN).
                self.state = 119
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.ID) | (1 << jsgParser.STRING) | (1 << jsgParser.COMMA) | (1 << jsgParser.OPREN))) != 0):
                    self.state = 118
                    self.membersDef()

                self.state = 121
                self.match(jsgParser.CBRACE)
                pass

            elif la_ == 2:
                self.enterOuterAlt(localctx, 2)
                self.state = 122
                self.match(jsgParser.OBRACE)
                # Optional key token before the MAPSTO arrow.
                self.state = 124
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==jsgParser.LEXER_ID_REF or _la==jsgParser.ANY:
                    self.state = 123
                    _la = self._input.LA(1)
                    if not(_la==jsgParser.LEXER_ID_REF or _la==jsgParser.ANY):
                        self._errHandler.recoverInline(self)
                    else:
                        self._errHandler.reportMatch(self)
                        self.consume()

                self.state = 126
                self.match(jsgParser.MAPSTO)
                self.state = 127
                self.valueType()
                # Optional cardinality suffix (OBRACE / STAR / QMARK / PLUS).
                self.state = 129
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.OBRACE) | (1 << jsgParser.STAR) | (1 << jsgParser.QMARK) | (1 << jsgParser.PLUS))) != 0):
                    self.state = 128
                    self.ebnfSuffix()

                self.state = 131
                self.match(jsgParser.CBRACE)
                pass

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class MembersDefContext(ParserRuleContext):
        """Parse-tree node for the `membersDef` rule (object member list)."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def COMMA(self):
            return self.getToken(jsgParser.COMMA, 0)

        def member(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(jsgParser.MemberContext)
            else:
                return self.getTypedRuleContext(jsgParser.MemberContext,i)

        def BAR(self, i:int=None):
            if i is None:
                return self.getTokens(jsgParser.BAR)
            else:
                return self.getToken(jsgParser.BAR, i)

        def altMemberDef(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(jsgParser.AltMemberDefContext)
            else:
                return self.getTypedRuleContext(jsgParser.AltMemberDefContext,i)

        def lastComma(self):
            return self.getTypedRuleContext(jsgParser.LastCommaContext,0)

        def getRuleIndex(self):
            return jsgParser.RULE_membersDef

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitMembersDef" ):
                return visitor.visitMembersDef(self)
            else:
                return visitor.visitChildren(self)

    def membersDef(self):
        """Parse either a lone COMMA, or: member+ (BAR altMemberDef)* (BAR lastComma)?.

        Dispatches on the lookahead token set; anything else is a
        NoViableAltException.
        """
        localctx = jsgParser.MembersDefContext(self, self._ctx, self.state)
        self.enterRule(localctx, 14, self.RULE_membersDef)
        self._la = 0 # Token type
        try:
            self.state = 152
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [jsgParser.COMMA]:
                # Alternative 1: just a comma.
                self.enterOuterAlt(localctx, 1)
                self.state = 135
                self.match(jsgParser.COMMA)
                pass
            elif token in [jsgParser.ID, jsgParser.STRING, jsgParser.OPREN]:
                self.enterOuterAlt(localctx, 2)
                # One or more members.
                self.state = 137
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                while True:
                    self.state = 136
                    self.member()
                    self.state = 139
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                    if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.ID) | (1 << jsgParser.STRING) | (1 << jsgParser.OPREN))) != 0)):
                        break

                # Zero or more "| altMemberDef" — adaptive prediction
                # (decision 13) decides whether a BAR continues the loop.
                self.state = 145
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,13,self._ctx)
                while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                    if _alt==1:
                        self.state = 141
                        self.match(jsgParser.BAR)
                        self.state = 142
                        self.altMemberDef()
                    self.state = 147
                    self._errHandler.sync(self)
                    _alt = self._interp.adaptivePredict(self._input,13,self._ctx)

                # Optional trailing "| lastComma".
                self.state = 150
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==jsgParser.BAR:
                    self.state = 148
                    self.match(jsgParser.BAR)
                    self.state = 149
                    self.lastComma()

                pass
            else:
                raise NoViableAltException(self)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class AltMemberDefContext(ParserRuleContext):
        """Parse-tree node for the `altMemberDef` rule."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def member(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(jsgParser.MemberContext)
            else:
                return self.getTypedRuleContext(jsgParser.MemberContext,i)

        def getRuleIndex(self):
            return jsgParser.RULE_altMemberDef

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitAltMemberDef" ):
                return visitor.visitAltMemberDef(self)
            else:
                return visitor.visitChildren(self)

    def altMemberDef(self):
        """Parse: zero or more members (empty alternatives are permitted)."""
        localctx = jsgParser.AltMemberDefContext(self, self._ctx, self.state)
        self.enterRule(localctx, 16, self.RULE_altMemberDef)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            # Loop while the lookahead is in member's FIRST set
            # (ID, STRING, OPREN).
            self.state = 157
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.ID) | (1 << jsgParser.STRING) | (1 << jsgParser.OPREN))) != 0):
                self.state = 154
                self.member()
                self.state = 159
                self._errHandler.sync(self)
                _la = self._input.LA(1)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class MemberContext(ParserRuleContext):
        """Parse-tree node for the `member` rule."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def pairDef(self):
            return self.getTypedRuleContext(jsgParser.PairDefContext,0)

        def COMMA(self):
            return self.getToken(jsgParser.COMMA, 0)

        def getRuleIndex(self):
            return jsgParser.RULE_member

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitMember" ):
                return visitor.visitMember(self)
            else:
                return visitor.visitChildren(self)

    def member(self):
        """Parse: pairDef followed by an optional COMMA."""
        localctx = jsgParser.MemberContext(self, self._ctx, self.state)
        self.enterRule(localctx, 18, self.RULE_member)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 160
            self.pairDef()
            self.state = 162
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==jsgParser.COMMA:
                self.state = 161
                self.match(jsgParser.COMMA)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class LastCommaContext(ParserRuleContext):
        """Parse-tree node for the `lastComma` rule (trailing comma marker)."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def COMMA(self):
            return self.getToken(jsgParser.COMMA, 0)

        def getRuleIndex(self):
            return jsgParser.RULE_lastComma

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitLastComma" ):
                return visitor.visitLastComma(self)
            else:
                return visitor.visitChildren(self)

    def lastComma(self):
        """Parse: a single COMMA token."""
        localctx = jsgParser.LastCommaContext(self, self._ctx, self.state)
        self.enterRule(localctx, 20, self.RULE_lastComma)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 164
            self.match(jsgParser.COMMA)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class PairDefContext(ParserRuleContext):
        """Parse-tree node for the `pairDef` rule (a name/value pair)."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def name(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(jsgParser.NameContext)
            else:
                return self.getTypedRuleContext(jsgParser.NameContext,i)

        def COLON(self):
            return self.getToken(jsgParser.COLON, 0)

        def valueType(self):
            return self.getTypedRuleContext(jsgParser.ValueTypeContext,0)

        def ebnfSuffix(self):
            return self.getTypedRuleContext(jsgParser.EbnfSuffixContext,0)

        def idref(self):
            return self.getTypedRuleContext(jsgParser.IdrefContext,0)

        def OPREN(self):
            return self.getToken(jsgParser.OPREN, 0)

        def CPREN(self):
            return self.getToken(jsgParser.CPREN, 0)

        def getRuleIndex(self):
            return jsgParser.RULE_pairDef

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitPairDef" ):
                return visitor.visitPairDef(self)
            else:
                return visitor.visitChildren(self)

    def pairDef(self):
        """Parse one of three alternatives (adaptive prediction, decision 22):

        1. name COLON valueType ebnfSuffix?
        2. idref ebnfSuffix?
        3. OPREN name+ CPREN COLON valueType ebnfSuffix?
        """
        localctx = jsgParser.PairDefContext(self, self._ctx, self.state)
        self.enterRule(localctx, 22, self.RULE_pairDef)
        self._la = 0 # Token type
        try:
            self.state = 188
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,22,self._ctx)
            if la_ == 1:
                self.enterOuterAlt(localctx, 1)
                self.state = 166
                self.name()
                self.state = 167
                self.match(jsgParser.COLON)
                self.state = 168
                self.valueType()
                # Optional cardinality suffix.
                self.state = 170
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.OBRACE) | (1 << jsgParser.STAR) | (1 << jsgParser.QMARK) | (1 << jsgParser.PLUS))) != 0):
                    self.state = 169
                    self.ebnfSuffix()

                pass

            elif la_ == 2:
                self.enterOuterAlt(localctx, 2)
                self.state = 172
                self.idref()
                self.state = 174
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.OBRACE) | (1 << jsgParser.STAR) | (1 << jsgParser.QMARK) | (1 << jsgParser.PLUS))) != 0):
                    self.state = 173
                    self.ebnfSuffix()

                pass

            elif la_ == 3:
                self.enterOuterAlt(localctx, 3)
                self.state = 176
                self.match(jsgParser.OPREN)
                # One or more names inside the parentheses.
                self.state = 178
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                while True:
                    self.state = 177
                    self.name()
                    self.state = 180
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                    if not (_la==jsgParser.ID or _la==jsgParser.STRING):
                        break

                self.state = 182
                self.match(jsgParser.CPREN)
                self.state = 183
                self.match(jsgParser.COLON)
                self.state = 184
                self.valueType()
                self.state = 186
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.OBRACE) | (1 << jsgParser.STAR) | (1 << jsgParser.QMARK) | (1 << jsgParser.PLUS))) != 0):
                    self.state = 185
                    self.ebnfSuffix()

                pass

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class NameContext(ParserRuleContext):
        """Parse-tree node for the `name` rule (ID or STRING)."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def ID(self):
            return self.getToken(jsgParser.ID, 0)

        def STRING(self):
            return self.getToken(jsgParser.STRING, 0)

        def getRuleIndex(self):
            return jsgParser.RULE_name

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitName" ):
                return visitor.visitName(self)
            else:
                return visitor.visitChildren(self)

    def name(self):
        """Parse: a single ID or STRING token (set match with inline recovery)."""
        localctx = jsgParser.NameContext(self, self._ctx, self.state)
        self.enterRule(localctx, 24, self.RULE_name)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 190
            _la = self._input.LA(1)
            if not(_la==jsgParser.ID or _la==jsgParser.STRING):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class ArrayDefContext(ParserRuleContext):
        """Parse-tree node for the `arrayDef` rule."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def ID(self):
            return self.getToken(jsgParser.ID, 0)

        def arrayExpr(self):
            return self.getTypedRuleContext(jsgParser.ArrayExprContext,0)

        def getRuleIndex(self):
            return jsgParser.RULE_arrayDef

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitArrayDef" ):
                return visitor.visitArrayDef(self)
            else:
                return visitor.visitChildren(self)

    def arrayDef(self):
        """Parse: ID followed by an arrayExpr."""
        localctx = jsgParser.ArrayDefContext(self, self._ctx, self.state)
        self.enterRule(localctx, 26, self.RULE_arrayDef)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 192
            self.match(jsgParser.ID)
            self.state = 193
            self.arrayExpr()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class ArrayExprContext(ParserRuleContext):
        """Parse-tree node for the `arrayExpr` rule (bracketed type list)."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def OBRACKET(self):
            return self.getToken(jsgParser.OBRACKET, 0)

        def valueType(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(jsgParser.ValueTypeContext)
            else:
                return self.getTypedRuleContext(jsgParser.ValueTypeContext,i)

        def CBRACKET(self):
            return self.getToken(jsgParser.CBRACKET, 0)

        def BAR(self, i:int=None):
            if i is None:
                return self.getTokens(jsgParser.BAR)
            else:
                return self.getToken(jsgParser.BAR, i)

        def ebnfSuffix(self):
            return self.getTypedRuleContext(jsgParser.EbnfSuffixContext,0)

        def getRuleIndex(self):
            return jsgParser.RULE_arrayExpr

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitArrayExpr" ):
                return visitor.visitArrayExpr(self)
            else:
                return visitor.visitChildren(self)

    def arrayExpr(self):
        """Parse: OBRACKET valueType (BAR valueType)* ebnfSuffix? CBRACKET."""
        localctx = jsgParser.ArrayExprContext(self, self._ctx, self.state)
        self.enterRule(localctx, 28, self.RULE_arrayExpr)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 195
            self.match(jsgParser.OBRACKET)
            self.state = 196
            self.valueType()
            # Additional alternatives separated by BAR.
            self.state = 201
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==jsgParser.BAR:
                self.state = 197
                self.match(jsgParser.BAR)
                self.state = 198
                self.valueType()
                self.state = 203
                self._errHandler.sync(self)
                _la = self._input.LA(1)

            # Optional cardinality suffix before the closing bracket.
            self.state = 205
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.OBRACE) | (1 << jsgParser.STAR) | (1 << jsgParser.QMARK) | (1 << jsgParser.PLUS))) != 0):
                self.state = 204
                self.ebnfSuffix()

            self.state = 207
            self.match(jsgParser.CBRACKET)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class ObjectMacroContext(ParserRuleContext):
        """Parse-tree node for the `objectMacro` rule."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def ID(self):
            return self.getToken(jsgParser.ID, 0)

        def EQUALS(self):
            return self.getToken(jsgParser.EQUALS, 0)

        def membersDef(self):
            return self.getTypedRuleContext(jsgParser.MembersDefContext,0)

        def SEMI(self):
            return self.getToken(jsgParser.SEMI, 0)

        def getRuleIndex(self):
            return jsgParser.RULE_objectMacro

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitObjectMacro" ):
                return visitor.visitObjectMacro(self)
            else:
                return visitor.visitChildren(self)

    def objectMacro(self):
        """Parse: ID EQUALS membersDef SEMI."""
        localctx = jsgParser.ObjectMacroContext(self, self._ctx, self.state)
        self.enterRule(localctx, 30, self.RULE_objectMacro)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 209
            self.match(jsgParser.ID)
            self.state = 210
            self.match(jsgParser.EQUALS)
            self.state = 211
            self.membersDef()
            self.state = 212
            self.match(jsgParser.SEMI)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class ValueTypeMacroContext(ParserRuleContext):
        """Parse-tree node for the `valueTypeMacro` rule."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def ID(self):
            return self.getToken(jsgParser.ID, 0)

        def EQUALS(self):
            return self.getToken(jsgParser.EQUALS, 0)

        def nonRefValueType(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(jsgParser.NonRefValueTypeContext)
            else:
                return self.getTypedRuleContext(jsgParser.NonRefValueTypeContext,i)

        def SEMI(self):
            return self.getToken(jsgParser.SEMI, 0)

        def BAR(self, i:int=None):
            if i is None:
                return self.getTokens(jsgParser.BAR)
            else:
                return self.getToken(jsgParser.BAR, i)

        def getRuleIndex(self):
            return jsgParser.RULE_valueTypeMacro

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitValueTypeMacro" ):
                return visitor.visitValueTypeMacro(self)
            else:
                return visitor.visitChildren(self)

    def valueTypeMacro(self):
        """Parse: ID EQUALS nonRefValueType (BAR nonRefValueType)* SEMI."""
        localctx = jsgParser.ValueTypeMacroContext(self, self._ctx, self.state)
        self.enterRule(localctx, 32, self.RULE_valueTypeMacro)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 214
            self.match(jsgParser.ID)
            self.state = 215
            self.match(jsgParser.EQUALS)
            self.state = 216
            self.nonRefValueType()
            # Additional alternatives separated by BAR.
            self.state = 221
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==jsgParser.BAR:
                self.state = 217
                self.match(jsgParser.BAR)
                self.state = 218
                self.nonRefValueType()
                self.state = 223
                self._errHandler.sync(self)
                _la = self._input.LA(1)

            self.state = 224
            self.match(jsgParser.SEMI)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class BuiltinValueTypeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def JSON_STRING(self):
return self.getToken(jsgParser.JSON_STRING, 0)
def JSON_NUMBER(self):
return self.getToken(jsgParser.JSON_NUMBER, 0)
def JSON_INT(self):
return self.getToken(jsgParser.JSON_INT, 0)
def JSON_BOOL(self):
return self.getToken(jsgParser.JSON_BOOL, 0)
def JSON_NULL(self):
return self.getToken(jsgParser.JSON_NULL, 0)
def JSON_ARRAY(self):
return self.getToken(jsgParser.JSON_ARRAY, 0)
def JSON_OBJECT(self):
return self.getToken(jsgParser.JSON_OBJECT, 0)
def ANY(self):
return self.getToken(jsgParser.ANY, 0)
def getRuleIndex(self):
return jsgParser.RULE_builtinValueType
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBuiltinValueType" ):
return visitor.visitBuiltinValueType(self)
else:
return visitor.visitChildren(self)
def builtinValueType(self):
localctx = jsgParser.BuiltinValueTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 34, self.RULE_builtinValueType)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 226
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.ANY) | (1 << jsgParser.JSON_STRING) | (1 << jsgParser.JSON_NUMBER) | (1 << jsgParser.JSON_INT) | (1 << jsgParser.JSON_BOOL) | (1 << jsgParser.JSON_NULL) | (1 << jsgParser.JSON_ARRAY) | (1 << jsgParser.JSON_OBJECT))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ValueTypeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def idref(self):
return self.getTypedRuleContext(jsgParser.IdrefContext,0)
def nonRefValueType(self):
return self.getTypedRuleContext(jsgParser.NonRefValueTypeContext,0)
def getRuleIndex(self):
return jsgParser.RULE_valueType
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitValueType" ):
return visitor.visitValueType(self)
else:
return visitor.visitChildren(self)
def valueType(self):
localctx = jsgParser.ValueTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 36, self.RULE_valueType)
try:
self.state = 230
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [jsgParser.ID]:
self.enterOuterAlt(localctx, 1)
self.state = 228
self.idref()
pass
elif token in [jsgParser.LEXER_ID_REF, jsgParser.STRING, jsgParser.ANY, jsgParser.JSON_STRING, jsgParser.JSON_NUMBER, jsgParser.JSON_INT, jsgParser.JSON_BOOL, jsgParser.JSON_NULL, jsgParser.JSON_ARRAY, jsgParser.JSON_OBJECT, jsgParser.OBRACKET, jsgParser.OBRACE, jsgParser.OPREN]:
self.enterOuterAlt(localctx, 2)
self.state = 229
self.nonRefValueType()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class NonRefValueTypeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LEXER_ID_REF(self):
return self.getToken(jsgParser.LEXER_ID_REF, 0)
def STRING(self):
return self.getToken(jsgParser.STRING, 0)
def builtinValueType(self):
return self.getTypedRuleContext(jsgParser.BuiltinValueTypeContext,0)
def objectExpr(self):
return self.getTypedRuleContext(jsgParser.ObjectExprContext,0)
def arrayExpr(self):
return self.getTypedRuleContext(jsgParser.ArrayExprContext,0)
def OPREN(self):
return self.getToken(jsgParser.OPREN, 0)
def typeAlternatives(self):
return self.getTypedRuleContext(jsgParser.TypeAlternativesContext,0)
def CPREN(self):
return self.getToken(jsgParser.CPREN, 0)
def getRuleIndex(self):
return jsgParser.RULE_nonRefValueType
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitNonRefValueType" ):
return visitor.visitNonRefValueType(self)
else:
return visitor.visitChildren(self)
def nonRefValueType(self):
localctx = jsgParser.NonRefValueTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 38, self.RULE_nonRefValueType)
try:
self.state = 241
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [jsgParser.LEXER_ID_REF]:
self.enterOuterAlt(localctx, 1)
self.state = 232
self.match(jsgParser.LEXER_ID_REF)
pass
elif token in [jsgParser.STRING]:
self.enterOuterAlt(localctx, 2)
self.state = 233
self.match(jsgParser.STRING)
pass
elif token in [jsgParser.ANY, jsgParser.JSON_STRING, jsgParser.JSON_NUMBER, jsgParser.JSON_INT, jsgParser.JSON_BOOL, jsgParser.JSON_NULL, jsgParser.JSON_ARRAY, jsgParser.JSON_OBJECT]:
self.enterOuterAlt(localctx, 3)
self.state = 234
self.builtinValueType()
pass
elif token in [jsgParser.OBRACE]:
self.enterOuterAlt(localctx, 4)
self.state = 235
self.objectExpr()
pass
elif token in [jsgParser.OBRACKET]:
self.enterOuterAlt(localctx, 5)
self.state = 236
self.arrayExpr()
pass
elif token in [jsgParser.OPREN]:
self.enterOuterAlt(localctx, 6)
self.state = 237
self.match(jsgParser.OPREN)
self.state = 238
self.typeAlternatives()
self.state = 239
self.match(jsgParser.CPREN)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TypeAlternativesContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def valueType(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.ValueTypeContext)
else:
return self.getTypedRuleContext(jsgParser.ValueTypeContext,i)
def BAR(self, i:int=None):
if i is None:
return self.getTokens(jsgParser.BAR)
else:
return self.getToken(jsgParser.BAR, i)
def getRuleIndex(self):
return jsgParser.RULE_typeAlternatives
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTypeAlternatives" ):
return visitor.visitTypeAlternatives(self)
else:
return visitor.visitChildren(self)
def typeAlternatives(self):
localctx = jsgParser.TypeAlternativesContext(self, self._ctx, self.state)
self.enterRule(localctx, 40, self.RULE_typeAlternatives)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 243
self.valueType()
self.state = 246
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 244
self.match(jsgParser.BAR)
self.state = 245
self.valueType()
self.state = 248
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==jsgParser.BAR):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IdrefContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(jsgParser.ID, 0)
def getRuleIndex(self):
return jsgParser.RULE_idref
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIdref" ):
return visitor.visitIdref(self)
else:
return visitor.visitChildren(self)
def idref(self):
localctx = jsgParser.IdrefContext(self, self._ctx, self.state)
self.enterRule(localctx, 42, self.RULE_idref)
try:
self.enterOuterAlt(localctx, 1)
self.state = 250
self.match(jsgParser.ID)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class EbnfSuffixContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def QMARK(self):
return self.getToken(jsgParser.QMARK, 0)
def STAR(self):
return self.getToken(jsgParser.STAR, 0)
def PLUS(self):
return self.getToken(jsgParser.PLUS, 0)
def OBRACE(self):
return self.getToken(jsgParser.OBRACE, 0)
def INT(self, i:int=None):
if i is None:
return self.getTokens(jsgParser.INT)
else:
return self.getToken(jsgParser.INT, i)
def CBRACE(self):
return self.getToken(jsgParser.CBRACE, 0)
def COMMA(self):
return self.getToken(jsgParser.COMMA, 0)
def getRuleIndex(self):
return jsgParser.RULE_ebnfSuffix
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitEbnfSuffix" ):
return visitor.visitEbnfSuffix(self)
else:
return visitor.visitChildren(self)
def ebnfSuffix(self):
localctx = jsgParser.EbnfSuffixContext(self, self._ctx, self.state)
self.enterRule(localctx, 44, self.RULE_ebnfSuffix)
self._la = 0 # Token type
try:
self.state = 264
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [jsgParser.QMARK]:
self.enterOuterAlt(localctx, 1)
self.state = 252
self.match(jsgParser.QMARK)
pass
elif token in [jsgParser.STAR]:
self.enterOuterAlt(localctx, 2)
self.state = 253
self.match(jsgParser.STAR)
pass
elif token in [jsgParser.PLUS]:
self.enterOuterAlt(localctx, 3)
self.state = 254
self.match(jsgParser.PLUS)
pass
elif token in [jsgParser.OBRACE]:
self.enterOuterAlt(localctx, 4)
self.state = 255
self.match(jsgParser.OBRACE)
self.state = 256
self.match(jsgParser.INT)
self.state = 261
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==jsgParser.COMMA:
self.state = 257
self.match(jsgParser.COMMA)
self.state = 259
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==jsgParser.INT or _la==jsgParser.STAR:
self.state = 258
_la = self._input.LA(1)
if not(_la==jsgParser.INT or _la==jsgParser.STAR):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 263
self.match(jsgParser.CBRACE)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerRulesContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def TERMINALS(self):
return self.getToken(jsgParser.TERMINALS, 0)
def lexerRuleSpec(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.LexerRuleSpecContext)
else:
return self.getTypedRuleContext(jsgParser.LexerRuleSpecContext,i)
def getRuleIndex(self):
return jsgParser.RULE_lexerRules
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerRules" ):
return visitor.visitLexerRules(self)
else:
return visitor.visitChildren(self)
def lexerRules(self):
localctx = jsgParser.LexerRulesContext(self, self._ctx, self.state)
self.enterRule(localctx, 46, self.RULE_lexerRules)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 266
self.match(jsgParser.TERMINALS)
self.state = 270
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==jsgParser.LEXER_ID:
self.state = 267
self.lexerRuleSpec()
self.state = 272
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerRuleSpecContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LEXER_ID(self):
return self.getToken(jsgParser.LEXER_ID, 0)
def COLON(self):
return self.getToken(jsgParser.COLON, 0)
def lexerRuleBlock(self):
return self.getTypedRuleContext(jsgParser.LexerRuleBlockContext,0)
def SEMI(self):
return self.getToken(jsgParser.SEMI, 0)
def getRuleIndex(self):
return jsgParser.RULE_lexerRuleSpec
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerRuleSpec" ):
return visitor.visitLexerRuleSpec(self)
else:
return visitor.visitChildren(self)
def lexerRuleSpec(self):
localctx = jsgParser.LexerRuleSpecContext(self, self._ctx, self.state)
self.enterRule(localctx, 48, self.RULE_lexerRuleSpec)
try:
self.enterOuterAlt(localctx, 1)
self.state = 273
self.match(jsgParser.LEXER_ID)
self.state = 274
self.match(jsgParser.COLON)
self.state = 275
self.lexerRuleBlock()
self.state = 276
self.match(jsgParser.SEMI)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerRuleBlockContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def lexerAltList(self):
return self.getTypedRuleContext(jsgParser.LexerAltListContext,0)
def builtinValueType(self):
return self.getTypedRuleContext(jsgParser.BuiltinValueTypeContext,0)
def getRuleIndex(self):
return jsgParser.RULE_lexerRuleBlock
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerRuleBlock" ):
return visitor.visitLexerRuleBlock(self)
else:
return visitor.visitChildren(self)
def lexerRuleBlock(self):
localctx = jsgParser.LexerRuleBlockContext(self, self._ctx, self.state)
self.enterRule(localctx, 50, self.RULE_lexerRuleBlock)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 278
self.lexerAltList()
self.state = 280
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.ANY) | (1 << jsgParser.JSON_STRING) | (1 << jsgParser.JSON_NUMBER) | (1 << jsgParser.JSON_INT) | (1 << jsgParser.JSON_BOOL) | (1 << jsgParser.JSON_NULL) | (1 << jsgParser.JSON_ARRAY) | (1 << jsgParser.JSON_OBJECT))) != 0):
self.state = 279
self.builtinValueType()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerAltListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def lexerAlt(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.LexerAltContext)
else:
return self.getTypedRuleContext(jsgParser.LexerAltContext,i)
def BAR(self, i:int=None):
if i is None:
return self.getTokens(jsgParser.BAR)
else:
return self.getToken(jsgParser.BAR, i)
def getRuleIndex(self):
return jsgParser.RULE_lexerAltList
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAltList" ):
return visitor.visitLexerAltList(self)
else:
return visitor.visitChildren(self)
def lexerAltList(self):
localctx = jsgParser.LexerAltListContext(self, self._ctx, self.state)
self.enterRule(localctx, 52, self.RULE_lexerAltList)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 282
self.lexerAlt()
self.state = 287
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==jsgParser.BAR:
self.state = 283
self.match(jsgParser.BAR)
self.state = 284
self.lexerAlt()
self.state = 289
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerAltContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def lexerElements(self):
return self.getTypedRuleContext(jsgParser.LexerElementsContext,0)
def getRuleIndex(self):
return jsgParser.RULE_lexerAlt
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAlt" ):
return visitor.visitLexerAlt(self)
else:
return visitor.visitChildren(self)
def lexerAlt(self):
localctx = jsgParser.LexerAltContext(self, self._ctx, self.state)
self.enterRule(localctx, 54, self.RULE_lexerAlt)
try:
self.state = 292
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,35,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 290
self.lexerElements()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerElementsContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def lexerElement(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.LexerElementContext)
else:
return self.getTypedRuleContext(jsgParser.LexerElementContext,i)
def getRuleIndex(self):
return jsgParser.RULE_lexerElements
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerElements" ):
return visitor.visitLexerElements(self)
else:
return visitor.visitChildren(self)
def lexerElements(self):
localctx = jsgParser.LexerElementsContext(self, self._ctx, self.state)
self.enterRule(localctx, 56, self.RULE_lexerElements)
try:
self.enterOuterAlt(localctx, 1)
self.state = 295
self._errHandler.sync(self)
_alt = 1
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 294
self.lexerElement()
else:
raise NoViableAltException(self)
self.state = 297
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,36,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerElementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def lexerAtom(self):
return self.getTypedRuleContext(jsgParser.LexerAtomContext,0)
def ebnfSuffix(self):
return self.getTypedRuleContext(jsgParser.EbnfSuffixContext,0)
def lexerBlock(self):
return self.getTypedRuleContext(jsgParser.LexerBlockContext,0)
def getRuleIndex(self):
return jsgParser.RULE_lexerElement
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerElement" ):
return visitor.visitLexerElement(self)
else:
return visitor.visitChildren(self)
def lexerElement(self):
localctx = jsgParser.LexerElementContext(self, self._ctx, self.state)
self.enterRule(localctx, 58, self.RULE_lexerElement)
self._la = 0 # Token type
try:
self.state = 307
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [jsgParser.STRING, jsgParser.ANY, jsgParser.LEXER_ID, jsgParser.LEXER_CHAR_SET]:
self.enterOuterAlt(localctx, 1)
self.state = 299
self.lexerAtom()
self.state = 301
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.OBRACE) | (1 << jsgParser.STAR) | (1 << jsgParser.QMARK) | (1 << jsgParser.PLUS))) != 0):
self.state = 300
self.ebnfSuffix()
pass
elif token in [jsgParser.OPREN]:
self.enterOuterAlt(localctx, 2)
self.state = 303
self.lexerBlock()
self.state = 305
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.OBRACE) | (1 << jsgParser.STAR) | (1 << jsgParser.QMARK) | (1 << jsgParser.PLUS))) != 0):
self.state = 304
self.ebnfSuffix()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerBlockContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def OPREN(self):
return self.getToken(jsgParser.OPREN, 0)
def lexerAltList(self):
return self.getTypedRuleContext(jsgParser.LexerAltListContext,0)
def CPREN(self):
return self.getToken(jsgParser.CPREN, 0)
def getRuleIndex(self):
return jsgParser.RULE_lexerBlock
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerBlock" ):
return visitor.visitLexerBlock(self)
else:
return visitor.visitChildren(self)
def lexerBlock(self):
localctx = jsgParser.LexerBlockContext(self, self._ctx, self.state)
self.enterRule(localctx, 60, self.RULE_lexerBlock)
try:
self.enterOuterAlt(localctx, 1)
self.state = 309
self.match(jsgParser.OPREN)
self.state = 310
self.lexerAltList()
self.state = 311
self.match(jsgParser.CPREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerAtomContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def lexerTerminal(self):
return self.getTypedRuleContext(jsgParser.LexerTerminalContext,0)
def LEXER_CHAR_SET(self):
return self.getToken(jsgParser.LEXER_CHAR_SET, 0)
def ANY(self):
return self.getToken(jsgParser.ANY, 0)
def getRuleIndex(self):
return jsgParser.RULE_lexerAtom
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAtom" ):
return visitor.visitLexerAtom(self)
else:
return visitor.visitChildren(self)
def lexerAtom(self):
localctx = jsgParser.LexerAtomContext(self, self._ctx, self.state)
self.enterRule(localctx, 62, self.RULE_lexerAtom)
try:
self.state = 316
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [jsgParser.STRING, jsgParser.LEXER_ID]:
self.enterOuterAlt(localctx, 1)
self.state = 313
self.lexerTerminal()
pass
elif token in [jsgParser.LEXER_CHAR_SET]:
self.enterOuterAlt(localctx, 2)
self.state = 314
self.match(jsgParser.LEXER_CHAR_SET)
pass
elif token in [jsgParser.ANY]:
self.enterOuterAlt(localctx, 3)
self.state = 315
self.match(jsgParser.ANY)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerTerminalContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LEXER_ID(self):
return self.getToken(jsgParser.LEXER_ID, 0)
def STRING(self):
return self.getToken(jsgParser.STRING, 0)
def getRuleIndex(self):
return jsgParser.RULE_lexerTerminal
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerTerminal" ):
return visitor.visitLexerTerminal(self)
else:
return visitor.visitChildren(self)
def lexerTerminal(self):
localctx = jsgParser.LexerTerminalContext(self, self._ctx, self.state)
self.enterRule(localctx, 64, self.RULE_lexerTerminal)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 318
_la = self._input.LA(1)
if not(_la==jsgParser.STRING or _la==jsgParser.LEXER_ID):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
| 34.429034 | 299 | 0.566318 | 73,649 | 0.865004 | 0 | 0 | 0 | 0 | 0 | 0 | 10,239 | 0.120257 |
ad2a6e5fc9f7663e6a63e74433c09fbbe508fe41 | 4,568 | py | Python | Nokore/scripts/main-simon-transfer.py | algorine/nokware | 709c7d7061082f73fc2b3d4e0897d451a5fbbbd8 | [
"MIT"
] | null | null | null | Nokore/scripts/main-simon-transfer.py | algorine/nokware | 709c7d7061082f73fc2b3d4e0897d451a5fbbbd8 | [
"MIT"
] | null | null | null | Nokore/scripts/main-simon-transfer.py | algorine/nokware | 709c7d7061082f73fc2b3d4e0897d451a5fbbbd8 | [
"MIT"
] | null | null | null | import time
import numpy as np
import pandas as pd
import random
from Simon import Simon
from Simon.Encoder import Encoder
from Simon.LengthStandardizer import DataLengthStandardizerRaw
start_time = time.time()
### Read-in the emails and print some basic statistics
# Enron
EnronEmails = pd.read_csv('data/enron_emails_body.csv',dtype='str', header=None)
print("The size of the Enron emails dataframe is:")
print(EnronEmails.shape)
print("Ten Enron emails are:")
print(EnronEmails.loc[:10])
# Spam
SpamEmails = pd.read_csv('data/fraudulent_emails_body.csv',encoding="ISO-8859-1",dtype='str', header=None)
print("The size of the Spam emails dataframe is:")
print(SpamEmails.shape)
print("Ten Spam emails are:")
print(SpamEmails.loc[:10])
# Some hyper-parameters for the CNN we will use
maxlen = 20 # max length of each tabular cell <==> max number of characters in a line
max_cells = 500 # max number of cells in a column <==> max number of email lines
p_threshold = 0.5 # prediction threshold probability
Nsamp = 1000
nb_epoch = 20
batch_size = 8
checkpoint_dir = "pretrained_models/"
execution_config = 'Base.pkl'
DEBUG = True # boolean to specify whether or not print DEBUG information
# Convert everything to lower-case, put one sentence per column in a tabular
# structure
ProcessedEnronEmails=[row.lower().split('\n') for row in EnronEmails.iloc[:,1]]
#print("3 Enron emails after Processing (in list form) are:")
#print((ProcessedEnronEmails[:3]))
EnronEmails = pd.DataFrame(random.sample(ProcessedEnronEmails,Nsamp)).transpose()
EnronEmails = DataLengthStandardizerRaw(EnronEmails,max_cells)
#print("Ten Enron emails after Processing (in DataFrame form) are:")
#print((EnronEmails[:10]))
print("Enron email dataframe after Processing shape:")
print(EnronEmails.shape)
ProcessedSpamEmails=[row.lower().split('/n') for row in SpamEmails.iloc[:,1]]
#print("3 Spam emails after Processing (in list form) are:")
#print((ProcessedSpamEmails[:3]))
SpamEmails = pd.DataFrame(random.sample(ProcessedSpamEmails,Nsamp)).transpose()
SpamEmails = DataLengthStandardizerRaw(SpamEmails,max_cells)
#print("Ten Spam emails after Processing (in DataFrame form) are:")
#print((SpamEmails[:10]))
print("Spam email dataframe after Processing shape:")
print(SpamEmails.shape)
# orient the user a bit
with open('pretrained_models/Categories.txt','r') as f:
Categories = f.read().splitlines()
print("former categories are: ")
Categories = sorted(Categories)
print(Categories)
category_count_prior = len(Categories)
# Load pretrained model via specified execution configuration
Classifier = Simon(encoder={}) # dummy text classifier
config = Classifier.load_config(execution_config, checkpoint_dir)
encoder = config['encoder']
checkpoint = config['checkpoint']
# Encode labels and data
Categories = ['spam','notspam']
category_count = len(Categories)
encoder.categories=Categories
header = ([['spam',]]*Nsamp)
header.extend(([['notspam',]]*Nsamp))
#print(header)
raw_data = np.column_stack((SpamEmails,EnronEmails)).T
print("DEBUG::raw_data:")
print(raw_data)
encoder.process(raw_data, max_cells)
X, y = encoder.encode_data(raw_data, header, maxlen)
# build classifier model
model = Classifier.generate_transfer_model(maxlen, max_cells, category_count_prior,category_count, checkpoint, checkpoint_dir,activation='sigmoid')
#Classifier.load_weights(checkpoint, None, model, checkpoint_dir)
model_compile = lambda m: m.compile(loss='binary_crossentropy', optimizer='adam', metrics=['binary_accuracy'])
model_compile(model)
#y = model.predict(X)
# discard empty column edge case
# y[np.all(frame.isnull(),axis=0)]=0
#result = encoder.reverse_label_encode(y,p_threshold)
### FINISHED LABELING COMBINED DATA AS CATEGORICAL/ORDINAL
#print("The predicted classes and probabilities are respectively:")
#print(result)
data = Classifier.setup_test_sets(X, y)
start = time.time()
history = Classifier.train_model(batch_size, checkpoint_dir, model, nb_epoch, data)
end = time.time()
print("Time for training is %f sec"%(end-start))
config = { 'encoder' : encoder,
'checkpoint' : Classifier.get_best_checkpoint(checkpoint_dir) }
Classifier.save_config(config, checkpoint_dir)
Classifier.plot_loss(history) #comment out on docker images...
pred_headers = Classifier.evaluate_model(max_cells, model, data, encoder, p_threshold)
#print("DEBUG::The predicted headers are:")
#print(pred_headers)
#print("DEBUG::The actual headers are:")
#print(header)
elapsed_time = time.time()-start_time
print("Total script execution time is : %.2f sec" % elapsed_time) | 35.968504 | 147 | 0.768827 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,092 | 0.457968 |
ad2d2edbe5cd065fd478d317524918fed089d282 | 1,111 | py | Python | example/familytree.py | realistschuckle/pyvisitor | f08bd50f5ca5ff4288f00d9045ca406e278ed306 | [
"MIT"
] | 15 | 2015-01-30T21:08:28.000Z | 2022-02-03T18:00:56.000Z | example/familytree.py | realistschuckle/pyvisitor | f08bd50f5ca5ff4288f00d9045ca406e278ed306 | [
"MIT"
] | 2 | 2016-10-03T21:33:29.000Z | 2019-02-05T13:06:05.000Z | example/familytree.py | realistschuckle/pyvisitor | f08bd50f5ca5ff4288f00d9045ca406e278ed306 | [
"MIT"
] | 7 | 2016-09-16T07:34:50.000Z | 2022-02-03T18:03:01.000Z | from __future__ import print_function
import sys
import os
# Put the path to the visitor module on the search path
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'src'))
if not path in sys.path:
sys.path.insert(1, path)
import visitor
class Person(object):
def __init__(self, name):
self.name = name
self.deps = []
def add_dependent(self, dep):
self.deps.append(dep);
def accept(self, visitor):
visitor.visit(self)
class Pet(object):
def __init__(self, name, breed):
self.name = name
self.breed = breed
def accept(self, visitor):
visitor.visit(self)
class DescendantsVisitor(object):
def __init__(self):
self.level = 0
@visitor.on('member')
def visit(self, member):
pass
@visitor.when(Person)
def visit(self, member):
self.write_padding()
print('-', member.name)
self.level += 1
for dep in member.deps:
dep.accept(self)
self.level -= 1
@visitor.when(Pet)
def visit(self, member):
self.write_padding()
print('-', member.name, 'a', member.breed)
def write_padding(self):
for i in range(self.level):
sys.stdout.write(' ') | 19.491228 | 76 | 0.688569 | 843 | 0.758776 | 0 | 0 | 344 | 0.309631 | 0 | 0 | 85 | 0.076508 |
ad2d3fa37ba1868e4e2a135fb9535b7aef051f1f | 234 | py | Python | bgui/server/server/config.py | monash-emu/Legacy-AuTuMN | 513bc14b4ea8c29c5983cc90fb94284e6a003515 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | bgui/server/server/config.py | monash-emu/Legacy-AuTuMN | 513bc14b4ea8c29c5983cc90fb94284e6a003515 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | bgui/server/server/config.py | monash-emu/Legacy-AuTuMN | 513bc14b4ea8c29c5983cc90fb94284e6a003515 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | SQLALCHEMY_DATABASE_URI = 'sqlite:///database.sqlite'
SECRET_KEY = 'F12Zr47j\3yX R~X@H!jmM]Lwf/,?KT'
SAVE_FOLDER = '../../../projects'
SQLALCHEMY_TRACK_MODIFICATIONS = 'False'
PORT = '3000'
STATIC_FOLDER = '../../client/dist/static'
| 29.25 | 53 | 0.709402 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.504274 |
ad2e32d215791b8f6d838656d93aa2028c4b0dfd | 416 | py | Python | dentexchange/apps/location/tests/test_zip_code.py | hellhound/dentexchange | 58ae303e842404fc9e1860f294ec8044a332bef3 | [
"BSD-3-Clause"
] | 1 | 2017-11-09T23:09:51.000Z | 2017-11-09T23:09:51.000Z | dentexchange/apps/location/tests/test_zip_code.py | hellhound/dentexchange | 58ae303e842404fc9e1860f294ec8044a332bef3 | [
"BSD-3-Clause"
] | null | null | null | dentexchange/apps/location/tests/test_zip_code.py | hellhound/dentexchange | 58ae303e842404fc9e1860f294ec8044a332bef3 | [
"BSD-3-Clause"
] | 3 | 2015-08-11T16:58:47.000Z | 2021-01-04T08:23:51.000Z | # -*- coding:utf-8 -*-
import unittest
import mock
import decimal
from ..models import ZipCode
class ZipCodeTestCase(unittest.TestCase):
def test_unicode_should_return_code(self):
# setup
model = ZipCode()
code = '1.0'
model.code = decimal.Decimal(code)
# action
returned_value = unicode(model)
# assert
self.assertEqual(code, returned_value)
| 19.809524 | 46 | 0.637019 | 317 | 0.762019 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.120192 |
ad3017e922c09970a0f8a72fe88d8aaa1a57078a | 224,543 | py | Python | src/Intel_Project (1).py | Lance-Dsilva/Intel-Image-Classification-Using-CNN-91-Accuracy- | e5b729dddaa3671337c6c5019b69fc6ca2c868c0 | [
"Unlicense"
] | null | null | null | src/Intel_Project (1).py | Lance-Dsilva/Intel-Image-Classification-Using-CNN-91-Accuracy- | e5b729dddaa3671337c6c5019b69fc6ca2c868c0 | [
"Unlicense"
] | null | null | null | src/Intel_Project (1).py | Lance-Dsilva/Intel-Image-Classification-Using-CNN-91-Accuracy- | e5b729dddaa3671337c6c5019b69fc6ca2c868c0 | [
"Unlicense"
] | null | null | null |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<link rel="dns-prefetch" href="https://github.githubassets.com">
<link rel="dns-prefetch" href="https://avatars0.githubusercontent.com">
<link rel="dns-prefetch" href="https://avatars1.githubusercontent.com">
<link rel="dns-prefetch" href="https://avatars2.githubusercontent.com">
<link rel="dns-prefetch" href="https://avatars3.githubusercontent.com">
<link rel="dns-prefetch" href="https://github-cloud.s3.amazonaws.com">
<link rel="dns-prefetch" href="https://user-images.githubusercontent.com/">
<link crossorigin="anonymous" media="all" integrity="sha512-aZYk5AYsRiFiFG04Si6FnQoHFwAugnodzKJXgafKqPWsrgrjoWRsapCn//vFuWqjSzr72ucZfPq8/ZbduuSeQg==" rel="stylesheet" href="https://github.githubassets.com/assets/frameworks-next-699624e4062c462162146d384a2e859d.css" />
<link crossorigin="anonymous" media="all" integrity="sha512-c8D4SY7t8jv4IDDWg1Nv+UWEL0qRjmWc3oneTTGvc0yTWolwMo26Spqm29MlqLWPVRkovXK+A08bN/tiPAPDiQ==" rel="stylesheet" href="https://github.githubassets.com/assets/github-next-73c0f8498eedf23bf82030d683536ff9.css" />
<meta name="viewport" content="width=device-width">
<title>Intel-Image-Classification-Using-CNN/Intel_Project (1).py at master · Novia-2018/Intel-Image-Classification-Using-CNN</title>
<meta name="description" content="🧠 An Awesome Deep Learning Model That Classify And Predicts Images According To Their Classes - Novia-2018/Intel-Image-Classification-Using-CNN">
<link rel="search" type="application/opensearchdescription+xml" href="/opensearch.xml" title="GitHub">
<link rel="fluid-icon" href="https://github.com/fluidicon.png" title="GitHub">
<meta property="fb:app_id" content="1401488693436528">
<meta name="apple-itunes-app" content="app-id=1477376905">
<meta name="twitter:image:src" content="https://avatars0.githubusercontent.com/u/56831817?s=400&v=4" /><meta name="twitter:site" content="@github" /><meta name="twitter:card" content="summary" /><meta name="twitter:title" content="Novia-2018/Intel-Image-Classification-Using-CNN" /><meta name="twitter:description" content="🧠 An Awesome Deep Learning Model That Classify And Predicts Images According To Their Classes - Novia-2018/Intel-Image-Classification-Using-CNN" />
<meta property="og:image" content="https://avatars0.githubusercontent.com/u/56831817?s=400&v=4" /><meta property="og:site_name" content="GitHub" /><meta property="og:type" content="object" /><meta property="og:title" content="Novia-2018/Intel-Image-Classification-Using-CNN" /><meta property="og:url" content="https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN" /><meta property="og:description" content="🧠 An Awesome Deep Learning Model That Classify And Predicts Images According To Their Classes - Novia-2018/Intel-Image-Classification-Using-CNN" />
<link rel="assets" href="https://github.githubassets.com/">
<link rel="shared-web-socket" href="wss://live.github.com/_luau/eyJ2IjoiVjMiLCJ1Ijo1NjgzMTgxNywicyI6NTUwOTMxNTE2LCJjIjoxMjU0ODg5Nzg2LCJ0IjoxNTkzMDk0ODQ3fQ==--2c919c035103de1a8219435bdefb3f5b2cf063b2c50fbd0f09ac9b5a025beb03" data-refresh-url="/_ws">
<link rel="sudo-modal" href="/sessions/sudo_modal">
<meta name="request-id" content="ED9C:2812:81E274:AE09B0:5EF4B2BB" data-pjax-transient="true" /><meta name="html-safe-nonce" content="cdf26e54f7effe8837c2e5229a6e51344c033a6f" data-pjax-transient="true" /><meta name="visitor-payload" content="eyJyZWZlcnJlciI6Imh0dHBzOi8vZ2l0aHViLmNvbS9Ob3ZpYS0yMDE4L0ludGVsLUltYWdlLUNsYXNzaWZpY2F0aW9uLVVzaW5nLUNOTi90cmVlL21hc3Rlci9zcmMiLCJyZXF1ZXN0X2lkIjoiRUQ5QzoyODEyOjgxRTI3NDpBRTA5QjA6NUVGNEIyQkIiLCJ2aXNpdG9yX2lkIjoiNzEzNTU1NzQyMTkwMzQyNjExMiIsInJlZ2lvbl9lZGdlIjoiYXAtc291dGgtMSIsInJlZ2lvbl9yZW5kZXIiOiJpYWQifQ==" data-pjax-transient="true" /><meta name="visitor-hmac" content="5468ad20fa2c29ebf034c6a7ada6fc49b16dec3f3d0b02385a3acb176c1fd43b" data-pjax-transient="true" />
<meta name="github-keyboard-shortcuts" content="repository,source-code" data-pjax-transient="true" />
<meta name="selected-link" value="repo_source" data-pjax-transient>
<meta name="google-site-verification" content="c1kuD-K2HIVF635lypcsWPoD4kilo5-jA_wBFyT4uMY">
<meta name="google-site-verification" content="KT5gs8h0wvaagLKAVWq8bbeNwnZZK1r1XQysX3xurLU">
<meta name="google-site-verification" content="ZzhVyEFwb7w3e0-uOTltm8Jsck2F5StVihD0exw2fsA">
<meta name="google-site-verification" content="GXs5KoUUkNCoaAZn7wPN-t01Pywp9M3sEjnt_3_ZWPc">
<meta name="octolytics-host" content="collector.githubapp.com" /><meta name="octolytics-app-id" content="github" /><meta name="octolytics-event-url" content="https://collector.githubapp.com/github-external/browser_event" /><meta name="octolytics-dimension-ga_id" content="" class="js-octo-ga-id" /><meta name="octolytics-actor-id" content="56831817" /><meta name="octolytics-actor-login" content="Novia-2018" /><meta name="octolytics-actor-hash" content="1eeaa63b1ed5dba591e37ae3fdf6663f0517605c7afdbf35ed165dc8f040703f" />
<meta name="analytics-location" content="/<user-name>/<repo-name>/blob/show" data-pjax-transient="true" />
<meta name="optimizely-datafile" content="{}" />
<meta name="google-analytics" content="UA-3769691-2">
<meta class="js-ga-set" name="userId" content="ed9bd0d9c4d76560cec60b2aa2d2fb21">
<meta class="js-ga-set" name="dimension10" content="Responsive" data-pjax-transient>
<meta class="js-ga-set" name="dimension1" content="Logged In">
<meta name="hostname" content="github.com">
<meta name="user-login" content="Novia-2018">
<meta name="expected-hostname" content="github.com">
<meta name="js-proxy-site-detection-payload" content="MmEwMzczMTU1MTI5NmRjMjQ1ZjE1MjE0ZDY0NGY5Y2I4NzNlZDFjNGY3ZGY1NTE3OGIwMTljOGIxMGM3YjIyMnx7InJlbW90ZV9hZGRyZXNzIjoiMTE1LjY5LjI0Ni4yMzIiLCJyZXF1ZXN0X2lkIjoiRUQ5QzoyODEyOjgxRTI3NDpBRTA5QjA6NUVGNEIyQkIiLCJ0aW1lc3RhbXAiOjE1OTMwOTQ4NDcsImhvc3QiOiJnaXRodWIuY29tIn0=">
<meta name="enabled-features" content="MARKETPLACE_PENDING_INSTALLATIONS,PAGE_STALE_CHECK,PRIMER_NEXT">
<meta http-equiv="x-pjax-version" content="ad930a774189db897bc2f54da9e8f409">
<link href="https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/commits/master.atom" rel="alternate" title="Recent Commits to Intel-Image-Classification-Using-CNN:master" type="application/atom+xml">
<meta name="go-import" content="github.com/Novia-2018/Intel-Image-Classification-Using-CNN git https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN.git">
<meta name="octolytics-dimension-user_id" content="56831817" /><meta name="octolytics-dimension-user_login" content="Novia-2018" /><meta name="octolytics-dimension-repository_id" content="274914883" /><meta name="octolytics-dimension-repository_nwo" content="Novia-2018/Intel-Image-Classification-Using-CNN" /><meta name="octolytics-dimension-repository_public" content="true" /><meta name="octolytics-dimension-repository_is_fork" content="false" /><meta name="octolytics-dimension-repository_network_root_id" content="274914883" /><meta name="octolytics-dimension-repository_network_root_nwo" content="Novia-2018/Intel-Image-Classification-Using-CNN" /><meta name="octolytics-dimension-repository_explore_github_marketplace_ci_cta_shown" content="true" />
<link rel="canonical" href="https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py" data-pjax-transient>
<meta name="browser-stats-url" content="https://api.github.com/_private/browser/stats">
<meta name="browser-errors-url" content="https://api.github.com/_private/browser/errors">
<link rel="mask-icon" href="https://github.githubassets.com/pinned-octocat.svg" color="#000000">
<link rel="alternate icon" class="js-site-favicon" type="image/png" href="https://github.githubassets.com/favicons/favicon.png">
<link rel="icon" class="js-site-favicon" type="image/svg+xml" href="https://github.githubassets.com/favicons/favicon.svg">
<meta name="theme-color" content="#1e2327">
<link rel="manifest" href="/manifest.json" crossOrigin="use-credentials">
</head>
<body class="logged-in env-production page-responsive page-blob">
<div class="position-relative js-header-wrapper ">
<a href="#start-of-content" class="p-3 bg-blue text-white show-on-focus js-skip-to-content">Skip to content</a>
<span class="Progress progress-pjax-loader position-fixed width-full js-pjax-loader-bar">
<span class="progress-pjax-loader-bar top-0 left-0" style="width: 0%;"></span>
</span>
<header class="Header py-lg-0 js-details-container Details flex-wrap flex-lg-nowrap px-3" role="banner">
<div class="Header-item d-none d-lg-flex">
<a class="Header-link" href="https://github.com/" data-hotkey="g d"
aria-label="Homepage " data-ga-click="Header, go to dashboard, icon:logo">
<svg class="octicon octicon-mark-github v-align-middle" height="32" viewBox="0 0 16 16" version="1.1" width="32" aria-hidden="true"><path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z"></path></svg>
</a>
</div>
<div class="Header-item d-lg-none">
<button class="Header-link btn-link js-details-target" type="button" aria-label="Toggle navigation" aria-expanded="false">
<svg height="24" class="octicon octicon-three-bars" viewBox="0 0 16 16" version="1.1" width="24" aria-hidden="true"><path fill-rule="evenodd" d="M1 2.75A.75.75 0 011.75 2h12.5a.75.75 0 110 1.5H1.75A.75.75 0 011 2.75zm0 5A.75.75 0 011.75 7h12.5a.75.75 0 110 1.5H1.75A.75.75 0 011 7.75zM1.75 12a.75.75 0 100 1.5h12.5a.75.75 0 100-1.5H1.75z"></path></svg>
</button>
</div>
<div class="Header-item Header-item--full flex-column flex-lg-row width-full flex-order-2 flex-lg-order-none mr-0 mr-lg-3 mt-3 mt-lg-0 Details-content--hidden">
<div class="header-search header-search-current js-header-search-current flex-self-stretch flex-lg-self-auto mr-0 mr-lg-3 mb-3 mb-lg-0 scoped-search site-scoped-search js-site-search position-relative js-jump-to js-header-search-current-jump-to"
role="combobox"
aria-owns="jump-to-results"
aria-label="Search or jump to"
aria-haspopup="listbox"
aria-expanded="false"
>
<div class="position-relative">
<!-- '"` --><!-- </textarea></xmp> --></option></form><form class="js-site-search-form" role="search" aria-label="Site" data-scope-type="Repository" data-scope-id="274914883" data-scoped-search-url="/Novia-2018/Intel-Image-Classification-Using-CNN/search" data-unscoped-search-url="/search" action="/Novia-2018/Intel-Image-Classification-Using-CNN/search" accept-charset="UTF-8" method="get">
<label class="form-control input-sm header-search-wrapper p-0 header-search-wrapper-jump-to position-relative d-flex flex-justify-between flex-items-center js-chromeless-input-container">
<input type="text"
class="form-control input-sm header-search-input jump-to-field js-jump-to-field js-site-search-focus js-site-search-field is-clearable"
data-hotkey="s,/"
name="q"
value=""
placeholder="Search or jump to…"
data-unscoped-placeholder="Search or jump to…"
data-scoped-placeholder="Search or jump to…"
autocapitalize="off"
aria-autocomplete="list"
aria-controls="jump-to-results"
aria-label="Search or jump to…"
data-jump-to-suggestions-path="/_graphql/GetSuggestedNavigationDestinations"
spellcheck="false"
autocomplete="off"
>
<input type="hidden" value="pndCKz9+PPGta24O0DVb8WRI3V8+8amlI8wcJmy/8DdZcgkrwAb7ZYiBRl8+c8QLBY/HRO815licxVFtJtWYHw==" data-csrf="true" class="js-data-jump-to-suggestions-path-csrf" />
<input type="hidden" class="js-site-search-type-field" name="type" >
<img src="https://github.githubassets.com/images/search-key-slash.svg" alt="" class="mr-2 header-search-key-slash">
<div class="Box position-absolute overflow-hidden d-none jump-to-suggestions js-jump-to-suggestions-container">
<ul class="d-none js-jump-to-suggestions-template-container">
<li class="d-flex flex-justify-start flex-items-center p-0 f5 navigation-item js-navigation-item js-jump-to-suggestion" role="option">
<a tabindex="-1" class="no-underline d-flex flex-auto flex-items-center jump-to-suggestions-path js-jump-to-suggestion-path js-navigation-open p-2" href="">
<div class="jump-to-octicon js-jump-to-octicon flex-shrink-0 mr-2 text-center d-none">
<svg height="16" width="16" class="octicon octicon-repo flex-shrink-0 js-jump-to-octicon-repo d-none" title="Repository" aria-label="Repository" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M2 2.5A2.5 2.5 0 014.5 0h8.75a.75.75 0 01.75.75v12.5a.75.75 0 01-.75.75h-2.5a.75.75 0 110-1.5h1.75v-2h-8a1 1 0 00-.714 1.7.75.75 0 01-1.072 1.05A2.495 2.495 0 012 11.5v-9zm10.5-1V9h-8c-.356 0-.694.074-1 .208V2.5a1 1 0 011-1h8zM5 12.25v3.25a.25.25 0 00.4.2l1.45-1.087a.25.25 0 01.3 0L8.6 15.7a.25.25 0 00.4-.2v-3.25a.25.25 0 00-.25-.25h-3.5a.25.25 0 00-.25.25z"></path></svg>
<svg height="16" width="16" class="octicon octicon-project flex-shrink-0 js-jump-to-octicon-project d-none" title="Project" aria-label="Project" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M1.75 0A1.75 1.75 0 000 1.75v12.5C0 15.216.784 16 1.75 16h12.5A1.75 1.75 0 0016 14.25V1.75A1.75 1.75 0 0014.25 0H1.75zM1.5 1.75a.25.25 0 01.25-.25h12.5a.25.25 0 01.25.25v12.5a.25.25 0 01-.25.25H1.75a.25.25 0 01-.25-.25V1.75zM11.75 3a.75.75 0 00-.75.75v7.5a.75.75 0 001.5 0v-7.5a.75.75 0 00-.75-.75zm-8.25.75a.75.75 0 011.5 0v5.5a.75.75 0 01-1.5 0v-5.5zM8 3a.75.75 0 00-.75.75v3.5a.75.75 0 001.5 0v-3.5A.75.75 0 008 3z"></path></svg>
<svg height="16" width="16" class="octicon octicon-search flex-shrink-0 js-jump-to-octicon-search d-none" title="Search" aria-label="Search" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M11.5 7a4.499 4.499 0 11-8.998 0A4.499 4.499 0 0111.5 7zm-.82 4.74a6 6 0 111.06-1.06l3.04 3.04a.75.75 0 11-1.06 1.06l-3.04-3.04z"></path></svg>
</div>
<img class="avatar mr-2 flex-shrink-0 js-jump-to-suggestion-avatar d-none" alt="" aria-label="Team" src="" width="28" height="28">
<div class="jump-to-suggestion-name js-jump-to-suggestion-name flex-auto overflow-hidden text-left no-wrap css-truncate css-truncate-target">
</div>
<div class="border rounded-1 flex-shrink-0 bg-gray px-1 text-gray-light ml-1 f6 d-none js-jump-to-badge-search">
<span class="js-jump-to-badge-search-text-default d-none" aria-label="in this repository">
In this repository
</span>
<span class="js-jump-to-badge-search-text-global d-none" aria-label="in all of GitHub">
All GitHub
</span>
<span aria-hidden="true" class="d-inline-block ml-1 v-align-middle">↵</span>
</div>
<div aria-hidden="true" class="border rounded-1 flex-shrink-0 bg-gray px-1 text-gray-light ml-1 f6 d-none d-on-nav-focus js-jump-to-badge-jump">
Jump to
<span class="d-inline-block ml-1 v-align-middle">↵</span>
</div>
</a>
</li>
</ul>
<ul class="d-none js-jump-to-no-results-template-container">
<li class="d-flex flex-justify-center flex-items-center f5 d-none js-jump-to-suggestion p-2">
<span class="text-gray">No suggested jump to results</span>
</li>
</ul>
<ul id="jump-to-results" role="listbox" class="p-0 m-0 js-navigation-container jump-to-suggestions-results-container js-jump-to-suggestions-results-container">
<li class="d-flex flex-justify-start flex-items-center p-0 f5 navigation-item js-navigation-item js-jump-to-scoped-search d-none" role="option">
<a tabindex="-1" class="no-underline d-flex flex-auto flex-items-center jump-to-suggestions-path js-jump-to-suggestion-path js-navigation-open p-2" href="">
<div class="jump-to-octicon js-jump-to-octicon flex-shrink-0 mr-2 text-center d-none">
<svg height="16" width="16" class="octicon octicon-repo flex-shrink-0 js-jump-to-octicon-repo d-none" title="Repository" aria-label="Repository" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M2 2.5A2.5 2.5 0 014.5 0h8.75a.75.75 0 01.75.75v12.5a.75.75 0 01-.75.75h-2.5a.75.75 0 110-1.5h1.75v-2h-8a1 1 0 00-.714 1.7.75.75 0 01-1.072 1.05A2.495 2.495 0 012 11.5v-9zm10.5-1V9h-8c-.356 0-.694.074-1 .208V2.5a1 1 0 011-1h8zM5 12.25v3.25a.25.25 0 00.4.2l1.45-1.087a.25.25 0 01.3 0L8.6 15.7a.25.25 0 00.4-.2v-3.25a.25.25 0 00-.25-.25h-3.5a.25.25 0 00-.25.25z"></path></svg>
<svg height="16" width="16" class="octicon octicon-project flex-shrink-0 js-jump-to-octicon-project d-none" title="Project" aria-label="Project" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M1.75 0A1.75 1.75 0 000 1.75v12.5C0 15.216.784 16 1.75 16h12.5A1.75 1.75 0 0016 14.25V1.75A1.75 1.75 0 0014.25 0H1.75zM1.5 1.75a.25.25 0 01.25-.25h12.5a.25.25 0 01.25.25v12.5a.25.25 0 01-.25.25H1.75a.25.25 0 01-.25-.25V1.75zM11.75 3a.75.75 0 00-.75.75v7.5a.75.75 0 001.5 0v-7.5a.75.75 0 00-.75-.75zm-8.25.75a.75.75 0 011.5 0v5.5a.75.75 0 01-1.5 0v-5.5zM8 3a.75.75 0 00-.75.75v3.5a.75.75 0 001.5 0v-3.5A.75.75 0 008 3z"></path></svg>
<svg height="16" width="16" class="octicon octicon-search flex-shrink-0 js-jump-to-octicon-search d-none" title="Search" aria-label="Search" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M11.5 7a4.499 4.499 0 11-8.998 0A4.499 4.499 0 0111.5 7zm-.82 4.74a6 6 0 111.06-1.06l3.04 3.04a.75.75 0 11-1.06 1.06l-3.04-3.04z"></path></svg>
</div>
<img class="avatar mr-2 flex-shrink-0 js-jump-to-suggestion-avatar d-none" alt="" aria-label="Team" src="" width="28" height="28">
<div class="jump-to-suggestion-name js-jump-to-suggestion-name flex-auto overflow-hidden text-left no-wrap css-truncate css-truncate-target">
</div>
<div class="border rounded-1 flex-shrink-0 bg-gray px-1 text-gray-light ml-1 f6 d-none js-jump-to-badge-search">
<span class="js-jump-to-badge-search-text-default d-none" aria-label="in this repository">
In this repository
</span>
<span class="js-jump-to-badge-search-text-global d-none" aria-label="in all of GitHub">
All GitHub
</span>
<span aria-hidden="true" class="d-inline-block ml-1 v-align-middle">↵</span>
</div>
<div aria-hidden="true" class="border rounded-1 flex-shrink-0 bg-gray px-1 text-gray-light ml-1 f6 d-none d-on-nav-focus js-jump-to-badge-jump">
Jump to
<span class="d-inline-block ml-1 v-align-middle">↵</span>
</div>
</a>
</li>
<li class="d-flex flex-justify-start flex-items-center p-0 f5 navigation-item js-navigation-item js-jump-to-global-search d-none" role="option">
<a tabindex="-1" class="no-underline d-flex flex-auto flex-items-center jump-to-suggestions-path js-jump-to-suggestion-path js-navigation-open p-2" href="">
<div class="jump-to-octicon js-jump-to-octicon flex-shrink-0 mr-2 text-center d-none">
<svg height="16" width="16" class="octicon octicon-repo flex-shrink-0 js-jump-to-octicon-repo d-none" title="Repository" aria-label="Repository" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M2 2.5A2.5 2.5 0 014.5 0h8.75a.75.75 0 01.75.75v12.5a.75.75 0 01-.75.75h-2.5a.75.75 0 110-1.5h1.75v-2h-8a1 1 0 00-.714 1.7.75.75 0 01-1.072 1.05A2.495 2.495 0 012 11.5v-9zm10.5-1V9h-8c-.356 0-.694.074-1 .208V2.5a1 1 0 011-1h8zM5 12.25v3.25a.25.25 0 00.4.2l1.45-1.087a.25.25 0 01.3 0L8.6 15.7a.25.25 0 00.4-.2v-3.25a.25.25 0 00-.25-.25h-3.5a.25.25 0 00-.25.25z"></path></svg>
<svg height="16" width="16" class="octicon octicon-project flex-shrink-0 js-jump-to-octicon-project d-none" title="Project" aria-label="Project" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M1.75 0A1.75 1.75 0 000 1.75v12.5C0 15.216.784 16 1.75 16h12.5A1.75 1.75 0 0016 14.25V1.75A1.75 1.75 0 0014.25 0H1.75zM1.5 1.75a.25.25 0 01.25-.25h12.5a.25.25 0 01.25.25v12.5a.25.25 0 01-.25.25H1.75a.25.25 0 01-.25-.25V1.75zM11.75 3a.75.75 0 00-.75.75v7.5a.75.75 0 001.5 0v-7.5a.75.75 0 00-.75-.75zm-8.25.75a.75.75 0 011.5 0v5.5a.75.75 0 01-1.5 0v-5.5zM8 3a.75.75 0 00-.75.75v3.5a.75.75 0 001.5 0v-3.5A.75.75 0 008 3z"></path></svg>
<svg height="16" width="16" class="octicon octicon-search flex-shrink-0 js-jump-to-octicon-search d-none" title="Search" aria-label="Search" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M11.5 7a4.499 4.499 0 11-8.998 0A4.499 4.499 0 0111.5 7zm-.82 4.74a6 6 0 111.06-1.06l3.04 3.04a.75.75 0 11-1.06 1.06l-3.04-3.04z"></path></svg>
</div>
<img class="avatar mr-2 flex-shrink-0 js-jump-to-suggestion-avatar d-none" alt="" aria-label="Team" src="" width="28" height="28">
<div class="jump-to-suggestion-name js-jump-to-suggestion-name flex-auto overflow-hidden text-left no-wrap css-truncate css-truncate-target">
</div>
<div class="border rounded-1 flex-shrink-0 bg-gray px-1 text-gray-light ml-1 f6 d-none js-jump-to-badge-search">
<span class="js-jump-to-badge-search-text-default d-none" aria-label="in this repository">
In this repository
</span>
<span class="js-jump-to-badge-search-text-global d-none" aria-label="in all of GitHub">
All GitHub
</span>
<span aria-hidden="true" class="d-inline-block ml-1 v-align-middle">↵</span>
</div>
<div aria-hidden="true" class="border rounded-1 flex-shrink-0 bg-gray px-1 text-gray-light ml-1 f6 d-none d-on-nav-focus js-jump-to-badge-jump">
Jump to
<span class="d-inline-block ml-1 v-align-middle">↵</span>
</div>
</a>
</li>
<li class="d-flex flex-justify-center flex-items-center p-0 f5 js-jump-to-suggestion">
<img src="https://github.githubassets.com/images/spinners/octocat-spinner-128.gif" alt="Octocat Spinner Icon" class="m-2" width="28">
</li>
</ul>
</div>
</label>
</form> </div>
</div>
<nav class="d-flex flex-column flex-lg-row flex-self-stretch flex-lg-self-auto" aria-label="Global">
<a class="Header-link py-lg-3 d-block d-lg-none py-2 border-top border-lg-top-0 border-white-fade-15" data-ga-click="Header, click, Nav menu - item:dashboard:user" aria-label="Dashboard" href="/dashboard">
Dashboard
</a>
<a class="js-selected-navigation-item Header-link py-lg-3 mr-0 mr-lg-3 py-2 border-top border-lg-top-0 border-white-fade-15" data-hotkey="g p" data-ga-click="Header, click, Nav menu - item:pulls context:user" aria-label="Pull requests you created" data-selected-links="/pulls /pulls/assigned /pulls/mentioned /pulls" href="/pulls">
Pull requests
</a>
<a class="js-selected-navigation-item Header-link py-lg-3 mr-0 mr-lg-3 py-2 border-top border-lg-top-0 border-white-fade-15" data-hotkey="g i" data-ga-click="Header, click, Nav menu - item:issues context:user" aria-label="Issues you created" data-selected-links="/issues /issues/assigned /issues/mentioned /issues" href="/issues">
Issues
</a>
<div class="mr-0 mr-lg-3 py-2 py-lg-0 border-top border-lg-top-0 border-white-fade-15">
<a class="js-selected-navigation-item Header-link py-lg-3 d-inline-block" data-ga-click="Header, click, Nav menu - item:marketplace context:user" data-octo-click="marketplace_click" data-octo-dimensions="location:nav_bar" data-selected-links=" /marketplace" href="/marketplace">
Marketplace
</a>
</div>
<a class="js-selected-navigation-item Header-link py-lg-3 mr-0 mr-lg-3 py-2 border-top border-lg-top-0 border-white-fade-15" data-ga-click="Header, click, Nav menu - item:explore" data-selected-links="/explore /trending /trending/developers /integrations /integrations/feature/code /integrations/feature/collaborate /integrations/feature/ship showcases showcases_search showcases_landing /explore" href="/explore">
Explore
</a>
<a class="Header-link d-block d-lg-none mr-0 mr-lg-3 py-2 py-lg-3 border-top border-lg-top-0 border-white-fade-15" href="/Novia-2018">
<img class="avatar avatar-user" src="https://avatars3.githubusercontent.com/u/56831817?s=40&v=4" width="20" height="20" alt="@Novia-2018" />
Novia-2018
</a>
<!-- '"` --><!-- </textarea></xmp> --></option></form><form action="/logout" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="vxQQ8Cpp632ZRNy1qHAiCj68jUNMpNXFdIczOy4veDHzRQ6SshzHLjw8rnbZER27SfoIglRRcdYdvDxlvN9vTQ==" />
<button type="submit" class="Header-link mr-0 mr-lg-3 py-2 py-lg-3 border-top border-lg-top-0 border-white-fade-15 d-lg-none btn-link d-block width-full text-left" data-ga-click="Header, sign out, icon:logout" style="padding-left: 2px;">
<svg class="octicon octicon-sign-out v-align-middle" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M2 2.75C2 1.784 2.784 1 3.75 1h2.5a.75.75 0 010 1.5h-2.5a.25.25 0 00-.25.25v10.5c0 .138.112.25.25.25h2.5a.75.75 0 010 1.5h-2.5A1.75 1.75 0 012 13.25V2.75zm10.44 4.5H6.75a.75.75 0 000 1.5h5.69l-1.97 1.97a.75.75 0 101.06 1.06l3.25-3.25a.75.75 0 000-1.06l-3.25-3.25a.75.75 0 10-1.06 1.06l1.97 1.97z"></path></svg>
Sign out
</button>
</form></nav>
</div>
<div class="Header-item Header-item--full flex-justify-center d-lg-none position-relative">
<a class="Header-link" href="https://github.com/" data-hotkey="g d"
aria-label="Homepage " data-ga-click="Header, go to dashboard, icon:logo">
<svg class="octicon octicon-mark-github v-align-middle" height="32" viewBox="0 0 16 16" version="1.1" width="32" aria-hidden="true"><path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z"></path></svg>
</a>
</div>
<div class="Header-item mr-0 mr-lg-3 flex-order-1 flex-lg-order-none">
<a aria-label="You have no unread notifications" class="Header-link notification-indicator position-relative tooltipped tooltipped-sw js-socket-channel js-notification-indicator" data-hotkey="g n" data-ga-click="Header, go to notifications, icon:read" data-channel="eyJjIjoibm90aWZpY2F0aW9uLWNoYW5nZWQ6NTY4MzE4MTciLCJ0IjoxNTkzMDk0ODQ3fQ==--85a7327cb0320925f31e843ecca40ae2c25a991797a75d7cf2a16ee5bdef8e30" href="/notifications">
<span class="js-indicator-modifier mail-status "></span>
<svg class="octicon octicon-bell" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path d="M8 16a2 2 0 001.985-1.75c.017-.137-.097-.25-.235-.25h-3.5c-.138 0-.252.113-.235.25A2 2 0 008 16z"></path><path fill-rule="evenodd" d="M8 1.5A3.5 3.5 0 004.5 5v2.947c0 .346-.102.683-.294.97l-1.703 2.556a.018.018 0 00-.003.01l.001.006c0 .002.002.004.004.006a.017.017 0 00.006.004l.007.001h10.964l.007-.001a.016.016 0 00.006-.004.016.016 0 00.004-.006l.001-.007a.017.017 0 00-.003-.01l-1.703-2.554a1.75 1.75 0 01-.294-.97V5A3.5 3.5 0 008 1.5zM3 5a5 5 0 0110 0v2.947c0 .05.015.098.042.139l1.703 2.555A1.518 1.518 0 0113.482 13H2.518a1.518 1.518 0 01-1.263-2.36l1.703-2.554A.25.25 0 003 7.947V5z"></path></svg>
</a>
</div>
<div class="Header-item position-relative d-none d-lg-flex">
<details class="details-overlay details-reset">
<summary class="Header-link"
aria-label="Create new…"
data-ga-click="Header, create new, icon:add">
<svg class="octicon octicon-plus" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M8 2a.75.75 0 01.75.75v4.5h4.5a.75.75 0 010 1.5h-4.5v4.5a.75.75 0 01-1.5 0v-4.5h-4.5a.75.75 0 010-1.5h4.5v-4.5A.75.75 0 018 2z"></path></svg> <span class="dropdown-caret"></span>
</summary>
<details-menu class="dropdown-menu dropdown-menu-sw mt-n2">
<a role="menuitem" class="dropdown-item" href="/new" data-ga-click="Header, create new repository">
New repository
</a>
<a role="menuitem" class="dropdown-item" href="/new/import" data-ga-click="Header, import a repository">
Import repository
</a>
<a role="menuitem" class="dropdown-item" href="https://gist.github.com/" data-ga-click="Header, create new gist">
New gist
</a>
<a role="menuitem" class="dropdown-item" href="/organizations/new" data-ga-click="Header, create new organization">
New organization
</a>
<div role="none" class="dropdown-divider"></div>
<div class="dropdown-header">
<span title="Novia-2018/Intel-Image-Classification-Using-CNN">This repository</span>
</div>
<a role="menuitem" class="dropdown-item" href="/Novia-2018/Intel-Image-Classification-Using-CNN/issues/new/choose" data-ga-click="Header, create new issue" data-skip-pjax>
New issue
</a>
</details-menu>
</details>
</div>
<div class="Header-item position-relative mr-0 d-none d-lg-flex">
<details class="details-overlay details-reset js-feature-preview-indicator-container" data-feature-preview-indicator-src="/users/Novia-2018/feature_preview/indicator_check">
<summary class="Header-link"
aria-label="View profile and more"
data-ga-click="Header, show menu, icon:avatar">
<img
alt="@Novia-2018"
width="20"
height="20"
src="https://avatars0.githubusercontent.com/u/56831817?s=60&v=4"
class="avatar avatar-user " />
<span class="feature-preview-indicator js-feature-preview-indicator" style="top: 10px;" hidden></span>
<span class="dropdown-caret"></span>
</summary>
<details-menu class="dropdown-menu dropdown-menu-sw mt-n2" style="width: 180px" >
<div class="header-nav-current-user css-truncate"><a role="menuitem" class="no-underline user-profile-link px-3 pt-2 pb-2 mb-n2 mt-n1 d-block" href="/Novia-2018" data-ga-click="Header, go to profile, text:Signed in as">Signed in as <strong class="css-truncate-target">Novia-2018</strong></a></div>
<div role="none" class="dropdown-divider"></div>
<div class="pl-3 pr-3 f6 user-status-container js-user-status-context lh-condensed" data-url="/users/status?compact=1&link_mentions=0&truncate=1">
<div class="js-user-status-container rounded-1 px-2 py-1 mt-2 border"
data-team-hovercards-enabled>
<details class="js-user-status-details details-reset details-overlay details-overlay-dark">
<summary class="btn-link btn-block link-gray no-underline js-toggle-user-status-edit toggle-user-status-edit "
role="menuitem" data-hydro-click="{"event_type":"user_profile.click","payload":{"profile_user_id":56831817,"target":"EDIT_USER_STATUS","user_id":56831817,"originating_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py"}}" data-hydro-click-hmac="c015019fc658e06304bd55b65197426dcf04d5f0202c8d65d441406e88f04f6c">
<div class="d-flex flex-items-center flex-items-stretch">
<div class="f6 lh-condensed user-status-header d-flex user-status-emoji-only-header circle">
<div class="user-status-emoji-container flex-shrink-0 mr-2 d-flex flex-items-center flex-justify-center lh-condensed-ultra v-align-bottom">
<svg class="octicon octicon-smiley" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M1.5 8a6.5 6.5 0 1113 0 6.5 6.5 0 01-13 0zM8 0a8 8 0 100 16A8 8 0 008 0zM5 8a1 1 0 100-2 1 1 0 000 2zm7-1a1 1 0 11-2 0 1 1 0 012 0zM5.32 9.636a.75.75 0 011.038.175l.007.009c.103.118.22.222.35.31.264.178.683.37 1.285.37.602 0 1.02-.192 1.285-.371.13-.088.247-.192.35-.31l.007-.008a.75.75 0 111.222.87l-.614-.431c.614.43.614.431.613.431v.001l-.001.002-.002.003-.005.007-.014.019a1.984 1.984 0 01-.184.213c-.16.166-.338.316-.53.445-.63.418-1.37.638-2.127.629-.946 0-1.652-.308-2.126-.63a3.32 3.32 0 01-.715-.657l-.014-.02-.005-.006-.002-.003v-.002h-.001l.613-.432-.614.43a.75.75 0 01.183-1.044h.001z"></path></svg>
</div>
</div>
<div class="
user-status-message-wrapper f6 min-width-0"
style="line-height: 20px;" >
<div class="css-truncate css-truncate-target width-fit text-gray-dark text-left">
<span class="text-gray">Set status</span>
</div>
</div>
</div>
</summary>
<details-dialog class="details-dialog rounded-1 anim-fade-in fast Box Box--overlay" role="dialog" tabindex="-1">
<!-- '"` --><!-- </textarea></xmp> --></option></form><form class="position-relative flex-auto js-user-status-form" action="/users/status?circle=0&compact=1&link_mentions=0&truncate=1" accept-charset="UTF-8" method="post"><input type="hidden" name="_method" value="put" /><input type="hidden" name="authenticity_token" value="66hsw004BOc0Nedk4b4B1TKrcIZtW2Kesrzm+YsF6FWpnUXxqikemWRj9zT1ryt6QIXd4z5ikAjUV5S6WApRJQ==" />
<div class="Box-header bg-gray border-bottom p-3">
<button class="Box-btn-octicon js-toggle-user-status-edit btn-octicon float-right" type="reset" aria-label="Close dialog" data-close-dialog>
<svg class="octicon octicon-x" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M3.72 3.72a.75.75 0 011.06 0L8 6.94l3.22-3.22a.75.75 0 111.06 1.06L9.06 8l3.22 3.22a.75.75 0 11-1.06 1.06L8 9.06l-3.22 3.22a.75.75 0 01-1.06-1.06L6.94 8 3.72 4.78a.75.75 0 010-1.06z"></path></svg>
</button>
<h3 class="Box-title f5 text-bold text-gray-dark">Edit status</h3>
</div>
<input type="hidden" name="emoji" class="js-user-status-emoji-field" value="">
<input type="hidden" name="organization_id" class="js-user-status-org-id-field" value="">
<div class="px-3 py-2 text-gray-dark">
<div class="js-characters-remaining-container position-relative mt-2">
<div class="input-group d-table form-group my-0 js-user-status-form-group">
<span class="input-group-button d-table-cell v-align-middle" style="width: 1%">
<button type="button" aria-label="Choose an emoji" class="btn-outline btn js-toggle-user-status-emoji-picker btn-open-emoji-picker p-0">
<span class="js-user-status-original-emoji" hidden></span>
<span class="js-user-status-custom-emoji"></span>
<span class="js-user-status-no-emoji-icon" >
<svg class="octicon octicon-smiley" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M1.5 8a6.5 6.5 0 1113 0 6.5 6.5 0 01-13 0zM8 0a8 8 0 100 16A8 8 0 008 0zM5 8a1 1 0 100-2 1 1 0 000 2zm7-1a1 1 0 11-2 0 1 1 0 012 0zM5.32 9.636a.75.75 0 011.038.175l.007.009c.103.118.22.222.35.31.264.178.683.37 1.285.37.602 0 1.02-.192 1.285-.371.13-.088.247-.192.35-.31l.007-.008a.75.75 0 111.222.87l-.614-.431c.614.43.614.431.613.431v.001l-.001.002-.002.003-.005.007-.014.019a1.984 1.984 0 01-.184.213c-.16.166-.338.316-.53.445-.63.418-1.37.638-2.127.629-.946 0-1.652-.308-2.126-.63a3.32 3.32 0 01-.715-.657l-.014-.02-.005-.006-.002-.003v-.002h-.001l.613-.432-.614.43a.75.75 0 01.183-1.044h.001z"></path></svg>
</span>
</button>
</span>
<text-expander keys=": @" data-mention-url="/autocomplete/user-suggestions" data-emoji-url="/autocomplete/emoji">
<input
type="text"
autocomplete="off"
data-no-org-url="/autocomplete/user-suggestions"
data-org-url="/suggestions?mention_suggester=1"
data-maxlength="80"
class="d-table-cell width-full form-control js-user-status-message-field js-characters-remaining-field"
placeholder="What's happening?"
name="message"
value=""
aria-label="What is your current status?">
</text-expander>
<div class="error">Could not update your status, please try again.</div>
</div>
<div style="margin-left: 53px" class="my-1 text-small label-characters-remaining js-characters-remaining" data-suffix="remaining" hidden>
80 remaining
</div>
</div>
<include-fragment class="js-user-status-emoji-picker" data-url="/users/status/emoji"></include-fragment>
<div class="overflow-auto ml-n3 mr-n3 px-3 border-bottom" style="max-height: 33vh">
<div class="user-status-suggestions js-user-status-suggestions collapsed overflow-hidden">
<h4 class="f6 text-normal my-3">Suggestions:</h4>
<div class="mx-3 mt-2 clearfix">
<div class="float-left col-6">
<button type="button" value=":palm_tree:" class="d-flex flex-items-baseline flex-items-stretch lh-condensed f6 btn-link link-gray no-underline js-predefined-user-status mb-1">
<div class="emoji-status-width mr-2 v-align-middle js-predefined-user-status-emoji">
<g-emoji alias="palm_tree" fallback-src="https://github.githubassets.com/images/icons/emoji/unicode/1f334.png">🌴</g-emoji>
</div>
<div class="d-flex flex-items-center no-underline js-predefined-user-status-message ws-normal text-left" style="border-left: 1px solid transparent">
On vacation
</div>
</button>
<button type="button" value=":face_with_thermometer:" class="d-flex flex-items-baseline flex-items-stretch lh-condensed f6 btn-link link-gray no-underline js-predefined-user-status mb-1">
<div class="emoji-status-width mr-2 v-align-middle js-predefined-user-status-emoji">
<g-emoji alias="face_with_thermometer" fallback-src="https://github.githubassets.com/images/icons/emoji/unicode/1f912.png">🤒</g-emoji>
</div>
<div class="d-flex flex-items-center no-underline js-predefined-user-status-message ws-normal text-left" style="border-left: 1px solid transparent">
Out sick
</div>
</button>
</div>
<div class="float-left col-6">
<button type="button" value=":house:" class="d-flex flex-items-baseline flex-items-stretch lh-condensed f6 btn-link link-gray no-underline js-predefined-user-status mb-1">
<div class="emoji-status-width mr-2 v-align-middle js-predefined-user-status-emoji">
<g-emoji alias="house" fallback-src="https://github.githubassets.com/images/icons/emoji/unicode/1f3e0.png">🏠</g-emoji>
</div>
<div class="d-flex flex-items-center no-underline js-predefined-user-status-message ws-normal text-left" style="border-left: 1px solid transparent">
Working from home
</div>
</button>
<button type="button" value=":dart:" class="d-flex flex-items-baseline flex-items-stretch lh-condensed f6 btn-link link-gray no-underline js-predefined-user-status mb-1">
<div class="emoji-status-width mr-2 v-align-middle js-predefined-user-status-emoji">
<g-emoji alias="dart" fallback-src="https://github.githubassets.com/images/icons/emoji/unicode/1f3af.png">🎯</g-emoji>
</div>
<div class="d-flex flex-items-center no-underline js-predefined-user-status-message ws-normal text-left" style="border-left: 1px solid transparent">
Focusing
</div>
</button>
</div>
</div>
</div>
<div class="user-status-limited-availability-container">
<div class="form-checkbox my-0">
<input type="checkbox" name="limited_availability" value="1" class="js-user-status-limited-availability-checkbox" data-default-message="I may be slow to respond." aria-describedby="limited-availability-help-text-truncate-true-compact-true" id="limited-availability-truncate-true-compact-true">
<label class="d-block f5 text-gray-dark mb-1" for="limited-availability-truncate-true-compact-true">
Busy
</label>
<p class="note" id="limited-availability-help-text-truncate-true-compact-true">
When others mention you, assign you, or request your review,
GitHub will let them know that you have limited availability.
</p>
</div>
</div>
</div>
<div class="d-inline-block f5 mr-2 pt-3 pb-2" >
<div class="d-inline-block mr-1">
Clear status
</div>
<details class="js-user-status-expire-drop-down f6 dropdown details-reset details-overlay d-inline-block mr-2">
<summary class="f5 btn-link link-gray-dark border px-2 py-1 rounded-1" aria-haspopup="true">
<div class="js-user-status-expiration-interval-selected d-inline-block v-align-baseline">
Never
</div>
<div class="dropdown-caret"></div>
</summary>
<ul class="dropdown-menu dropdown-menu-se pl-0 overflow-auto" style="width: 220px; max-height: 15.5em">
<li>
<button type="button" class="btn-link dropdown-item js-user-status-expire-button ws-normal" title="Never">
<span class="d-inline-block text-bold mb-1">Never</span>
<div class="f6 lh-condensed">Keep this status until you clear your status or edit your status.</div>
</button>
</li>
<li class="dropdown-divider" role="none"></li>
<li>
<button type="button" class="btn-link dropdown-item ws-normal js-user-status-expire-button" title="in 30 minutes" value="2020-06-25T07:50:47-07:00">
in 30 minutes
</button>
</li>
<li>
<button type="button" class="btn-link dropdown-item ws-normal js-user-status-expire-button" title="in 1 hour" value="2020-06-25T08:20:47-07:00">
in 1 hour
</button>
</li>
<li>
<button type="button" class="btn-link dropdown-item ws-normal js-user-status-expire-button" title="in 4 hours" value="2020-06-25T11:20:47-07:00">
in 4 hours
</button>
</li>
<li>
<button type="button" class="btn-link dropdown-item ws-normal js-user-status-expire-button" title="today" value="2020-06-25T23:59:59-07:00">
today
</button>
</li>
<li>
<button type="button" class="btn-link dropdown-item ws-normal js-user-status-expire-button" title="this week" value="2020-06-28T23:59:59-07:00">
this week
</button>
</li>
</ul>
</details>
<input class="js-user-status-expiration-date-input" type="hidden" name="expires_at" value="">
</div>
<include-fragment class="js-user-status-org-picker" data-url="/users/status/organizations"></include-fragment>
</div>
<div class="d-flex flex-items-center flex-justify-between p-3 border-top">
<button type="submit" disabled class="width-full btn btn-primary mr-2 js-user-status-submit">
Set status
</button>
<button type="button" disabled class="width-full js-clear-user-status-button btn ml-2 ">
Clear status
</button>
</div>
</form> </details-dialog>
</details>
</div>
</div>
<div role="none" class="dropdown-divider"></div>
<a role="menuitem" class="dropdown-item" href="/Novia-2018" data-ga-click="Header, go to profile, text:your profile" data-hydro-click="{"event_type":"global_header.user_menu_dropdown.click","payload":{"request_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","target":"YOUR_PROFILE","originating_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","user_id":56831817}}" data-hydro-click-hmac="b84989873dd8fd723cf9a4e2ef8be69d3cb674161ec87d2d9e4e7dc5ffa80359" >Your profile</a>
<a role="menuitem" class="dropdown-item" href="/Novia-2018?tab=repositories" data-ga-click="Header, go to repositories, text:your repositories" data-hydro-click="{"event_type":"global_header.user_menu_dropdown.click","payload":{"request_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","target":"YOUR_REPOSITORIES","originating_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","user_id":56831817}}" data-hydro-click-hmac="a92b888dffe153bcf7e3693a9693a5985ae75b1a570657da094cf022ce2368e8" >Your repositories</a>
<a role="menuitem" class="dropdown-item" href="/Novia-2018?tab=projects" data-ga-click="Header, go to projects, text:your projects" data-hydro-click="{"event_type":"global_header.user_menu_dropdown.click","payload":{"request_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","target":"YOUR_PROJECTS","originating_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","user_id":56831817}}" data-hydro-click-hmac="279c3ae2df0f7afd3d8591d8d693ebeeac39b2f8507204faa34beb8998a85b98" >Your projects</a>
<a role="menuitem" class="dropdown-item" href="/Novia-2018?tab=stars" data-ga-click="Header, go to starred repos, text:your stars" data-hydro-click="{"event_type":"global_header.user_menu_dropdown.click","payload":{"request_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","target":"YOUR_STARS","originating_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","user_id":56831817}}" data-hydro-click-hmac="70f3ac421a7713d5301096e872fa3c19e74b58f00e3e77b63efc46150f4bfc82" >Your stars</a>
<a role="menuitem" class="dropdown-item" href="https://gist.github.com/mine" data-ga-click="Header, your gists, text:your gists" data-hydro-click="{"event_type":"global_header.user_menu_dropdown.click","payload":{"request_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","target":"YOUR_GISTS","originating_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","user_id":56831817}}" data-hydro-click-hmac="95187183d45725279670bf6d8fd626f4f4199d00e174609d6c82208f65a37191" >Your gists</a>
<div role="none" class="dropdown-divider"></div>
<a role="menuitem" class="dropdown-item" href="/settings/billing" data-ga-click="Header, go to billing, text:upgrade" data-hydro-click="{"event_type":"global_header.user_menu_dropdown.click","payload":{"request_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","target":"UPGRADE","originating_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","user_id":56831817}}" data-hydro-click-hmac="2383643713bcf0ce05c1284f29fe708e1461147c1ed64150b7b49b09354d027d" >Upgrade</a>
<div id="feature-enrollment-toggle" class="hide-sm hide-md feature-preview-details position-relative">
<button
type="button"
class="dropdown-item btn-link"
role="menuitem"
data-feature-preview-trigger-url="/users/Novia-2018/feature_previews"
data-feature-preview-close-details="{"event_type":"feature_preview.clicks.close_modal","payload":{"originating_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","user_id":56831817}}"
data-feature-preview-close-hmac="84414ed888d73b35b64a8764aba7b788d91b0f6a13be079df312f7f1c963bd83"
data-hydro-click="{"event_type":"feature_preview.clicks.open_modal","payload":{"link_location":"user_dropdown","originating_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","user_id":56831817}}"
data-hydro-click-hmac="c4e8b08d86624de84c42c6b74860733c3cfa8f6870638ea9ed0054a668820dc4"
>
Feature preview
</button>
<span class="feature-preview-indicator js-feature-preview-indicator" hidden></span>
</div>
<a role="menuitem" class="dropdown-item" href="https://help.github.com" data-ga-click="Header, go to help, text:help" data-hydro-click="{"event_type":"global_header.user_menu_dropdown.click","payload":{"request_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","target":"HELP","originating_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","user_id":56831817}}" data-hydro-click-hmac="8a47fa825a3d782ba0e2c1a03eaecb4789ad3b21b4dc6b8d656fdddd15c65daf" >Help</a>
<a role="menuitem" class="dropdown-item" href="/settings/profile" data-ga-click="Header, go to settings, icon:settings" data-hydro-click="{"event_type":"global_header.user_menu_dropdown.click","payload":{"request_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","target":"SETTINGS","originating_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","user_id":56831817}}" data-hydro-click-hmac="eda8fd69e31f61a10f5eb61ca8b6b7fef8f2a185dafe434f277ca72dbe6e5ca9" >Settings</a>
<!-- '"` --><!-- </textarea></xmp> --></option></form><form class="logout-form" action="/logout" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="sKPkc0+XkW/Iu/Im3sHVYr+GXhuCRbceCEpC2Vx5M3v88voR1+K9PG3DgOWvoOrTyMDb2pqwEw1hcU2HzokkBw==" />
<button type="submit" class="dropdown-item dropdown-signout" data-ga-click="Header, sign out, icon:logout" data-hydro-click="{"event_type":"global_header.user_menu_dropdown.click","payload":{"request_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","target":"SIGN_OUT","originating_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","user_id":56831817}}" data-hydro-click-hmac="cf7ffdedf101319f3fd0ae683b8201251b97849e631ba8f398a81f7766c67d35" role="menuitem">
Sign out
</button>
<input type="text" name="required_field_4122" hidden="hidden" class="form-control" /><input type="hidden" name="timestamp" value="1593094847138" class="form-control" /><input type="hidden" name="timestamp_secret" value="981286f3dcd91125c661207716678034ad4f207967f06e027d7049bc0c61eadf" class="form-control" />
</form> </details-menu>
</details>
</div>
</header>
</div>
<div id="start-of-content" class="show-on-focus"></div>
<div id="js-flash-container">
<template class="js-flash-template">
<div class="flash flash-full js-flash-template-container">
<div class=" px-2" >
<button class="flash-close js-flash-close" type="button" aria-label="Dismiss this message">
<svg class="octicon octicon-x" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M3.72 3.72a.75.75 0 011.06 0L8 6.94l3.22-3.22a.75.75 0 111.06 1.06L9.06 8l3.22 3.22a.75.75 0 11-1.06 1.06L8 9.06l-3.22 3.22a.75.75 0 01-1.06-1.06L6.94 8 3.72 4.78a.75.75 0 010-1.06z"></path></svg>
</button>
<div class="js-flash-template-message"></div>
</div>
</div>
</template>
</div>
<include-fragment class="js-notification-shelf-include-fragment" data-base-src="https://github.com/notifications/beta/shelf"></include-fragment>
<div
class="application-main "
data-commit-hovercards-enabled
data-discussion-hovercards-enabled
data-issue-and-pr-hovercards-enabled
>
<div itemscope itemtype="http://schema.org/SoftwareSourceCode" class="">
<main >
<div class="pagehead repohead hx_repohead readability-menu bg-gray-light pb-0 pt-3 border-0 mb-5">
<div class="d-flex mb-3 px-3 px-md-4 px-lg-5">
<div class="flex-auto min-width-0 width-fit mr-3">
<h1 class="public d-flex flex-wrap flex-items-center break-word float-none f3">
<svg class="octicon octicon-repo" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M2 2.5A2.5 2.5 0 014.5 0h8.75a.75.75 0 01.75.75v12.5a.75.75 0 01-.75.75h-2.5a.75.75 0 110-1.5h1.75v-2h-8a1 1 0 00-.714 1.7.75.75 0 01-1.072 1.05A2.495 2.495 0 012 11.5v-9zm10.5-1V9h-8c-.356 0-.694.074-1 .208V2.5a1 1 0 011-1h8zM5 12.25v3.25a.25.25 0 00.4.2l1.45-1.087a.25.25 0 01.3 0L8.6 15.7a.25.25 0 00.4-.2v-3.25a.25.25 0 00-.25-.25h-3.5a.25.25 0 00-.25.25z"></path></svg>
<span class="author ml-2 flex-self-stretch" itemprop="author">
<a class="url fn" rel="author" data-hovercard-type="user" data-hovercard-url="/users/Novia-2018/hovercard" data-octo-click="hovercard-link-click" data-octo-dimensions="link_type:self" href="/Novia-2018">Novia-2018</a>
</span>
<span class="path-divider flex-self-stretch">/</span>
<strong itemprop="name" class="mr-2 flex-self-stretch">
<a data-pjax="#js-repo-pjax-container" href="/Novia-2018/Intel-Image-Classification-Using-CNN">Intel-Image-Classification-Using-CNN</a>
</strong>
</h1>
</div>
<ul class="pagehead-actions flex-shrink-0 d-none d-md-inline" style="padding: 2px 0;">
<li>
<!-- '"` --><!-- </textarea></xmp> --></option></form><form data-remote="true" class="js-social-form js-social-container clearfix" action="/notifications/subscribe" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="AvQMAYsNgR4YNbR1JH6bE8hhW7EwO14NXiuCze89ALn/NNkh8z3FMta2AU6RHuhj0qvV1Snz89ZNz6EKpLz1LA==" /> <input type="hidden" name="repository_id" value="274914883">
<details class="details-reset details-overlay select-menu float-left" >
<summary class="select-menu-button float-left btn btn-sm btn-with-count" data-hydro-click="{"event_type":"repository.click","payload":{"target":"WATCH_BUTTON","repository_id":274914883,"originating_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","user_id":56831817}}" data-hydro-click-hmac="bcb30d9160cc59d9cb43cd0244905b8a281de943dce06aa7ca5dc0ac53ce911a" data-ga-click="Repository, click Watch settings, action:blob#show"> <span data-menu-button>
<svg class="octicon octicon-eye" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M1.679 7.932c.412-.621 1.242-1.75 2.366-2.717C5.175 4.242 6.527 3.5 8 3.5c1.473 0 2.824.742 3.955 1.715 1.124.967 1.954 2.096 2.366 2.717a.119.119 0 010 .136c-.412.621-1.242 1.75-2.366 2.717C10.825 11.758 9.473 12.5 8 12.5c-1.473 0-2.824-.742-3.955-1.715C2.92 9.818 2.09 8.69 1.679 8.068a.119.119 0 010-.136zM8 2c-1.981 0-3.67.992-4.933 2.078C1.797 5.169.88 6.423.43 7.1a1.619 1.619 0 000 1.798c.45.678 1.367 1.932 2.637 3.024C4.329 13.008 6.019 14 8 14c1.981 0 3.67-.992 4.933-2.078 1.27-1.091 2.187-2.345 2.637-3.023a1.619 1.619 0 000-1.798c-.45-.678-1.367-1.932-2.637-3.023C11.671 2.992 9.981 2 8 2zm0 8a2 2 0 100-4 2 2 0 000 4z"></path></svg>
Unwatch
</span>
</summary> <details-menu
class="select-menu-modal position-absolute mt-5 "
style="z-index: 99;">
<div class="select-menu-header">
<span class="select-menu-title">Notifications</span>
</div>
<div class="select-menu-list">
<button
type="submit"
name="do"
value="included"
class="select-menu-item width-full"
aria-checked="false"
role="menuitemradio"
>
<svg class="octicon octicon-check select-menu-item-icon" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path></svg>
<div class="select-menu-item-text">
<span class="select-menu-item-heading">Not watching</span>
<span class="description">Be notified only when participating or @mentioned.</span>
<span class="hidden-select-button-text" data-menu-button-contents>
<svg class="octicon octicon-eye" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M1.679 7.932c.412-.621 1.242-1.75 2.366-2.717C5.175 4.242 6.527 3.5 8 3.5c1.473 0 2.824.742 3.955 1.715 1.124.967 1.954 2.096 2.366 2.717a.119.119 0 010 .136c-.412.621-1.242 1.75-2.366 2.717C10.825 11.758 9.473 12.5 8 12.5c-1.473 0-2.824-.742-3.955-1.715C2.92 9.818 2.09 8.69 1.679 8.068a.119.119 0 010-.136zM8 2c-1.981 0-3.67.992-4.933 2.078C1.797 5.169.88 6.423.43 7.1a1.619 1.619 0 000 1.798c.45.678 1.367 1.932 2.637 3.024C4.329 13.008 6.019 14 8 14c1.981 0 3.67-.992 4.933-2.078 1.27-1.091 2.187-2.345 2.637-3.023a1.619 1.619 0 000-1.798c-.45-.678-1.367-1.932-2.637-3.023C11.671 2.992 9.981 2 8 2zm0 8a2 2 0 100-4 2 2 0 000 4z"></path></svg>
Watch
</span>
</div>
</button>
<button type="submit" name="do" value="release_only" class="select-menu-item width-full" aria-checked="false" role="menuitemradio">
<svg class="octicon octicon-check select-menu-item-icon" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path></svg>
<div class="select-menu-item-text">
<span class="select-menu-item-heading">Releases only</span>
<span class="description">Be notified of new releases, and when participating or @mentioned.</span>
<span class="hidden-select-button-text" data-menu-button-contents>
<svg class="octicon octicon-eye" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M1.679 7.932c.412-.621 1.242-1.75 2.366-2.717C5.175 4.242 6.527 3.5 8 3.5c1.473 0 2.824.742 3.955 1.715 1.124.967 1.954 2.096 2.366 2.717a.119.119 0 010 .136c-.412.621-1.242 1.75-2.366 2.717C10.825 11.758 9.473 12.5 8 12.5c-1.473 0-2.824-.742-3.955-1.715C2.92 9.818 2.09 8.69 1.679 8.068a.119.119 0 010-.136zM8 2c-1.981 0-3.67.992-4.933 2.078C1.797 5.169.88 6.423.43 7.1a1.619 1.619 0 000 1.798c.45.678 1.367 1.932 2.637 3.024C4.329 13.008 6.019 14 8 14c1.981 0 3.67-.992 4.933-2.078 1.27-1.091 2.187-2.345 2.637-3.023a1.619 1.619 0 000-1.798c-.45-.678-1.367-1.932-2.637-3.023C11.671 2.992 9.981 2 8 2zm0 8a2 2 0 100-4 2 2 0 000 4z"></path></svg>
Unwatch releases
</span>
</div>
</button>
<button type="submit" name="do" value="subscribed" class="select-menu-item width-full" aria-checked="true" role="menuitemradio">
<svg class="octicon octicon-check select-menu-item-icon" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path></svg>
<div class="select-menu-item-text">
<span class="select-menu-item-heading">Watching</span>
<span class="description">Be notified of all conversations.</span>
<span class="hidden-select-button-text" data-menu-button-contents>
<svg class="octicon octicon-eye" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M1.679 7.932c.412-.621 1.242-1.75 2.366-2.717C5.175 4.242 6.527 3.5 8 3.5c1.473 0 2.824.742 3.955 1.715 1.124.967 1.954 2.096 2.366 2.717a.119.119 0 010 .136c-.412.621-1.242 1.75-2.366 2.717C10.825 11.758 9.473 12.5 8 12.5c-1.473 0-2.824-.742-3.955-1.715C2.92 9.818 2.09 8.69 1.679 8.068a.119.119 0 010-.136zM8 2c-1.981 0-3.67.992-4.933 2.078C1.797 5.169.88 6.423.43 7.1a1.619 1.619 0 000 1.798c.45.678 1.367 1.932 2.637 3.024C4.329 13.008 6.019 14 8 14c1.981 0 3.67-.992 4.933-2.078 1.27-1.091 2.187-2.345 2.637-3.023a1.619 1.619 0 000-1.798c-.45-.678-1.367-1.932-2.637-3.023C11.671 2.992 9.981 2 8 2zm0 8a2 2 0 100-4 2 2 0 000 4z"></path></svg>
Unwatch
</span>
</div>
</button>
<button type="submit" name="do" value="ignore" class="select-menu-item width-full" aria-checked="false" role="menuitemradio">
<svg class="octicon octicon-check select-menu-item-icon" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path></svg>
<div class="select-menu-item-text">
<span class="select-menu-item-heading">Ignoring</span>
<span class="description">Never be notified.</span>
<span class="hidden-select-button-text" data-menu-button-contents>
<svg class="octicon octicon-mute" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M8 2.75a.75.75 0 00-1.238-.57L3.472 5H1.75A1.75 1.75 0 000 6.75v2.5C0 10.216.784 11 1.75 11h1.723l3.289 2.82A.75.75 0 008 13.25V2.75zM4.238 6.32L6.5 4.38v7.24L4.238 9.68a.75.75 0 00-.488-.18h-2a.25.25 0 01-.25-.25v-2.5a.25.25 0 01.25-.25h2a.75.75 0 00.488-.18zm7.042-1.1a.75.75 0 10-1.06 1.06L11.94 8l-1.72 1.72a.75.75 0 101.06 1.06L13 9.06l1.72 1.72a.75.75 0 101.06-1.06L14.06 8l1.72-1.72a.75.75 0 00-1.06-1.06L13 6.94l-1.72-1.72z"></path></svg>
Stop ignoring
</span>
</div>
</button>
</div>
</details-menu>
</details>
<a class="social-count js-social-count"
href="/Novia-2018/Intel-Image-Classification-Using-CNN/watchers"
aria-label="1 user is watching this repository">
1
</a>
</form>
</li>
<li>
<div class="js-toggler-container js-social-container starring-container ">
<form class="starred js-social-form" action="/Novia-2018/Intel-Image-Classification-Using-CNN/unstar" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="NVb/g8hu12NSc0e7TdDwauHNkC7jBZQpt4DKkfhv2kVJlQ+ZUrIr0a9J7w43Jb6qSD6V9mDZT1+Gi9NVcF5H1Q==" />
<input type="hidden" name="context" value="repository"></input>
<button type="submit" class="btn btn-sm btn-with-count js-toggler-target" aria-label="Unstar this repository" title="Unstar Novia-2018/Intel-Image-Classification-Using-CNN" data-hydro-click="{"event_type":"repository.click","payload":{"target":"UNSTAR_BUTTON","repository_id":274914883,"originating_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","user_id":56831817}}" data-hydro-click-hmac="a8e60ba436adae1d1ba910268dd209e5f2bf9e55cad0fb8c22416349b1f1e0bf" data-ga-click="Repository, click unstar button, action:blob#show; text:Unstar"> <svg height="16" class="octicon octicon-star-fill" viewBox="0 0 16 16" version="1.1" width="16" aria-hidden="true"><path fill-rule="evenodd" d="M8 .25a.75.75 0 01.673.418l1.882 3.815 4.21.612a.75.75 0 01.416 1.279l-3.046 2.97.719 4.192a.75.75 0 01-1.088.791L8 12.347l-3.766 1.98a.75.75 0 01-1.088-.79l.72-4.194L.818 6.374a.75.75 0 01.416-1.28l4.21-.611L7.327.668A.75.75 0 018 .25z"></path></svg>
Unstar
</button> <a class="social-count js-social-count" href="/Novia-2018/Intel-Image-Classification-Using-CNN/stargazers"
aria-label="0 users starred this repository">
0
</a>
</form>
<form class="unstarred js-social-form" action="/Novia-2018/Intel-Image-Classification-Using-CNN/star" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="y6+SAMZfRboCszyydEyZlhEHV2/k19VWvpkpo+4EyrRYEBRO5kB/nos6IHmeoprKE0ijr0hBKET4CkLV8VY4ng==" />
<input type="hidden" name="context" value="repository"></input>
<button type="submit" class="btn btn-sm btn-with-count js-toggler-target" aria-label="Unstar this repository" title="Star Novia-2018/Intel-Image-Classification-Using-CNN" data-hydro-click="{"event_type":"repository.click","payload":{"target":"STAR_BUTTON","repository_id":274914883,"originating_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","user_id":56831817}}" data-hydro-click-hmac="8c237e9432ee3a5a0d56183ae3b4d6cdbf27695c7ada7850ffdfd460881e64a9" data-ga-click="Repository, click star button, action:blob#show; text:Star"> <svg height="16" class="octicon octicon-star" viewBox="0 0 16 16" version="1.1" width="16" aria-hidden="true"><path fill-rule="evenodd" d="M8 .25a.75.75 0 01.673.418l1.882 3.815 4.21.612a.75.75 0 01.416 1.279l-3.046 2.97.719 4.192a.75.75 0 01-1.088.791L8 12.347l-3.766 1.98a.75.75 0 01-1.088-.79l.72-4.194L.818 6.374a.75.75 0 01.416-1.28l4.21-.611L7.327.668A.75.75 0 018 .25zm0 2.445L6.615 5.5a.75.75 0 01-.564.41l-3.097.45 2.24 2.184a.75.75 0 01.216.664l-.528 3.084 2.769-1.456a.75.75 0 01.698 0l2.77 1.456-.53-3.084a.75.75 0 01.216-.664l2.24-2.183-3.096-.45a.75.75 0 01-.564-.41L8 2.694v.001z"></path></svg>
Star
</button> <a class="social-count js-social-count" href="/Novia-2018/Intel-Image-Classification-Using-CNN/stargazers"
aria-label="0 users starred this repository">
0
</a>
</form> </div>
</li>
<li>
<span class="btn btn-sm btn-with-count disabled tooltipped tooltipped-sw" aria-label="Cannot fork because you own this repository and are not a member of any organizations.">
<svg class="octicon octicon-repo-forked" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M5 3.25a.75.75 0 11-1.5 0 .75.75 0 011.5 0zm0 2.122a2.25 2.25 0 10-1.5 0v.878A2.25 2.25 0 005.75 8.5h1.5v2.128a2.251 2.251 0 101.5 0V8.5h1.5a2.25 2.25 0 002.25-2.25v-.878a2.25 2.25 0 10-1.5 0v.878a.75.75 0 01-.75.75h-4.5A.75.75 0 015 6.25v-.878zm3.75 7.378a.75.75 0 11-1.5 0 .75.75 0 011.5 0zm3-8.75a.75.75 0 100-1.5.75.75 0 000 1.5z"></path></svg>
Fork
</span>
<a href="/Novia-2018/Intel-Image-Classification-Using-CNN/network/members" class="social-count"
aria-label="0 users forked this repository">
0
</a>
</li>
</ul>
</div>
<nav class="UnderlineNav js-repo-nav js-sidenav-container-pjax js-responsive-underlinenav overflow-hidden px-3 px-md-4 px-lg-5 bg-gray-light" aria-label="Repository" data-pjax="#js-repo-pjax-container">
<ul class="UnderlineNav-body list-style-none ">
<li class="d-flex">
<a class="js-selected-navigation-item selected UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item" data-tab-item="code-tab" data-hotkey="g c" data-ga-click="Repository, Navigation click, Code tab" aria-current="page" data-selected-links="repo_source repo_downloads repo_commits repo_releases repo_tags repo_branches repo_packages repo_deployments /Novia-2018/Intel-Image-Classification-Using-CNN" href="/Novia-2018/Intel-Image-Classification-Using-CNN">
<svg height="16" class="octicon octicon-code UnderlineNav-octicon d-none d-sm-inline" class_names="UnderlineNav-octicon" display="none inline" viewBox="0 0 16 16" version="1.1" width="16" aria-hidden="true"><path fill-rule="evenodd" d="M4.72 3.22a.75.75 0 011.06 1.06L2.06 8l3.72 3.72a.75.75 0 11-1.06 1.06L.47 8.53a.75.75 0 010-1.06l4.25-4.25zm6.56 0a.75.75 0 10-1.06 1.06L13.94 8l-3.72 3.72a.75.75 0 101.06 1.06l4.25-4.25a.75.75 0 000-1.06l-4.25-4.25z"></path></svg>
<span data-content="Code">Code</span>
</a> </li>
<li class="d-flex">
<a class="js-selected-navigation-item UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item" data-tab-item="issues-tab" data-hotkey="g i" data-ga-click="Repository, Navigation click, Issues tab" data-selected-links="repo_issues repo_labels repo_milestones /Novia-2018/Intel-Image-Classification-Using-CNN/issues" href="/Novia-2018/Intel-Image-Classification-Using-CNN/issues">
<svg height="16" class="octicon octicon-issue-opened UnderlineNav-octicon d-none d-sm-inline" class_names="UnderlineNav-octicon" display="none inline" viewBox="0 0 16 16" version="1.1" width="16" aria-hidden="true"><path fill-rule="evenodd" d="M8 1.5a6.5 6.5 0 100 13 6.5 6.5 0 000-13zM0 8a8 8 0 1116 0A8 8 0 010 8zm9 3a1 1 0 11-2 0 1 1 0 012 0zm-.25-6.25a.75.75 0 00-1.5 0v3.5a.75.75 0 001.5 0v-3.5z"></path></svg>
<span data-content="Issues">Issues</span>
</a> </li>
<li class="d-flex">
<a class="js-selected-navigation-item UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item" data-tab-item="pull-requests-tab" data-hotkey="g p" data-ga-click="Repository, Navigation click, Pull requests tab" data-selected-links="repo_pulls checks /Novia-2018/Intel-Image-Classification-Using-CNN/pulls" href="/Novia-2018/Intel-Image-Classification-Using-CNN/pulls">
<svg height="16" class="octicon octicon-git-pull-request UnderlineNav-octicon d-none d-sm-inline" class_names="UnderlineNav-octicon" display="none inline" viewBox="0 0 16 16" version="1.1" width="16" aria-hidden="true"><path fill-rule="evenodd" d="M7.177 3.073L9.573.677A.25.25 0 0110 .854v4.792a.25.25 0 01-.427.177L7.177 3.427a.25.25 0 010-.354zM3.75 2.5a.75.75 0 100 1.5.75.75 0 000-1.5zm-2.25.75a2.25 2.25 0 113 2.122v5.256a2.251 2.251 0 11-1.5 0V5.372A2.25 2.25 0 011.5 3.25zM11 2.5h-1V4h1a1 1 0 011 1v5.628a2.251 2.251 0 101.5 0V5A2.5 2.5 0 0011 2.5zm1 10.25a.75.75 0 111.5 0 .75.75 0 01-1.5 0zM3.75 12a.75.75 0 100 1.5.75.75 0 000-1.5z"></path></svg>
<span data-content="Pull requests">Pull requests</span>
</a> </li>
<li class="d-flex">
<a class="js-selected-navigation-item UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item" data-tab-item="actions-tab" data-hotkey="g w" data-ga-click="Repository, Navigation click, Actions tab" data-selected-links="repo_actions /Novia-2018/Intel-Image-Classification-Using-CNN/actions" href="/Novia-2018/Intel-Image-Classification-Using-CNN/actions">
<svg height="16" class="octicon octicon-play UnderlineNav-octicon d-none d-sm-inline" class_names="UnderlineNav-octicon" display="none inline" viewBox="0 0 16 16" version="1.1" width="16" aria-hidden="true"><path fill-rule="evenodd" d="M1.5 8a6.5 6.5 0 1113 0 6.5 6.5 0 01-13 0zM8 0a8 8 0 100 16A8 8 0 008 0zM6.379 5.227A.25.25 0 006 5.442v5.117a.25.25 0 00.379.214l4.264-2.559a.25.25 0 000-.428L6.379 5.227z"></path></svg>
<span data-content="Actions">Actions</span>
</a> </li>
<li class="d-flex">
<a class="js-selected-navigation-item UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item" data-tab-item="projects-tab" data-hotkey="g b" data-ga-click="Repository, Navigation click, Projects tab" data-selected-links="repo_projects new_repo_project repo_project /Novia-2018/Intel-Image-Classification-Using-CNN/projects" href="/Novia-2018/Intel-Image-Classification-Using-CNN/projects">
<svg height="16" class="octicon octicon-project UnderlineNav-octicon d-none d-sm-inline" class_names="UnderlineNav-octicon" display="none inline" viewBox="0 0 16 16" version="1.1" width="16" aria-hidden="true"><path fill-rule="evenodd" d="M1.75 0A1.75 1.75 0 000 1.75v12.5C0 15.216.784 16 1.75 16h12.5A1.75 1.75 0 0016 14.25V1.75A1.75 1.75 0 0014.25 0H1.75zM1.5 1.75a.25.25 0 01.25-.25h12.5a.25.25 0 01.25.25v12.5a.25.25 0 01-.25.25H1.75a.25.25 0 01-.25-.25V1.75zM11.75 3a.75.75 0 00-.75.75v7.5a.75.75 0 001.5 0v-7.5a.75.75 0 00-.75-.75zm-8.25.75a.75.75 0 011.5 0v5.5a.75.75 0 01-1.5 0v-5.5zM8 3a.75.75 0 00-.75.75v3.5a.75.75 0 001.5 0v-3.5A.75.75 0 008 3z"></path></svg>
<span data-content="Projects">Projects</span>
</a> </li>
<li class="d-flex">
<a class="js-selected-navigation-item UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item" data-tab-item="wiki-tab" data-ga-click="Repository, Navigation click, Wikis tab" data-selected-links="repo_wiki /Novia-2018/Intel-Image-Classification-Using-CNN/wiki" href="/Novia-2018/Intel-Image-Classification-Using-CNN/wiki">
<svg height="16" class="octicon octicon-book UnderlineNav-octicon d-none d-sm-inline" class_names="UnderlineNav-octicon" display="none inline" viewBox="0 0 16 16" version="1.1" width="16" aria-hidden="true"><path fill-rule="evenodd" d="M0 1.75A.75.75 0 01.75 1h4.253c1.227 0 2.317.59 3 1.501A3.744 3.744 0 0111.006 1h4.245a.75.75 0 01.75.75v10.5a.75.75 0 01-.75.75h-4.507a2.25 2.25 0 00-1.591.659l-.622.621a.75.75 0 01-1.06 0l-.622-.621A2.25 2.25 0 005.258 13H.75a.75.75 0 01-.75-.75V1.75zm8.755 3a2.25 2.25 0 012.25-2.25H14.5v9h-3.757c-.71 0-1.4.201-1.992.572l.004-7.322zm-1.504 7.324l.004-5.073-.002-2.253A2.25 2.25 0 005.003 2.5H1.5v9h3.757a3.75 3.75 0 011.994.574z"></path></svg>
<span data-content="Wiki">Wiki</span>
</a> </li>
<li class="d-flex">
<a class="js-selected-navigation-item UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item" data-tab-item="security-tab" data-hotkey="g s" data-ga-click="Repository, Navigation click, Security tab" data-selected-links="security overview alerts policy token_scanning code_scanning /Novia-2018/Intel-Image-Classification-Using-CNN/security" href="/Novia-2018/Intel-Image-Classification-Using-CNN/security">
<svg height="16" class="octicon octicon-shield UnderlineNav-octicon d-none d-sm-inline" class_names="UnderlineNav-octicon" display="none inline" viewBox="0 0 16 16" version="1.1" width="16" aria-hidden="true"><path fill-rule="evenodd" d="M7.467.133a1.75 1.75 0 011.066 0l5.25 1.68A1.75 1.75 0 0115 3.48V7c0 1.566-.32 3.182-1.303 4.682-.983 1.498-2.585 2.813-5.032 3.855a1.7 1.7 0 01-1.33 0c-2.447-1.042-4.049-2.357-5.032-3.855C1.32 10.182 1 8.566 1 7V3.48a1.75 1.75 0 011.217-1.667l5.25-1.68zm.61 1.429a.25.25 0 00-.153 0l-5.25 1.68a.25.25 0 00-.174.238V7c0 1.358.275 2.666 1.057 3.86.784 1.194 2.121 2.34 4.366 3.297a.2.2 0 00.154 0c2.245-.956 3.582-2.104 4.366-3.298C13.225 9.666 13.5 8.36 13.5 7V3.48a.25.25 0 00-.174-.237l-5.25-1.68zM9 10.5a1 1 0 11-2 0 1 1 0 012 0zm-.25-5.75a.75.75 0 10-1.5 0v3a.75.75 0 001.5 0v-3z"></path></svg>
<span data-content="Security">Security</span>
<span class="Counter js-security-tab-count " data-url="/Novia-2018/Intel-Image-Classification-Using-CNN/security/overall-count" hidden="hidden">1</span>
</a> </li>
<li class="d-flex">
<a class="js-selected-navigation-item UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item" data-tab-item="insights-tab" data-ga-click="Repository, Navigation click, Insights tab" data-selected-links="repo_graphs repo_contributors dependency_graph dependabot_updates pulse people /Novia-2018/Intel-Image-Classification-Using-CNN/pulse" href="/Novia-2018/Intel-Image-Classification-Using-CNN/pulse">
<svg height="16" class="octicon octicon-graph UnderlineNav-octicon d-none d-sm-inline" class_names="UnderlineNav-octicon" display="none inline" viewBox="0 0 16 16" version="1.1" width="16" aria-hidden="true"><path fill-rule="evenodd" d="M1.5 1.75a.75.75 0 00-1.5 0v12.5c0 .414.336.75.75.75h14.5a.75.75 0 000-1.5H1.5V1.75zm14.28 2.53a.75.75 0 00-1.06-1.06L10 7.94 7.53 5.47a.75.75 0 00-1.06 0L3.22 8.72a.75.75 0 001.06 1.06L7 7.06l2.47 2.47a.75.75 0 001.06 0l5.25-5.25z"></path></svg>
<span data-content="Insights">Insights</span>
</a> </li>
<li class="d-flex">
<a class="js-selected-navigation-item UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item" data-tab-item="settings-tab" data-ga-click="Repository, Navigation click, Settings tab" data-selected-links="repo_settings repo_branch_settings hooks integration_installations repo_keys_settings issue_template_editor secrets_settings key_links_settings repo_actions_settings notifications /Novia-2018/Intel-Image-Classification-Using-CNN/settings" href="/Novia-2018/Intel-Image-Classification-Using-CNN/settings">
<svg height="16" class="octicon octicon-gear UnderlineNav-octicon d-none d-sm-inline" class_names="UnderlineNav-octicon" display="none inline" viewBox="0 0 16 16" version="1.1" width="16" aria-hidden="true"><path fill-rule="evenodd" d="M7.429 1.525a6.593 6.593 0 011.142 0c.036.003.108.036.137.146l.289 1.105c.147.56.55.967.997 1.189.174.086.341.183.501.29.417.278.97.423 1.53.27l1.102-.303c.11-.03.175.016.195.046.219.31.41.641.573.989.014.031.022.11-.059.19l-.815.806c-.411.406-.562.957-.53 1.456a4.588 4.588 0 010 .582c-.032.499.119 1.05.53 1.456l.815.806c.08.08.073.159.059.19a6.494 6.494 0 01-.573.99c-.02.029-.086.074-.195.045l-1.103-.303c-.559-.153-1.112-.008-1.529.27-.16.107-.327.204-.5.29-.449.222-.851.628-.998 1.189l-.289 1.105c-.029.11-.101.143-.137.146a6.613 6.613 0 01-1.142 0c-.036-.003-.108-.037-.137-.146l-.289-1.105c-.147-.56-.55-.967-.997-1.189a4.502 4.502 0 01-.501-.29c-.417-.278-.97-.423-1.53-.27l-1.102.303c-.11.03-.175-.016-.195-.046a6.492 6.492 0 01-.573-.989c-.014-.031-.022-.11.059-.19l.815-.806c.411-.406.562-.957.53-1.456a4.587 4.587 0 010-.582c.032-.499-.119-1.05-.53-1.456l-.815-.806c-.08-.08-.073-.159-.059-.19a6.44 6.44 0 01.573-.99c.02-.029.086-.075.195-.045l1.103.303c.559.153 1.112.008 1.529-.27.16-.107.327-.204.5-.29.449-.222.851-.628.998-1.189l.289-1.105c.029-.11.101-.143.137-.146zM8 0c-.236 0-.47.01-.701.03-.743.065-1.29.615-1.458 1.261l-.29 1.106c-.017.066-.078.158-.211.224a5.994 5.994 0 00-.668.386c-.123.082-.233.09-.3.071L3.27 2.776c-.644-.177-1.392.02-1.82.63a7.977 7.977 0 00-.704 1.217c-.315.675-.111 1.422.363 1.891l.815.806c.05.048.098.147.088.294a6.084 6.084 0 000 .772c.01.147-.038.246-.088.294l-.815.806c-.474.469-.678 1.216-.363 1.891.2.428.436.835.704 1.218.428.609 1.176.806 1.82.63l1.103-.303c.066-.019.176-.011.299.071.213.143.436.272.668.386.133.066.194.158.212.224l.289 1.106c.169.646.715 1.196 1.458 1.26a8.094 8.094 0 001.402 0c.743-.064 1.29-.614 1.458-1.26l.29-1.106c.017-.066.078-.158.211-.224a5.98 5.98 0 
00.668-.386c.123-.082.233-.09.3-.071l1.102.302c.644.177 1.392-.02 1.82-.63.268-.382.505-.789.704-1.217.315-.675.111-1.422-.364-1.891l-.814-.806c-.05-.048-.098-.147-.088-.294a6.1 6.1 0 000-.772c-.01-.147.039-.246.088-.294l.814-.806c.475-.469.679-1.216.364-1.891a7.992 7.992 0 00-.704-1.218c-.428-.609-1.176-.806-1.82-.63l-1.103.303c-.066.019-.176.011-.299-.071a5.991 5.991 0 00-.668-.386c-.133-.066-.194-.158-.212-.224L10.16 1.29C9.99.645 9.444.095 8.701.031A8.094 8.094 0 008 0zm1.5 8a1.5 1.5 0 11-3 0 1.5 1.5 0 013 0zM11 8a3 3 0 11-6 0 3 3 0 016 0z"></path></svg>
<span data-content="Settings">Settings</span>
</a> </li>
</ul>
<div class="position-absolute right-0 pr-3 pr-md-4 pr-lg-5 js-responsive-underlinenav-overflow" style="visibility:hidden;">
<details class="details-overlay details-reset position-relative">
<summary role="button">
<div class="UnderlineNav-item mr-0 border-0">
<svg class="octicon octicon-kebab-horizontal" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path d="M8 9a1.5 1.5 0 100-3 1.5 1.5 0 000 3zM1.5 9a1.5 1.5 0 100-3 1.5 1.5 0 000 3zm13 0a1.5 1.5 0 100-3 1.5 1.5 0 000 3z"></path></svg>
<span class="sr-only">More</span>
</div>
</summary>
<details-menu class="dropdown-menu dropdown-menu-sw " role="menu">
<ul>
<li data-menu-item="code-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links=" /Novia-2018/Intel-Image-Classification-Using-CNN" href="/Novia-2018/Intel-Image-Classification-Using-CNN">
Code
</a> </li>
<li data-menu-item="issues-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links=" /Novia-2018/Intel-Image-Classification-Using-CNN/issues" href="/Novia-2018/Intel-Image-Classification-Using-CNN/issues">
Issues
</a> </li>
<li data-menu-item="pull-requests-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links=" /Novia-2018/Intel-Image-Classification-Using-CNN/pulls" href="/Novia-2018/Intel-Image-Classification-Using-CNN/pulls">
Pull requests
</a> </li>
<li data-menu-item="actions-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links=" /Novia-2018/Intel-Image-Classification-Using-CNN/actions" href="/Novia-2018/Intel-Image-Classification-Using-CNN/actions">
Actions
</a> </li>
<li data-menu-item="projects-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links=" /Novia-2018/Intel-Image-Classification-Using-CNN/projects" href="/Novia-2018/Intel-Image-Classification-Using-CNN/projects">
Projects
</a> </li>
<li data-menu-item="wiki-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links=" /Novia-2018/Intel-Image-Classification-Using-CNN/wiki" href="/Novia-2018/Intel-Image-Classification-Using-CNN/wiki">
Wiki
</a> </li>
<li data-menu-item="security-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links=" /Novia-2018/Intel-Image-Classification-Using-CNN/security" href="/Novia-2018/Intel-Image-Classification-Using-CNN/security">
Security
</a> </li>
<li data-menu-item="insights-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links=" /Novia-2018/Intel-Image-Classification-Using-CNN/pulse" href="/Novia-2018/Intel-Image-Classification-Using-CNN/pulse">
Insights
</a> </li>
<li data-menu-item="settings-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links=" /Novia-2018/Intel-Image-Classification-Using-CNN/settings" href="/Novia-2018/Intel-Image-Classification-Using-CNN/settings">
Settings
</a> </li>
</ul>
</details-menu>
</details>
</div>
</nav>
</div>
<div class="container-xl clearfix new-discussion-timeline px-3 px-md-4 px-lg-5">
<div class="repository-content ">
<a class="d-none js-permalink-shortcut" data-hotkey="y" href="/Novia-2018/Intel-Image-Classification-Using-CNN/blob/87e3d7360c13ba5b2c4c12ee85c939902dfbd422/src/Intel_Project%20(1).py">Permalink</a>
<!-- blob contrib key: blob_contributors:v22:7516945cbabace70346afbf5a43f7172 -->
<div class="d-flex flex-items-center flex-shrink-0 pb-3 flex-wrap flex-justify-between flex-md-justify-start">
<details class="details-reset details-overlay mr-0 mb-0 " id="branch-select-menu">
<summary class="btn css-truncate"
data-hotkey="w"
title="Switch branches or tags">
<svg height="16" class="octicon octicon-git-branch text-gray" text="gray" viewBox="0 0 16 16" version="1.1" width="16" aria-hidden="true"><path fill-rule="evenodd" d="M11.75 2.5a.75.75 0 100 1.5.75.75 0 000-1.5zm-2.25.75a2.25 2.25 0 113 2.122V6A2.5 2.5 0 0110 8.5H6a1 1 0 00-1 1v1.128a2.251 2.251 0 11-1.5 0V5.372a2.25 2.25 0 111.5 0v1.836A2.492 2.492 0 016 7h4a1 1 0 001-1v-.628A2.25 2.25 0 019.5 3.25zM4.25 12a.75.75 0 100 1.5.75.75 0 000-1.5zM3.5 3.25a.75.75 0 111.5 0 .75.75 0 01-1.5 0z"></path></svg>
<i class="d-none d-lg-inline">Branch:</i>
<span class="css-truncate-target" data-menu-button>master</span>
<span class="dropdown-caret"></span>
</summary>
<details-menu class="SelectMenu SelectMenu--hasFilter" src="/Novia-2018/Intel-Image-Classification-Using-CNN/refs/master/src/Intel_Project%20(1).py?source_action=show&source_controller=blob" preload>
<div class="SelectMenu-modal">
<include-fragment class="SelectMenu-loading" aria-label="Menu is loading">
<svg class="octicon octicon-octoface anim-pulse" height="32" viewBox="0 0 16 16" version="1.1" width="32" aria-hidden="true"><path fill-rule="evenodd" d="M14.7 5.34c.13-.32.55-1.59-.13-3.31 0 0-1.05-.33-3.44 1.3-1-.28-2.07-.32-3.13-.32s-2.13.04-3.13.32c-2.39-1.64-3.44-1.3-3.44-1.3-.68 1.72-.26 2.99-.13 3.31C.49 6.21 0 7.33 0 8.69 0 13.84 3.33 15 7.98 15S16 13.84 16 8.69c0-1.36-.49-2.48-1.3-3.35zM8 14.02c-3.3 0-5.98-.15-5.98-3.35 0-.76.38-1.48 1.02-2.07 1.07-.98 2.9-.46 4.96-.46 2.07 0 3.88-.52 4.96.46.65.59 1.02 1.3 1.02 2.07 0 3.19-2.68 3.35-5.98 3.35zM5.49 9.01c-.66 0-1.2.8-1.2 1.78s.54 1.79 1.2 1.79c.66 0 1.2-.8 1.2-1.79s-.54-1.78-1.2-1.78zm5.02 0c-.66 0-1.2.79-1.2 1.78s.54 1.79 1.2 1.79c.66 0 1.2-.8 1.2-1.79s-.53-1.78-1.2-1.78z"></path></svg>
</include-fragment>
</div>
</details-menu>
</details>
<h2 id="blob-path" class="breadcrumb flex-auto min-width-0 text-normal mx-0 mx-md-3 width-full width-md-auto flex-order-1 flex-md-order-none mt-3 mt-md-0">
<span class="js-repo-root text-bold"><span class="js-path-segment d-inline-block wb-break-all"><a data-pjax="true" href="/Novia-2018/Intel-Image-Classification-Using-CNN"><span>Intel-Image-Classification-Using-CNN</span></a></span></span><span class="separator">/</span><span class="js-path-segment d-inline-block wb-break-all"><a data-pjax="true" href="/Novia-2018/Intel-Image-Classification-Using-CNN/tree/master/src"><span>src</span></a></span><span class="separator">/</span><strong class="final-path">Intel_Project (1).py</strong>
</h2>
<a href="/Novia-2018/Intel-Image-Classification-Using-CNN/find/master"
class="js-pjax-capture-input btn mr-2 d-none d-md-block"
data-pjax
data-hotkey="t">
Go to file
</a>
<details class="flex-self-end details-overlay details-reset position-relative" id="blob-more-options-details">
<summary role="button">
<span class="btn">
<svg height="16" class="octicon octicon-kebab-horizontal" aria-label="More options" viewBox="0 0 16 16" version="1.1" width="16" role="img"><path d="M8 9a1.5 1.5 0 100-3 1.5 1.5 0 000 3zM1.5 9a1.5 1.5 0 100-3 1.5 1.5 0 000 3zm13 0a1.5 1.5 0 100-3 1.5 1.5 0 000 3z"></path></svg>
</span>
</summary>
<ul class="dropdown-menu dropdown-menu-sw">
<li class="d-block d-md-none">
<a class="dropdown-item d-flex flex-items-baseline" data-hydro-click="{"event_type":"repository.click","payload":{"target":"FIND_FILE_BUTTON","repository_id":274914883,"originating_url":"https://github.com/Novia-2018/Intel-Image-Classification-Using-CNN/blob/master/src/Intel_Project%20(1).py","user_id":56831817}}" data-hydro-click-hmac="41e40d790f85c273c71df7591af7e2a3b53d56d4c50721c802fa2403f52c905d" data-ga-click="Repository, find file, location:repo overview" data-hotkey="t" data-pjax="true" href="/Novia-2018/Intel-Image-Classification-Using-CNN/find/master">
<span class="flex-auto">Go to file</span>
<span class="text-small text-gray" aria-hidden="true">T</span>
</a> </li>
<li data-toggle-for="blob-more-options-details">
<button type="button" data-toggle-for="jumpto-line-details-dialog" class="btn-link dropdown-item">
<span class="d-flex flex-items-baseline">
<span class="flex-auto">Go to line</span>
<span class="text-small text-gray" aria-hidden="true">L</span>
</span>
</button>
</li>
<li class="dropdown-divider" role="none"></li>
<li>
<clipboard-copy value="src/Intel_Project (1).py" class="dropdown-item cursor-pointer" data-toggle-for="blob-more-options-details">
Copy path
</clipboard-copy>
</li>
</ul>
</details>
</div>
<div class="Box d-flex flex-column flex-shrink-0 mb-3">
<include-fragment src="/Novia-2018/Intel-Image-Classification-Using-CNN/contributors/master/src/Intel_Project%20(1).py" class="commit-loader">
<div class="Box-header Box-header--blue d-flex flex-items-center">
<div class="Skeleton avatar avatar-user flex-shrink-0 ml-n1 mr-n1 mt-n1 mb-n1" style="width:24px;height:24px;"></div>
<div class="Skeleton Skeleton--text col-5 ml-2"> </div>
</div>
<div class="Box-body d-flex flex-items-center" >
<div class="Skeleton Skeleton--text col-1"> </div>
<span class="text-red h6 loader-error">Cannot retrieve contributors at this time</span>
</div>
</include-fragment> </div>
<div class="Box mt-3 position-relative
">
<div class="Box-header py-2 d-flex flex-column flex-shrink-0 flex-md-row flex-md-items-center">
<div class="text-mono f6 flex-auto pr-3 flex-order-2 flex-md-order-1 mt-2 mt-md-0">
460 lines (266 sloc)
<span class="file-info-divider"></span>
8.28 KB
</div>
<div class="d-flex py-1 py-md-0 flex-auto flex-order-1 flex-md-order-2 flex-sm-grow-0 flex-justify-between">
<div class="BtnGroup">
<a id="raw-url" class="btn btn-sm BtnGroup-item" href="/Novia-2018/Intel-Image-Classification-Using-CNN/raw/master/src/Intel_Project%20(1).py">Raw</a>
<a class="btn btn-sm js-update-url-with-hash BtnGroup-item" data-hotkey="b" href="/Novia-2018/Intel-Image-Classification-Using-CNN/blame/master/src/Intel_Project%20(1).py">Blame</a>
</div>
<div>
<a class="btn-octicon tooltipped tooltipped-nw js-remove-unless-platform"
data-platforms="windows,mac"
href="https://desktop.github.com"
aria-label="Open this file in GitHub Desktop"
data-ga-click="Repository, open with desktop">
<svg class="octicon octicon-device-desktop" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M1.75 2.5h12.5a.25.25 0 01.25.25v7.5a.25.25 0 01-.25.25H1.75a.25.25 0 01-.25-.25v-7.5a.25.25 0 01.25-.25zM14.25 1H1.75A1.75 1.75 0 000 2.75v7.5C0 11.216.784 12 1.75 12h3.727c-.1 1.041-.52 1.872-1.292 2.757A.75.75 0 004.75 16h6.5a.75.75 0 00.565-1.243c-.772-.885-1.193-1.716-1.292-2.757h3.727A1.75 1.75 0 0016 10.25v-7.5A1.75 1.75 0 0014.25 1zM9.018 12H6.982a5.72 5.72 0 01-.765 2.5h3.566a5.72 5.72 0 01-.765-2.5z"></path></svg>
</a>
<!-- '"` --><!-- </textarea></xmp> --></option></form><form class="inline-form js-update-url-with-hash" action="/Novia-2018/Intel-Image-Classification-Using-CNN/edit/master/src/Intel_Project%20(1).py" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="3W181oMJs/p5Npv5EnvJA8UIk6301MiB5NsOEA9+/7LiMR5cbKaK2j8L13aZ2JpcFOytnuPyoEQWw5GR3n0tWg==" />
<button class="btn-octicon tooltipped tooltipped-nw" type="submit"
aria-label="Edit this file" data-hotkey="e" data-disable-with>
<svg class="octicon octicon-pencil" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M11.013 1.427a1.75 1.75 0 012.474 0l1.086 1.086a1.75 1.75 0 010 2.474l-8.61 8.61c-.21.21-.47.364-.756.445l-3.251.93a.75.75 0 01-.927-.928l.929-3.25a1.75 1.75 0 01.445-.758l8.61-8.61zm1.414 1.06a.25.25 0 00-.354 0L10.811 3.75l1.439 1.44 1.263-1.263a.25.25 0 000-.354l-1.086-1.086zM11.189 6.25L9.75 4.81l-6.286 6.287a.25.25 0 00-.064.108l-.558 1.953 1.953-.558a.249.249 0 00.108-.064l6.286-6.286z"></path></svg>
</button>
</form>
<!-- '"` --><!-- </textarea></xmp> --></option></form><form class="inline-form" action="/Novia-2018/Intel-Image-Classification-Using-CNN/delete/master/src/Intel_Project%20(1).py" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="49tqkIezK4346J7VQG+3Y3+MHe1P47t8ghVi8mrxdmbhxUslFYU72kMib3upB8TjYdj8yPPGquyx0L/B9sfHaQ==" />
<button class="btn-octicon btn-octicon-danger tooltipped tooltipped-nw" type="submit"
aria-label="Delete this file" data-disable-with>
<svg class="octicon octicon-trashcan" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M6.5 1.75a.25.25 0 01.25-.25h2.5a.25.25 0 01.25.25V3h-3V1.75zm4.5 0V3h2.25a.75.75 0 010 1.5H2.75a.75.75 0 010-1.5H5V1.75C5 .784 5.784 0 6.75 0h2.5C10.216 0 11 .784 11 1.75zM4.496 6.675a.75.75 0 10-1.492.15l.66 6.6A1.75 1.75 0 005.405 15h5.19c.9 0 1.652-.681 1.741-1.576l.66-6.6a.75.75 0 00-1.492-.149l-.66 6.6a.25.25 0 01-.249.225h-5.19a.25.25 0 01-.249-.225l-.66-6.6z"></path></svg>
</button>
</form> </div>
</div>
</div>
<div itemprop="text" class="Box-body p-0 blob-wrapper data type-python ">
<table class="highlight tab-size js-file-line-container" data-tab-size="8" data-paste-markdown-skip>
<tr>
<td id="L1" class="blob-num js-line-number" data-line-number="1"></td>
<td id="LC1" class="blob-code blob-code-inner js-file-line"><span class=pl-c>#!/usr/bin/env python</span></td>
</tr>
<tr>
<td id="L2" class="blob-num js-line-number" data-line-number="2"></td>
<td id="LC2" class="blob-code blob-code-inner js-file-line"><span class=pl-c># coding: utf-8</span></td>
</tr>
<tr>
<td id="L3" class="blob-num js-line-number" data-line-number="3"></td>
<td id="LC3" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L4" class="blob-num js-line-number" data-line-number="4"></td>
<td id="LC4" class="blob-code blob-code-inner js-file-line"><span class=pl-c># # Convolutional Neural Network</span></td>
</tr>
<tr>
<td id="L5" class="blob-num js-line-number" data-line-number="5"></td>
<td id="LC5" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L6" class="blob-num js-line-number" data-line-number="6"></td>
<td id="LC6" class="blob-code blob-code-inner js-file-line"><span class=pl-c># ### Installing the libraries</span></td>
</tr>
<tr>
<td id="L7" class="blob-num js-line-number" data-line-number="7"></td>
<td id="LC7" class="blob-code blob-code-inner js-file-line">!p<span class=pl-s1>ip</span> <span class=pl-s1>install</span> <span class=pl-s1>tensorflow</span>!p<span class=pl-s1>ip</span> <span class=pl-s1>install</span> <span class=pl-s1>keras</span></td>
</tr>
<tr>
<td id="L8" class="blob-num js-line-number" data-line-number="8"></td>
<td id="LC8" class="blob-code blob-code-inner js-file-line">!p<span class=pl-s1>ip</span> <span class=pl-s1>install</span> <span class=pl-s1>opencv</span><span class=pl-c1>-</span><span class=pl-s1>python</span></td>
</tr>
<tr>
<td id="L9" class="blob-num js-line-number" data-line-number="9"></td>
<td id="LC9" class="blob-code blob-code-inner js-file-line"><span class=pl-c># ## Importing The Libraries</span></td>
</tr>
<tr>
<td id="L10" class="blob-num js-line-number" data-line-number="10"></td>
<td id="LC10" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L11" class="blob-num js-line-number" data-line-number="11"></td>
<td id="LC11" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[213]:</span></td>
</tr>
<tr>
<td id="L12" class="blob-num js-line-number" data-line-number="12"></td>
<td id="LC12" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L13" class="blob-num js-line-number" data-line-number="13"></td>
<td id="LC13" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L14" class="blob-num js-line-number" data-line-number="14"></td>
<td id="LC14" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>tensorflow</span> <span class=pl-k>as</span> <span class=pl-s1>tf</span></td>
</tr>
<tr>
<td id="L15" class="blob-num js-line-number" data-line-number="15"></td>
<td id="LC15" class="blob-code blob-code-inner js-file-line"><span class=pl-k>from</span> <span class=pl-s1>keras</span>.<span class=pl-s1>preprocessing</span>.<span class=pl-s1>image</span> <span class=pl-k>import</span> <span class=pl-v>ImageDataGenerator</span></td>
</tr>
<tr>
<td id="L16" class="blob-num js-line-number" data-line-number="16"></td>
<td id="LC16" class="blob-code blob-code-inner js-file-line"><span class=pl-k>from</span> <span class=pl-s1>keras</span>.<span class=pl-s1>preprocessing</span> <span class=pl-k>import</span> <span class=pl-s1>image</span> <span class=pl-k>as</span> <span class=pl-s1>img</span></td>
</tr>
<tr>
<td id="L17" class="blob-num js-line-number" data-line-number="17"></td>
<td id="LC17" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>pandas</span> <span class=pl-k>as</span> <span class=pl-s1>pd</span></td>
</tr>
<tr>
<td id="L18" class="blob-num js-line-number" data-line-number="18"></td>
<td id="LC18" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>matplotlib</span>.<span class=pl-s1>pyplot</span> <span class=pl-k>as</span> <span class=pl-s1>plt</span></td>
</tr>
<tr>
<td id="L19" class="blob-num js-line-number" data-line-number="19"></td>
<td id="LC19" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>numpy</span> <span class=pl-k>as</span> <span class=pl-s1>np</span></td>
</tr>
<tr>
<td id="L20" class="blob-num js-line-number" data-line-number="20"></td>
<td id="LC20" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>tensorflow</span>.<span class=pl-s1>keras</span>.<span class=pl-s1>layers</span> <span class=pl-k>as</span> <span class=pl-v>Layers</span></td>
</tr>
<tr>
<td id="L21" class="blob-num js-line-number" data-line-number="21"></td>
<td id="LC21" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>tensorflow</span>.<span class=pl-s1>keras</span>.<span class=pl-s1>activations</span> <span class=pl-k>as</span> <span class=pl-v>Actications</span></td>
</tr>
<tr>
<td id="L22" class="blob-num js-line-number" data-line-number="22"></td>
<td id="LC22" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>tensorflow</span>.<span class=pl-s1>keras</span>.<span class=pl-s1>models</span> <span class=pl-k>as</span> <span class=pl-v>Models</span></td>
</tr>
<tr>
<td id="L23" class="blob-num js-line-number" data-line-number="23"></td>
<td id="LC23" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>tensorflow</span>.<span class=pl-s1>keras</span>.<span class=pl-s1>optimizers</span> <span class=pl-k>as</span> <span class=pl-v>Optimizer</span></td>
</tr>
<tr>
<td id="L24" class="blob-num js-line-number" data-line-number="24"></td>
<td id="LC24" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>tensorflow</span>.<span class=pl-s1>keras</span>.<span class=pl-s1>metrics</span> <span class=pl-k>as</span> <span class=pl-v>Metrics</span></td>
</tr>
<tr>
<td id="L25" class="blob-num js-line-number" data-line-number="25"></td>
<td id="LC25" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>tensorflow</span>.<span class=pl-s1>keras</span>.<span class=pl-s1>utils</span> <span class=pl-k>as</span> <span class=pl-v>Utils</span></td>
</tr>
<tr>
<td id="L26" class="blob-num js-line-number" data-line-number="26"></td>
<td id="LC26" class="blob-code blob-code-inner js-file-line"><span class=pl-k>from</span> <span class=pl-s1>keras</span>.<span class=pl-s1>utils</span>.<span class=pl-s1>vis_utils</span> <span class=pl-k>import</span> <span class=pl-s1>model_to_dot</span></td>
</tr>
<tr>
<td id="L27" class="blob-num js-line-number" data-line-number="27"></td>
<td id="LC27" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>os</span></td>
</tr>
<tr>
<td id="L28" class="blob-num js-line-number" data-line-number="28"></td>
<td id="LC28" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>matplotlib</span>.<span class=pl-s1>pyplot</span> <span class=pl-k>as</span> <span class=pl-s1>plot</span></td>
</tr>
<tr>
<td id="L29" class="blob-num js-line-number" data-line-number="29"></td>
<td id="LC29" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>cv2</span></td>
</tr>
<tr>
<td id="L30" class="blob-num js-line-number" data-line-number="30"></td>
<td id="LC30" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>numpy</span> <span class=pl-k>as</span> <span class=pl-s1>np</span></td>
</tr>
<tr>
<td id="L31" class="blob-num js-line-number" data-line-number="31"></td>
<td id="LC31" class="blob-code blob-code-inner js-file-line"><span class=pl-k>from</span> <span class=pl-s1>sklearn</span>.<span class=pl-s1>utils</span> <span class=pl-k>import</span> <span class=pl-s1>shuffle</span></td>
</tr>
<tr>
<td id="L32" class="blob-num js-line-number" data-line-number="32"></td>
<td id="LC32" class="blob-code blob-code-inner js-file-line"><span class=pl-k>from</span> <span class=pl-s1>sklearn</span>.<span class=pl-s1>metrics</span> <span class=pl-k>import</span> <span class=pl-s1>confusion_matrix</span> <span class=pl-k>as</span> <span class=pl-v>CM</span></td>
</tr>
<tr>
<td id="L33" class="blob-num js-line-number" data-line-number="33"></td>
<td id="LC33" class="blob-code blob-code-inner js-file-line"><span class=pl-k>from</span> <span class=pl-s1>random</span> <span class=pl-k>import</span> <span class=pl-s1>randint</span></td>
</tr>
<tr>
<td id="L34" class="blob-num js-line-number" data-line-number="34"></td>
<td id="LC34" class="blob-code blob-code-inner js-file-line"><span class=pl-k>from</span> <span class=pl-v>IPython</span>.<span class=pl-s1>display</span> <span class=pl-k>import</span> <span class=pl-v>SVG</span></td>
</tr>
<tr>
<td id="L35" class="blob-num js-line-number" data-line-number="35"></td>
<td id="LC35" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>matplotlib</span>.<span class=pl-s1>gridspec</span> <span class=pl-k>as</span> <span class=pl-s1>gridspec</span></td>
</tr>
<tr>
<td id="L36" class="blob-num js-line-number" data-line-number="36"></td>
<td id="LC36" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L37" class="blob-num js-line-number" data-line-number="37"></td>
<td id="LC37" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L38" class="blob-num js-line-number" data-line-number="38"></td>
<td id="LC38" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[184]:</span></td>
</tr>
<tr>
<td id="L39" class="blob-num js-line-number" data-line-number="39"></td>
<td id="LC39" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L40" class="blob-num js-line-number" data-line-number="40"></td>
<td id="LC40" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L41" class="blob-num js-line-number" data-line-number="41"></td>
<td id="LC41" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>tf</span>.<span class=pl-s1>__version__</span></td>
</tr>
<tr>
<td id="L42" class="blob-num js-line-number" data-line-number="42"></td>
<td id="LC42" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L43" class="blob-num js-line-number" data-line-number="43"></td>
<td id="LC43" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L44" class="blob-num js-line-number" data-line-number="44"></td>
<td id="LC44" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[185]:</span></td>
</tr>
<tr>
<td id="L45" class="blob-num js-line-number" data-line-number="45"></td>
<td id="LC45" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L46" class="blob-num js-line-number" data-line-number="46"></td>
<td id="LC46" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L47" class="blob-num js-line-number" data-line-number="47"></td>
<td id="LC47" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>cv2</span>.<span class=pl-s1>__version__</span></td>
</tr>
<tr>
<td id="L48" class="blob-num js-line-number" data-line-number="48"></td>
<td id="LC48" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L49" class="blob-num js-line-number" data-line-number="49"></td>
<td id="LC49" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L50" class="blob-num js-line-number" data-line-number="50"></td>
<td id="LC50" class="blob-code blob-code-inner js-file-line"><span class=pl-c># # Analising the Data</span></td>
</tr>
<tr>
<td id="L51" class="blob-num js-line-number" data-line-number="51"></td>
<td id="LC51" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L52" class="blob-num js-line-number" data-line-number="52"></td>
<td id="LC52" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[214]:</span></td>
</tr>
<tr>
<td id="L53" class="blob-num js-line-number" data-line-number="53"></td>
<td id="LC53" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L54" class="blob-num js-line-number" data-line-number="54"></td>
<td id="LC54" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L55" class="blob-num js-line-number" data-line-number="55"></td>
<td id="LC55" class="blob-code blob-code-inner js-file-line"><span class=pl-v>X</span> <span class=pl-c1>=</span> []</td>
</tr>
<tr>
<td id="L56" class="blob-num js-line-number" data-line-number="56"></td>
<td id="LC56" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>y</span> <span class=pl-c1>=</span> []</td>
</tr>
<tr>
<td id="L57" class="blob-num js-line-number" data-line-number="57"></td>
<td id="LC57" class="blob-code blob-code-inner js-file-line"><span class=pl-v>IMG_SIZE</span> <span class=pl-c1>=</span> <span class=pl-c1>150</span></td>
</tr>
<tr>
<td id="L58" class="blob-num js-line-number" data-line-number="58"></td>
<td id="LC58" class="blob-code blob-code-inner js-file-line"><span class=pl-v>DIR</span> <span class=pl-c1>=</span> <span class=pl-s>"111880_269359_bundle_archive/seg_train/seg_train"</span></td>
</tr>
<tr>
<td id="L59" class="blob-num js-line-number" data-line-number="59"></td>
<td id="LC59" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>folders</span> <span class=pl-c1>=</span> <span class=pl-s1>os</span>.<span class=pl-en>listdir</span>(<span class=pl-v>DIR</span>)</td>
</tr>
<tr>
<td id="L60" class="blob-num js-line-number" data-line-number="60"></td>
<td id="LC60" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>folders</span></td>
</tr>
<tr>
<td id="L61" class="blob-num js-line-number" data-line-number="61"></td>
<td id="LC61" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L62" class="blob-num js-line-number" data-line-number="62"></td>
<td id="LC62" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L63" class="blob-num js-line-number" data-line-number="63"></td>
<td id="LC63" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[216]:</span></td>
</tr>
<tr>
<td id="L64" class="blob-num js-line-number" data-line-number="64"></td>
<td id="LC64" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L65" class="blob-num js-line-number" data-line-number="65"></td>
<td id="LC65" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L66" class="blob-num js-line-number" data-line-number="66"></td>
<td id="LC66" class="blob-code blob-code-inner js-file-line"><span class=pl-k>for</span> <span class=pl-s1>i</span>, <span class=pl-s1>file</span> <span class=pl-c1>in</span> <span class=pl-en>enumerate</span>(<span class=pl-s1>folders</span>):</td>
</tr>
<tr>
<td id="L67" class="blob-num js-line-number" data-line-number="67"></td>
<td id="LC67" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>filename</span> <span class=pl-c1>=</span> <span class=pl-s1>os</span>.<span class=pl-s1>path</span>.<span class=pl-en>join</span>(<span class=pl-v>DIR</span>, <span class=pl-s1>file</span>)</td>
</tr>
<tr>
<td id="L68" class="blob-num js-line-number" data-line-number="68"></td>
<td id="LC68" class="blob-code blob-code-inner js-file-line"> <span class=pl-en>print</span>(<span class=pl-s>"Folder {} started"</span>.<span class=pl-en>format</span>(<span class=pl-s1>file</span>))</td>
</tr>
<tr>
<td id="L69" class="blob-num js-line-number" data-line-number="69"></td>
<td id="LC69" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>try</span>:</td>
</tr>
<tr>
<td id="L70" class="blob-num js-line-number" data-line-number="70"></td>
<td id="LC70" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>for</span> <span class=pl-s1>img</span> <span class=pl-c1>in</span> <span class=pl-s1>os</span>.<span class=pl-en>listdir</span>(<span class=pl-s1>filename</span>):</td>
</tr>
<tr>
<td id="L71" class="blob-num js-line-number" data-line-number="71"></td>
<td id="LC71" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>path</span> <span class=pl-c1>=</span> <span class=pl-s1>os</span>.<span class=pl-s1>path</span>.<span class=pl-en>join</span>(<span class=pl-s1>filename</span>, <span class=pl-s1>img</span>)</td>
</tr>
<tr>
<td id="L72" class="blob-num js-line-number" data-line-number="72"></td>
<td id="LC72" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>img</span> <span class=pl-c1>=</span> <span class=pl-s1>cv2</span>.<span class=pl-en>imread</span>(<span class=pl-s1>path</span>,<span class=pl-s1>cv2</span>.<span class=pl-v>IMREAD_COLOR</span>)</td>
</tr>
<tr>
<td id="L73" class="blob-num js-line-number" data-line-number="73"></td>
<td id="LC73" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>img</span> <span class=pl-c1>=</span> <span class=pl-s1>cv2</span>.<span class=pl-en>resize</span>(<span class=pl-s1>img</span>, (<span class=pl-v>IMG_SIZE</span>,<span class=pl-v>IMG_SIZE</span>))</td>
</tr>
<tr>
<td id="L74" class="blob-num js-line-number" data-line-number="74"></td>
<td id="LC74" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L75" class="blob-num js-line-number" data-line-number="75"></td>
<td id="LC75" class="blob-code blob-code-inner js-file-line"> <span class=pl-v>X</span>.<span class=pl-en>append</span>(<span class=pl-s1>np</span>.<span class=pl-en>array</span>(<span class=pl-s1>img</span>))</td>
</tr>
<tr>
<td id="L76" class="blob-num js-line-number" data-line-number="76"></td>
<td id="LC76" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>y</span>.<span class=pl-en>append</span>(<span class=pl-s1>i</span>)</td>
</tr>
<tr>
<td id="L77" class="blob-num js-line-number" data-line-number="77"></td>
<td id="LC77" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>except</span>:</td>
</tr>
<tr>
<td id="L78" class="blob-num js-line-number" data-line-number="78"></td>
<td id="LC78" class="blob-code blob-code-inner js-file-line"> <span class=pl-en>print</span>(<span class=pl-s>"File {} not read"</span>.<span class=pl-en>format</span>(<span class=pl-s1>path</span>))</td>
</tr>
<tr>
<td id="L79" class="blob-num js-line-number" data-line-number="79"></td>
<td id="LC79" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L80" class="blob-num js-line-number" data-line-number="80"></td>
<td id="LC80" class="blob-code blob-code-inner js-file-line"> <span class=pl-en>print</span>(<span class=pl-s>"Folder {} done"</span>.<span class=pl-en>format</span>(<span class=pl-s1>file</span>))</td>
</tr>
<tr>
<td id="L81" class="blob-num js-line-number" data-line-number="81"></td>
<td id="LC81" class="blob-code blob-code-inner js-file-line"> <span class=pl-en>print</span>(<span class=pl-s>"The folder {} is labeled as {}"</span>.<span class=pl-en>format</span>(<span class=pl-s1>file</span>, <span class=pl-s1>i</span>))</td>
</tr>
<tr>
<td id="L82" class="blob-num js-line-number" data-line-number="82"></td>
<td id="LC82" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L83" class="blob-num js-line-number" data-line-number="83"></td>
<td id="LC83" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L84" class="blob-num js-line-number" data-line-number="84"></td>
<td id="LC84" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[217]:</span></td>
</tr>
<tr>
<td id="L85" class="blob-num js-line-number" data-line-number="85"></td>
<td id="LC85" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L86" class="blob-num js-line-number" data-line-number="86"></td>
<td id="LC86" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L87" class="blob-num js-line-number" data-line-number="87"></td>
<td id="LC87" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>np</span>.<span class=pl-en>unique</span>(<span class=pl-s1>y</span>, <span class=pl-s1>return_counts</span><span class=pl-c1>=</span><span class=pl-c1>True</span>)</td>
</tr>
<tr>
<td id="L88" class="blob-num js-line-number" data-line-number="88"></td>
<td id="LC88" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L89" class="blob-num js-line-number" data-line-number="89"></td>
<td id="LC89" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L90" class="blob-num js-line-number" data-line-number="90"></td>
<td id="LC90" class="blob-code blob-code-inner js-file-line"><span class=pl-c># ### Making the functions to get the training and validation set from the Images</span></td>
</tr>
<tr>
<td id="L91" class="blob-num js-line-number" data-line-number="91"></td>
<td id="LC91" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L92" class="blob-num js-line-number" data-line-number="92"></td>
<td id="LC92" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[223]:</span></td>
</tr>
<tr>
<td id="L93" class="blob-num js-line-number" data-line-number="93"></td>
<td id="LC93" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L94" class="blob-num js-line-number" data-line-number="94"></td>
<td id="LC94" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L95" class="blob-num js-line-number" data-line-number="95"></td>
<td id="LC95" class="blob-code blob-code-inner js-file-line"><span class=pl-k>from</span> <span class=pl-s1>tqdm</span> <span class=pl-k>import</span> <span class=pl-s1>tqdm</span></td>
</tr>
<tr>
<td id="L96" class="blob-num js-line-number" data-line-number="96"></td>
<td id="LC96" class="blob-code blob-code-inner js-file-line"><span class=pl-v>X</span><span class=pl-c1>=</span>[]</td>
</tr>
<tr>
<td id="L97" class="blob-num js-line-number" data-line-number="97"></td>
<td id="LC97" class="blob-code blob-code-inner js-file-line"><span class=pl-v>Z</span><span class=pl-c1>=</span>[]</td>
</tr>
<tr>
<td id="L98" class="blob-num js-line-number" data-line-number="98"></td>
<td id="LC98" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L99" class="blob-num js-line-number" data-line-number="99"></td>
<td id="LC99" class="blob-code blob-code-inner js-file-line"><span class=pl-v>IMG_SIZE</span><span class=pl-c1>=</span><span class=pl-c1>150</span></td>
</tr>
<tr>
<td id="L100" class="blob-num js-line-number" data-line-number="100"></td>
<td id="LC100" class="blob-code blob-code-inner js-file-line"><span class=pl-v>IMAGE_BUILDINGS_DIR</span><span class=pl-c1>=</span><span class=pl-s>'111880_269359_bundle_archive/seg_train/seg_train/buildings'</span></td>
</tr>
<tr>
<td id="L101" class="blob-num js-line-number" data-line-number="101"></td>
<td id="LC101" class="blob-code blob-code-inner js-file-line"><span class=pl-v>IMAGE_FOREST_DIR</span><span class=pl-c1>=</span><span class=pl-s>'111880_269359_bundle_archive/seg_train/seg_train/forest'</span></td>
</tr>
<tr>
<td id="L102" class="blob-num js-line-number" data-line-number="102"></td>
<td id="LC102" class="blob-code blob-code-inner js-file-line"><span class=pl-v>IMAGE_GLACIER_DIR</span><span class=pl-c1>=</span><span class=pl-s>'111880_269359_bundle_archive/seg_train/seg_train/glacier'</span></td>
</tr>
<tr>
<td id="L103" class="blob-num js-line-number" data-line-number="103"></td>
<td id="LC103" class="blob-code blob-code-inner js-file-line"><span class=pl-v>IMAGE_MOUNTAIN_DIR</span><span class=pl-c1>=</span><span class=pl-s>'111880_269359_bundle_archive/seg_train/seg_train/mountain'</span></td>
</tr>
<tr>
<td id="L104" class="blob-num js-line-number" data-line-number="104"></td>
<td id="LC104" class="blob-code blob-code-inner js-file-line"><span class=pl-v>IMAGE_SEA_DIR</span><span class=pl-c1>=</span><span class=pl-s>'111880_269359_bundle_archive/seg_train/seg_train/sea'</span></td>
</tr>
<tr>
<td id="L105" class="blob-num js-line-number" data-line-number="105"></td>
<td id="LC105" class="blob-code blob-code-inner js-file-line"><span class=pl-v>IMAGE_STREET_DIR</span><span class=pl-c1>=</span><span class=pl-s>'111880_269359_bundle_archive/seg_train/seg_train/street'</span></td>
</tr>
<tr>
<td id="L106" class="blob-num js-line-number" data-line-number="106"></td>
<td id="LC106" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L107" class="blob-num js-line-number" data-line-number="107"></td>
<td id="LC107" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L108" class="blob-num js-line-number" data-line-number="108"></td>
<td id="LC108" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[224]:</span></td>
</tr>
<tr>
<td id="L109" class="blob-num js-line-number" data-line-number="109"></td>
<td id="LC109" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L110" class="blob-num js-line-number" data-line-number="110"></td>
<td id="LC110" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L111" class="blob-num js-line-number" data-line-number="111"></td>
<td id="LC111" class="blob-code blob-code-inner js-file-line"><span class=pl-k>def</span> <span class=pl-en>assign_label</span>(<span class=pl-s1>img</span>,<span class=pl-s1>image_type</span>):</td>
</tr>
<tr>
<td id="L112" class="blob-num js-line-number" data-line-number="112"></td>
<td id="LC112" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>return</span> <span class=pl-s1>image_type</span></td>
</tr>
<tr>
<td id="L113" class="blob-num js-line-number" data-line-number="113"></td>
<td id="LC113" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L114" class="blob-num js-line-number" data-line-number="114"></td>
<td id="LC114" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L115" class="blob-num js-line-number" data-line-number="115"></td>
<td id="LC115" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[255]:</span></td>
</tr>
<tr>
<td id="L116" class="blob-num js-line-number" data-line-number="116"></td>
<td id="LC116" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L117" class="blob-num js-line-number" data-line-number="117"></td>
<td id="LC117" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L118" class="blob-num js-line-number" data-line-number="118"></td>
<td id="LC118" class="blob-code blob-code-inner js-file-line"><span class=pl-k>def</span> <span class=pl-en>make_train_data</span>(<span class=pl-s1>image_type</span>,<span class=pl-v>DIR</span>):</td>
</tr>
<tr>
<td id="L119" class="blob-num js-line-number" data-line-number="119"></td>
<td id="LC119" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>for</span> <span class=pl-s1>img</span> <span class=pl-c1>in</span> <span class=pl-en>tqdm</span>(<span class=pl-s1>os</span>.<span class=pl-en>listdir</span>(<span class=pl-v>DIR</span>)):</td>
</tr>
<tr>
<td id="L120" class="blob-num js-line-number" data-line-number="120"></td>
<td id="LC120" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>label</span><span class=pl-c1>=</span><span class=pl-en>assign_label</span>(<span class=pl-s1>img</span>,<span class=pl-s1>image_type</span>)</td>
</tr>
<tr>
<td id="L121" class="blob-num js-line-number" data-line-number="121"></td>
<td id="LC121" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>path</span> <span class=pl-c1>=</span> <span class=pl-s1>os</span>.<span class=pl-s1>path</span>.<span class=pl-en>join</span>(<span class=pl-v>DIR</span>,<span class=pl-s1>img</span>)</td>
</tr>
<tr>
<td id="L122" class="blob-num js-line-number" data-line-number="122"></td>
<td id="LC122" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>img</span> <span class=pl-c1>=</span> <span class=pl-s1>cv2</span>.<span class=pl-en>imread</span>(<span class=pl-s1>path</span>,<span class=pl-s1>cv2</span>.<span class=pl-v>IMREAD_COLOR</span>)</td>
</tr>
<tr>
<td id="L123" class="blob-num js-line-number" data-line-number="123"></td>
<td id="LC123" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>img</span> <span class=pl-c1>=</span> <span class=pl-s1>cv2</span>.<span class=pl-en>resize</span>(<span class=pl-s1>img</span>, (<span class=pl-v>IMG_SIZE</span>,<span class=pl-v>IMG_SIZE</span>))</td>
</tr>
<tr>
<td id="L124" class="blob-num js-line-number" data-line-number="124"></td>
<td id="LC124" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L125" class="blob-num js-line-number" data-line-number="125"></td>
<td id="LC125" class="blob-code blob-code-inner js-file-line"> <span class=pl-v>X</span>.<span class=pl-en>append</span>(<span class=pl-s1>np</span>.<span class=pl-en>array</span>(<span class=pl-s1>img</span>))</td>
</tr>
<tr>
<td id="L126" class="blob-num js-line-number" data-line-number="126"></td>
<td id="LC126" class="blob-code blob-code-inner js-file-line"> <span class=pl-v>Z</span>.<span class=pl-en>append</span>(<span class=pl-s1>__builtins__</span>.<span class=pl-en>str</span>(<span class=pl-s1>label</span>))</td>
</tr>
<tr>
<td id="L127" class="blob-num js-line-number" data-line-number="127"></td>
<td id="LC127" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L128" class="blob-num js-line-number" data-line-number="128"></td>
<td id="LC128" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L129" class="blob-num js-line-number" data-line-number="129"></td>
<td id="LC129" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[256]:</span></td>
</tr>
<tr>
<td id="L130" class="blob-num js-line-number" data-line-number="130"></td>
<td id="LC130" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L131" class="blob-num js-line-number" data-line-number="131"></td>
<td id="LC131" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L132" class="blob-num js-line-number" data-line-number="132"></td>
<td id="LC132" class="blob-code blob-code-inner js-file-line"><span class=pl-en>make_train_data</span>(<span class=pl-s>'Buildings'</span>,<span class=pl-v>IMAGE_BUILDINGS_DIR</span>)</td>
</tr>
<tr>
<td id="L133" class="blob-num js-line-number" data-line-number="133"></td>
<td id="LC133" class="blob-code blob-code-inner js-file-line"><span class=pl-en>print</span>(<span class=pl-en>len</span>(<span class=pl-v>X</span>))</td>
</tr>
<tr>
<td id="L134" class="blob-num js-line-number" data-line-number="134"></td>
<td id="LC134" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L135" class="blob-num js-line-number" data-line-number="135"></td>
<td id="LC135" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L136" class="blob-num js-line-number" data-line-number="136"></td>
<td id="LC136" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[257]:</span></td>
</tr>
<tr>
<td id="L137" class="blob-num js-line-number" data-line-number="137"></td>
<td id="LC137" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L138" class="blob-num js-line-number" data-line-number="138"></td>
<td id="LC138" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L139" class="blob-num js-line-number" data-line-number="139"></td>
<td id="LC139" class="blob-code blob-code-inner js-file-line"><span class=pl-en>make_train_data</span>(<span class=pl-s>'Forest'</span>,<span class=pl-v>IMAGE_FOREST_DIR</span>)</td>
</tr>
<tr>
<td id="L140" class="blob-num js-line-number" data-line-number="140"></td>
<td id="LC140" class="blob-code blob-code-inner js-file-line"><span class=pl-en>print</span>(<span class=pl-en>len</span>(<span class=pl-v>X</span>))</td>
</tr>
<tr>
<td id="L141" class="blob-num js-line-number" data-line-number="141"></td>
<td id="LC141" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L142" class="blob-num js-line-number" data-line-number="142"></td>
<td id="LC142" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L143" class="blob-num js-line-number" data-line-number="143"></td>
<td id="LC143" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[258]:</span></td>
</tr>
<tr>
<td id="L144" class="blob-num js-line-number" data-line-number="144"></td>
<td id="LC144" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L145" class="blob-num js-line-number" data-line-number="145"></td>
<td id="LC145" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L146" class="blob-num js-line-number" data-line-number="146"></td>
<td id="LC146" class="blob-code blob-code-inner js-file-line"><span class=pl-en>make_train_data</span>(<span class=pl-s>'Glacier'</span>,<span class=pl-v>IMAGE_GLACIER_DIR</span>)</td>
</tr>
<tr>
<td id="L147" class="blob-num js-line-number" data-line-number="147"></td>
<td id="LC147" class="blob-code blob-code-inner js-file-line"><span class=pl-en>print</span>(<span class=pl-en>len</span>(<span class=pl-v>X</span>))</td>
</tr>
<tr>
<td id="L148" class="blob-num js-line-number" data-line-number="148"></td>
<td id="LC148" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L149" class="blob-num js-line-number" data-line-number="149"></td>
<td id="LC149" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L150" class="blob-num js-line-number" data-line-number="150"></td>
<td id="LC150" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[259]:</span></td>
</tr>
<tr>
<td id="L151" class="blob-num js-line-number" data-line-number="151"></td>
<td id="LC151" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L152" class="blob-num js-line-number" data-line-number="152"></td>
<td id="LC152" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L153" class="blob-num js-line-number" data-line-number="153"></td>
<td id="LC153" class="blob-code blob-code-inner js-file-line"><span class=pl-en>make_train_data</span>(<span class=pl-s>'Mountain'</span>,<span class=pl-v>IMAGE_MOUNTAIN_DIR</span>)</td>
</tr>
<tr>
<td id="L154" class="blob-num js-line-number" data-line-number="154"></td>
<td id="LC154" class="blob-code blob-code-inner js-file-line"><span class=pl-en>print</span>(<span class=pl-en>len</span>(<span class=pl-v>X</span>))</td>
</tr>
<tr>
<td id="L155" class="blob-num js-line-number" data-line-number="155"></td>
<td id="LC155" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L156" class="blob-num js-line-number" data-line-number="156"></td>
<td id="LC156" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L157" class="blob-num js-line-number" data-line-number="157"></td>
<td id="LC157" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[260]:</span></td>
</tr>
<tr>
<td id="L158" class="blob-num js-line-number" data-line-number="158"></td>
<td id="LC158" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L159" class="blob-num js-line-number" data-line-number="159"></td>
<td id="LC159" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L160" class="blob-num js-line-number" data-line-number="160"></td>
<td id="LC160" class="blob-code blob-code-inner js-file-line"><span class=pl-en>make_train_data</span>(<span class=pl-s>'Sea'</span>,<span class=pl-v>IMAGE_SEA_DIR</span>)</td>
</tr>
<tr>
<td id="L161" class="blob-num js-line-number" data-line-number="161"></td>
<td id="LC161" class="blob-code blob-code-inner js-file-line"><span class=pl-en>print</span>(<span class=pl-en>len</span>(<span class=pl-v>X</span>))</td>
</tr>
<tr>
<td id="L162" class="blob-num js-line-number" data-line-number="162"></td>
<td id="LC162" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L163" class="blob-num js-line-number" data-line-number="163"></td>
<td id="LC163" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L164" class="blob-num js-line-number" data-line-number="164"></td>
<td id="LC164" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[261]:</span></td>
</tr>
<tr>
<td id="L165" class="blob-num js-line-number" data-line-number="165"></td>
<td id="LC165" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L166" class="blob-num js-line-number" data-line-number="166"></td>
<td id="LC166" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L167" class="blob-num js-line-number" data-line-number="167"></td>
<td id="LC167" class="blob-code blob-code-inner js-file-line"><span class=pl-en>make_train_data</span>(<span class=pl-s>'Street'</span>,<span class=pl-v>IMAGE_STREET_DIR</span>)</td>
</tr>
<tr>
<td id="L168" class="blob-num js-line-number" data-line-number="168"></td>
<td id="LC168" class="blob-code blob-code-inner js-file-line"><span class=pl-en>print</span>(<span class=pl-en>len</span>(<span class=pl-v>X</span>))</td>
</tr>
<tr>
<td id="L169" class="blob-num js-line-number" data-line-number="169"></td>
<td id="LC169" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L170" class="blob-num js-line-number" data-line-number="170"></td>
<td id="LC170" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L171" class="blob-num js-line-number" data-line-number="171"></td>
<td id="LC171" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[262]:</span></td>
</tr>
<tr>
<td id="L172" class="blob-num js-line-number" data-line-number="172"></td>
<td id="LC172" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L173" class="blob-num js-line-number" data-line-number="173"></td>
<td id="LC173" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L174" class="blob-num js-line-number" data-line-number="174"></td>
<td id="LC174" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L175" class="blob-num js-line-number" data-line-number="175"></td>
<td id="LC175" class="blob-code blob-code-inner js-file-line"><span class=pl-k>from</span> <span class=pl-v>IPython</span>.<span class=pl-s1>display</span> <span class=pl-k>import</span> <span class=pl-s1>display</span></td>
</tr>
<tr>
<td id="L176" class="blob-num js-line-number" data-line-number="176"></td>
<td id="LC176" class="blob-code blob-code-inner js-file-line"><span class=pl-k>from</span> <span class=pl-v>PIL</span> <span class=pl-k>import</span> <span class=pl-v>Image</span> </td>
</tr>
<tr>
<td id="L177" class="blob-num js-line-number" data-line-number="177"></td>
<td id="LC177" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>labels</span> <span class=pl-c1>=</span> []</td>
</tr>
<tr>
<td id="L178" class="blob-num js-line-number" data-line-number="178"></td>
<td id="LC178" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>dic</span> <span class=pl-c1>=</span> <span class=pl-en>dict</span>()</td>
</tr>
<tr>
<td id="L179" class="blob-num js-line-number" data-line-number="179"></td>
<td id="LC179" class="blob-code blob-code-inner js-file-line"><span class=pl-k>for</span> <span class=pl-s1>i</span> <span class=pl-c1>in</span> <span class=pl-en>range</span>(<span class=pl-c1>0</span>,<span class=pl-c1>6</span>):</td>
</tr>
<tr>
<td id="L180" class="blob-num js-line-number" data-line-number="180"></td>
<td id="LC180" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>str</span> <span class=pl-c1>=</span> <span class=pl-s>'111880_269359_bundle_archive\seg_train\seg_train<span class=pl-cce>\\</span>'</span><span class=pl-c1>+</span><span class=pl-s1>dirs</span>[<span class=pl-s1>i</span>]</td>
</tr>
<tr>
<td id="L181" class="blob-num js-line-number" data-line-number="181"></td>
<td id="LC181" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>count</span> <span class=pl-c1>=</span> <span class=pl-c1>0</span></td>
</tr>
<tr>
<td id="L182" class="blob-num js-line-number" data-line-number="182"></td>
<td id="LC182" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>for</span> <span class=pl-s1>j</span> <span class=pl-c1>in</span> <span class=pl-s1>os</span>.<span class=pl-en>listdir</span>(<span class=pl-s1>str</span>):</td>
</tr>
<tr>
<td id="L183" class="blob-num js-line-number" data-line-number="183"></td>
<td id="LC183" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>str2</span> <span class=pl-c1>=</span> <span class=pl-s1>str</span><span class=pl-c1>+</span><span class=pl-s>"<span class=pl-cce>\\</span>"</span><span class=pl-c1>+</span><span class=pl-s1>j</span></td>
</tr>
<tr>
<td id="L184" class="blob-num js-line-number" data-line-number="184"></td>
<td id="LC184" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>im</span> <span class=pl-c1>=</span> <span class=pl-v>Image</span>.<span class=pl-en>open</span>(<span class=pl-s1>str2</span>)</td>
</tr>
<tr>
<td id="L185" class="blob-num js-line-number" data-line-number="185"></td>
<td id="LC185" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>count</span> <span class=pl-c1>+=</span> <span class=pl-c1>1</span></td>
</tr>
<tr>
<td id="L186" class="blob-num js-line-number" data-line-number="186"></td>
<td id="LC186" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>labels</span>.<span class=pl-en>append</span>(<span class=pl-s1>j</span>)</td>
</tr>
<tr>
<td id="L187" class="blob-num js-line-number" data-line-number="187"></td>
<td id="LC187" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>dic</span>[<span class=pl-s1>dirs</span>[<span class=pl-s1>i</span>]] <span class=pl-c1>=</span> <span class=pl-s1>count</span></td>
</tr>
<tr>
<td id="L188" class="blob-num js-line-number" data-line-number="188"></td>
<td id="LC188" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L189" class="blob-num js-line-number" data-line-number="189"></td>
<td id="LC189" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L190" class="blob-num js-line-number" data-line-number="190"></td>
<td id="LC190" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[263]:</span></td>
</tr>
<tr>
<td id="L191" class="blob-num js-line-number" data-line-number="191"></td>
<td id="LC191" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L192" class="blob-num js-line-number" data-line-number="192"></td>
<td id="LC192" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L193" class="blob-num js-line-number" data-line-number="193"></td>
<td id="LC193" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>labels1</span> <span class=pl-c1>=</span> []</td>
</tr>
<tr>
<td id="L194" class="blob-num js-line-number" data-line-number="194"></td>
<td id="LC194" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>dic1</span> <span class=pl-c1>=</span> <span class=pl-en>dict</span>()</td>
</tr>
<tr>
<td id="L195" class="blob-num js-line-number" data-line-number="195"></td>
<td id="LC195" class="blob-code blob-code-inner js-file-line"><span class=pl-v>IMAGE_SIZE</span> <span class=pl-c1>=</span> (<span class=pl-c1>64</span>,<span class=pl-c1>64</span>)</td>
</tr>
<tr>
<td id="L196" class="blob-num js-line-number" data-line-number="196"></td>
<td id="LC196" class="blob-code blob-code-inner js-file-line"><span class=pl-k>for</span> <span class=pl-s1>i</span> <span class=pl-c1>in</span> <span class=pl-en>range</span>(<span class=pl-c1>0</span>,<span class=pl-c1>6</span>):</td>
</tr>
<tr>
<td id="L197" class="blob-num js-line-number" data-line-number="197"></td>
<td id="LC197" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>str</span> <span class=pl-c1>=</span> <span class=pl-s>'111880_269359_bundle_archive\seg_test\seg_test<span class=pl-cce>\\</span>'</span><span class=pl-c1>+</span><span class=pl-s1>dirs</span>[<span class=pl-s1>i</span>]</td>
</tr>
<tr>
<td id="L198" class="blob-num js-line-number" data-line-number="198"></td>
<td id="LC198" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>count</span> <span class=pl-c1>=</span> <span class=pl-c1>0</span></td>
</tr>
<tr>
<td id="L199" class="blob-num js-line-number" data-line-number="199"></td>
<td id="LC199" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>for</span> <span class=pl-s1>j</span> <span class=pl-c1>in</span> <span class=pl-s1>os</span>.<span class=pl-en>listdir</span>(<span class=pl-s1>str</span>):</td>
</tr>
<tr>
<td id="L200" class="blob-num js-line-number" data-line-number="200"></td>
<td id="LC200" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>str2</span> <span class=pl-c1>=</span> <span class=pl-s1>str</span><span class=pl-c1>+</span><span class=pl-s>"<span class=pl-cce>\\</span>"</span><span class=pl-c1>+</span><span class=pl-s1>j</span></td>
</tr>
<tr>
<td id="L201" class="blob-num js-line-number" data-line-number="201"></td>
<td id="LC201" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>im</span> <span class=pl-c1>=</span> <span class=pl-v>Image</span>.<span class=pl-en>open</span>(<span class=pl-s1>str2</span>)</td>
</tr>
<tr>
<td id="L202" class="blob-num js-line-number" data-line-number="202"></td>
<td id="LC202" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>count</span> <span class=pl-c1>+=</span> <span class=pl-c1>1</span></td>
</tr>
<tr>
<td id="L203" class="blob-num js-line-number" data-line-number="203"></td>
<td id="LC203" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>labels1</span>.<span class=pl-en>append</span>(<span class=pl-s1>j</span>)</td>
</tr>
<tr>
<td id="L204" class="blob-num js-line-number" data-line-number="204"></td>
<td id="LC204" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>dic1</span>[<span class=pl-s1>dirs</span>[<span class=pl-s1>i</span>]] <span class=pl-c1>=</span> <span class=pl-s1>count</span></td>
</tr>
<tr>
<td id="L205" class="blob-num js-line-number" data-line-number="205"></td>
<td id="LC205" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L206" class="blob-num js-line-number" data-line-number="206"></td>
<td id="LC206" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L207" class="blob-num js-line-number" data-line-number="207"></td>
<td id="LC207" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[264]:</span></td>
</tr>
<tr>
<td id="L208" class="blob-num js-line-number" data-line-number="208"></td>
<td id="LC208" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L209" class="blob-num js-line-number" data-line-number="209"></td>
<td id="LC209" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L210" class="blob-num js-line-number" data-line-number="210"></td>
<td id="LC210" class="blob-code blob-code-inner js-file-line"><span class=pl-en>print</span> (<span class=pl-s>"Number of training examples: {}"</span>.<span class=pl-en>format</span>(<span class=pl-en>len</span>(<span class=pl-s1>labels</span>)))</td>
</tr>
<tr>
<td id="L211" class="blob-num js-line-number" data-line-number="211"></td>
<td id="LC211" class="blob-code blob-code-inner js-file-line"><span class=pl-en>print</span> (<span class=pl-s>"Number of testing examples: {}"</span>.<span class=pl-en>format</span>(<span class=pl-en>len</span>(<span class=pl-s1>labels1</span>)))</td>
</tr>
<tr>
<td id="L212" class="blob-num js-line-number" data-line-number="212"></td>
<td id="LC212" class="blob-code blob-code-inner js-file-line"><span class=pl-en>print</span> (<span class=pl-s>"Each image is of size: {}"</span>.<span class=pl-en>format</span>(<span class=pl-v>IMAGE_SIZE</span>))</td>
</tr>
<tr>
<td id="L213" class="blob-num js-line-number" data-line-number="213"></td>
<td id="LC213" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L214" class="blob-num js-line-number" data-line-number="214"></td>
<td id="LC214" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L215" class="blob-num js-line-number" data-line-number="215"></td>
<td id="LC215" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[265]:</span></td>
</tr>
<tr>
<td id="L216" class="blob-num js-line-number" data-line-number="216"></td>
<td id="LC216" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L217" class="blob-num js-line-number" data-line-number="217"></td>
<td id="LC217" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L218" class="blob-num js-line-number" data-line-number="218"></td>
<td id="LC218" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>lis1</span> <span class=pl-c1>=</span> []</td>
</tr>
<tr>
<td id="L219" class="blob-num js-line-number" data-line-number="219"></td>
<td id="LC219" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>lis2</span> <span class=pl-c1>=</span> []</td>
</tr>
<tr>
<td id="L220" class="blob-num js-line-number" data-line-number="220"></td>
<td id="LC220" class="blob-code blob-code-inner js-file-line"><span class=pl-k>for</span> <span class=pl-s1>key</span>,<span class=pl-s1>val</span> <span class=pl-c1>in</span> <span class=pl-s1>dic</span>.<span class=pl-en>items</span>():</td>
</tr>
<tr>
<td id="L221" class="blob-num js-line-number" data-line-number="221"></td>
<td id="LC221" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>lis1</span>.<span class=pl-en>append</span>(<span class=pl-s1>val</span>)</td>
</tr>
<tr>
<td id="L222" class="blob-num js-line-number" data-line-number="222"></td>
<td id="LC222" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>lis2</span>.<span class=pl-en>append</span>(<span class=pl-s1>key</span>)</td>
</tr>
<tr>
<td id="L223" class="blob-num js-line-number" data-line-number="223"></td>
<td id="LC223" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L224" class="blob-num js-line-number" data-line-number="224"></td>
<td id="LC224" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L225" class="blob-num js-line-number" data-line-number="225"></td>
<td id="LC225" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[266]:</span></td>
</tr>
<tr>
<td id="L226" class="blob-num js-line-number" data-line-number="226"></td>
<td id="LC226" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L227" class="blob-num js-line-number" data-line-number="227"></td>
<td id="LC227" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L228" class="blob-num js-line-number" data-line-number="228"></td>
<td id="LC228" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>lis11</span> <span class=pl-c1>=</span> []</td>
</tr>
<tr>
<td id="L229" class="blob-num js-line-number" data-line-number="229"></td>
<td id="LC229" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>lis22</span> <span class=pl-c1>=</span> []</td>
</tr>
<tr>
<td id="L230" class="blob-num js-line-number" data-line-number="230"></td>
<td id="LC230" class="blob-code blob-code-inner js-file-line"><span class=pl-k>for</span> <span class=pl-s1>key</span>,<span class=pl-s1>val</span> <span class=pl-c1>in</span> <span class=pl-s1>dic1</span>.<span class=pl-en>items</span>():</td>
</tr>
<tr>
<td id="L231" class="blob-num js-line-number" data-line-number="231"></td>
<td id="LC231" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>lis11</span>.<span class=pl-en>append</span>(<span class=pl-s1>val</span>)</td>
</tr>
<tr>
<td id="L232" class="blob-num js-line-number" data-line-number="232"></td>
<td id="LC232" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>lis22</span>.<span class=pl-en>append</span>(<span class=pl-s1>key</span>)</td>
</tr>
<tr>
<td id="L233" class="blob-num js-line-number" data-line-number="233"></td>
<td id="LC233" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L234" class="blob-num js-line-number" data-line-number="234"></td>
<td id="LC234" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L235" class="blob-num js-line-number" data-line-number="235"></td>
<td id="LC235" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[267]:</span></td>
</tr>
<tr>
<td id="L236" class="blob-num js-line-number" data-line-number="236"></td>
<td id="LC236" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L237" class="blob-num js-line-number" data-line-number="237"></td>
<td id="LC237" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L238" class="blob-num js-line-number" data-line-number="238"></td>
<td id="LC238" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>data</span> <span class=pl-c1>=</span> {<span class=pl-s>'Name'</span>:<span class=pl-s1>lis2</span>, <span class=pl-s>'train'</span>:<span class=pl-s1>lis1</span>,<span class=pl-s>'test'</span>:<span class=pl-s1>lis11</span>}</td>
</tr>
<tr>
<td id="L239" class="blob-num js-line-number" data-line-number="239"></td>
<td id="LC239" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>data</span></td>
</tr>
<tr>
<td id="L240" class="blob-num js-line-number" data-line-number="240"></td>
<td id="LC240" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L241" class="blob-num js-line-number" data-line-number="241"></td>
<td id="LC241" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L242" class="blob-num js-line-number" data-line-number="242"></td>
<td id="LC242" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[268]:</span></td>
</tr>
<tr>
<td id="L243" class="blob-num js-line-number" data-line-number="243"></td>
<td id="LC243" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L244" class="blob-num js-line-number" data-line-number="244"></td>
<td id="LC244" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L245" class="blob-num js-line-number" data-line-number="245"></td>
<td id="LC245" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>pandas</span> <span class=pl-k>as</span> <span class=pl-s1>pd</span></td>
</tr>
<tr>
<td id="L246" class="blob-num js-line-number" data-line-number="246"></td>
<td id="LC246" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>df</span> <span class=pl-c1>=</span> <span class=pl-s1>pd</span>.<span class=pl-v>DataFrame</span>(<span class=pl-s1>data</span>)</td>
</tr>
<tr>
<td id="L247" class="blob-num js-line-number" data-line-number="247"></td>
<td id="LC247" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>df</span></td>
</tr>
<tr>
<td id="L248" class="blob-num js-line-number" data-line-number="248"></td>
<td id="LC248" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L249" class="blob-num js-line-number" data-line-number="249"></td>
<td id="LC249" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L250" class="blob-num js-line-number" data-line-number="250"></td>
<td id="LC250" class="blob-code blob-code-inner js-file-line"><span class=pl-c># ## 2.2 ) Visualizing some Random Images</span></td>
</tr>
<tr>
<td id="L251" class="blob-num js-line-number" data-line-number="251"></td>
<td id="LC251" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L252" class="blob-num js-line-number" data-line-number="252"></td>
<td id="LC252" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[269]:</span></td>
</tr>
<tr>
<td id="L253" class="blob-num js-line-number" data-line-number="253"></td>
<td id="LC253" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L254" class="blob-num js-line-number" data-line-number="254"></td>
<td id="LC254" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L255" class="blob-num js-line-number" data-line-number="255"></td>
<td id="LC255" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>ax</span> <span class=pl-c1>=</span> <span class=pl-s1>df</span>.<span class=pl-s1>plot</span>.<span class=pl-en>bar</span>(<span class=pl-s1>x</span><span class=pl-c1>=</span><span class=pl-s>'Name'</span>, <span class=pl-s1>y</span><span class=pl-c1>=</span>[<span class=pl-s>'train'</span>,<span class=pl-s>'test'</span>], <span class=pl-s1>rot</span><span class=pl-c1>=</span><span class=pl-c1>0</span>)</td>
</tr>
<tr>
<td id="L256" class="blob-num js-line-number" data-line-number="256"></td>
<td id="LC256" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>plt</span>.<span class=pl-en>title</span>(<span class=pl-s>'Training sets Input'</span>)</td>
</tr>
<tr>
<td id="L257" class="blob-num js-line-number" data-line-number="257"></td>
<td id="LC257" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L258" class="blob-num js-line-number" data-line-number="258"></td>
<td id="LC258" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L259" class="blob-num js-line-number" data-line-number="259"></td>
<td id="LC259" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[270]:</span></td>
</tr>
<tr>
<td id="L260" class="blob-num js-line-number" data-line-number="260"></td>
<td id="LC260" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L261" class="blob-num js-line-number" data-line-number="261"></td>
<td id="LC261" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L262" class="blob-num js-line-number" data-line-number="262"></td>
<td id="LC262" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>plt</span>.<span class=pl-en>pie</span>(<span class=pl-s1>lis1</span>,</td>
</tr>
<tr>
<td id="L263" class="blob-num js-line-number" data-line-number="263"></td>
<td id="LC263" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>explode</span><span class=pl-c1>=</span>(<span class=pl-c1>0</span>, <span class=pl-c1>0</span>, <span class=pl-c1>0</span>, <span class=pl-c1>0</span>, <span class=pl-c1>0</span>, <span class=pl-c1>0</span>) , </td>
</tr>
<tr>
<td id="L264" class="blob-num js-line-number" data-line-number="264"></td>
<td id="LC264" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>labels</span><span class=pl-c1>=</span><span class=pl-s1>lis2</span>,</td>
</tr>
<tr>
<td id="L265" class="blob-num js-line-number" data-line-number="265"></td>
<td id="LC265" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>autopct</span><span class=pl-c1>=</span><span class=pl-s>'%1.1f%%'</span>)</td>
</tr>
<tr>
<td id="L266" class="blob-num js-line-number" data-line-number="266"></td>
<td id="LC266" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>plt</span>.<span class=pl-en>axis</span>(<span class=pl-s>'equal'</span>)</td>
</tr>
<tr>
<td id="L267" class="blob-num js-line-number" data-line-number="267"></td>
<td id="LC267" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>plt</span>.<span class=pl-en>title</span>(<span class=pl-s>'Proportion of each observed category'</span>)</td>
</tr>
<tr>
<td id="L268" class="blob-num js-line-number" data-line-number="268"></td>
<td id="LC268" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>plt</span>.<span class=pl-en>show</span>()</td>
</tr>
<tr>
<td id="L269" class="blob-num js-line-number" data-line-number="269"></td>
<td id="LC269" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L270" class="blob-num js-line-number" data-line-number="270"></td>
<td id="LC270" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L271" class="blob-num js-line-number" data-line-number="271"></td>
<td id="LC271" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[271]:</span></td>
</tr>
<tr>
<td id="L272" class="blob-num js-line-number" data-line-number="272"></td>
<td id="LC272" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L273" class="blob-num js-line-number" data-line-number="273"></td>
<td id="LC273" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L274" class="blob-num js-line-number" data-line-number="274"></td>
<td id="LC274" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>random</span> <span class=pl-k>as</span> <span class=pl-s1>rn</span></td>
</tr>
<tr>
<td id="L275" class="blob-num js-line-number" data-line-number="275"></td>
<td id="LC275" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>fig</span>,<span class=pl-s1>ax</span><span class=pl-c1>=</span><span class=pl-s1>plt</span>.<span class=pl-en>subplots</span>(<span class=pl-c1>5</span>,<span class=pl-c1>3</span>)</td>
</tr>
<tr>
<td id="L276" class="blob-num js-line-number" data-line-number="276"></td>
<td id="LC276" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>fig</span>.<span class=pl-en>set_size_inches</span>(<span class=pl-c1>15</span>,<span class=pl-c1>15</span>)</td>
</tr>
<tr>
<td id="L277" class="blob-num js-line-number" data-line-number="277"></td>
<td id="LC277" class="blob-code blob-code-inner js-file-line"><span class=pl-k>for</span> <span class=pl-s1>i</span> <span class=pl-c1>in</span> <span class=pl-en>range</span>(<span class=pl-c1>5</span>):</td>
</tr>
<tr>
<td id="L278" class="blob-num js-line-number" data-line-number="278"></td>
<td id="LC278" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>for</span> <span class=pl-s1>j</span> <span class=pl-c1>in</span> <span class=pl-en>range</span> (<span class=pl-c1>3</span>):</td>
</tr>
<tr>
<td id="L279" class="blob-num js-line-number" data-line-number="279"></td>
<td id="LC279" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>l</span><span class=pl-c1>=</span><span class=pl-s1>rn</span>.<span class=pl-en>randint</span>(<span class=pl-c1>0</span>,<span class=pl-en>len</span>(<span class=pl-v>Z</span>))</td>
</tr>
<tr>
<td id="L280" class="blob-num js-line-number" data-line-number="280"></td>
<td id="LC280" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>ax</span>[<span class=pl-s1>i</span>,<span class=pl-s1>j</span>].<span class=pl-en>imshow</span>(<span class=pl-v>X</span>[<span class=pl-s1>l</span>])</td>
</tr>
<tr>
<td id="L281" class="blob-num js-line-number" data-line-number="281"></td>
<td id="LC281" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>ax</span>[<span class=pl-s1>i</span>,<span class=pl-s1>j</span>].<span class=pl-en>set_title</span>(<span class=pl-s>'Intel_Image: '</span><span class=pl-c1>+</span><span class=pl-v>Z</span>[<span class=pl-s1>l</span>])</td>
</tr>
<tr>
<td id="L282" class="blob-num js-line-number" data-line-number="282"></td>
<td id="LC282" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L283" class="blob-num js-line-number" data-line-number="283"></td>
<td id="LC283" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>plt</span>.<span class=pl-en>tight_layout</span>()</td>
</tr>
<tr>
<td id="L284" class="blob-num js-line-number" data-line-number="284"></td>
<td id="LC284" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L285" class="blob-num js-line-number" data-line-number="285"></td>
<td id="LC285" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L286" class="blob-num js-line-number" data-line-number="286"></td>
<td id="LC286" class="blob-code blob-code-inner js-file-line"><span class=pl-c># ### Preprocessing the Training set</span></td>
</tr>
<tr>
<td id="L287" class="blob-num js-line-number" data-line-number="287"></td>
<td id="LC287" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L288" class="blob-num js-line-number" data-line-number="288"></td>
<td id="LC288" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[272]:</span></td>
</tr>
<tr>
<td id="L289" class="blob-num js-line-number" data-line-number="289"></td>
<td id="LC289" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L290" class="blob-num js-line-number" data-line-number="290"></td>
<td id="LC290" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L291" class="blob-num js-line-number" data-line-number="291"></td>
<td id="LC291" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>train_datagen</span> <span class=pl-c1>=</span> <span class=pl-v>ImageDataGenerator</span>(<span class=pl-s1>rescale</span> <span class=pl-c1>=</span> <span class=pl-c1>1.</span><span class=pl-c1>/</span><span class=pl-c1>255</span>,</td>
</tr>
<tr>
<td id="L292" class="blob-num js-line-number" data-line-number="292"></td>
<td id="LC292" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>shear_range</span> <span class=pl-c1>=</span> <span class=pl-c1>0.2</span>,</td>
</tr>
<tr>
<td id="L293" class="blob-num js-line-number" data-line-number="293"></td>
<td id="LC293" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>zoom_range</span> <span class=pl-c1>=</span> <span class=pl-c1>0.2</span>,</td>
</tr>
<tr>
<td id="L294" class="blob-num js-line-number" data-line-number="294"></td>
<td id="LC294" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>horizontal_flip</span> <span class=pl-c1>=</span> <span class=pl-c1>True</span>)</td>
</tr>
<tr>
<td id="L295" class="blob-num js-line-number" data-line-number="295"></td>
<td id="LC295" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>seg_train</span> <span class=pl-c1>=</span> <span class=pl-s1>train_datagen</span>.<span class=pl-en>flow_from_directory</span>(<span class=pl-s>'111880_269359_bundle_archive/seg_train/seg_train'</span>,</td>
</tr>
<tr>
<td id="L296" class="blob-num js-line-number" data-line-number="296"></td>
<td id="LC296" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>target_size</span> <span class=pl-c1>=</span> (<span class=pl-c1>64</span>, <span class=pl-c1>64</span>),</td>
</tr>
<tr>
<td id="L297" class="blob-num js-line-number" data-line-number="297"></td>
<td id="LC297" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>batch_size</span> <span class=pl-c1>=</span> <span class=pl-c1>32</span>,</td>
</tr>
<tr>
<td id="L298" class="blob-num js-line-number" data-line-number="298"></td>
<td id="LC298" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>class_mode</span> <span class=pl-c1>=</span> <span class=pl-s>'categorical'</span>)</td>
</tr>
<tr>
<td id="L299" class="blob-num js-line-number" data-line-number="299"></td>
<td id="LC299" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L300" class="blob-num js-line-number" data-line-number="300"></td>
<td id="LC300" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L301" class="blob-num js-line-number" data-line-number="301"></td>
<td id="LC301" class="blob-code blob-code-inner js-file-line"><span class=pl-c># ### Preprocessing the Test set</span></td>
</tr>
<tr>
<td id="L302" class="blob-num js-line-number" data-line-number="302"></td>
<td id="LC302" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L303" class="blob-num js-line-number" data-line-number="303"></td>
<td id="LC303" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[273]:</span></td>
</tr>
<tr>
<td id="L304" class="blob-num js-line-number" data-line-number="304"></td>
<td id="LC304" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L305" class="blob-num js-line-number" data-line-number="305"></td>
<td id="LC305" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L306" class="blob-num js-line-number" data-line-number="306"></td>
<td id="LC306" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>test_datagen</span> <span class=pl-c1>=</span> <span class=pl-v>ImageDataGenerator</span>(<span class=pl-s1>rescale</span> <span class=pl-c1>=</span> <span class=pl-c1>1.</span><span class=pl-c1>/</span><span class=pl-c1>255</span>)</td>
</tr>
<tr>
<td id="L307" class="blob-num js-line-number" data-line-number="307"></td>
<td id="LC307" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>seg_test</span> <span class=pl-c1>=</span> <span class=pl-s1>test_datagen</span>.<span class=pl-en>flow_from_directory</span>(<span class=pl-s>'111880_269359_bundle_archive/seg_test/seg_test'</span>,</td>
</tr>
<tr>
<td id="L308" class="blob-num js-line-number" data-line-number="308"></td>
<td id="LC308" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>target_size</span> <span class=pl-c1>=</span> (<span class=pl-c1>64</span>, <span class=pl-c1>64</span>),</td>
</tr>
<tr>
<td id="L309" class="blob-num js-line-number" data-line-number="309"></td>
<td id="LC309" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>batch_size</span> <span class=pl-c1>=</span> <span class=pl-c1>32</span>,</td>
</tr>
<tr>
<td id="L310" class="blob-num js-line-number" data-line-number="310"></td>
<td id="LC310" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>class_mode</span> <span class=pl-c1>=</span> <span class=pl-s>'categorical'</span>)</td>
</tr>
<tr>
<td id="L311" class="blob-num js-line-number" data-line-number="311"></td>
<td id="LC311" class="blob-code blob-code-inner js-file-line"><span class=pl-v>IMAGE_SIZE</span> <span class=pl-c1>=</span> (<span class=pl-c1>64</span>,<span class=pl-c1>64</span>)</td>
</tr>
<tr>
<td id="L312" class="blob-num js-line-number" data-line-number="312"></td>
<td id="LC312" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L313" class="blob-num js-line-number" data-line-number="313"></td>
<td id="LC313" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L314" class="blob-num js-line-number" data-line-number="314"></td>
<td id="LC314" class="blob-code blob-code-inner js-file-line"><span class=pl-c># ## Part 2 - Building the CNN</span></td>
</tr>
<tr>
<td id="L315" class="blob-num js-line-number" data-line-number="315"></td>
<td id="LC315" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L316" class="blob-num js-line-number" data-line-number="316"></td>
<td id="LC316" class="blob-code blob-code-inner js-file-line"><span class=pl-c># ### Initialising the CNN</span></td>
</tr>
<tr>
<td id="L317" class="blob-num js-line-number" data-line-number="317"></td>
<td id="LC317" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L318" class="blob-num js-line-number" data-line-number="318"></td>
<td id="LC318" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[281]:</span></td>
</tr>
<tr>
<td id="L319" class="blob-num js-line-number" data-line-number="319"></td>
<td id="LC319" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L320" class="blob-num js-line-number" data-line-number="320"></td>
<td id="LC320" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L321" class="blob-num js-line-number" data-line-number="321"></td>
<td id="LC321" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>cnn</span> <span class=pl-c1>=</span> <span class=pl-s1>tf</span>.<span class=pl-s1>keras</span>.<span class=pl-s1>models</span>.<span class=pl-v>Sequential</span>()</td>
</tr>
<tr>
<td id="L322" class="blob-num js-line-number" data-line-number="322"></td>
<td id="LC322" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L323" class="blob-num js-line-number" data-line-number="323"></td>
<td id="LC323" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L324" class="blob-num js-line-number" data-line-number="324"></td>
<td id="LC324" class="blob-code blob-code-inner js-file-line"><span class=pl-c># ### Step 1 - Convolution</span></td>
</tr>
<tr>
<td id="L325" class="blob-num js-line-number" data-line-number="325"></td>
<td id="LC325" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L326" class="blob-num js-line-number" data-line-number="326"></td>
<td id="LC326" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[282]:</span></td>
</tr>
<tr>
<td id="L327" class="blob-num js-line-number" data-line-number="327"></td>
<td id="LC327" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L328" class="blob-num js-line-number" data-line-number="328"></td>
<td id="LC328" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L329" class="blob-num js-line-number" data-line-number="329"></td>
<td id="LC329" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>cnn</span>.<span class=pl-en>add</span>(<span class=pl-s1>tf</span>.<span class=pl-s1>keras</span>.<span class=pl-s1>layers</span>.<span class=pl-v>Conv2D</span>(<span class=pl-s1>filters</span><span class=pl-c1>=</span><span class=pl-c1>32</span>, <span class=pl-s1>kernel_size</span><span class=pl-c1>=</span><span class=pl-c1>3</span>, <span class=pl-s1>activation</span><span class=pl-c1>=</span><span class=pl-s>'relu'</span>, <span class=pl-s1>input_shape</span><span class=pl-c1>=</span>[<span class=pl-c1>64</span>, <span class=pl-c1>64</span>, <span class=pl-c1>3</span>]))</td>
</tr>
<tr>
<td id="L330" class="blob-num js-line-number" data-line-number="330"></td>
<td id="LC330" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L331" class="blob-num js-line-number" data-line-number="331"></td>
<td id="LC331" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L332" class="blob-num js-line-number" data-line-number="332"></td>
<td id="LC332" class="blob-code blob-code-inner js-file-line"><span class=pl-c># ### Step 2 - Pooling</span></td>
</tr>
<tr>
<td id="L333" class="blob-num js-line-number" data-line-number="333"></td>
<td id="LC333" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L334" class="blob-num js-line-number" data-line-number="334"></td>
<td id="LC334" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[283]:</span></td>
</tr>
<tr>
<td id="L335" class="blob-num js-line-number" data-line-number="335"></td>
<td id="LC335" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L336" class="blob-num js-line-number" data-line-number="336"></td>
<td id="LC336" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L337" class="blob-num js-line-number" data-line-number="337"></td>
<td id="LC337" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>cnn</span>.<span class=pl-en>add</span>(<span class=pl-s1>tf</span>.<span class=pl-s1>keras</span>.<span class=pl-s1>layers</span>.<span class=pl-v>MaxPool2D</span>(<span class=pl-s1>pool_size</span><span class=pl-c1>=</span><span class=pl-c1>2</span>, <span class=pl-s1>strides</span><span class=pl-c1>=</span><span class=pl-c1>2</span>))</td>
</tr>
<tr>
<td id="L338" class="blob-num js-line-number" data-line-number="338"></td>
<td id="LC338" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L339" class="blob-num js-line-number" data-line-number="339"></td>
<td id="LC339" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L340" class="blob-num js-line-number" data-line-number="340"></td>
<td id="LC340" class="blob-code blob-code-inner js-file-line"><span class=pl-c># ### Adding a second convolutional layer</span></td>
</tr>
<tr>
<td id="L341" class="blob-num js-line-number" data-line-number="341"></td>
<td id="LC341" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L342" class="blob-num js-line-number" data-line-number="342"></td>
<td id="LC342" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[284]:</span></td>
</tr>
<tr>
<td id="L343" class="blob-num js-line-number" data-line-number="343"></td>
<td id="LC343" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L344" class="blob-num js-line-number" data-line-number="344"></td>
<td id="LC344" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L345" class="blob-num js-line-number" data-line-number="345"></td>
<td id="LC345" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>cnn</span>.<span class=pl-en>add</span>(<span class=pl-s1>tf</span>.<span class=pl-s1>keras</span>.<span class=pl-s1>layers</span>.<span class=pl-v>Conv2D</span>(<span class=pl-s1>filters</span><span class=pl-c1>=</span><span class=pl-c1>32</span>, <span class=pl-s1>kernel_size</span><span class=pl-c1>=</span><span class=pl-c1>3</span>, <span class=pl-s1>activation</span><span class=pl-c1>=</span><span class=pl-s>'relu'</span>))</td>
</tr>
<tr>
<td id="L346" class="blob-num js-line-number" data-line-number="346"></td>
<td id="LC346" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>cnn</span>.<span class=pl-en>add</span>(<span class=pl-s1>tf</span>.<span class=pl-s1>keras</span>.<span class=pl-s1>layers</span>.<span class=pl-v>MaxPool2D</span>(<span class=pl-s1>pool_size</span><span class=pl-c1>=</span><span class=pl-c1>2</span>, <span class=pl-s1>strides</span><span class=pl-c1>=</span><span class=pl-c1>2</span>))</td>
</tr>
<tr>
<td id="L347" class="blob-num js-line-number" data-line-number="347"></td>
<td id="LC347" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L348" class="blob-num js-line-number" data-line-number="348"></td>
<td id="LC348" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L349" class="blob-num js-line-number" data-line-number="349"></td>
<td id="LC349" class="blob-code blob-code-inner js-file-line"><span class=pl-c># ### Step 3 - Flattening</span></td>
</tr>
<tr>
<td id="L350" class="blob-num js-line-number" data-line-number="350"></td>
<td id="LC350" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L351" class="blob-num js-line-number" data-line-number="351"></td>
<td id="LC351" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[285]:</span></td>
</tr>
<tr>
<td id="L352" class="blob-num js-line-number" data-line-number="352"></td>
<td id="LC352" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L353" class="blob-num js-line-number" data-line-number="353"></td>
<td id="LC353" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L354" class="blob-num js-line-number" data-line-number="354"></td>
<td id="LC354" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>cnn</span>.<span class=pl-en>add</span>(<span class=pl-s1>tf</span>.<span class=pl-s1>keras</span>.<span class=pl-s1>layers</span>.<span class=pl-v>Flatten</span>())</td>
</tr>
<tr>
<td id="L355" class="blob-num js-line-number" data-line-number="355"></td>
<td id="LC355" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L356" class="blob-num js-line-number" data-line-number="356"></td>
<td id="LC356" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L357" class="blob-num js-line-number" data-line-number="357"></td>
<td id="LC357" class="blob-code blob-code-inner js-file-line"><span class=pl-c># ### Step 4 - Full Connection</span></td>
</tr>
<tr>
<td id="L358" class="blob-num js-line-number" data-line-number="358"></td>
<td id="LC358" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L359" class="blob-num js-line-number" data-line-number="359"></td>
<td id="LC359" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[286]:</span></td>
</tr>
<tr>
<td id="L360" class="blob-num js-line-number" data-line-number="360"></td>
<td id="LC360" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L361" class="blob-num js-line-number" data-line-number="361"></td>
<td id="LC361" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L362" class="blob-num js-line-number" data-line-number="362"></td>
<td id="LC362" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>cnn</span>.<span class=pl-en>add</span>(<span class=pl-s1>tf</span>.<span class=pl-s1>keras</span>.<span class=pl-s1>layers</span>.<span class=pl-v>Dense</span>(<span class=pl-s1>units</span><span class=pl-c1>=</span><span class=pl-c1>128</span>, <span class=pl-s1>activation</span><span class=pl-c1>=</span><span class=pl-s>'relu'</span>))</td>
</tr>
<tr>
<td id="L363" class="blob-num js-line-number" data-line-number="363"></td>
<td id="LC363" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L364" class="blob-num js-line-number" data-line-number="364"></td>
<td id="LC364" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L365" class="blob-num js-line-number" data-line-number="365"></td>
<td id="LC365" class="blob-code blob-code-inner js-file-line"><span class=pl-c># ### Step 5 - Output Layer</span></td>
</tr>
<tr>
<td id="L366" class="blob-num js-line-number" data-line-number="366"></td>
<td id="LC366" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L367" class="blob-num js-line-number" data-line-number="367"></td>
<td id="LC367" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[287]:</span></td>
</tr>
<tr>
<td id="L368" class="blob-num js-line-number" data-line-number="368"></td>
<td id="LC368" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L369" class="blob-num js-line-number" data-line-number="369"></td>
<td id="LC369" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L370" class="blob-num js-line-number" data-line-number="370"></td>
<td id="LC370" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>cnn</span>.<span class=pl-en>add</span>(<span class=pl-s1>tf</span>.<span class=pl-s1>keras</span>.<span class=pl-s1>layers</span>.<span class=pl-v>Dense</span>(<span class=pl-s1>units</span><span class=pl-c1>=</span><span class=pl-c1>6</span>, <span class=pl-s1>activation</span><span class=pl-c1>=</span><span class=pl-s>'softmax'</span>))</td>
</tr>
<tr>
<td id="L371" class="blob-num js-line-number" data-line-number="371"></td>
<td id="LC371" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L372" class="blob-num js-line-number" data-line-number="372"></td>
<td id="LC372" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L373" class="blob-num js-line-number" data-line-number="373"></td>
<td id="LC373" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[288]:</span></td>
</tr>
<tr>
<td id="L374" class="blob-num js-line-number" data-line-number="374"></td>
<td id="LC374" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L375" class="blob-num js-line-number" data-line-number="375"></td>
<td id="LC375" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L376" class="blob-num js-line-number" data-line-number="376"></td>
<td id="LC376" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>cnn</span>.<span class=pl-en>summary</span>()</td>
</tr>
<tr>
<td id="L377" class="blob-num js-line-number" data-line-number="377"></td>
<td id="LC377" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L378" class="blob-num js-line-number" data-line-number="378"></td>
<td id="LC378" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L379" class="blob-num js-line-number" data-line-number="379"></td>
<td id="LC379" class="blob-code blob-code-inner js-file-line"><span class=pl-c># ## Part 3 - Training the CNN</span></td>
</tr>
<tr>
<td id="L380" class="blob-num js-line-number" data-line-number="380"></td>
<td id="LC380" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L381" class="blob-num js-line-number" data-line-number="381"></td>
<td id="LC381" class="blob-code blob-code-inner js-file-line"><span class=pl-c># ### Compiling the CNN</span></td>
</tr>
<tr>
<td id="L382" class="blob-num js-line-number" data-line-number="382"></td>
<td id="LC382" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L383" class="blob-num js-line-number" data-line-number="383"></td>
<td id="LC383" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[289]:</span></td>
</tr>
<tr>
<td id="L384" class="blob-num js-line-number" data-line-number="384"></td>
<td id="LC384" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L385" class="blob-num js-line-number" data-line-number="385"></td>
<td id="LC385" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L386" class="blob-num js-line-number" data-line-number="386"></td>
<td id="LC386" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>cnn</span>.<span class=pl-en>compile</span>(<span class=pl-s1>optimizer</span> <span class=pl-c1>=</span> <span class=pl-s>'adam'</span>, <span class=pl-s1>loss</span> <span class=pl-c1>=</span> <span class=pl-s>'categorical_crossentropy'</span>, <span class=pl-s1>metrics</span> <span class=pl-c1>=</span> [<span class=pl-s>'accuracy'</span>])</td>
</tr>
<tr>
<td id="L387" class="blob-num js-line-number" data-line-number="387"></td>
<td id="LC387" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L388" class="blob-num js-line-number" data-line-number="388"></td>
<td id="LC388" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L389" class="blob-num js-line-number" data-line-number="389"></td>
<td id="LC389" class="blob-code blob-code-inner js-file-line"><span class=pl-c># ### Training the CNN on the Training set and evaluating it on the Test set</span></td>
</tr>
<tr>
<td id="L390" class="blob-num js-line-number" data-line-number="390"></td>
<td id="LC390" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L391" class="blob-num js-line-number" data-line-number="391"></td>
<td id="LC391" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[290]:</span></td>
</tr>
<tr>
<td id="L392" class="blob-num js-line-number" data-line-number="392"></td>
<td id="LC392" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L393" class="blob-num js-line-number" data-line-number="393"></td>
<td id="LC393" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L394" class="blob-num js-line-number" data-line-number="394"></td>
<td id="LC394" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>trained</span><span class=pl-c1>=</span> <span class=pl-s1>cnn</span>.<span class=pl-en>fit</span>(<span class=pl-s1>x</span> <span class=pl-c1>=</span> <span class=pl-s1>seg_train</span>, <span class=pl-s1>validation_data</span> <span class=pl-c1>=</span> <span class=pl-s1>seg_test</span>, <span class=pl-s1>epochs</span> <span class=pl-c1>=</span> <span class=pl-c1>25</span>)</td>
</tr>
<tr>
<td id="L395" class="blob-num js-line-number" data-line-number="395"></td>
<td id="LC395" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L396" class="blob-num js-line-number" data-line-number="396"></td>
<td id="LC396" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L397" class="blob-num js-line-number" data-line-number="397"></td>
<td id="LC397" class="blob-code blob-code-inner js-file-line"><span class=pl-c># ## Evaluating the Model Performance</span></td>
</tr>
<tr>
<td id="L398" class="blob-num js-line-number" data-line-number="398"></td>
<td id="LC398" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L399" class="blob-num js-line-number" data-line-number="399"></td>
<td id="LC399" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[291]:</span></td>
</tr>
<tr>
<td id="L400" class="blob-num js-line-number" data-line-number="400"></td>
<td id="LC400" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L401" class="blob-num js-line-number" data-line-number="401"></td>
<td id="LC401" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L402" class="blob-num js-line-number" data-line-number="402"></td>
<td id="LC402" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>plt</span>.<span class=pl-en>plot</span>(<span class=pl-s1>trained</span>.<span class=pl-s1>history</span>[<span class=pl-s>'loss'</span>])</td>
</tr>
<tr>
<td id="L403" class="blob-num js-line-number" data-line-number="403"></td>
<td id="LC403" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>plt</span>.<span class=pl-en>plot</span>(<span class=pl-s1>trained</span>.<span class=pl-s1>history</span>[<span class=pl-s>'val_loss'</span>])</td>
</tr>
<tr>
<td id="L404" class="blob-num js-line-number" data-line-number="404"></td>
<td id="LC404" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>plt</span>.<span class=pl-en>title</span>(<span class=pl-s>'Model Loss'</span>)</td>
</tr>
<tr>
<td id="L405" class="blob-num js-line-number" data-line-number="405"></td>
<td id="LC405" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>plt</span>.<span class=pl-en>ylabel</span>(<span class=pl-s>'Loss'</span>)</td>
</tr>
<tr>
<td id="L406" class="blob-num js-line-number" data-line-number="406"></td>
<td id="LC406" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>plt</span>.<span class=pl-en>xlabel</span>(<span class=pl-s>'Epochs'</span>)</td>
</tr>
<tr>
<td id="L407" class="blob-num js-line-number" data-line-number="407"></td>
<td id="LC407" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>plt</span>.<span class=pl-en>legend</span>([<span class=pl-s>'train'</span>, <span class=pl-s>'test'</span>])</td>
</tr>
<tr>
<td id="L408" class="blob-num js-line-number" data-line-number="408"></td>
<td id="LC408" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>plt</span>.<span class=pl-en>show</span>()</td>
</tr>
<tr>
<td id="L409" class="blob-num js-line-number" data-line-number="409"></td>
<td id="LC409" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L410" class="blob-num js-line-number" data-line-number="410"></td>
<td id="LC410" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L411" class="blob-num js-line-number" data-line-number="411"></td>
<td id="LC411" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[292]:</span></td>
</tr>
<tr>
<td id="L412" class="blob-num js-line-number" data-line-number="412"></td>
<td id="LC412" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L413" class="blob-num js-line-number" data-line-number="413"></td>
<td id="LC413" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L414" class="blob-num js-line-number" data-line-number="414"></td>
<td id="LC414" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>plt</span>.<span class=pl-en>plot</span>(<span class=pl-s1>trained</span>.<span class=pl-s1>history</span>[<span class=pl-s>'accuracy'</span>])</td>
</tr>
<tr>
<td id="L415" class="blob-num js-line-number" data-line-number="415"></td>
<td id="LC415" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>plt</span>.<span class=pl-en>plot</span>(<span class=pl-s1>trained</span>.<span class=pl-s1>history</span>[<span class=pl-s>'val_accuracy'</span>])</td>
</tr>
<tr>
<td id="L416" class="blob-num js-line-number" data-line-number="416"></td>
<td id="LC416" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>plt</span>.<span class=pl-en>title</span>(<span class=pl-s>'Model Accuracy'</span>)</td>
</tr>
<tr>
<td id="L417" class="blob-num js-line-number" data-line-number="417"></td>
<td id="LC417" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>plt</span>.<span class=pl-en>ylabel</span>(<span class=pl-s>'Accuracy'</span>)</td>
</tr>
<tr>
<td id="L418" class="blob-num js-line-number" data-line-number="418"></td>
<td id="LC418" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>plt</span>.<span class=pl-en>xlabel</span>(<span class=pl-s>'Epochs'</span>)</td>
</tr>
<tr>
<td id="L419" class="blob-num js-line-number" data-line-number="419"></td>
<td id="LC419" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>plt</span>.<span class=pl-en>legend</span>([<span class=pl-s>'train'</span>, <span class=pl-s>'test'</span>])</td>
</tr>
<tr>
<td id="L420" class="blob-num js-line-number" data-line-number="420"></td>
<td id="LC420" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>plt</span>.<span class=pl-en>show</span>()</td>
</tr>
<tr>
<td id="L421" class="blob-num js-line-number" data-line-number="421"></td>
<td id="LC421" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L422" class="blob-num js-line-number" data-line-number="422"></td>
<td id="LC422" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L423" class="blob-num js-line-number" data-line-number="423"></td>
<td id="LC423" class="blob-code blob-code-inner js-file-line"><span class=pl-c># ## Visualizing Predictons on the Validation Set</span></td>
</tr>
<tr>
<td id="L424" class="blob-num js-line-number" data-line-number="424"></td>
<td id="LC424" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L425" class="blob-num js-line-number" data-line-number="425"></td>
<td id="LC425" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[298]:</span></td>
</tr>
<tr>
<td id="L426" class="blob-num js-line-number" data-line-number="426"></td>
<td id="LC426" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L427" class="blob-num js-line-number" data-line-number="427"></td>
<td id="LC427" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L428" class="blob-num js-line-number" data-line-number="428"></td>
<td id="LC428" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>numpy</span> <span class=pl-k>as</span> <span class=pl-s1>np</span></td>
</tr>
<tr>
<td id="L429" class="blob-num js-line-number" data-line-number="429"></td>
<td id="LC429" class="blob-code blob-code-inner js-file-line"><span class=pl-k>from</span> <span class=pl-s1>keras</span>.<span class=pl-s1>preprocessing</span> <span class=pl-k>import</span> <span class=pl-s1>image</span></td>
</tr>
<tr>
<td id="L430" class="blob-num js-line-number" data-line-number="430"></td>
<td id="LC430" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>test_image1</span> <span class=pl-c1>=</span> <span class=pl-s1>image</span>.<span class=pl-en>load_img</span>(<span class=pl-s>'111880_269359_bundle_archive/seg_pred/seg_pred/5.jpg'</span>, <span class=pl-s1>target_size</span> <span class=pl-c1>=</span> (<span class=pl-c1>64</span>, <span class=pl-c1>64</span>))</td>
</tr>
<tr>
<td id="L431" class="blob-num js-line-number" data-line-number="431"></td>
<td id="LC431" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>test_image</span> <span class=pl-c1>=</span> <span class=pl-s1>image</span>.<span class=pl-en>img_to_array</span>(<span class=pl-s1>test_image1</span>)</td>
</tr>
<tr>
<td id="L432" class="blob-num js-line-number" data-line-number="432"></td>
<td id="LC432" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>test_image</span> <span class=pl-c1>=</span> <span class=pl-s1>np</span>.<span class=pl-en>expand_dims</span>(<span class=pl-s1>test_image</span>, <span class=pl-s1>axis</span> <span class=pl-c1>=</span> <span class=pl-c1>0</span>)</td>
</tr>
<tr>
<td id="L433" class="blob-num js-line-number" data-line-number="433"></td>
<td id="LC433" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>result</span> <span class=pl-c1>=</span> <span class=pl-s1>cnn</span>.<span class=pl-en>predict</span>(<span class=pl-s1>test_image</span>)</td>
</tr>
<tr>
<td id="L434" class="blob-num js-line-number" data-line-number="434"></td>
<td id="LC434" class="blob-code blob-code-inner js-file-line"><span class=pl-k>if</span> <span class=pl-s1>result</span>[<span class=pl-c1>0</span>][<span class=pl-c1>0</span>] <span class=pl-c1>==</span> <span class=pl-c1>1</span>:</td>
</tr>
<tr>
<td id="L435" class="blob-num js-line-number" data-line-number="435"></td>
<td id="LC435" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>prediction</span> <span class=pl-c1>=</span> <span class=pl-s>'Building'</span></td>
</tr>
<tr>
<td id="L436" class="blob-num js-line-number" data-line-number="436"></td>
<td id="LC436" class="blob-code blob-code-inner js-file-line"><span class=pl-k>elif</span> <span class=pl-s1>result</span>[<span class=pl-c1>0</span>][<span class=pl-c1>1</span>] <span class=pl-c1>==</span> <span class=pl-c1>1</span>:</td>
</tr>
<tr>
<td id="L437" class="blob-num js-line-number" data-line-number="437"></td>
<td id="LC437" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>prediction</span> <span class=pl-c1>=</span> <span class=pl-s>'Forest'</span></td>
</tr>
<tr>
<td id="L438" class="blob-num js-line-number" data-line-number="438"></td>
<td id="LC438" class="blob-code blob-code-inner js-file-line"><span class=pl-k>elif</span> <span class=pl-s1>result</span>[<span class=pl-c1>0</span>][<span class=pl-c1>2</span>] <span class=pl-c1>==</span> <span class=pl-c1>1</span>:</td>
</tr>
<tr>
<td id="L439" class="blob-num js-line-number" data-line-number="439"></td>
<td id="LC439" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>prediction</span> <span class=pl-c1>=</span> <span class=pl-s>'Glacier'</span></td>
</tr>
<tr>
<td id="L440" class="blob-num js-line-number" data-line-number="440"></td>
<td id="LC440" class="blob-code blob-code-inner js-file-line"><span class=pl-k>elif</span> <span class=pl-s1>result</span>[<span class=pl-c1>0</span>][<span class=pl-c1>3</span>] <span class=pl-c1>==</span> <span class=pl-c1>1</span>:</td>
</tr>
<tr>
<td id="L441" class="blob-num js-line-number" data-line-number="441"></td>
<td id="LC441" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>prediction</span> <span class=pl-c1>=</span> <span class=pl-s>'Mountain'</span></td>
</tr>
<tr>
<td id="L442" class="blob-num js-line-number" data-line-number="442"></td>
<td id="LC442" class="blob-code blob-code-inner js-file-line"><span class=pl-k>elif</span> <span class=pl-s1>result</span>[<span class=pl-c1>0</span>][<span class=pl-c1>4</span>] <span class=pl-c1>==</span> <span class=pl-c1>1</span>:</td>
</tr>
<tr>
<td id="L443" class="blob-num js-line-number" data-line-number="443"></td>
<td id="LC443" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>prediction</span> <span class=pl-c1>=</span> <span class=pl-s>'Sea'</span></td>
</tr>
<tr>
<td id="L444" class="blob-num js-line-number" data-line-number="444"></td>
<td id="LC444" class="blob-code blob-code-inner js-file-line"><span class=pl-k>elif</span> <span class=pl-s1>result</span>[<span class=pl-c1>0</span>][<span class=pl-c1>5</span>] <span class=pl-c1>==</span> <span class=pl-c1>1</span>:</td>
</tr>
<tr>
<td id="L445" class="blob-num js-line-number" data-line-number="445"></td>
<td id="LC445" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>prediction</span> <span class=pl-c1>=</span> <span class=pl-s>'Street'</span></td>
</tr>
<tr>
<td id="L446" class="blob-num js-line-number" data-line-number="446"></td>
<td id="LC446" class="blob-code blob-code-inner js-file-line"><span class=pl-k>else</span>:</td>
</tr>
<tr>
<td id="L447" class="blob-num js-line-number" data-line-number="447"></td>
<td id="LC447" class="blob-code blob-code-inner js-file-line"> <span class=pl-en>print</span>(<span class=pl-s>"Error"</span>)</td>
</tr>
<tr>
<td id="L448" class="blob-num js-line-number" data-line-number="448"></td>
<td id="LC448" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L449" class="blob-num js-line-number" data-line-number="449"></td>
<td id="LC449" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L450" class="blob-num js-line-number" data-line-number="450"></td>
<td id="LC450" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[297]:</span></td>
</tr>
<tr>
<td id="L451" class="blob-num js-line-number" data-line-number="451"></td>
<td id="LC451" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L452" class="blob-num js-line-number" data-line-number="452"></td>
<td id="LC452" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L453" class="blob-num js-line-number" data-line-number="453"></td>
<td id="LC453" class="blob-code blob-code-inner js-file-line"><span class=pl-s1>result</span></td>
</tr>
<tr>
<td id="L454" class="blob-num js-line-number" data-line-number="454"></td>
<td id="LC454" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L455" class="blob-num js-line-number" data-line-number="455"></td>
<td id="LC455" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L456" class="blob-num js-line-number" data-line-number="456"></td>
<td id="LC456" class="blob-code blob-code-inner js-file-line"><span class=pl-c># In[299]:</span></td>
</tr>
<tr>
<td id="L457" class="blob-num js-line-number" data-line-number="457"></td>
<td id="LC457" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L458" class="blob-num js-line-number" data-line-number="458"></td>
<td id="LC458" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L459" class="blob-num js-line-number" data-line-number="459"></td>
<td id="LC459" class="blob-code blob-code-inner js-file-line"><span class=pl-en>print</span>(<span class=pl-s1>prediction</span>)</td>
</tr>
</table>
<details class="details-reset details-overlay BlobToolbar position-absolute js-file-line-actions dropdown d-none" aria-hidden="true">
<summary class="btn-octicon ml-0 px-2 p-0 bg-white border border-gray-dark rounded-1" aria-label="Inline file action toolbar">
<svg class="octicon octicon-kebab-horizontal" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path d="M8 9a1.5 1.5 0 100-3 1.5 1.5 0 000 3zM1.5 9a1.5 1.5 0 100-3 1.5 1.5 0 000 3zm13 0a1.5 1.5 0 100-3 1.5 1.5 0 000 3z"></path></svg>
</summary>
<details-menu>
<ul class="BlobToolbar-dropdown dropdown-menu dropdown-menu-se mt-2" style="width:185px">
<li>
<clipboard-copy role="menuitem" class="dropdown-item" id="js-copy-lines" style="cursor:pointer;">
Copy lines
</clipboard-copy>
</li>
<li>
<clipboard-copy role="menuitem" class="dropdown-item" id="js-copy-permalink" style="cursor:pointer;">
Copy permalink
</clipboard-copy>
</li>
<li><a class="dropdown-item js-update-url-with-hash" id="js-view-git-blame" role="menuitem" href="/Novia-2018/Intel-Image-Classification-Using-CNN/blame/87e3d7360c13ba5b2c4c12ee85c939902dfbd422/src/Intel_Project%20(1).py">View git blame</a></li>
<li><a class="dropdown-item" id="js-new-issue" role="menuitem" href="/Novia-2018/Intel-Image-Classification-Using-CNN/issues/new">Reference in new issue</a></li>
</ul>
</details-menu>
</details>
</div>
</div>
<details class="details-reset details-overlay details-overlay-dark" id="jumpto-line-details-dialog">
<summary data-hotkey="l" aria-label="Jump to line"></summary>
<details-dialog class="Box Box--overlay d-flex flex-column anim-fade-in fast linejump" aria-label="Jump to line">
<!-- '"` --><!-- </textarea></xmp> --></option></form><form class="js-jump-to-line-form Box-body d-flex" action="" accept-charset="UTF-8" method="get">
<input class="form-control flex-auto mr-3 linejump-input js-jump-to-line-field" type="text" placeholder="Jump to line…" aria-label="Jump to line" autofocus>
<button type="submit" class="btn" data-close-dialog>Go</button>
</form> </details-dialog>
</details>
</div>
</div>
</main>
</div>
</div>
<div class="footer container-xl width-full p-responsive" role="contentinfo">
<div class="position-relative d-flex flex-row-reverse flex-lg-row flex-wrap flex-lg-nowrap flex-justify-center flex-lg-justify-between pt-6 pb-2 mt-6 f6 text-gray border-top border-gray-light ">
<ul class="list-style-none d-flex flex-wrap col-12 col-lg-5 flex-justify-center flex-lg-justify-between mb-2 mb-lg-0">
<li class="mr-3 mr-lg-0">© 2020 GitHub, Inc.</li>
<li class="mr-3 mr-lg-0"><a data-ga-click="Footer, go to terms, text:terms" href="https://github.com/site/terms">Terms</a></li>
<li class="mr-3 mr-lg-0"><a data-ga-click="Footer, go to privacy, text:privacy" href="https://github.com/site/privacy">Privacy</a></li>
<li class="mr-3 mr-lg-0"><a data-ga-click="Footer, go to security, text:security" href="https://github.com/security">Security</a></li>
<li class="mr-3 mr-lg-0"><a href="https://githubstatus.com/" data-ga-click="Footer, go to status, text:status">Status</a></li>
<li><a data-ga-click="Footer, go to help, text:help" href="https://help.github.com">Help</a></li>
</ul>
<a aria-label="Homepage" title="GitHub" class="footer-octicon d-none d-lg-block mx-lg-4" href="https://github.com">
<svg height="24" class="octicon octicon-mark-github" viewBox="0 0 16 16" version="1.1" width="24" aria-hidden="true"><path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z"></path></svg>
</a>
<ul class="list-style-none d-flex flex-wrap col-12 col-lg-5 flex-justify-center flex-lg-justify-between mb-2 mb-lg-0">
<li class="mr-3 mr-lg-0"><a data-ga-click="Footer, go to contact, text:contact" href="https://github.com/contact">Contact GitHub</a></li>
<li class="mr-3 mr-lg-0"><a href="https://github.com/pricing" data-ga-click="Footer, go to Pricing, text:Pricing">Pricing</a></li>
<li class="mr-3 mr-lg-0"><a href="https://developer.github.com" data-ga-click="Footer, go to api, text:api">API</a></li>
<li class="mr-3 mr-lg-0"><a href="https://training.github.com" data-ga-click="Footer, go to training, text:training">Training</a></li>
<li class="mr-3 mr-lg-0"><a href="https://github.blog" data-ga-click="Footer, go to blog, text:blog">Blog</a></li>
<li><a data-ga-click="Footer, go to about, text:about" href="https://github.com/about">About</a></li>
</ul>
</div>
<div class="d-flex flex-justify-center pb-6">
<span class="f6 text-gray-light"></span>
</div>
</div>
<div id="ajax-error-message" class="ajax-error-message flash flash-error">
<svg class="octicon octicon-alert" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M8.22 1.754a.25.25 0 00-.44 0L1.698 13.132a.25.25 0 00.22.368h12.164a.25.25 0 00.22-.368L8.22 1.754zm-1.763-.707c.659-1.234 2.427-1.234 3.086 0l6.082 11.378A1.75 1.75 0 0114.082 15H1.918a1.75 1.75 0 01-1.543-2.575L6.457 1.047zM9 11a1 1 0 11-2 0 1 1 0 012 0zm-.25-5.25a.75.75 0 00-1.5 0v2.5a.75.75 0 001.5 0v-2.5z"></path></svg>
<button type="button" class="flash-close js-ajax-error-dismiss" aria-label="Dismiss error">
<svg class="octicon octicon-x" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M3.72 3.72a.75.75 0 011.06 0L8 6.94l3.22-3.22a.75.75 0 111.06 1.06L9.06 8l3.22 3.22a.75.75 0 11-1.06 1.06L8 9.06l-3.22 3.22a.75.75 0 01-1.06-1.06L6.94 8 3.72 4.78a.75.75 0 010-1.06z"></path></svg>
</button>
You can’t perform that action at this time.
</div>
<script crossorigin="anonymous" async="async" integrity="sha512-WcQmT2vhcClFVOaaAJV/M+HqsJ2Gq/myvl6F3gCVBxykazXTs+i5fvxncSXwyG1CSfcrqmLFw/R/bmFYzprX2A==" type="application/javascript" id="js-conditional-compat" data-src="https://github.githubassets.com/assets/compat-bootstrap-59c4264f.js"></script>
<script crossorigin="anonymous" integrity="sha512-47cvnR4cfmRA+p1TUeCLY+nCEqD7tni9XPMTW5kLy2C4SUhMCAw0NWiKqvZEM0iZRBw+8u8DgeD30fC56eV02w==" type="application/javascript" src="https://github.githubassets.com/assets/environment-bootstrap-e3b72f9d.js"></script>
<script crossorigin="anonymous" async="async" integrity="sha512-7TxtlURVUWrbnU4wcYZ7jLSTg7Wu9kx7YNV/Pul+Cst5LlM1iXEhbvJ0dxkwVU3SbXcxFHwuljtm7BwP/2ZyKw==" type="application/javascript" src="https://github.githubassets.com/assets/vendor-ed3c6d95.js"></script>
<script crossorigin="anonymous" async="async" integrity="sha512-a1TbnbJAeMGDxojD1x632hdgYt2pavdYxDtxU7WVnZtsjRSkF/3d0dZHdR5uqOD3uTHrS4Qx9avmXwhDWbjPhA==" type="application/javascript" src="https://github.githubassets.com/assets/frameworks-6b54db9d.js"></script>
<script crossorigin="anonymous" async="async" integrity="sha512-VnjgOqYR8JJqkOu7HaiIEMuTXnG+lz/BJhR7fbxHOQwEEz/BgBwhtWEyO2MlT+GF+ftMeK2gRSZZ1qA/gDGZ9A==" type="application/javascript" src="https://github.githubassets.com/assets/github-bootstrap-5678e03a.js"></script>
<script crossorigin="anonymous" async="async" integrity="sha512-4GcSWGoe36+BoWho4gtJcByZe8j43w+lt2/PDe3rmBxRVSgD29YipDwuIywe8fvOd2b2CszBqaPGxSznUtE3Xg==" type="application/javascript" data-module-id="./drag-drop.js" data-src="https://github.githubassets.com/assets/drag-drop-e0671258.js"></script>
<script crossorigin="anonymous" async="async" integrity="sha512-2k8dDHk0yt52uKvOvgc9cwOXOeJhxBfVP5kPS2BrCdytDmtEIJ2yone26vFENAyk1a2aFQ7KDgEevRQafuAf8A==" type="application/javascript" data-module-id="./gist-vendor.js" data-src="https://github.githubassets.com/assets/gist-vendor-da4f1d0c.js"></script>
<script crossorigin="anonymous" async="async" integrity="sha512-iv+4yAluOjiG50ZypUBIWIUCRDo6JEBf2twvmd5AelxgPQJO/XC1oNMGTMdDfKt30p7G7fHEOTZ2utHWDJ9PPQ==" type="application/javascript" data-module-id="./randomColor.js" data-src="https://github.githubassets.com/assets/randomColor-8affb8c8.js"></script>
<div class="js-stale-session-flash flash flash-warn flash-banner" hidden
>
<svg class="octicon octicon-alert" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M8.22 1.754a.25.25 0 00-.44 0L1.698 13.132a.25.25 0 00.22.368h12.164a.25.25 0 00.22-.368L8.22 1.754zm-1.763-.707c.659-1.234 2.427-1.234 3.086 0l6.082 11.378A1.75 1.75 0 0114.082 15H1.918a1.75 1.75 0 01-1.543-2.575L6.457 1.047zM9 11a1 1 0 11-2 0 1 1 0 012 0zm-.25-5.25a.75.75 0 00-1.5 0v2.5a.75.75 0 001.5 0v-2.5z"></path></svg>
<span class="js-stale-session-flash-signed-in" hidden>You signed in with another tab or window. <a href="">Reload</a> to refresh your session.</span>
<span class="js-stale-session-flash-signed-out" hidden>You signed out in another tab or window. <a href="">Reload</a> to refresh your session.</span>
</div>
<template id="site-details-dialog">
<details class="details-reset details-overlay details-overlay-dark lh-default text-gray-dark hx_rsm" open>
<summary role="button" aria-label="Close dialog"></summary>
<details-dialog class="Box Box--overlay d-flex flex-column anim-fade-in fast hx_rsm-dialog hx_rsm-modal">
<button class="Box-btn-octicon m-0 btn-octicon position-absolute right-0 top-0" type="button" aria-label="Close dialog" data-close-dialog>
<svg class="octicon octicon-x" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M3.72 3.72a.75.75 0 011.06 0L8 6.94l3.22-3.22a.75.75 0 111.06 1.06L9.06 8l3.22 3.22a.75.75 0 11-1.06 1.06L8 9.06l-3.22 3.22a.75.75 0 01-1.06-1.06L6.94 8 3.72 4.78a.75.75 0 010-1.06z"></path></svg>
</button>
<div class="octocat-spinner my-6 js-details-dialog-spinner"></div>
</details-dialog>
</details>
</template>
<div class="Popover js-hovercard-content position-absolute" style="display: none; outline: none;" tabindex="0">
<div class="Popover-message Popover-message--bottom-left Popover-message--large Box box-shadow-large" style="width:360px;">
</div>
</div>
</body>
</html>
| 68.291667 | 2,554 | 0.634956 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 115,832 | 0.515751 |
ad306fcfd7d3c8210a18c93a19c2085a8ed5bde6 | 1,523 | py | Python | spaces/utils.py | jgillick/Spaces | 96247701d530a017f10a0bd0ac6cf241d621be11 | [
"MIT"
] | 1 | 2018-08-12T23:43:45.000Z | 2018-08-12T23:43:45.000Z | spaces/utils.py | jgillick/Spaces | 96247701d530a017f10a0bd0ac6cf241d621be11 | [
"MIT"
] | 3 | 2016-01-13T10:12:51.000Z | 2016-01-13T10:13:15.000Z | spaces/utils.py | jgillick/Spaces | 96247701d530a017f10a0bd0ac6cf241d621be11 | [
"MIT"
] | null | null | null |
import re
import os
import uuid
from datetime import date
from django.conf import settings
def normalize_path(path):
    """Return *path* normalized for use as a clean path string.

    Collapses repeated slashes, strips trailing slashes, and slugifies
    every path segment.  A ``None`` path normalizes to the empty string;
    a leading slash is preserved.
    """
    if path is None:
        return ""
    collapsed = re.sub(r'/+', '/', path)     # squash runs of slashes
    trimmed = re.sub(r'/*$', '', collapsed)  # drop trailing slashes
    segments = [to_slug(part) for part in trimmed.split(os.sep)]
    # An empty first segment (from a leading separator) keeps the leading
    # slash intact when re-joined.
    return os.sep.join(segments)
def to_slug(value):
    """Turn an arbitrary string into a lowercase URL slug."""
    transformations = [
        (r'[\s_]+', '-', 0),        # whitespace/underscores -> single dash
        (r'[^a-z0-9\-]+', '', re.I),  # drop non-alphanumeric, non-dash chars
        (r'\-{2,}', '-', 0),        # collapse runs of dashes
        (r'(^\-)|(\-$)', '', 0),    # trim a leading/trailing dash
    ]
    slug = value.lower()
    for pattern, replacement, flags in transformations:
        slug = re.sub(pattern, replacement, slug, flags=flags)
    return slug
def upload_file(f):
    """Write an uploaded file under MEDIA_ROOT and return its public URL.

    The file is stored as ``<MEDIA_ROOT>/<current-year>/<uuid4><ext>`` so
    uploads never collide regardless of their original names.

    Args:
        f: an uploaded file object exposing ``name`` and ``chunks()``
           (e.g. Django's UploadedFile).

    Returns:
        URL string under ``MEDIA_URL`` pointing at the stored file.
    """
    # Keep only the extension of the original name; the basename is
    # replaced by a random UUID.
    _, ext = os.path.splitext(f.name)
    name = "%s%s" % (str(uuid.uuid4()), ext)
    path = date.today().strftime("%Y")
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(...)` guard.
    filepath = os.path.join(settings.MEDIA_ROOT, path)
    os.makedirs(filepath, exist_ok=True)
    # Write the upload in chunks so large files are not held in memory.
    filepath = os.path.join(filepath, name)
    with open(filepath, 'wb+') as destination:
        for chunk in f.chunks():
            destination.write(chunk)
    # NOTE(review): os.path.join uses the OS separator; on Windows this
    # would build a backslash URL — confirm deployment is POSIX-only.
    return os.path.join(settings.MEDIA_URL, path, name)
| 23.075758 | 58 | 0.594879 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 486 | 0.319107 |
ad31e247da8cf855b63a1a735072233c2abea496 | 3,608 | py | Python | babyname_parser.py | jongtaeklho/swpp-hw1-jongtaeklho | 1f0d2e4d4af985f83cbf3a9ee7548fecee68d346 | [
"Apache-2.0"
] | null | null | null | babyname_parser.py | jongtaeklho/swpp-hw1-jongtaeklho | 1f0d2e4d4af985f83cbf3a9ee7548fecee68d346 | [
"Apache-2.0"
] | null | null | null | babyname_parser.py | jongtaeklho/swpp-hw1-jongtaeklho | 1f0d2e4d4af985f83cbf3a9ee7548fecee68d346 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Modified by Alchan Kim at SNU Software Platform Lab for
# SWPP fall 2020 lecture.
import sys
import re
import os
from functools import wraps
"""Baby Names exercise
Implement the babyname parser class that parses the popular names and their ranks from a html file.
1) At first, you need to implement a decorator that checks whether the html file exists or not.
2) Also, the parser should extract tuples of (rank, male-name, female-name) from the file by using regex.
For writing regex, it's nice to include a copy of the target text for inspiration.
3) Finally, you need to implement `parse` method in `BabynameParser` class that parses the extracted tuples
with the given lambda and return a list of processed results.
"""
class BabynameFileNotFoundException(Exception):
    """
    A custom exception for the cases that the babyname file does not exist.
    Raised by the check_filename_existence decorator in place of the
    built-in FileNotFoundError.
    """
    pass
def check_filename_existence(func):
    """Decorator that converts a missing-file error into the custom exception.

    *func* is assumed to receive a directory path and a year, generate a
    filename from them, and try to open it.  If that open fails, the
    wrapper re-raises as ``BabynameFileNotFoundException`` naming the
    missing file.

    Args:
        func: the function to decorate.

    Raises:
        BabynameFileNotFoundException: if there is no such file while
            ``func`` tries to open a file.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except FileNotFoundError as pathname:
            # Chain the original error so the traceback keeps the cause.
            raise BabynameFileNotFoundException(
                "No such file: {}".format(pathname.filename)
            ) from pathname
    return wrapper
class BabynameParser:
    """Parses (rank, male-name, female-name) tuples from a babyname HTML file."""

    @check_filename_existence
    def __init__(self, dirname, year):
        """Read ``<dirname>/<year>.html`` and extract the ranking tuples.

        The result is stored on ``self.rank_to_names_tuples`` as a list like
        ``[('1', 'Michael', 'Jessica'), ('2', 'Christopher', 'Ashley'), ...]``.

        Args:
            dirname: The name of the directory where baby name html files
                are stored.
            year: The year number. int.

        Raises:
            BabynameFileNotFoundException: via the decorator, when the
                HTML file for the given year does not exist.
        """
        pathname = os.path.join(dirname, "{}.html".format(year))
        # `with` guarantees the handle is closed (the original leaked it).
        with open(pathname, 'r') as f:
            text = f.read()
        self.year = year
        # Each rank/name cell is rendered as <td>value</td>; the capture
        # group keeps just the value, replacing the original [4:-5] slicing.
        cells = re.findall(r"<td>(\w{1,60})</td>", text)
        # Group consecutive cells in threes: (rank, male-name, female-name).
        # zip silently drops any trailing partial group, and (unlike the
        # original) handles a file with no cells without raising.
        self.rank_to_names_tuples = list(
            zip(cells[0::3], cells[1::3], cells[2::3])
        )

    def parse(self, parsing_lambda):
        """Collect the results of applying *parsing_lambda* to every tuple.

        Args:
            parsing_lambda: callable that processes a single
                (string, string, string) tuple and returns something.

        Returns:
            A list of the lambda function's outputs, in rank order.
        """
        return [parsing_lambda(entry) for entry in self.rank_to_names_tuples]
| 33.100917 | 119 | 0.636918 | 1,875 | 0.519678 | 0 | 0 | 1,288 | 0.356984 | 0 | 0 | 2,388 | 0.661863 |
ad322b052b2b88031cf1b45b1db093b00f0d7cf1 | 8,137 | py | Python | tests/test_position_stk_short.py | nwillemse/nctrader | 4754ccdeae465ef4674a829f35fc3f78cf1d3ea4 | [
"MIT"
] | 1 | 2019-11-13T06:38:12.000Z | 2019-11-13T06:38:12.000Z | tests/test_position_stk_short.py | nwillemse/nctrader | 4754ccdeae465ef4674a829f35fc3f78cf1d3ea4 | [
"MIT"
] | null | null | null | tests/test_position_stk_short.py | nwillemse/nctrader | 4754ccdeae465ef4674a829f35fc3f78cf1d3ea4 | [
"MIT"
] | 1 | 2021-05-11T11:24:08.000Z | 2021-05-11T11:24:08.000Z | import unittest
from datetime import datetime
from nctrader.position2 import Position
from nctrader.price_parser import PriceParser
class TestShortRoundTripSPYPosition(unittest.TestCase):
    """
    Test a round-trip short trade in the SPY ETF: an initial short sale
    of 400 shares at $244.15 with $4.18 commission, scaled up with two
    further sells and then fully closed with two buys/covers.
    """
    def setUp(self):
        """
        Set up the Position object that will store the PnL.
        """
        # Initial short: 400 shares at 244.15, $4.18 commission,
        # bid/ask 244.05/244.06.
        self.position = Position(
            "SLD", "SPY", 400,
            PriceParser.parse(244.15), PriceParser.parse(4.18),
            PriceParser.parse(244.05), PriceParser.parse(244.06),
            datetime(2016, 1, 1)
        )
        print(self.position, '\n')

    def test_calculate_round_trip(self):
        """
        After the initial short sale, carry out two more sells/shorts and
        then close the position out with two buys/covers, checking the
        position bookkeeping (quantities, prices, commissions, PnL) after
        every step.
        """
        # -- Step 1: mark the initial 400-share short to market ----------
        print("Sell 400 SPY at 244.15 with $4.18 commission. Update market value with bid/ask of 244.05/244.06:")
        self.position.update_market_value(
            PriceParser.parse(244.05), PriceParser.parse(244.06),
            datetime(2016, 1, 2)
        )
        print(self.position, '\n')
        self.assertEqual(self.position.action, "SLD")
        self.assertEqual(self.position.ticker, "SPY")
        self.assertEqual(self.position.quantity, 400)
        self.assertEqual(self.position.open_quantity, 400)
        # Entry price is commission-adjusted (commission reduces proceeds).
        self.assertEqual(PriceParser.display(self.position.entry_price, 5), (244.15*400 - 4.18) / 400)
        self.assertEqual(PriceParser.display(self.position.exit_price, 5), 0)
        self.assertEqual(PriceParser.display(self.position.total_commission), 4.18)
        # Short position: cost basis and market value are negative.
        self.assertEqual(PriceParser.display(self.position.cost_basis), -1*244.15*400 + 4.18)
        self.assertEqual(PriceParser.display(self.position.market_value), -1*244.06*400, 2)
        self.assertEqual(PriceParser.display(self.position.unrealised_pnl), round((-1*244.06*400) - (-1*244.15*400 + 4.18),2) , 2)
        self.assertEqual(PriceParser.display(self.position.realised_pnl), 0.00)

        # -- Step 2: sell 250 more; short grows to 650 shares ------------
        print("Sell 250 SPY at 243.88 with $2.61 commission. Update market value with bid/ask of 243.47/243.48:")
        self.position.transact_shares(
            "SLD", 250, PriceParser.parse(243.88), PriceParser.parse(2.61)
        )
        self.position.update_market_value(
            PriceParser.parse(243.47), PriceParser.parse(243.48),
            datetime(2016, 1, 3)
        )
        print(self.position, '\n')
        self.assertEqual(self.position.action, "SLD")
        self.assertEqual(self.position.ticker, "SPY")
        self.assertEqual(self.position.quantity, 400+250)
        self.assertEqual(self.position.open_quantity, 400+250)
        # Weighted average of both fills, commissions included.
        self.assertEqual(PriceParser.display(self.position.entry_price, 5), round((244.15*400+4.18 + 243.88*250+2.61) / 650, 5))
        self.assertEqual(PriceParser.display(self.position.exit_price, 5), 0)
        self.assertEqual(PriceParser.display(self.position.total_commission), round(4.18+2.61, 2))
        self.assertEqual(PriceParser.display(self.position.cost_basis), round(-1*244.15*400 + 4.18 -1*243.88*250 + 2.61, 2))
        self.assertEqual(PriceParser.display(self.position.market_value), -1*243.48*650, 2)
        self.assertEqual(PriceParser.display(self.position.unrealised_pnl), round((-1*243.48*650) - (-1*244.15*400 + 4.18 -1*243.88*250 + 2.61),2) , 2)
        self.assertEqual(PriceParser.display(self.position.realised_pnl), 0.00)

        # -- Step 3: sell 150 more; short grows to 800 shares ------------
        print("Sell 150 SPY at 243.50 with $1.81 commission. Update market value with bid/ask of 243.50/243.51:")
        self.position.transact_shares(
            "SLD", 150, PriceParser.parse(243.50), PriceParser.parse(1.81)
        )
        self.position.update_market_value(
            PriceParser.parse(243.50), PriceParser.parse(243.51),
            datetime(2016, 1, 4)
        )
        print(self.position, '\n')
        print("bots:", self.position.bots)
        print("solds:", self.position.solds)
        self.assertEqual(self.position.action, "SLD")
        self.assertEqual(self.position.ticker, "SPY")
        self.assertEqual(self.position.quantity, 400+250+150)
        self.assertEqual(self.position.open_quantity, 400+250+150)
        self.assertEqual(PriceParser.display(self.position.entry_price, 5), round((244.15*400+4.18 + 243.88*250+2.61 + 243.50*150+1.81) / 800, 5))
        self.assertEqual(PriceParser.display(self.position.exit_price, 5), 0)
        self.assertEqual(PriceParser.display(self.position.total_commission), round(4.18+2.61+1.81, 2))
        self.assertEqual(PriceParser.display(self.position.cost_basis), round(-1*244.15*400 + 4.18 -1*243.88*250 + 2.61 -1*243.50*150 + 1.81, 2))
        self.assertEqual(PriceParser.display(self.position.market_value), -1*243.51*800, 2)
        self.assertEqual(PriceParser.display(self.position.unrealised_pnl), round((-1*243.51*800) - (-1*244.15*400 + 4.18 -1*243.88*250 + 2.61 -1*243.50*150 + 1.81),2) , 2)
        self.assertEqual(PriceParser.display(self.position.realised_pnl), 0.00)

        # -- Step 4: buy/cover 50 shares; 750 remain short; first realised
        #    PnL appears (FIFO against the 400-share lot) -----------------
        print("Buy 50 SPY at 243.77 with $1.00 commission. Update market value with bid/ask of 243.84/243.86:")
        self.position.transact_shares(
            "BOT", 50, PriceParser.parse(243.77), PriceParser.parse(1.00)
        )
        self.position.update_market_value(
            PriceParser.parse(243.84), PriceParser.parse(243.86),
            datetime(2016, 1, 5)
        )
        print(self.position, '\n')
        self.assertEqual(self.position.action, "SLD")
        self.assertEqual(self.position.ticker, "SPY")
        self.assertEqual(self.position.quantity, 400+250+150)
        self.assertEqual(self.position.open_quantity, 400+250+150-50)
        self.assertEqual(PriceParser.display(self.position.entry_price, 5), round((244.15*400+4.18 + 243.88*250+2.61 + 243.50*150+1.81) / 800, 5))
        self.assertEqual(PriceParser.display(self.position.exit_price, 5), (243.77*50+1)/50)
        self.assertEqual(PriceParser.display(self.position.total_commission), round(4.18+2.61+1.81+1, 2))
        # Cost basis drops by the covered 50 shares; the first lot's
        # commission is pro-rated (350/400 of $4.18 remains).
        self.assertEqual(PriceParser.display(self.position.cost_basis), round(-1*244.15*350 + 350/400*4.18 -1*243.88*250 + 2.61 -1*243.50*150 + 1.81, 4))
        self.assertEqual(PriceParser.display(self.position.market_value), -1*243.86*750, 2)
        self.assertEqual(PriceParser.display(self.position.unrealised_pnl), round((-1*243.86*750) - (-1*244.15*350 + 350/400*4.18 -1*243.88*250 + 2.61 -1*243.50*150 + 1.81), 4))
        self.assertEqual(PriceParser.display(self.position.realised_pnl), 17.4775)

        # -- Step 5: buy/cover the remaining 750 shares; position closed --
        print("Buy 750 SPY at 244.29 with $3.75 commission. Update market value with bid/ask of 243.84/243.86:")
        self.position.transact_shares(
            "BOT", 750, PriceParser.parse(244.29), PriceParser.parse(3.75)
        )
        self.position.update_market_value(
            PriceParser.parse(243.29), PriceParser.parse(243.29),
            datetime(2016, 1, 6)
        )
        print(self.position, '\n')
        print("bots:", self.position.bots)
        print("solds:", self.position.solds)
        self.assertEqual(self.position.action, "SLD")
        self.assertEqual(self.position.ticker, "SPY")
        self.assertEqual(self.position.quantity, 400+250+150)
        self.assertEqual(self.position.open_quantity, 400+250+150-50-750)
        self.assertEqual(PriceParser.display(self.position.entry_price, 5), round((244.15*400+4.18 + 243.88*250+2.61 + 243.50*150+1.81) / 800, 5))
        self.assertEqual(PriceParser.display(self.position.exit_price, 5), round((243.77*50+1 + 244.29*750+3.75)/800, 5))
        self.assertEqual(PriceParser.display(self.position.total_commission), round(4.18+2.61+1.81+1+3.75, 2))
        # Fully closed: no basis, no market value, no unrealised PnL;
        # the whole round trip nets a $264.35 loss.
        self.assertEqual(PriceParser.display(self.position.cost_basis), 0)
        self.assertEqual(PriceParser.display(self.position.market_value), 0)
        self.assertEqual(PriceParser.display(self.position.unrealised_pnl), 0)
        self.assertEqual(PriceParser.display(self.position.realised_pnl), -264.35)
# Allow running this test module directly: `python <this file>`.
if __name__ == "__main__":
    unittest.main()
| 54.610738 | 177 | 0.665233 | 7,953 | 0.977387 | 0 | 0 | 0 | 0 | 0 | 0 | 1,028 | 0.126336 |
ad32a03d43ea33a557f3e2f1e814ed32989f10d1 | 1,280 | py | Python | app/services/bgm_tv/bgm_tv.py | renovate-tests/pol | dca9aa4ce34273575d69a140dc3bb1d2ac14ecbf | [
"MIT"
] | 5 | 2019-05-11T05:14:44.000Z | 2019-09-07T10:22:53.000Z | app/services/bgm_tv/bgm_tv.py | renovate-tests/pol | dca9aa4ce34273575d69a140dc3bb1d2ac14ecbf | [
"MIT"
] | 161 | 2019-09-09T07:30:25.000Z | 2022-03-14T19:52:43.000Z | app/services/bgm_tv/bgm_tv.py | renovate-tests/pol | dca9aa4ce34273575d69a140dc3bb1d2ac14ecbf | [
"MIT"
] | 3 | 2019-09-07T13:15:05.000Z | 2020-05-06T04:30:46.000Z | from typing import Optional
import requests
from app.core import config
from app.services.bgm_tv.model import UserInfo, SubjectWithEps
class BgmApi:
    """HTTP client for the bgm.tv API, usable as a context manager."""

    def __init__(self, mirror=False):
        """Create a session against the official host or the mirror host."""
        if mirror:
            host = "mirror.api.bgm.rin.cat"
            agent = config.REQUEST_SERVICE_USER_AGENT
        else:
            host = "api.bgm.tv"
            agent = config.REQUEST_USER_AGENT
        self.host = host
        self.session = requests.Session()
        self.session.headers["user-agent"] = agent

    def url(self, path):
        """Build the absolute https URL for *path* on the configured host."""
        return f"https://{self.host}{path}"

    @staticmethod
    def error_in_response(data: dict):
        """Return True when the API payload reports an error."""
        return "error" in data

    def subject_eps(self, subject_id: int) -> Optional[SubjectWithEps]:
        """Fetch a subject with its episode list; None on API error."""
        payload = self.session.get(self.url(f"/subject/{subject_id}/ep")).json()
        if self.error_in_response(payload):
            return None
        return SubjectWithEps.parse_obj(payload)

    def get_user_info(self, user_id: str) -> Optional[UserInfo]:
        """Fetch a user's profile; None on API error."""
        payload = self.session.get(self.url(f"/user/{user_id}")).json()
        if self.error_in_response(payload):
            return None
        return UserInfo.parse_obj(payload)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close the underlying connection pool when leaving the context.
        self.session.close()
| 29.767442 | 82 | 0.640625 | 1,140 | 0.890625 | 0 | 0 | 83 | 0.064844 | 0 | 0 | 140 | 0.109375 |
ad32a493dcd06ab200267b7f9637b1ea5be35b2c | 1,164 | py | Python | backend_thread.py | HusseinLezzaik/Stock-Market-Prediction | 03f6b835466ebee9d4ee5ad217c4ed5c57b60a30 | [
"MIT"
] | null | null | null | backend_thread.py | HusseinLezzaik/Stock-Market-Prediction | 03f6b835466ebee9d4ee5ad217c4ed5c57b60a30 | [
"MIT"
] | null | null | null | backend_thread.py | HusseinLezzaik/Stock-Market-Prediction | 03f6b835466ebee9d4ee5ad217c4ed5c57b60a30 | [
"MIT"
] | null | null | null | import time
import numpy as np
import yahoo_fin.stock_info as si
from PyQt5.QtCore import QThread, pyqtSignal
from data_processing.download_data import download_data
class GetLivePrice(QThread):
    """Background thread that polls live ETF prices and pushes them to the UI."""
    # Signal used to transmit the fetched prices and notify the UI to update.
    update_data = pyqtSignal(list)

    # ETF symbols were previously loaded from a local file:
    # Haifei: etfs = np.load('./Data/etfs.npy').tolist()
    # Elona:
    def run(self):
        etfs = ['^FCHI', ]
        while True:
            for etf in etfs:
                live_price_array = []
                live_price = si.get_live_price(etf)
                live_price = round(live_price, 2)
                live_price_array.append(live_price)
                print(live_price)
                # Send the latest price(s) to the UI via the signal.
                self.update_data.emit(live_price_array)
            # Pause between polling cycles (30 seconds).
            time.sleep(30)
class UpdateHistData(QThread):
    """Background thread that downloads historical data and signals completion."""
    # Elona Comment : etfs = np.load('./Data/etfs.npy').tolist()
    # Timeframes to download: daily, weekly, monthly.
    tfs = ['1d', '1wk', '1mo']
    update_hist_data_signal = pyqtSignal(str)

    def run(self):
        # Haifei
        # download_data(self.etfs, self.tfs)
        # Elona
        download_data(['^FCHI',], self.tfs)
        # Tell listeners the download is done.
        self.update_hist_data_signal.emit('finish')
ad344532336002200ed3df2295a2d40ee97f93bb | 2,183 | py | Python | csp_observer/settings.py | flxn/django-csp-observer | a7848085c94c53c06b523096a384118a1deae3e3 | [
"MIT"
] | 1 | 2020-08-26T13:58:10.000Z | 2020-08-26T13:58:10.000Z | csp_observer/settings.py | flxn/django-csp-observer | a7848085c94c53c06b523096a384118a1deae3e3 | [
"MIT"
] | null | null | null | csp_observer/settings.py | flxn/django-csp-observer | a7848085c94c53c06b523096a384118a1deae3e3 | [
"MIT"
] | null | null | null | from django.conf import settings
from .models import StoredConfig
NAMESPACE = getattr(settings, 'CSP_OBSERVER_NAMESPACE' , 'CSPO')
def ns_getattr(object, name, default=None):
    """Look up the namespaced setting ``<NAMESPACE>_<name>`` on *object*.

    Every call site in this module passes the Django ``settings`` object
    as the first argument, so honouring that argument (instead of
    silently ignoring it and always reading the module-level ``settings``,
    as the original did) is behaviour-identical while fixing the latent
    bug that the *object* parameter was unused.
    """
    return getattr(object, '_'.join([NAMESPACE, name]), default)
# Report-only mode (no enforcement) versus enforcing CSP headers.
REPORT_ONLY = ns_getattr(settings, 'REPORT_ONLY', True)
# URL paths on which the CSP observer is active.
ENABLED_PATHS = ns_getattr(settings, 'ENABLED_PATHS', [
    "/"
])
# Default Content-Security-Policy directives.
CSP_POLICIES = ns_getattr(settings, 'CSP_POLICIES', {
    'default-src': ["'self'"],
    'script-src': ["'self'"],
    'style-src': ["'self'"],
    'connect-src': ["'self'"],
    'style-src-attr': ["'unsafe-inline'"],
})
USE_NEW_API = ns_getattr(settings, 'USE_NEW_API', False)
# Seconds to wait for results before they are considered complete
# (presumably consumed by the client UI — confirm against its code).
RESULT_WAIT_TIME = ns_getattr(settings, 'RESULT_WAIT_TIME', 10)
# Whether nonces are attached to script/style tags.
USE_SCRIPT_NONCE = ns_getattr(settings, 'USE_SCRIPT_NONCE', True)
USE_STYLE_NONCE = ns_getattr(settings, 'USE_STYLE_NONCE', True)
# Number of days session records are kept.
SESSION_KEEP_DAYS = ns_getattr(settings, 'SESSION_KEEP_DAYS', 14)
# Master/remote reporting topology settings.
IS_MASTER_COLLECTOR = ns_getattr(settings, 'IS_MASTER_COLLECTOR', False)
AUTHORIZED_REPORTERS = ns_getattr(settings, 'AUTHORIZED_REPORTERS', [])
REMOTE_SECRET = ns_getattr(settings, 'REMOTE_SECRET', '')
REMOTE_REPORTING = ns_getattr(settings, 'REMOTE_REPORTING', False)
# Remote collector base URL, normalized to have no trailing slash.
REMOTE_CSP_OBSERVER_URL = ns_getattr(settings, 'REMOTE_CSP_OBSERVER_URL', "").rstrip('/')
CLIENTUI_VISIBILITY = ns_getattr(settings, 'CLIENTUI_VISIBILITY', 'always')
# Source URL and refresh cadence for downloadable detection rules.
RULE_UPDATE_FILE = ns_getattr(settings, 'RULE_UPDATE_FILE', 'https://raw.githubusercontent.com/flxn/csp-observer-data/master/rules.json')
RULE_UPDATE_INTERVAL = ns_getattr(settings, 'RULE_UPDATE_INTERVAL', 60 * 60 * 6) # in seconds
VOLUNTARY_DATA_SHARING_URL = 'https://csp-observer-reports.flxn.de'
#
# Database-stored config values
#
# Key under which the timestamp of the last rule update is stored.
KEY_LAST_RULE_UPDATE = 'LAST_RULE_UPDATE'
def get_all_stored():
    """Return a queryset of all database-stored config entries."""
    return StoredConfig.objects.all()
def delete_all_stored():
    """Delete every database-stored config entry."""
    StoredConfig.objects.all().delete()
def put_stored(key, value):
    """Create or update the database-stored config entry *key* with *value*."""
    # The `created` flag returned by get_or_create is not needed here.
    obj, _ = StoredConfig.objects.get_or_create(key=str(key))
    obj.value = value
    obj.save()
def get_stored(key, default=None):
    """Return the stored config value for *key* as a string, or *default*
    when no entry with that key exists."""
    try:
        record = StoredConfig.objects.get(key=key)
    except StoredConfig.DoesNotExist:
        return default
    return str(record.value)
| 33.584615 | 137 | 0.737975 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 631 | 0.289052 |
ad357f0d42f5dc8811eeedd1a1ffd8d72bef3528 | 585 | py | Python | examples/gemini examples/basic_private_api_usage.py | wiqram/robin_stocks | 4e5dcf6515659e7f3f571ffa5e18e44afe3ab8a5 | [
"MIT"
] | null | null | null | examples/gemini examples/basic_private_api_usage.py | wiqram/robin_stocks | 4e5dcf6515659e7f3f571ffa5e18e44afe3ab8a5 | [
"MIT"
] | null | null | null | examples/gemini examples/basic_private_api_usage.py | wiqram/robin_stocks | 4e5dcf6515659e7f3f571ffa5e18e44afe3ab8a5 | [
"MIT"
] | null | null | null | ''' The most basic way to use the Private API. I recommend renaming the file .env
to .env and filling out the gemini api key information. The dotenv package loads the .env (or .env)
file and the os.environ() function reads the values from the file.ß
'''
import os
import robin_stocks.gemini as g
from dotenv import load_dotenv
##
ticker = "btcusd"
##
g.login(os.environ['gemini_account_key'], os.environ['gemini_account_secret'])
my_trades, error = g.get_trades_for_crypto(ticker, jsonify=True)
if error:
print("oh my an error")
else:
print("no errors here")
print(my_trades)
| 30.789474 | 99 | 0.748718 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 341 | 0.581911 |
ad35d6c682f13b8a035b9d728de67d944b584d36 | 420 | py | Python | cozens_circles_beams.py | hattfe/Math | 7957c31a830071195d11a206ce2eea9f21d62f98 | [
"MIT"
] | null | null | null | cozens_circles_beams.py | hattfe/Math | 7957c31a830071195d11a206ce2eea9f21d62f98 | [
"MIT"
] | null | null | null | cozens_circles_beams.py | hattfe/Math | 7957c31a830071195d11a206ce2eea9f21d62f98 | [
"MIT"
] | null | null | null | import turtle
t = turtle.Pen()
t.speed(10)
x1 = []
y1 = []
for i in range(0, 36):
t.circle(200,10)
x, y =(t.pos())
x1.append(int(x))
y1.append(int(y))
print(x1, y1)
x11 = x1[0]
y11 = y1[0]
def basagit(node, adım):
t.penup()
t.goto(x1[node],y1[node])
t.pendown()
t.goto(x1[node+node*adım], y1[node+node*adım])
for n in range(len(x1)):
basagit(n,2)
| 15.555556 | 50 | 0.519048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ad3655bbbb52c012300bf70990dabf83a7394946 | 1,808 | py | Python | mysite/stocktrader/models.py | bennett39/stocktrader | c56a72f7a22367241d7126f8ed5b17f8d50a93db | [
"MIT"
] | 1 | 2020-10-09T03:19:10.000Z | 2020-10-09T03:19:10.000Z | mysite/stocktrader/models.py | bennett39/stocktrader | c56a72f7a22367241d7126f8ed5b17f8d50a93db | [
"MIT"
] | null | null | null | mysite/stocktrader/models.py | bennett39/stocktrader | c56a72f7a22367241d7126f8ed5b17f8d50a93db | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.core.validators import MinValueValidator
# Create your models here.
class Profile(models.Model):
    """ Extend built-in Django User model with cash value """
    # One profile per auth user; deleting the User also deletes the Profile.
    user = models.OneToOneField(
        User,
        on_delete=models.CASCADE,
    )
    # Cash balance available for trading; new accounts start at 10,000
    # and the validator forbids negative balances at form/model validation.
    cash = models.FloatField(
        default=10000,
        validators=[MinValueValidator(0)],
    )
    def __str__(self):
        # Display the profile as its owner's username.
        return f'{self.user.username}'
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """ When a new user is created, also create a new Profile """
    # Guard clause: only the initial save of a User should create a Profile.
    if not created:
        return
    Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    """ Save the OneToOne linked Profile on the User instance """
    # Runs on every User post_save, so profile changes made through the
    # related user object are persisted alongside the user.
    instance.profile.save()
class Stock(models.Model):
    """A tradeable security identified by its ticker symbol."""
    # Ticker symbol, e.g. 'AAPL'; unique so each security appears once.
    symbol = models.CharField(
        max_length=10,
        unique=True,
    )
    # Human-readable company/security name.
    name = models.CharField(
        max_length=80,
    )
    def __str__(self):
        # e.g. 'AAPL - Apple Inc.' with the name truncated to 10 characters.
        return f'{self.symbol} - {self.name[:10]}'
class Transaction(models.Model):
    """ Stores an append-only list of transactions. """
    # Owning profile; reverse access via profile.transactions.
    user = models.ForeignKey(
        'Profile',
        on_delete=models.CASCADE,
        related_name='transactions',
    )
    stock = models.ForeignKey(
        'Stock',
        on_delete=models.CASCADE,
    )
    # Share count; positive means a buy, negative a sell (see __str__).
    quantity = models.FloatField()
    # Per-share price at execution time.
    price = models.FloatField()
    # Set automatically when the row is first inserted.
    time = models.DateTimeField(
        auto_now_add=True,
    )
    def __str__(self):
        buy_sell = 'BUY' if self.quantity > 0 else 'SELL'
        return f'{self.user} - {buy_sell} {self.stock}'
| 25.828571 | 65 | 0.654867 | 1,155 | 0.638827 | 0 | 0 | 401 | 0.221792 | 0 | 0 | 395 | 0.218473 |
ad375040d6b6884d5223dc6bfe23eb549e651e7b | 322 | py | Python | sql/mysql_demo.py | garyhu1/first-python | 01731d419a64aec9683b450d0e8e233f4b5cc9a7 | [
"Apache-2.0"
] | 1 | 2019-09-03T11:42:38.000Z | 2019-09-03T11:42:38.000Z | sql/mysql_demo.py | garyhu1/first-python | 01731d419a64aec9683b450d0e8e233f4b5cc9a7 | [
"Apache-2.0"
] | null | null | null | sql/mysql_demo.py | garyhu1/first-python | 01731d419a64aec9683b450d0e8e233f4b5cc9a7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''Demo: connect to a MySQL database and fetch one user row.'''
__author__ = 'garyhu1'
import mysql.connector

# Connect to the database (demo credentials; never hard-code real passwords).
conn = mysql.connector.connect(user='root', password='****', database='websites')
try:
    cursor = conn.cursor()
    # Parameterized query: the driver escapes the value (no SQL injection).
    cursor.execute('select * from users where id = %s', (7,))
    value = cursor.fetchall()
    print(value)
    cursor.close()
finally:
    # BUG FIX: the connection was never closed before (only the cursor was),
    # leaking the server-side session on every run.
    conn.close()
| 15.333333 | 80 | 0.642857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 150 | 0.438596 |
ad379fc8f1e12b0d1ebe0428591ed665c7bd1cdd | 1,017 | py | Python | plugins/remind/plugin.py | CrushAndRun/Automata | fb16f2e4e985e22adcd244b8a81387f2678f68be | [
"MIT"
] | null | null | null | plugins/remind/plugin.py | CrushAndRun/Automata | fb16f2e4e985e22adcd244b8a81387f2678f68be | [
"MIT"
] | null | null | null | plugins/remind/plugin.py | CrushAndRun/Automata | fb16f2e4e985e22adcd244b8a81387f2678f68be | [
"MIT"
] | null | null | null | from twisted.internet import reactor
class RemindPlugin(object):
    """Bot plugin that schedules a delayed reminder message via twisted."""

    def remind(self, cardinal, user, channel, msg):
        """Handle ``.remind <minutes> <message>``: after <minutes> minutes,
        message the requesting user with <message>."""
        message = msg.split(None, 2)
        if len(message) < 3:
            cardinal.sendMsg(channel, "Syntax: .remind <minutes> <message>")
            return
        try:
            # Parse once (previously int() ran twice) and reject non-positive
            # delays. The old code had a dead `except AssertionError` handler
            # that could never fire because nothing ever asserted.
            minutes = int(message[1])
            if minutes <= 0:
                raise ValueError
            reactor.callLater(60 * minutes, cardinal.sendMsg, user.group(1), message[2])
            cardinal.sendMsg(channel, "%s: You will be reminded in %d minutes." % (user.group(1), minutes))
        except ValueError:
            cardinal.sendMsg(channel, "You did not give a valid number of minutes to be reminded in.")

    remind.commands = ['remind']
    remind.help = ["Sets up a reminder, where the bot will message the user after a predetermined time.",
                   "Syntax: .remind <minutes> <message>"]
def setup():
    # Entry point returning the plugin instance (presumably invoked by the
    # bot's plugin loader — confirm against the loader's API).
    return RemindPlugin()
| 42.375 | 115 | 0.627335 | 938 | 0.922321 | 0 | 0 | 0 | 0 | 0 | 0 | 334 | 0.328417 |
ad37a81771adfbf9c4a7bbfe30efdf3df704ada9 | 1,436 | py | Python | kkutil/loader/loader.py | kaka19ace/kkutils | 1ac449488d85ba2c6b18c5dc9cf77a0bc36579b1 | [
"MIT"
] | 1 | 2015-12-13T18:42:52.000Z | 2015-12-13T18:42:52.000Z | kkutil/loader/loader.py | kaka19ace/kkutil | 1ac449488d85ba2c6b18c5dc9cf77a0bc36579b1 | [
"MIT"
] | null | null | null | kkutil/loader/loader.py | kaka19ace/kkutil | 1ac449488d85ba2c6b18c5dc9cf77a0bc36579b1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import threading
import logging
class Loader(object):
    """Thread-safe, lazily-cached accessor for application configuration.

    A config source is installed once via :meth:`set_config`; sections are
    then fetched on demand and memoized in ``_config_cache_map``.
    """

    _config = NotImplemented     # kkutils.config.Config instance
    _config_cache_map = {}       # key -> cached config dict
    _lock = threading.RLock()    # guards config installation and cache fills

    @classmethod
    def set_config(cls, config):
        """
        :param config: kkutils.config.Config instance
        """
        with cls._lock:
            cls._config = config

    @classmethod
    def set_logger(cls, logger):
        """Install the class-level logger."""
        with cls._lock:
            cls.logger = logger

    @classmethod
    def _load_by_key(cls, key):
        # Double-checked locking: hit the underlying config source at most
        # once per key; later lookups are served from the cache lock-free.
        if cls._config_cache_map.get(key, None) is None:
            with cls._lock:
                if cls._config_cache_map.get(key, None) is None:
                    cls._config_cache_map[key] = cls._config.get_config_data(key)
        return cls._config_cache_map[key]

    @classmethod
    def load_config(cls, key, field=None):
        """
        just support two level config
        :param key: first level
        :param field: if not None: just want second level config
        :return: dict about config
        """
        if field is None:
            # BUG FIX: forward `key` — previously `cls._load_by_key()` was
            # called with no argument and raised TypeError on every
            # field-less lookup.
            return cls._load_by_key(key)
        entry_config = cls._load_by_key(key)
        sub_config = entry_config.get(field)
        if not sub_config:
            raise AttributeError("could not get sub config: key={0} field={1}".format(key, field))
        return sub_config


Loader.set_logger(logger=logging.getLogger(Loader.__name__))
| 25.192982 | 98 | 0.608635 | 1,288 | 0.896936 | 0 | 0 | 1,157 | 0.80571 | 0 | 0 | 345 | 0.240251 |
ad3a349f295eb61db842675383d6912392309c0a | 3,879 | py | Python | bdt2cpp/XGBoostParser.py | bixel/bdt2cpp | bffd94d777181a3a3bba81a8173ca57ead65c27c | [
"MIT"
] | 3 | 2017-10-01T15:25:10.000Z | 2021-04-10T18:42:19.000Z | bdt2cpp/XGBoostParser.py | bixel/bdt2cpp | bffd94d777181a3a3bba81a8173ca57ead65c27c | [
"MIT"
] | 3 | 2020-02-25T17:02:56.000Z | 2021-05-04T06:49:49.000Z | bdt2cpp/XGBoostParser.py | bixel/bdt2cpp | bffd94d777181a3a3bba81a8173ca57ead65c27c | [
"MIT"
] | null | null | null | import re
from .Node import Node
class XGBoostNode(Node):
    """Node of an XGBoost decision tree parsed from the text model dump.

    Branch lines look like ``0:[f2<1.5]`` and leaf lines like ``3:leaf=0.12``.
    """

    # Optionally signed float literal, with optional exponent.
    FLOAT_REGEX = '[+-]?\d+(\.\d+)?([eE][+-]?\d+)?'
    # Decision line: node id, feature name, '<' comparison, cut value.
    BRANCH_REGEX = re.compile(f'(?P<branch>\d+):\[(?P<feature>\w+)(?P<comp><)(?P<value>{FLOAT_REGEX})\]')
    # Leaf line: node id and leaf weight.
    LEAF_REGEX = re.compile(f'(?P<leaf>\d+):leaf=(?P<value>{FLOAT_REGEX})')
    # Fallback for XGBoost's default feature names such as 'f12' -> index 12.
    FEATURE_REGEX = re.compile('\w(?P<id>\d+)')

    def __init__(self, parent=None, line='', feature_index_dict=None):
        """Parse one dump line into a node.

        :param parent: parent XGBoostNode, or None for a tree root
        :param line: one line of the XGBoost text dump
        :param feature_index_dict: optional {feature name: position} mapping
        """
        super().__init__(parent=parent)
        # propagate any feature index dict down from the parent
        self.feature_index_dict = None
        if feature_index_dict or parent:
            self.feature_index_dict = feature_index_dict or parent.feature_index_dict
        match_leaf = self.LEAF_REGEX.search(line)
        if match_leaf:
            self.weight = float(match_leaf.groupdict().get('value'))
            self.final = True
        else:
            self.weight = 0
            self.final = False
        match_branch = self.BRANCH_REGEX.search(line)
        if match_branch:
            self.cut_value = float(match_branch.groupdict().get('value'))
            self.feature = match_branch.groupdict().get('feature')
            if self.feature_index_dict:
                self.feature_index = self.feature_index_dict[self.feature]
            else:
                feature_match = self.FEATURE_REGEX.search(self.feature)
                if not feature_match:
                    raise ValueError(f'Feature {self.feature} needs to be '
                        'matched with its correct position in the feature '
                        'value vector. Please give a list of feature names'
                        ' in the correct order with `--feature-names`.')
                # BUG FIX: convert the regex group to int so feature_index has
                # the same type as the enumerate()-based indices produced in
                # the dict branch above (previously a str leaked through here).
                self.feature_index = int(feature_match.groupdict().get('id'))
        else:
            # Not a branch line: this node carries no split.
            self.cut_value = None
            self.feature = None
            self.feature_index = None
def get_feature_names(lines):
    """Return the set of feature names referenced by branch lines in *lines*."""
    matches = (XGBoostNode.BRANCH_REGEX.search(line) for line in lines)
    return {match.groupdict().get('feature') for match in matches if match}
def parse_model(filename, feature_names):
    """Parse an XGBoost text dump into a list of tree root nodes.

    :param filename: path to the model dump; one node per line, trees
        separated by lines starting with 'booster'
    :param feature_names: optional ordered list of feature names used to map
        each feature to its position in the feature vector
    :return: list of root XGBoostNode objects, one per tree
    :raises ValueError: if feature_names does not cover the model's features
    """
    trees = []
    with open(filename, 'r') as f:
        lines = f.readlines()
    # build the feature name dict if necessary
    if feature_names:
        # check that the feature names are in line with the names found in
        # the tree
        if not set(feature_names) >= get_feature_names(lines):
            raise ValueError('The given feature names do not properly describe'
                'the features found in the model. Please check that your '
                'argument for `--feature-names` is a proper superset of the '
                'feature names used in the model.\nThese features have been '
                f'found in the model:\n{" ".join(get_feature_names(lines))}')
        feature_index_dict = {name: i for i, name in enumerate(feature_names)}
    else:
        feature_index_dict = None
    node = None
    for i, line in enumerate(lines):
        # save finished tree when the next 'booster' header appears
        if line.startswith('booster'):
            if node:
                trees.append(node.root)
            node = None
            continue
        # start a new tree
        if node is None:
            node = XGBoostNode(line=line, feature_index_dict=feature_index_dict)
            continue
        # move upwards while the current node is a leaf or fully populated
        while node.final or (node.parent and node.left and node.right):
            node = node.parent
        # fill left child first, then right (dump lists children in order)
        if not node.left:
            node.left = XGBoostNode(parent=node, line=line)
            node = node.left
            continue
        if not node.right:
            node.right = XGBoostNode(parent=node, line=line)
            node = node.right
            continue
    # append the final tree (not followed by another 'booster' header)
    trees.append(node.root)
    return trees
| 35.916667 | 105 | 0.588812 | 1,841 | 0.474607 | 0 | 0 | 0 | 0 | 0 | 0 | 947 | 0.244135 |
ad3b4e7ac7bf858225d06af83d3f9f52e0bd5d3f | 2,896 | py | Python | Backend/ChatBot/model.py | paucutrina/RareHacks_Chatbot | c7ecfef693bf2f477d090629d6eecf7b0bf57872 | [
"MIT"
] | null | null | null | Backend/ChatBot/model.py | paucutrina/RareHacks_Chatbot | c7ecfef693bf2f477d090629d6eecf7b0bf57872 | [
"MIT"
] | null | null | null | Backend/ChatBot/model.py | paucutrina/RareHacks_Chatbot | c7ecfef693bf2f477d090629d6eecf7b0bf57872 | [
"MIT"
] | null | null | null | import pickle
import json
import random
# NLP stuff
import nltk
# nltk.download('punkt')  # run once to fetch tokenizer models
from nltk.stem.lancaster import LancasterStemmer
# TensorFlow stuff
import numpy as np
import tflearn
import tensorflow as tf
import os
import time

stemmer = LancasterStemmer()

# Build the intent table: every entities/<name>.txt file becomes one intent
# class whose non-empty lines are the example patterns.
intents_dict = dict()
folder_entities = "entities/"
for file_name in os.listdir(folder_entities):
    name_entity = file_name.split(".txt")[0]
    print(name_entity)
    intents_dict[name_entity] = list()
    # BUG FIX: use a context manager so each entity file is closed
    # (previously the handles leaked).
    with open(folder_entities + file_name, "r") as file:
        for line in file:
            line = line.strip()
            if line:
                print(line)
                intents_dict[name_entity].append(line)

words = []
classes = []
documents = []
for intent in intents_dict:
    classes.append(intent)
    for pattern in intents_dict[intent]:
        # tokenize each word in the sentence
        w = nltk.word_tokenize(pattern)
        # add to our word list
        words.extend(w)
        # add to documents in our corpus
        documents.append((w, intent))

# stem and lower each word and remove duplicates
words = [stemmer.stem(w.lower()) for w in words]
words = sorted(list(set(words)))
# remove duplicates
classes = sorted(list(set(classes)))

# create training data
training = []
output = []
# create an empty array for our output
output_empty = [0] * len(classes)
# training set, bag of words for each sentence
for doc in documents:
    # initialize our bag of words
    bag = []
    # list of tokenized words for the pattern
    pattern_words = doc[0]
    # stem each word
    pattern_words = [stemmer.stem(word.lower()) for word in pattern_words]
    # bag of words: 1 if the stemmed word appears in the pattern, else 0
    for w in words:
        bag.append(1 if w in pattern_words else 0)
    # output is a '0' for each tag and '1' for current tag
    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1
    training.append([bag, output_row])

# shuffle our features and turn into np.array
random.shuffle(training)
training = np.array(training)
# create train and test lists
train_x = list(training[:, 0])
train_y = list(training[:, 1])

# reset underlying graph data
tf.reset_default_graph()
# Build neural network: bag-of-words input -> three hidden layers -> softmax
net = tflearn.input_data(shape=[None, len(train_x[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')
net = tflearn.regression(net)
# Define model and setup tensorboard
model = tflearn.DNN(net, tensorboard_dir='tflearn_logs')
# Start training (apply gradient descent algorithm)
model.fit(train_x, train_y, n_epoch=1000, batch_size=8, show_metric=True)
model.save('model.tflearn')

# BUG FIX: close the pickle output file deterministically (it was opened
# inline in the dump call and never closed).
with open('training_data', 'wb') as training_file:
    pickle.dump({
        'words': words,
        'classes': classes,
        'train_x': train_x,
        'train_y': train_y
    }, training_file)
| 24.965517 | 74 | 0.696133 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 835 | 0.288329 |
ad3bbfaab9acdd38aa4c59baaeda66231af95942 | 3,658 | py | Python | .tmpl/python_scripts/batch_cmd_py/batch_cmd.py | githeim/wh_tmpl | cb869cb241fbe4b66bb0ed5b08a4a4e2dad0ed89 | [
"MIT"
] | 3 | 2020-10-08T11:32:16.000Z | 2021-08-11T09:47:29.000Z | .tmpl/python_scripts/batch_cmd_py/batch_cmd.py | githeim/wh_tmpl | cb869cb241fbe4b66bb0ed5b08a4a4e2dad0ed89 | [
"MIT"
] | null | null | null | .tmpl/python_scripts/batch_cmd_py/batch_cmd.py | githeim/wh_tmpl | cb869cb241fbe4b66bb0ed5b08a4a4e2dad0ed89 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import subprocess
import os
import sys
import unittest
import enum
import datetime
def Get_Parent_Dir():
    """Return the absolute path of the directory above this script's directory."""
    script_dir = os.path.dirname(__file__)
    return os.path.dirname(os.path.abspath(script_dir))
def Get_Current_Dir():
    """Return the absolute path of the directory containing this script."""
    script_dir = os.path.dirname(__file__)
    return os.path.abspath(script_dir)
# :x: example
def Get_Working_Dir():
    # Scratch directory that the example JobList() below creates and removes.
    return Get_Current_Dir()+"/testdir"
def JobList():
    """Return the demo batch: a list of [working_directory, shell_command]
    pairs, executed in order by DoJobList()."""
    return [
      # working directory # job_cmd
      [ Get_Current_Dir(), 'mkdir ./testdir' ],
      [ Get_Current_Dir(), 'touch ./testdir/fileA' ],
      [ Get_Current_Dir(), 'touch ./testdir/fileB' ],
      [ Get_Current_Dir(), 'touch ./testdir/fileC' ],
      [ Get_Working_Dir(), 'ls -l' ],
      [ Get_Working_Dir(), 'rm file*' ],
      [ Get_Current_Dir(), 'rm -rf testdir' ],
      # test cases/runners
      ]
class JobOffset():
    """Column indices into a JobList() entry (simple constants, not an Enum)."""
    dir = 0 # directory
    job_cmd = 1 # job command
def DoJobList( JobList ):
    """Run every job in *JobList* sequentially.

    Each entry is [working_directory, shell_command]. The process chdirs into
    the job's directory, runs the command through the shell with a copy of the
    caller's environment, and accumulates both a one-line-per-job summary and
    a full stdout/stderr log. The original working directory is restored
    before returning.

    :param JobList: list of [work_dir, command] pairs (parameter deliberately
        shadows the JobList() function — any job table is accepted)
    :return: tuple (all_jobs_succeeded, summary_report_str, full_log_str)
    """
    StepCnt = 0
    CurrentDir = os.getcwd()
    ret = True
    # The value For printing report
    strJobReport ="Job Report\n"
    strJobLogReport = "Log Report\n"
    for JobItem in JobList:
        work_dir = JobItem[JobOffset.dir]
        strJobCmd = JobItem[JobOffset.job_cmd]
        StepCnt = StepCnt+1
        strJobLogReport= strJobLogReport+"\n"+\
            "============================================\n"+\
            "Step["+str(StepCnt)+"] Work Dir ["+work_dir+"]\n"+\
            "job cmd ["+str(strJobCmd)+"]\n"+\
            "============================================\n"
        print("Job #"+str(StepCnt))
        print("check Working Directory ; "+work_dir)
        if os.path.isdir(work_dir) == False:
            # Missing directory: record the failure but keep running the rest.
            print("No working directory ["+work_dir+"]")
            strJobLogReport= strJobLogReport+"No working directory ["+work_dir+"]\n"
            strJobSuccess = "Fail"
            ret = False
        else :
            # :x: Change working directory
            os.chdir(work_dir)
            print("execution job command ; "+strJobCmd)
            print("processing")
            # Get local systems environment variable
            # without this value, it can't get the environment values
            # ex) $PATH $HOME...
            local_env = os.environ.copy()
            # stderr merged into stdout so the log shows one interleaved stream.
            output=subprocess.run(strJobCmd, shell=True, universal_newlines=True,
                env=local_env,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
            print("processing Done")
            strJobLogReport= strJobLogReport+output.stdout+"\n"
            if ( output.returncode !=0 ):
                print("Job Error ; on ["+str(work_dir)+"]");
                strJobSuccess = "Fail"
                ret = False
            else:
                print("Job Done ; ["+str(work_dir)+"]");
                strJobSuccess = "Success"
        strJobReport= strJobReport +\
            "Step[%04d] ;[%7s],[%s],[%s]\n" %(StepCnt,strJobSuccess,work_dir,strJobCmd)
    # :x: back to original working directory
    os.chdir(CurrentDir)
    return (ret,strJobReport,strJobLogReport)
def main():
    """Run the demo job list, print a summary, and write the detailed log to
    a timestamped file.

    :return: 0 when every job succeeded, -1 when any job failed
    """
    # Unpack the (success, summary, log) tuple directly instead of indexing.
    bSuccess, strJobReport, strJobLogReport = DoJobList(JobList())
    print("\n\n\n==============\n"+strJobReport+"\n")
    strJobLogFileName ='job_log_'+datetime.datetime.now().strftime('%m_%d_%H_%M_%S')+'.txt'
    # Context manager guarantees the log file is closed even if write() fails
    # (previously it was opened and closed manually with no error safety).
    with open("./"+strJobLogFileName, 'w') as f:
        f.write(strJobLogReport)
    print ("Job log file ; "+strJobLogFileName+"\n")
    if (bSuccess != True):
        print("Job Error Check Report ; "+strJobLogFileName)
        return -1
    print("All Job success")
    return 0
# Write Unit Test Here
class general_unit_test(unittest.TestCase):
    """Placeholder unittest case; extend with real checks for the job runner."""
    def setUp(self):
        print("setup")
    def tearDown(self):
        print("teardown")
    def test_vundle_install(self):
        # Trivial always-true smoke test.
        self.assertTrue(True)
if __name__ == '__main__':
    print ("chk")  # NOTE(review): leftover debug output; consider removing
    # Propagate main()'s return value (0 or -1) as the process exit status.
    exit( main())
| 26.128571 | 95 | 0.603062 | 268 | 0.073264 | 0 | 0 | 0 | 0 | 0 | 0 | 1,055 | 0.288409 |
ad3e3b47f7469d7f8af94e97611d088863a579cd | 2,753 | py | Python | {{cookiecutter.repo_name}}/src/evaluate.py | nussl/cookiecutter | 5df8512592778ea7155b05e3e4b54676227968b0 | [
"MIT"
] | null | null | null | {{cookiecutter.repo_name}}/src/evaluate.py | nussl/cookiecutter | 5df8512592778ea7155b05e3e4b54676227968b0 | [
"MIT"
] | null | null | null | {{cookiecutter.repo_name}}/src/evaluate.py | nussl/cookiecutter | 5df8512592778ea7155b05e3e4b54676227968b0 | [
"MIT"
] | null | null | null | import nussl
import os
import json
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import tqdm
import gin
from .helpers import build_dataset
import logging
@gin.configurable
def evaluate(output_folder, separation_algorithm, eval_class,
    block_on_gpu, num_workers, seed, debug=False):
    """Run source separation over the test dataset and write per-mix scores.

    The model forward pass can be serialized on a single GPU-resident
    instance, while separation + evaluation for each item is fanned out to a
    thread pool. One JSON score file per mix is written to
    ``<output_folder>/results``.

    :param output_folder: directory that will receive the ``results`` folder
    :param separation_algorithm: separation class instantiated per item
    :param eval_class: evaluation class called as ``eval_class(sources, estimates)``
    :param block_on_gpu: if True, build one shared 'cuda' model instance and
        run every forward pass through it serially
    :param num_workers: thread-pool size for separate/evaluate jobs
    :param seed: random seed passed to nussl for reproducibility
    :param debug: if True, also write the estimated sources as wav files
    """
    nussl.utils.seed(seed)
    logging.info(gin.operative_config_str())
    with gin.config_scope('test'):
        test_dataset = build_dataset()
    results_folder = os.path.join(output_folder, 'results')
    os.makedirs(results_folder, exist_ok=True)
    set_model_to_none = False
    if block_on_gpu:
        # make an instance that'll be used on GPU
        # has an empty audio signal for now
        gpu_algorithm = separation_algorithm(
            nussl.AudioSignal(), device='cuda')
        set_model_to_none = True
    def forward_on_gpu(audio_signal):
        # NOTE(review): gpu_algorithm only exists when block_on_gpu is True;
        # calling this with block_on_gpu=False raises NameError. Also, if the
        # algorithm exposes neither 'forward' nor 'extract_features',
        # gpu_output is unbound at the return — confirm intended usage.
        # set the audio signal of the object to this item's mix
        gpu_algorithm.audio_signal = audio_signal
        if hasattr(gpu_algorithm, 'forward'):
            gpu_output = gpu_algorithm.forward()
        elif hasattr(gpu_algorithm, 'extract_features'):
            gpu_output = gpu_algorithm.extract_features()
        return gpu_output
    pbar = tqdm.tqdm(total=len(test_dataset))
    def separate_and_evaluate(item, gpu_output):
        # With a shared GPU instance, model_path=None is passed — presumably
        # to avoid reloading weights per item; confirm against the
        # separation_algorithm's constructor contract.
        if set_model_to_none:
            separator = separation_algorithm(item['mix'], model_path=None)
        else:
            separator = separation_algorithm(item['mix'])
        estimates = separator(gpu_output)
        # Sort source names so sources line up deterministically across runs.
        source_names = sorted(list(item['sources'].keys()))
        sources = [item['sources'][k] for k in source_names]
        # other arguments come from gin config
        evaluator = eval_class(sources, estimates)
        scores = evaluator.evaluate()
        output_path = os.path.join(
            results_folder, f"{item['mix'].file_name}.json")
        with open(output_path, 'w') as f:
            json.dump(scores, f, indent=2)
        if debug:
            # Mirror the results path under 'audio' and dump each estimate.
            estimate_folder = output_path.replace(
                'results', 'audio').replace('json', '')
            os.makedirs(estimate_folder, exist_ok=True)
            for i, e in enumerate(estimates):
                audio_path = os.path.join(estimate_folder, f's{i}.wav')
                e.write_audio_to_file(audio_path)
        pbar.update(1)
    pool = ThreadPoolExecutor(max_workers=num_workers)
    for i in range(len(test_dataset)):
        item = test_dataset[i]
        gpu_output = forward_on_gpu(item['mix'])
        if i == 0:
            # First item runs synchronously; the rest are submitted to the pool.
            separate_and_evaluate(item, gpu_output)
            continue
        pool.submit(separate_and_evaluate, item, gpu_output)
    pool.shutdown(wait=True)
ad3e9b467042b664880d401cd499df829e241e47 | 260 | py | Python | ballpark/cashflows/admin.py | keyvanm/ballpark | 90ca6ac355319f159fa0836f30df487ee8e72ddd | [
"MIT"
] | null | null | null | ballpark/cashflows/admin.py | keyvanm/ballpark | 90ca6ac355319f159fa0836f30df487ee8e72ddd | [
"MIT"
] | null | null | null | ballpark/cashflows/admin.py | keyvanm/ballpark | 90ca6ac355319f159fa0836f30df487ee8e72ddd | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import *
# Register your models here.
# Expose each generic cash-flow model in the Django admin site.
admin.site.register(GenericOnetimeIncome)
admin.site.register(GenericRecurringIncome)
admin.site.register(GenericOnetimeExpense)
admin.site.register(GenericRecurringExpense)
| 26 | 44 | 0.846154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.107692 |