| repo_name | path | text |
|---|---|---|
gauchm/mlstream
|
mlstream/scaling.py
|
from pathlib import Path
from typing import List
import pandas as pd
import numpy as np
from .datautils import (load_forcings_lumped,
load_discharge,
load_static_attributes)
class Scaler:
def __init__(self):
self.scalers = {}
def normalize(self, feature: np.ndarray) -> np.ndarray:
return (feature - self.scalers["mean"]) / self.scalers["std"]
def rescale(self, feature: np.ndarray) -> np.ndarray:
return (feature * self.scalers["std"]) + self.scalers["mean"]
class InputScaler(Scaler):
def __init__(self, data_root: Path, basins: List,
start_date: pd.Timestamp, end_date: pd.Timestamp,
forcing_vars: List = None):
super().__init__()
all_forcings = pd.DataFrame()
print("Loading forcings for input scaler.")
basin_forcings = load_forcings_lumped(data_root, basins)
        for basin, forcing in basin_forcings.items():
            if forcing_vars is not None:
                forcing = forcing.loc[start_date:end_date, forcing_vars]
            else:
                forcing = forcing.loc[start_date:end_date]
            # DataFrame.append was removed in pandas 2.0; concat keeps this working.
            all_forcings = pd.concat([all_forcings, forcing])
self.scalers["mean"] = all_forcings.mean(axis=0).values
stds = all_forcings.std(axis=0).values
stds[stds == 0] = 1 # avoid divide-by-zero
self.scalers["std"] = stds
class OutputScaler(Scaler):
def __init__(self, data_root: Path, basins: List,
start_date: pd.Timestamp, end_date: pd.Timestamp):
super().__init__()
print("Loading streamflow for output scaler.")
all_outputs = load_discharge(data_root, basins)
all_outputs = all_outputs[(all_outputs['date'] >= start_date)
& (all_outputs['date'] <= end_date)]
self.scalers["mean"] = all_outputs["qobs"].mean()
self.scalers["std"] = all_outputs["qobs"].std()
class StaticAttributeScaler(Scaler):
def __init__(self, db_path: Path, basins: List, variable_name: str):
super().__init__()
statics = load_static_attributes(db_path, basins)[variable_name]
self.scalers["mean"] = statics.mean()
# avoid divide-by-zero
self.scalers["std"] = statics.std() if statics.std() != 0 else 1
|
larryyin/rectangular
|
rectangular.py
|
#!/usr/bin/env python
from flask import Flask, jsonify, render_template, request
app = Flask(__name__)
from osgeo import ogr
from osgeo import osr
from osgeo import gdal
from osgeo.gdalnumeric import *
from osgeo.gdalconst import *
import json
import numpy as np
import pandas as pd
from scipy.interpolate import griddata
import os
import re
import shutil
from subprocess import call
from scipy import stats
import time
def center2corners(CENTER):
CORNERS = np.zeros([CENTER.shape[0]+1,CENTER.shape[1]+1])
I_diff_half = np.diff(CENTER,axis=0)*.5
J_diff_half = np.diff(CENTER,axis=1)*.5
I_interim = CENTER[:-1,:]+I_diff_half
J_interim_diff_half = np.diff(I_interim,axis=1)*.5
CORNERS[1:-1,1:-1] = I_interim[:,:-1]+J_interim_diff_half
# Sides
I_W_interim = CENTER[0,:]-I_diff_half[0,:]
J_W_diff_half = np.diff(I_W_interim)*.5
CORNERS[0,1:-1] = I_W_interim[:-1]+J_W_diff_half
I_E_interim = CENTER[-1,:]+I_diff_half[-1,:]
J_E_diff_half = np.diff(I_E_interim)*.5
CORNERS[-1,1:-1] = I_E_interim[:-1]+J_E_diff_half
I_S_interim = CENTER[:,0]-J_diff_half[:,0]
J_S_diff_half = np.diff(I_S_interim)*.5
CORNERS[1:-1,0] = I_S_interim[:-1]+J_S_diff_half
I_N_interim = CENTER[:,-1]+J_diff_half[:,-1]
J_N_diff_half = np.diff(I_N_interim)*.5
CORNERS[1:-1,-1] = I_N_interim[:-1]+J_N_diff_half
# Corners
CORNERS[0,0] = CENTER[0,0]-I_diff_half[0,0]-J_diff_half[0,0]
CORNERS[-1,0] = CENTER[-1,0]+I_diff_half[-1,0]-J_diff_half[-1,0]
CORNERS[0,-1] = CENTER[0,-1]-I_diff_half[0,-1]+J_diff_half[0,-1]
CORNERS[-1,-1] = CENTER[-1,-1]+I_diff_half[-1,-1]+J_diff_half[-1,-1]
return CORNERS
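# Quick illustrative check for center2corners: on an evenly spaced grid of cell
# centers the reconstructed corners sit exactly halfway between them, e.g.
# centers at 0 and 1 give corners at -0.5, 0.5, 1.5.
def _example_center2corners():
    Xc = np.array([[0.0, 1.0],
                   [0.0, 1.0]])
    corners = center2corners(Xc)   # shape (3, 3)
    # every row of `corners` is [-0.5, 0.5, 1.5]
    return corners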
def dist_greatcircle(lat1,lon1,lat2,lon2):
R = 6371000 # m
latrad1 = np.deg2rad(lat1)
latrad2 = np.deg2rad(lat2)
dLat = latrad2-latrad1
dLon = np.deg2rad(lon2-lon1)
a = (np.sin(dLat/2) * np.sin(dLat/2) +
np.cos(latrad1) * np.cos(latrad2) *
np.sin(dLon/2) * np.sin(dLon/2))
c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1-a))
return R * c
def bearing(lat1,lon1,lat2,lon2):
latrad1 = np.deg2rad(lat1)
latrad2 = np.deg2rad(lat2)
lonrad1 = np.deg2rad(lon1)
lonrad2 = np.deg2rad(lon2)
y = np.sin(lonrad2-lonrad1) * np.cos(latrad2)
x = (np.cos(latrad1)*np.sin(latrad2)-
np.sin(latrad1)*np.cos(latrad2)*np.cos(lonrad2-lonrad1))
return np.rad2deg(np.arctan2(y, x))
def latlonlen(latdeg):
lat = np.deg2rad(latdeg)
    m1 = 111132.92
    m2 = -559.82
    m3 = 1.175
    m4 = -0.0023
    p1 = 111412.84
    p2 = -93.5
    p3 = 0.118
    latlen = m1 + (m2*np.cos(2*lat)) + (m3*np.cos(4*lat)) + (m4*np.cos(6*lat))
    lonlen = (p1*np.cos(lat)) + (p2*np.cos(3*lat)) + (p3*np.cos(5*lat))
return (latlen,lonlen) # m
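# Small worked example for the geodesy helpers above (illustrative only): one
# degree of longitude along the equator is roughly 111.3 km, and the haversine
# distance between (0, 0) and (0, 1) agrees with latlonlen(0) to within ~0.2%
# (spherical vs. ellipsoid-based series).
def _example_geodesy():
    d = dist_greatcircle(0.0, 0.0, 0.0, 1.0)   # ~111.2 km
    b = bearing(0.0, 0.0, 0.0, 1.0)            # ~90 degrees (due east)
    latlen, lonlen = latlonlen(0.0)            # meters per degree at the equator
    return d, b, lonlen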
def _det(xvert, yvert):
    '''Compute twice the area of the triangle defined by the points using the
    determinant formula.
Input parameters:
xvert -- A vector of nodal x-coords (array-like).
yvert -- A vector of nodal y-coords (array-like).
Output parameters:
Twice the area of the triangle defined by the points.
Notes:
_det is positive if points define polygon in anticlockwise order.
_det is negative if points define polygon in clockwise order.
    _det is zero if at least two of the points are coincident or if
    all points are collinear.
'''
xvert = np.asfarray(xvert)
yvert = np.asfarray(yvert)
x_prev = np.concatenate(([xvert[-1]], xvert[:-1]))
y_prev = np.concatenate(([yvert[-1]], yvert[:-1]))
return np.sum(yvert * x_prev - xvert * y_prev, axis=0)
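# Orientation example for _det (illustrative): the unit right triangle
# (0,0) -> (1,0) -> (0,1) is listed anticlockwise, so _det returns +1.0,
# i.e. twice the triangle's area of 0.5; reversing the order gives -1.0.
def _example_det():
    twice_area_ccw = _det([0.0, 1.0, 0.0], [0.0, 0.0, 1.0])  # +1.0
    twice_area_cw = _det([0.0, 0.0, 1.0], [0.0, 1.0, 0.0])   # -1.0
    return twice_area_ccw, twice_area_cw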
class Polygon:
'''Polygon object.
Input parameters:
x -- A sequence of nodal x-coords.
y -- A sequence of nodal y-coords.
'''
def __init__(self, x, y):
if len(x) != len(y):
raise IndexError('x and y must be equally sized.')
self.x = np.asfarray(x)
self.y = np.asfarray(y)
        # Close the polygon if it is open
x1, y1 = x[0], y[0]
xn, yn = x[-1], y[-1]
if x1 != xn or y1 != yn:
self.x = np.concatenate((self.x, [x1]))
self.y = np.concatenate((self.y, [y1]))
# Anti-clockwise coordinates
if _det(self.x, self.y) < 0:
self.x = self.x[::-1]
self.y = self.y[::-1]
def is_inside(self, xpoint, ypoint, smalld=1e-12):
'''Check if point is inside a general polygon.
Input parameters:
xpoint -- The x-coord of the point to be tested.
        ypoint -- The y-coord of the point to be tested.
smalld -- A small float number.
xpoint and ypoint could be scalars or array-like sequences.
Output parameters:
mindst -- The distance from the point to the nearest point of the
polygon.
If mindst < 0 then point is outside the polygon.
        If mindst = 0 then point is on a side of the polygon.
If mindst > 0 then point is inside the polygon.
Notes:
An improved version of the algorithm of Nordbeck and Rydstedt.
REF: SLOAN, S.W. (1985): A point-in-polygon program. Adv. Eng.
Software, Vol 7, No. 1, pp 45-47.
'''
xpoint = np.asfarray(xpoint)
ypoint = np.asfarray(ypoint)
# Scalar to array
        if xpoint.ndim == 0:  # scalar input
xpoint = np.array([xpoint], dtype=float)
ypoint = np.array([ypoint], dtype=float)
scalar = True
else:
scalar = False
# Check consistency
if xpoint.shape != ypoint.shape:
            raise IndexError('x and y have different shapes')
# If snear = True: Dist to nearest side < nearest vertex
# If snear = False: Dist to nearest vertex < nearest side
snear = np.ma.masked_all(xpoint.shape, dtype=bool)
# Initialize arrays
mindst = np.ones_like(xpoint, dtype=float) * np.inf
j = np.ma.masked_all(xpoint.shape, dtype=int)
x = self.x
y = self.y
n = len(x) - 1 # Number of sides/vertices defining the polygon
# Loop over each side defining polygon
for i in range(n):
d = np.ones_like(xpoint, dtype=float) * np.inf
# Start of side has coords (x1, y1)
# End of side has coords (x2, y2)
# Point has coords (xpoint, ypoint)
x1 = x[i]
y1 = y[i]
x21 = x[i + 1] - x1
y21 = y[i + 1] - y1
x1p = x1 - xpoint
y1p = y1 - ypoint
# Points on infinite line defined by
# x = x1 + t * (x1 - x2)
# y = y1 + t * (y1 - y2)
# where
# t = 0 at (x1, y1)
# t = 1 at (x2, y2)
# Find where normal passing through (xpoint, ypoint) intersects
# infinite line
t = -(x1p * x21 + y1p * y21) / (x21 ** 2 + y21 ** 2)
tlt0 = t < 0
tle1 = (0 <= t) & (t <= 1)
# Normal intersects side
d[tle1] = ((x1p[tle1] + t[tle1] * x21) ** 2 +
(y1p[tle1] + t[tle1] * y21) ** 2)
            # Normal does not intersect side
# Point is closest to vertex (x1, y1)
# Compute square of distance to this vertex
d[tlt0] = x1p[tlt0] ** 2 + y1p[tlt0] ** 2
# Store distances
mask = d < mindst
mindst[mask] = d[mask]
j[mask] = i
# Point is closer to (x1, y1) than any other vertex or side
snear[mask & tlt0] = False
# Point is closer to this side than to any other side or vertex
snear[mask & tle1] = True
if np.ma.count(snear) != snear.size:
raise IndexError('Error computing distances')
mindst **= 0.5
# Point is closer to its nearest vertex than its nearest side, check if
# nearest vertex is concave.
# If the nearest vertex is concave then point is inside the polygon,
# else the point is outside the polygon.
jo = j.copy()
jo[j == 0] -= 1
area = _det([x[j + 1], x[j], x[jo - 1]], [y[j + 1], y[j], y[jo - 1]])
mindst[~snear] = np.copysign(mindst, area)[~snear]
# Point is closer to its nearest side than to its nearest vertex, check
# if point is to left or right of this side.
# If point is to left of side it is inside polygon, else point is
# outside polygon.
area = _det([x[j], x[j + 1], xpoint], [y[j], y[j + 1], ypoint])
mindst[snear] = np.copysign(mindst, area)[snear]
# Point is on side of polygon
mindst[np.fabs(mindst) < smalld] = 0
# If input values were scalar then the output should be too
if scalar:
mindst = float(mindst)
return mindst
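# Illustrative example for Polygon.is_inside: on the unit square the signed
# distance is positive inside, zero on an edge, and negative outside.
def _example_point_in_polygon():
    square = Polygon([0.0, 1.0, 1.0, 0.0], [0.0, 0.0, 1.0, 1.0])
    inside = square.is_inside(0.5, 0.5)    # +0.5 (distance to nearest side)
    on_edge = square.is_inside(0.5, 0.0)   # 0.0
    outside = square.is_inside(2.0, 0.5)   # -1.0
    return inside, on_edge, outside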
def kml2polygon(kml,extentW=None,extentS=None,extentE=None,extentN=None):
with open(kml,'r') as f_poly:
text_all = f_poly.read().replace('\n', '')
item = text_all.split("</outerBoundaryIs>")[0]
if "<outerBoundaryIs>" in item:
testStr = item[item.find("<outerBoundaryIs>")+len("<outerBoundaryIs>"):]
if ',0.' not in testStr:
isNear0 = 0
if ',0' in testStr:
is3D = 1
else:
is3D = 0
else:
isNear0 = 1
if ',0' in testStr:
is3D = 1
else:
is3D = 0
outer_block = []
if (isNear0==0) and (is3D==1):
stripper = re.compile(r'[^\d.,-;]+')
for item in text_all.split("</outerBoundaryIs>"):
if "<outerBoundaryIs>" in item:
block = (stripper.sub('', item[item.find("<outerBoundaryIs>")+
len("<outerBoundaryIs>"):].replace(',0',';'))).rstrip('/').rstrip(';')
outer_block.append(block)
elif (isNear0==1) and (is3D==1):
stripper = re.compile(r'[^\d.,-;]+')
for item in text_all.split("</outerBoundaryIs>"):
if "<outerBoundaryIs>" in item:
block = (stripper.sub('', item[item.find("<outerBoundaryIs>")+
len("<outerBoundaryIs>"):].replace(',0.',',999.').replace(',0',';'))).rstrip('/').rstrip(';').replace(',999.',',0.')
outer_block.append(block)
elif (is3D==0):
stripper = re.compile(r'[^\d.,-;]+')
for item in text_all.split("</outerBoundaryIs>"):
if "<outerBoundaryIs>" in item:
block = (stripper.sub('', item[item.find("<outerBoundaryIs>")+
len("<outerBoundaryIs>"):].replace(' ',';'))).lstrip(';').rstrip('/').rstrip(';').rstrip('/').rstrip(';')
outer_block.append(block)
text_all = None
outer = np.array([np.array([[float(v6) for v6 in v5] for v5 in v4]) for v4 in
[[v3.split(',') for v3 in v2] for v2 in
[v.split(';') for v in outer_block]]])
outer_block = None
if np.array([extentW,extentS,extentE,extentN]).all():
extentWS = np.array([extentW,extentS])
extentEN = np.array([extentE,extentN])
WS = np.array([np.min(v1,axis=0) for v1 in outer])
EN = np.array([np.max(v1,axis=0) for v1 in outer])
isExtent = np.hstack((WS>extentWS,EN<extentEN)).all(axis=1)
outer = np.extract(isExtent, outer)
polygons = [Polygon(v[:,0], v[:,1]) for v in outer]
return polygons
cs_prec = 0.004 # m
#%%
@app.route('/_draft')
def draft():
"""Constructing rectangle..."""
if not os.path.exists('abc/'):
os.makedirs('abc/')
abcPath = request.args.get('abcPath', 0, type=str)
if not abcPath:
if not os.listdir('abc/'):
isABC = 0
else:
isABC = 1
abcPath = 'abc/'+os.listdir('abc/')[0]
print('ABC path: '+abcPath)
elif not os.path.exists(abcPath):
status = 'Invalid ABC'
return jsonify(status=status)
else:
isABC = 1
print('ABC path: '+abcPath)
if isABC:
ABC = pd.read_csv(abcPath)
lngA,latA = ABC.iloc[0]
lngB,latB = ABC.iloc[1]
lngC,latC = ABC.iloc[2]
lngO,latO = ABC.iloc[3]
else:
latO = request.args.get('latO', 0, type=float)
lngO = request.args.get('lngO', 0, type=float)
latA = request.args.get('latA', 0, type=float)
lngA = request.args.get('lngA', 0, type=float)
latB = request.args.get('latB', 0, type=float)
lngB = request.args.get('lngB', 0, type=float)
latC = request.args.get('latC', 0, type=float)
lngC = request.args.get('lngC', 0, type=float)
# sf = request.args.get('sf', 0, type=float)
cs = request.args.get('cs', 0, type=float)
print(lngO,latO)
print(lngA,latA)
print(lngB,latB)
print(lngC,latC)
# print(sf,cs)
outPath = 'out/'
if not os.path.exists(outPath):
os.makedirs(outPath)
# Save ABC
s = []
s += 'lng,lat'
s += '\n{:f},{:f}'.format(lngA,latA)
s += '\n{:f},{:f}'.format(lngB,latB)
s += '\n{:f},{:f}'.format(lngC,latC)
s += '\n{:f},{:f}'.format(lngO,latO)
with open(outPath+'ABC', 'w') as f:
f.writelines(s)
wgs84 = osr.SpatialReference()
omerc = osr.SpatialReference()
wgs84.SetWellKnownGeogCS("WGS84")
omerc.SetHOM2PNO(clat=latO,dfLat1=latA,dfLong1=lngA,dfLat2=latB,dfLong2=lngB,
scale=1,fe=0,fn=0)
wgs2om = osr.CoordinateTransformation(wgs84, omerc)
om2wgs = osr.CoordinateTransformation(omerc, wgs84)
om_OABC = ogr.Geometry(ogr.wkbMultiPoint)
om_O = ogr.Geometry(ogr.wkbPoint)
om_O.AddPoint_2D(lngO, latO)
om_OABC.AddGeometry(om_O)
om_A = ogr.Geometry(ogr.wkbPoint)
om_A.AddPoint_2D(lngA, latA)
om_OABC.AddGeometry(om_A)
om_B = ogr.Geometry(ogr.wkbPoint)
om_B.AddPoint_2D(lngB, latB)
om_OABC.AddGeometry(om_B)
om_C = ogr.Geometry(ogr.wkbPoint)
om_C.AddPoint_2D(lngC, latC)
om_OABC.AddGeometry(om_C)
om_OABC.Transform(wgs2om)
om_OABC = json.loads(om_OABC.ExportToJson())['coordinates']
xOff = om_OABC[0][0]
yOff = om_OABC[0][1]
xA = om_OABC[1][0]
yA = om_OABC[1][1]
xB = om_OABC[2][0]
yB = om_OABC[2][1]
xC = om_OABC[3][0]
yC = om_OABC[3][1]
xA = xA - xOff
xB = xB - xOff
xC = xC - xOff
yA = yA - yOff
yB = yB - yOff
yC = yC - yOff
xO = 0
yO = 0
r = np.sqrt((xB-xA)**2+(yB-yA)**2)*.5
dx = xC-xO
dy = yC-yO
dr = np.sqrt(dx*dx+dy*dy)
D = xO*yC-xC*yO
sqrtdel = np.sqrt(r*r*dr*dr-D*D)
sgn = (-1)**(int(dy>=0)+1)
x1 = (D*dy+sgn*dx*sqrtdel)/(dr*dr)
y1 = (-D*dx+np.abs(dy)*sqrtdel)/(dr*dr)
x2 = (D*dy-sgn*dx*sqrtdel)/(dr*dr)
y2 = (-D*dx-np.abs(dy)*sqrtdel)/(dr*dr)
d1sq = (x1-xC)**2+(y1-yC)**2
d2sq = (x2-xC)**2+(y2-yC)**2
if d1sq<d2sq:
xM = x1
yM = y1
xN = x2
yN = y2
else:
xM = x2
yM = y2
xN = x1
yN = y1
xO = xO+xOff
yO = yO+yOff
xA = xA+xOff
xB = xB+xOff
xC = xC+xOff
yA = yA+yOff
yB = yB+yOff
yC = yC+yOff
xM = xM+xOff
xN = xN+xOff
yM = yM+yOff
yN = yN+yOff
lenI = np.sqrt((xA-xM)**2+(yA-yM)**2)
lenJ = np.sqrt((xB-xM)**2+(yB-yM)**2)
if cs>0:
cnI = int(np.ceil(lenI/cs))
cnJ = int(np.ceil(lenJ/cs))
else:
cnI = 0
cnJ = 0
wgs_MN = ogr.Geometry(ogr.wkbMultiPoint)
wgs_M = ogr.Geometry(ogr.wkbPoint)
wgs_M.AddPoint_2D(xM,yM)
wgs_MN.AddGeometry(wgs_M)
wgs_N = ogr.Geometry(ogr.wkbPoint)
wgs_N.AddPoint_2D(xN,yN)
wgs_MN.AddGeometry(wgs_N)
wgs_MN.Transform(om2wgs)
wgs_MN = json.loads(wgs_MN.ExportToJson())['coordinates']
lngM = wgs_MN[0][0]
latM = wgs_MN[0][1]
lngN = wgs_MN[1][0]
latN = wgs_MN[1][1]
return jsonify(lngA=lngA,latA=latA,
lngB=lngB,latB=latB,
lngC=lngC,latC=latC,
lngO=lngO,latO=latO,
lngM=lngM,latM=latM,
lngN=lngN,latN=latN,
lenI=lenI,lenJ=lenJ,
cnI=cnI,cnJ=cnJ)
#%%
@app.route('/_final')
def final():
"""Generating model grid..."""
run_start = time.time()
if not os.path.exists('abc/'):
os.makedirs('abc/')
abcPath = request.args.get('abcPath', 0, type=str)
if not abcPath:
if not os.listdir('abc/'):
isABC = 0
else:
isABC = 1
abcPath = 'abc/'+os.listdir('abc/')[0]
print('ABC path: '+abcPath)
elif not os.path.exists(abcPath):
status = 'Invalid ABC'
return jsonify(status=status)
else:
isABC = 1
print('ABC path: '+abcPath)
if isABC:
ABC = pd.read_csv(abcPath)
lngA,latA = ABC.iloc[0]
lngB,latB = ABC.iloc[1]
lngC,latC = ABC.iloc[2]
lngO,latO = ABC.iloc[3]
else:
latO = request.args.get('latO', 0, type=float)
lngO = request.args.get('lngO', 0, type=float)
latA = request.args.get('latA', 0, type=float)
lngA = request.args.get('lngA', 0, type=float)
latB = request.args.get('latB', 0, type=float)
lngB = request.args.get('lngB', 0, type=float)
latC = request.args.get('latC', 0, type=float)
lngC = request.args.get('lngC', 0, type=float)
cs = request.args.get('cs', 0, type=float)
#%% Paths
status = ''
tmpPath = 'tmp/'
if not os.path.exists(tmpPath):
os.makedirs(tmpPath)
else:
shutil.rmtree(tmpPath)
os.makedirs(tmpPath)
demPath = request.args.get('demPath', 0, type=str)
if not demPath:
demPath = 'dem/'+os.listdir('dem/')[0]
elif not os.path.exists(demPath):
status = 'Invalid DEM'
return jsonify(status=status)
print('DEM path: '+demPath)
demclippedPath = tmpPath+'dem_clipped.tif'
buildingsPathList = request.args.get('buildingsPath', 0, type=str)
if not buildingsPathList:
if not os.listdir('buildings/'):
isBuilding = 0
else:
isBuilding = 1
buildingsPathList = [('buildings/'+v) for v in os.listdir('buildings/')]
for v in buildingsPathList:
print('Buildings path: '+v)
elif not os.path.exists(buildingsPathList.split(',')[0]):
status = 'Invalid buildings'
return jsonify(status=status)
else:
isBuilding = 1
buildingsPathList = buildingsPathList.split(',')
for v in buildingsPathList:
print('Buildings path: '+v)
nlcdPath = request.args.get('nlcdPath', 0, type=str)
if not nlcdPath:
if not os.listdir('nlcd/'):
isNLCD = 0
else:
isNLCD = 1
nlcdPath = 'nlcd/'+os.listdir('nlcd/')[0]
print('NLCD path: '+nlcdPath)
elif not os.path.exists(nlcdPath):
status = 'Invalid NLCD'
return jsonify(status=status)
else:
isNLCD = 1
print('NLCD path: '+nlcdPath)
nlcdclippedPath = tmpPath+'nlcd_clipped.tif'
outPath = 'out/'
if not os.path.exists(outPath):
os.makedirs(outPath)
else:
shutil.rmtree(outPath)
os.makedirs(outPath)
print('Output path: '+outPath)
# Save ABC
s = []
s += 'lng,lat'
s += '\n{:f},{:f}'.format(lngA,latA)
s += '\n{:f},{:f}'.format(lngB,latB)
s += '\n{:f},{:f}'.format(lngC,latC)
s += '\n{:f},{:f}'.format(lngO,latO)
with open(outPath+'ABC', 'w') as f:
f.writelines(s)
#%%
wgs84 = osr.SpatialReference()
omerc = osr.SpatialReference()
wgs84.SetWellKnownGeogCS("WGS84")
omerc.SetHOM2PNO(clat=latO,dfLat1=latA,dfLong1=lngA,dfLat2=latB,dfLong2=lngB,
scale=1,fe=0,fn=0)
wgs2om = osr.CoordinateTransformation(wgs84, omerc)
om2wgs = osr.CoordinateTransformation(omerc, wgs84)
om_OABC = ogr.Geometry(ogr.wkbMultiPoint)
om_O = ogr.Geometry(ogr.wkbPoint)
om_O.AddPoint_2D(lngO, latO)
om_OABC.AddGeometry(om_O)
om_A = ogr.Geometry(ogr.wkbPoint)
om_A.AddPoint_2D(lngA, latA)
om_OABC.AddGeometry(om_A)
om_B = ogr.Geometry(ogr.wkbPoint)
om_B.AddPoint_2D(lngB, latB)
om_OABC.AddGeometry(om_B)
om_C = ogr.Geometry(ogr.wkbPoint)
om_C.AddPoint_2D(lngC, latC)
om_OABC.AddGeometry(om_C)
om_OABC.Transform(wgs2om)
om_OABC = json.loads(om_OABC.ExportToJson())['coordinates']
xOff = om_OABC[0][0]
yOff = om_OABC[0][1]
xA = om_OABC[1][0]
yA = om_OABC[1][1]
xB = om_OABC[2][0]
yB = om_OABC[2][1]
xC = om_OABC[3][0]
yC = om_OABC[3][1]
xA = xA - xOff
xB = xB - xOff
xC = xC - xOff
yA = yA - yOff
yB = yB - yOff
yC = yC - yOff
xO = 0
yO = 0
r = np.sqrt((xB-xA)**2+(yB-yA)**2)*.5
dx = xC-xO
dy = yC-yO
dr = np.sqrt(dx*dx+dy*dy)
D = xO*yC-xC*yO
sqrtdel = np.sqrt(r*r*dr*dr-D*D)
sgn = (-1)**(int(dy>=0)+1)
x1 = (D*dy+sgn*dx*sqrtdel)/(dr*dr)
y1 = (-D*dx+np.abs(dy)*sqrtdel)/(dr*dr)
x2 = (D*dy-sgn*dx*sqrtdel)/(dr*dr)
y2 = (-D*dx-np.abs(dy)*sqrtdel)/(dr*dr)
d1sq = (x1-xC)**2+(y1-yC)**2
d2sq = (x2-xC)**2+(y2-yC)**2
if d1sq<d2sq:
xM = x1
yM = y1
xN = x2
yN = y2
else:
xM = x2
yM = y2
xN = x1
yN = y1
xO = xO+xOff
yO = yO+yOff
xA = xA+xOff
xB = xB+xOff
xC = xC+xOff
yA = yA+yOff
yB = yB+yOff
yC = yC+yOff
xM = xM+xOff
xN = xN+xOff
yM = yM+yOff
yN = yN+yOff
wgs_MN = ogr.Geometry(ogr.wkbMultiPoint)
wgs_M = ogr.Geometry(ogr.wkbPoint)
wgs_M.AddPoint_2D(xM,yM)
wgs_MN.AddGeometry(wgs_M)
wgs_N = ogr.Geometry(ogr.wkbPoint)
wgs_N.AddPoint_2D(xN,yN)
wgs_MN.AddGeometry(wgs_N)
wgs_MN.Transform(om2wgs)
wgs_MN = json.loads(wgs_MN.ExportToJson())['coordinates']
lngM = wgs_MN[0][0]
latM = wgs_MN[0][1]
lngN = wgs_MN[1][0]
latN = wgs_MN[1][1]
lenI = np.sqrt((xA-xM)**2+(yA-yM)**2)
lenJ = np.sqrt((xB-xM)**2+(yB-yM)**2)
#%% Squarization
csI = cs
csJ = cs
med_H1 = 0
med_H2 = 0
loop = 0
loopCount = 0
while (abs(cs-med_H1)>=cs_prec) and (loopCount<100):
loop+=1
loopCount+=1
if med_H1*med_H2>0:
csI = csI-(med_H1-cs)/2
else:
csI = cs
print("Iteration "+str(loop)+': '+str(med_H1)+' x '+str(med_H2))
if csI>cs_prec*3 and csJ>cs_prec*3:
cnI = int(np.ceil(lenI/csI))
cnJ = int(np.ceil(lenJ/csJ))
else:
cnI = 0
cnJ = 0
#
Jm,Im = np.meshgrid(range(cnJ),range(cnI))
Jm = Jm+1
Im = Im+1
Idx = csI*(xM-xA)/lenI
Idy = csI*(yM-yA)/lenI
xI0 = np.linspace(xA,xA+Idx*cnI,num=cnI,dtype=float)
yI0 = np.linspace(yA,yA+Idy*cnI,num=cnI,dtype=float)
Jdx = csJ*(xN-xA)/lenJ
Jdy = csJ*(yN-yA)/lenJ
Xm = np.array([np.linspace(v,v+Jdx*cnJ,num=cnJ,dtype=float) for v in xI0])
Ym = np.array([np.linspace(v,v+Jdy*cnJ,num=cnJ,dtype=float) for v in yI0])
#
cn_Xm = center2corners(Xm)
cn_Ym = center2corners(Ym)
# Centers
wgs_grid = ogr.Geometry(ogr.wkbMultiPoint)
for iJ in range(cnJ):
for iI in range(cnI):
wgs_node = ogr.Geometry(ogr.wkbPoint)
wgs_node.AddPoint_2D(Xm[iI,iJ],Ym[iI,iJ])
wgs_grid.AddGeometry(wgs_node)
wgs_grid.Transform(om2wgs)
wgs_grid = json.loads(wgs_grid.ExportToJson())['coordinates']
lonm = np.zeros(Xm.shape)
latm = np.zeros(Ym.shape)
count = 0
for iJ in range(cnJ):
for iI in range(cnI):
lonm[iI,iJ] = wgs_grid[count][0]
latm[iI,iJ] = wgs_grid[count][1]
count+=1
wgs_grid = []
# Corners
wgs_grid = ogr.Geometry(ogr.wkbMultiPoint)
for iJ in range(cnJ+1):
for iI in range(cnI+1):
wgs_node = ogr.Geometry(ogr.wkbPoint)
wgs_node.AddPoint_2D(cn_Xm[iI,iJ],cn_Ym[iI,iJ])
wgs_grid.AddGeometry(wgs_node)
wgs_grid.Transform(om2wgs)
wgs_grid = json.loads(wgs_grid.ExportToJson())['coordinates']
cn_lonm = np.zeros(cn_Xm.shape)
cn_latm = np.zeros(cn_Ym.shape)
count = 0
for iJ in range(cnJ+1):
for iI in range(cnI+1):
cn_lonm[iI,iJ] = wgs_grid[count][0]
cn_latm[iI,iJ] = wgs_grid[count][1]
count+=1
wgs_grid = []
# H1, H2
I_interim_lonm = cn_lonm[:-1,:]+np.diff(cn_lonm,axis=0)*.5
I_interim_latm = cn_latm[:-1,:]+np.diff(cn_latm,axis=0)*.5
J_interim_lonm = cn_lonm[:,:-1]+np.diff(cn_lonm,axis=1)*.5
J_interim_latm = cn_latm[:,:-1]+np.diff(cn_latm,axis=1)*.5
H1m = dist_greatcircle(J_interim_latm[:-1,:],J_interim_lonm[:-1,:],
J_interim_latm[1:,:],J_interim_lonm[1:,:])
H2m = dist_greatcircle(I_interim_latm[:,:-1],I_interim_lonm[:,:-1],
I_interim_latm[:,1:],I_interim_lonm[:,1:])
med_H1 = np.median(H1m.ravel())
med_H2 = np.median(H2m.ravel())
#
loopCount = 0
while (abs(cs-med_H2)>=cs_prec) and (loopCount<100):
loop+=1
loopCount+=1
if med_H2>0:
csJ = csJ-(med_H2-cs)/2
else:
csJ = cs
print("Iteration "+str(loop)+': '+str(med_H1)+' x '+str(med_H2))
if csI>cs_prec*3 and csJ>cs_prec*3:
cnI = int(np.ceil(lenI/csI))
cnJ = int(np.ceil(lenJ/csJ))
else:
cnI = 0
cnJ = 0
#
Jm,Im = np.meshgrid(range(cnJ),range(cnI))
Jm = Jm+1
Im = Im+1
Idx = csI*(xM-xA)/lenI
Idy = csI*(yM-yA)/lenI
xI0 = np.linspace(xA,xA+Idx*cnI,num=cnI,dtype=float)
yI0 = np.linspace(yA,yA+Idy*cnI,num=cnI,dtype=float)
Jdx = csJ*(xN-xA)/lenJ
Jdy = csJ*(yN-yA)/lenJ
Xm = np.array([np.linspace(v,v+Jdx*cnJ,num=cnJ,dtype=float) for v in xI0])
Ym = np.array([np.linspace(v,v+Jdy*cnJ,num=cnJ,dtype=float) for v in yI0])
#
cn_Xm = center2corners(Xm)
cn_Ym = center2corners(Ym)
# Centers
wgs_grid = ogr.Geometry(ogr.wkbMultiPoint)
for iJ in range(cnJ):
for iI in range(cnI):
wgs_node = ogr.Geometry(ogr.wkbPoint)
wgs_node.AddPoint_2D(Xm[iI,iJ],Ym[iI,iJ])
wgs_grid.AddGeometry(wgs_node)
wgs_grid.Transform(om2wgs)
wgs_grid = json.loads(wgs_grid.ExportToJson())['coordinates']
lonm = np.zeros(Xm.shape)
latm = np.zeros(Ym.shape)
count = 0
for iJ in range(cnJ):
for iI in range(cnI):
lonm[iI,iJ] = wgs_grid[count][0]
latm[iI,iJ] = wgs_grid[count][1]
count+=1
wgs_grid = []
# Corners
wgs_grid = ogr.Geometry(ogr.wkbMultiPoint)
for iJ in range(cnJ+1):
for iI in range(cnI+1):
wgs_node = ogr.Geometry(ogr.wkbPoint)
wgs_node.AddPoint_2D(cn_Xm[iI,iJ],cn_Ym[iI,iJ])
wgs_grid.AddGeometry(wgs_node)
wgs_grid.Transform(om2wgs)
wgs_grid = json.loads(wgs_grid.ExportToJson())['coordinates']
cn_lonm = np.zeros(cn_Xm.shape)
cn_latm = np.zeros(cn_Ym.shape)
count = 0
for iJ in range(cnJ+1):
for iI in range(cnI+1):
cn_lonm[iI,iJ] = wgs_grid[count][0]
cn_latm[iI,iJ] = wgs_grid[count][1]
count+=1
wgs_grid = []
# H1, H2
I_interim_lonm = cn_lonm[:-1,:]+np.diff(cn_lonm,axis=0)*.5
I_interim_latm = cn_latm[:-1,:]+np.diff(cn_latm,axis=0)*.5
J_interim_lonm = cn_lonm[:,:-1]+np.diff(cn_lonm,axis=1)*.5
J_interim_latm = cn_latm[:,:-1]+np.diff(cn_latm,axis=1)*.5
H1m = dist_greatcircle(J_interim_latm[:-1,:],J_interim_lonm[:-1,:],
J_interim_latm[1:,:],J_interim_lonm[1:,:])
H2m = dist_greatcircle(I_interim_latm[:,:-1],I_interim_lonm[:,:-1],
I_interim_latm[:,1:],I_interim_lonm[:,1:])
med_H1 = np.median(H1m.ravel())
med_H2 = np.median(H2m.ravel())
# ANG
bearm = bearing(latm,lonm,J_interim_latm[1:,:],J_interim_lonm[1:,:])
degQ4 = bearm>=270
ANGm = np.zeros(bearm.shape)
ANGm[degQ4] = 360+90-bearm[degQ4]
ANGm[~degQ4] = 90-bearm[~degQ4]
print('H1', stats.describe(H1m.ravel()))
print('H2', stats.describe(H2m.ravel()))
print('ANG', stats.describe(ANGm.ravel()))
#%% DEM
    clipMargin = cs*.00005 # margin of ~5 grid cells: cs is in meters, ~1e-5 degrees per meter
clipW = np.nanmin(cn_lonm.ravel())-clipMargin
clipS = np.nanmin(cn_latm.ravel())-clipMargin
clipE = np.nanmax(cn_lonm.ravel())+clipMargin
clipN = np.nanmax(cn_latm.ravel())+clipMargin
# clipRes = cs*.00001*.2
# gdalwarp -te <x_min> <y_min> <x_max> <y_max> input.tif clipped_output.tif
# gdalwarp -tr 30 30 -r average equals2.tif equals2-averaged_30m.tif
call(['gdalwarp',
'-te', '{:f}'.format(clipW), '{:f}'.format(clipS),
'{:f}'.format(clipE), '{:f}'.format(clipN),
# '-tr', '{:f}'.format(clipRes), '{:f}'.format(clipRes), '-r', 'bilinear',
demPath, demclippedPath])
#%% Depth
print('Extracting depths from DEM...')
#%% H1, H2 Distance Matrix
bandNum1 = 1
DEM = gdal.Open(demclippedPath, GA_ReadOnly )
band1 = DEM.GetRasterBand(bandNum1)
geotransform = DEM.GetGeoTransform()
x_ul = geotransform[0]
y_ul = geotransform[3]
x_size = geotransform[1]
y_size = geotransform[5]
print('DEM cellsize: {xSize:f} x {ySize:f}'.format(xSize=x_size,ySize=y_size))
data_raw = BandReadAsArray(band1)
(y_cell,x_cell) = data_raw.shape
xv, yv = meshgrid(range(x_cell), range(y_cell), indexing='xy')
x_coor = xv * x_size + x_ul + (x_size*.5)
y_coor = yv * y_size + y_ul + (y_size*.5)
mask_domain = (data_raw>-9999)*(data_raw<9999)
data = np.copy(data_raw).astype(float)
data[~mask_domain] = np.nan
band1 = None
DEM = None
#==============================================================================
# # Cell average scheme
# depthm = np.empty(lonm.shape)
# depthm.fill(np.nan)
# sampleCountm = np.zeros(lonm.shape)
#
# for j in range(cnJ):
# for i in range(cnI):
# cell_vertices = np.array([[cn_lonm[i,j],cn_latm[i,j]],
# [cn_lonm[i+1,j],cn_latm[i+1,j]],
# [cn_lonm[i+1,j+1],cn_latm[i+1,j+1]],
# [cn_lonm[i,j+1],cn_latm[i,j+1]],
# [cn_lonm[i,j],cn_latm[i,j]]])
# cell_polygon = Polygon(cell_vertices[:,0], cell_vertices[:,1])
# # depth_polygon = data[cell_polygon.is_inside(x_coor,y_coor)>0]
# depth_polygon = data[(cell_polygon.is_inside(x_coor,y_coor)>0)*mask_domain]
# if len(depth_polygon)>0:
# depthm[i,j] = np.mean(depth_polygon)
# sampleCountm[i,j] = depth_polygon.size
# print(depthm[i,j],sampleCountm[i,j])
# print(depthm)
# print(sampleCountm)
#==============================================================================
# Griddata scheme
distm_lat,distm_lon = latlonlen(latm)
distm_y,distm_x = latlonlen(y_coor)
depthm = griddata((np.ravel(x_coor*distm_x),np.ravel(y_coor*distm_y)),
np.ravel(data), (np.ravel(lonm*distm_lon),
np.ravel(latm*distm_lat)), method='linear')
depthm = -depthm.reshape(lonm.shape)
#%%
datumm = np.zeros(lonm.shape)
#%% Buildings
if isBuilding:
print("Importing buildings...")
for buildingsPath in buildingsPathList:
polygons = kml2polygon(buildingsPath,extentW=clipW,extentS=clipS,
extentE=clipE,extentN=clipN)
bad_building = 0
for v in polygons:
building_polygon = v.is_inside(lonm,latm)>0
if (building_polygon.sum()/lonm.size)<.2:
depthm[building_polygon] = np.nan
else:
bad_building+=1
print('Bad building shape #{nBad:d}...'.format(nBad=bad_building))
print("Imported from {buildingsPath:s}: {nBuilding:d} buildings.".format(buildingsPath=buildingsPath,nBuilding=len(polygons)))
#%% NLCD
if isNLCD:
call(['gdalwarp', '-q',
'-te', '{:f}'.format(clipW), '{:f}'.format(clipS),
'{:f}'.format(clipE), '{:f}'.format(clipN),
nlcdPath, nlcdclippedPath])
print('Extracting NLCD classes from raster...')
bandNum1 = 1
DEM = gdal.Open(nlcdclippedPath, GA_ReadOnly )
band1 = DEM.GetRasterBand(bandNum1)
geotransform = DEM.GetGeoTransform()
x_ul = geotransform[0]
y_ul = geotransform[3]
x_size = geotransform[1]
y_size = geotransform[5]
print('NLCD raster cellsize: {xSize:f} x {ySize:f}'.format(xSize=x_size,ySize=y_size))
data_raw = BandReadAsArray(band1)
(y_cell,x_cell) = data_raw.shape
xv, yv = meshgrid(range(x_cell), range(y_cell), indexing='xy')
x_coor = xv * x_size + x_ul + (x_size*.5)
y_coor = yv * y_size + y_ul + (y_size*.5)
data = np.copy(data_raw).astype(float)
band1 = None
DEM = None
# Griddata scheme
distm_y,distm_x = latlonlen(y_coor)
nlcdm = griddata((np.ravel(x_coor*distm_x),np.ravel(y_coor*distm_y)),
np.ravel(data), (np.ravel(lonm*distm_lon),
np.ravel(latm*distm_lat)), method='nearest')
nlcdm = nlcdm.reshape(lonm.shape)
nlcdm[np.isnan(depthm)] = np.nan
# NLCD to Manning's
LC = pd.read_csv('templates/nlcd_table.csv')
LC_match = list(zip(LC.NLCD.values,LC.Manning.values))
LC_dict = dict(zip(LC.NLCD.values,LC.Name.values))
manm = np.ones(nlcdm.shape)*.02 # Conservative base value
for v in LC_match:
manm[nlcdm==v[0]] = round(v[1],3)
BFRIC_base = 0.0025
#%% Output
print('Write to output...')
# Write to model_grid_hor
s = []
s += "Horizontal Segmentations\n"
s += "{nI:5d}{nJ:5d}".format(nI=cnI,nJ=cnJ)
for j in range(1,cnJ-1):
for i in range(1,cnI-1):
if ~np.isnan(depthm[i][j]):
s += "\n{I:5d}{J:5d}{H1:10.2f}{H2:10.2f}{depth:10.3f}{ang:10.2f}{lat:10.6f}{lon:10.6f}{datum:5.1f}".format(I=Im[i][j],J=Jm[i][j],H1=H1m[i][j],H2=H2m[i][j],depth=depthm[i][j],ang=ANGm[i][j],lat=latm[i][j],lon=lonm[i][j],datum=datumm[i][j])
with open(outPath+'model_grid_hor', 'w') as f:
f.writelines(s)
# Write to corner_loc
s = []
for j in range(cnJ+1):
for i in range(cnI+1):
s += "{I:5d}{J:5d}{lon:12.6f}{lat:12.6f}{mask:5d}\n".format(I=i+1,J=j+1,lat=cn_latm[i][j],lon=cn_lonm[i][j],mask=1)
with open(outPath+'corner_loc', 'w') as f:
f.writelines(s)
if not isNLCD:
# Write model_grid to csv
s = []
s += "I,J,H1,H2,depth,ang,lat,lon,datum"
for j in range(1,cnJ-1):
for i in range(1,cnI-1):
if ~np.isnan(depthm[i][j]):
s += "\n{I:d},{J:d},{H1:.2f},{H2:.2f},{depth:.3f},{ang:.2f},{lat:.6f},{lon:.6f},{datum:.1f}".format(I=Im[i][j],J=Jm[i][j],H1=H1m[i][j],H2=H2m[i][j],depth=depthm[i][j],ang=ANGm[i][j],lat=latm[i][j],lon=lonm[i][j],datum=datumm[i][j])
with open(outPath+'model_grid.csv', 'w') as f:
f.writelines(s)
else:
# Write to bfric2d.inp
s = []
s += "NVARBF BFRIC\n"
s += " -1{base:10.5f}\n".format(base=BFRIC_base)
s += " I J VARBF"
for j in range(cnJ):
for i in range(cnI):
s += "\n{I:5d}{J:5d}{Man:10.5f}".format(I=i+1,J=j+1,Man=manm[i][j])
with open(outPath+'bfric2d.inp', 'w') as f:
f.writelines(s)
# Write model_grid and NLCD/Manning's to csv
s = []
s += "I,J,H1,H2,depth,ang,lat,lon,datum,NLCD,Mannings,Land"
for j in range(cnJ):
for i in range(cnI):
if ~np.isnan(depthm[i][j]):
s += "\n{I:d},{J:d},{H1:.2f},{H2:.2f},{depth:.3f},{ang:.2f},{lat:.6f},{lon:.6f},{datum:.1f},{NLCD:.0f},{Mannings:.3f},{Land:s}".format(I=Im[i][j],J=Jm[i][j],H1=H1m[i][j],H2=H2m[i][j],depth=depthm[i][j],ang=ANGm[i][j],lat=latm[i][j],lon=lonm[i][j],datum=datumm[i][j],NLCD=nlcdm[i][j],Mannings=manm[i][j],Land=LC_dict[int(nlcdm[i][j])])
with open(outPath+'model_grid.csv', 'w') as f:
f.writelines(s)
# Save to binary
np.savez('out/bin.npz',
cnI=cnI,cnJ=cnJ,Im=Im,Jm=Jm,H1m=H1m,H2m=H2m,depthm=depthm,
ANGm=ANGm,latm=latm,lonm=lonm,datumm=datumm,nlcdm=nlcdm,manm=manm,
cn_latm=cn_latm,cn_lonm=cn_lonm)
#%% Stats
stats_node = cnI*cnJ
stats_area = np.nansum((H1m*H2m).ravel())
stats_dt1 = np.nanmin((0.5*H1m/np.sqrt(9.80665*(depthm+3))).ravel())
stats_dt2 = np.nanmin((0.5*H2m/np.sqrt(9.80665*(depthm+3))).ravel())
stats_lon_max = np.nanmax(lonm.ravel())
stats_lon_min = np.nanmin(lonm.ravel())
stats_lat_max = np.nanmax(latm.ravel())
stats_lat_min = np.nanmin(latm.ravel())
stats_H1_mean = np.nanmean(H1m.ravel())
stats_H1_median = np.nanmedian(H1m.ravel())
stats_H1_max = np.nanmax(H1m.ravel())
stats_H1_min = np.nanmin(H1m.ravel())
stats_H2_mean = np.nanmean(H2m.ravel())
stats_H2_median = np.nanmedian(H2m.ravel())
stats_H2_max = np.nanmax(H2m.ravel())
stats_H2_min = np.nanmin(H2m.ravel())
stats_ANG_mean = np.nanmean(ANGm.ravel())
stats_ANG_median = np.nanmedian(ANGm.ravel())
stats_ANG_max = np.nanmax(ANGm.ravel())
stats_ANG_min = np.nanmin(ANGm.ravel())
stats_depth_mean = np.nanmean(depthm.ravel())
stats_depth_median = np.nanmedian(depthm.ravel())
stats_depth_max = np.nanmax(depthm.ravel())
stats_depth_min = np.nanmin(depthm.ravel())
# Write to stats.txt
s=[]
s+='Stats\n'
s+='Nodes: {:d} x {:d} = {:d}\n'.format(cnI,cnJ,stats_node)
s+='Extent: {:.6f}, {:.6f}, {:.6f}, {:.6f}\n'.format(stats_lon_min,stats_lat_min,stats_lon_max,stats_lat_max)
s+='Area: {:.2f} m^2\n'.format(stats_area)
s+='H1: mean({:.2f}), median({:.2f}), min({:.2f}), max({:.2f})\n'.format(stats_H1_mean,stats_H1_median,stats_H1_min,stats_H1_max)
s+='H2: mean({:.2f}), median({:.2f}), min({:.2f}), max({:.2f})\n'.format(stats_H2_mean,stats_H2_median,stats_H2_min,stats_H2_max)
s+='ANG: mean({:.2f}), median({:.2f}), min({:.2f}), max({:.2f})\n'.format(stats_ANG_mean,stats_ANG_median,stats_ANG_min,stats_ANG_max)
s+='Depth: mean({:.3f}), median({:.3f}), min({:.3f}), max({:.3f})\n'.format(stats_depth_mean,stats_depth_median,stats_depth_min,stats_depth_max)
s+='Min time step along I: {:.3f} s\n'.format(stats_dt1)
s+='Min time step along J: {:.3f} s\n'.format(stats_dt2)
with open(outPath+'stats.txt', 'w') as f:
f.writelines(s)
#%%
shutil.rmtree(tmpPath)
status = 'Job completed'
run_end = time.time()
print(run_end-run_start)
print('Job completed successfully.\n')
return jsonify(lngA=lngA,latA=latA,
lngB=lngB,latB=latB,
lngC=lngC,latC=latC,
lngO=lngO,latO=latO,
lngM=lngM,latM=latM,
lngN=lngN,latN=latN,
lenI=lenI,lenJ=lenJ,
cnI=cnI,cnJ=cnJ,
status=status)
#%%
@app.route('/')
def index():
return render_template('index.html')
if __name__ == '__main__':
# app.run(host= '0.0.0.0',port=7110,debug=True)
app.run(port=7110,debug=True)
|
Harkor421/mono-rnn
|
scraper/dasmalwerk.py
|
import os
from io import BytesIO
from zipfile import ZipFile
import requests
from bs4 import BeautifulSoup
def get_hrefs():
html = requests.get("https://das-malwerk.herokuapp.com").text
soup = BeautifulSoup(html, "html.parser")
rows = soup.find_all("tr")[1:]
malware2href = {}
for row in rows:
a_tags = row.find_all("a")
file_hash = a_tags[1].text
href = a_tags[0]["href"]
malware2href[file_hash] = href
return malware2href
def download(malware2href, save_dir="raw/dasmalwerk"):
    # Create the save directory (and any missing parents) up front.
    os.makedirs(save_dir, exist_ok=True)
for file_hash, href in malware2href.items():
source = requests.get(href, allow_redirects=True)
with ZipFile(BytesIO(source.content)) as f:
f.extractall(path=save_dir, pwd=b"infected")
def main():
malware2href = get_hrefs()
download(malware2href)
if __name__ == "__main__":
main()
|
Harkor421/mono-rnn
|
dataset.py
|
import os
import pickle
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, Dataset, Subset
class MalwareDataset(Dataset):
def __init__(self, benign_dir="data/benign", malware_dir="data/malware"):
self.benign_dir = benign_dir
self.malware_dir = malware_dir
self.benign_files = sorted(os.listdir(benign_dir))
self.malware_files = sorted(os.listdir(malware_dir))
def __getitem__(self, index):
try:
file_dir = os.path.join(self.benign_dir, self.benign_files[index])
label = 0.0
except IndexError:
file_dir = os.path.join(
self.malware_dir, self.malware_files[index - len(self.benign_files)],
)
label = 1.0
with open(file_dir, "rb") as f:
file_ = torch.tensor(pickle.load(f))
return file_, label
def __len__(self):
return len(self.benign_files) + len(self.malware_files)
class UniLabelDataset(Dataset):
def __init__(self, data_dir, is_malware):
self.data_dir = data_dir
self.is_malware = is_malware
self.files = sorted(os.listdir(data_dir))
def __getitem__(self, index):
file_dir = os.path.join(self.data_dir, self.files[index])
with open(file_dir, "rb") as f:
file_ = torch.tensor(pickle.load(f))
return file_, float(self.is_malware)
def __len__(self):
return len(self.files)
def collate_fn(batch):
xs = pad_sequence([x[0] for x in batch], max_len=4096, padding_value=256)
ys = torch.tensor([x[1] for x in batch])
return xs, ys
def pad_sequence(sequences, max_len=None, padding_value=0):
batch_size = len(sequences)
if max_len is None:
max_len = max([s.size(0) for s in sequences])
out_tensor = sequences[0].new_full((batch_size, max_len), padding_value)
for i, tensor in enumerate(sequences):
length = tensor.size(0)
if max_len > length:
out_tensor[i, :length] = tensor
else:
out_tensor[i, :max_len] = tensor[:max_len]
return out_tensor
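# Illustrative sketch of how collate_fn pads a batch: shorter byte sequences are
# right-padded with 256 (the padding token index), longer ones are truncated to
# max_len. The toy tensors below are placeholders.
def _example_collate():
    batch = [(torch.tensor([1, 2, 3]), 0.0), (torch.tensor([7]), 1.0)]
    xs, ys = collate_fn(batch)   # xs has shape (2, 4096), ys is tensor([0., 1.])
    # xs[0, :3] == tensor([1, 2, 3]); every remaining entry of each row is 256
    return xs, ys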
def train_val_test_split(idx, val_size, test_size):
tv_idx, test_idx = train_test_split(idx, test_size=test_size, shuffle=True)
train_idx, val_idx = train_test_split(tv_idx, test_size=val_size, shuffle=True)
return train_idx, val_idx, test_idx
def make_idx(dataset, val_size, test_size):
num_benign = len(dataset.benign_files)
num_malware = len(dataset.malware_files)
benign_idx = range(num_benign)
malware_idx = range(num_benign, num_benign + num_malware)
benign_train_idx, benign_val_idx, benign_test_idx = train_val_test_split(
benign_idx, val_size, test_size
)
malware_train_idx, malware_val_idx, malware_test_idx = train_val_test_split(
malware_idx, val_size, test_size
)
train_idx = benign_train_idx + malware_train_idx
val_idx = benign_val_idx + malware_val_idx
test_idx = benign_test_idx + malware_test_idx
return train_idx, val_idx, test_idx
def make_loaders(batch_size, val_size, test_size):
dataset = MalwareDataset()
train_idx, val_idx, test_idx = make_idx(dataset, val_size, test_size)
train_dataset = Subset(dataset, indices=train_idx)
val_dataset = Subset(dataset, indices=val_idx)
test_dataset = Subset(dataset, indices=test_idx)
train_loader = make_loader(train_dataset, batch_size)
val_loader = make_loader(val_dataset, batch_size)
test_loader = make_loader(test_dataset, batch_size)
return train_loader, val_loader, test_loader
def make_loader(dataset, batch_size):
return DataLoader(
dataset, batch_size=batch_size, collate_fn=collate_fn, shuffle=True
)
|
Harkor421/mono-rnn
|
train.py
|
import argparse
import torch
import models
from dataset import make_loaders
from utils import plot_confusion_matrix, train
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Data loaders come from dataset.make_loaders; the split sizes here are
    # placeholder assumptions, not values taken from the original script.
    train_loader, val_loader, test_loader = make_loaders(
        args.batch_size, val_size=0.1, test_size=0.1
    )
    model = models.MalConvPlus(8, 4096, 128, 32).to(device)
    train(model, train_loader, val_loader, device, "malconv_plus")
    plot_confusion_matrix(model, test_loader, "malconv_plus", device)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", type=int, default=32, help="batch size")
    args = parser.parse_args()
    main(args)
|
Harkor421/mono-rnn
|
scraper/eda.py
|
import json
import os
from collections import Counter
import matplotlib.pyplot as plt
import pefile
def walk():
len_count = []
for directory in ("benign", "malware"):
data_dir = os.path.join("raw", directory)
for file_name in os.listdir(data_dir):
try:
file = pefile.PE(os.path.join(data_dir, file_name))
header = list(file.header)
len_count.append(len(header))
except pefile.PEFormatError:
print(f"Skipping {file_name}")
return Counter(len_count)
def plot(len_count):
fig, ax = plt.subplots(nrows=1, ncols=1)
ax.plot(len_count.keys(), len_count.values())
plt.show()
def write(len_count):
sorted_count = {
len_: count for len_, count in sorted(len_count.items(), key=lambda x: -x[1])
}
with open("len_count.json", "w+") as outfile:
        json.dump(sorted_count, outfile, indent=4)
if __name__ == "__main__":
len_count = walk()
plot(len_count)
write(len_count)
|
Harkor421/mono-rnn
|
scraper/make_data.py
|
import json
import os
import pickle
import time
import pefile
def main():
failed_files = []
for directory in ("benign", "malware"):
data_dir = os.path.join("raw", directory)
for file_name in os.listdir(data_dir):
output_dir = os.path.join(directory, file_name)
try:
file = pefile.PE(os.path.join(data_dir, file_name))
header = list(file.header)
with open(f"{output_dir}.pickle", "wb") as f:
pickle.dump(header, f)
except pefile.PEFormatError:
print(f"Skipping {file_name}")
failed_files.append(output_dir)
with open("log.json", "w") as outfile:
json.dump(failed_files, outfile, indent=4)
if __name__ == "__main__":
print("Pickling files...")
start = time.time()
main()
end = time.time()
print(f"Process completed in {int(end - start)} seconds")
|
Harkor421/mono-rnn
|
utils.py
|
import os
import random
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
from sklearn.metrics import auc, confusion_matrix, roc_curve
from torch import optim
from torch.nn.functional import sigmoid
from tqdm.auto import tqdm
def set_seed(seed):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False  # benchmarking picks non-deterministic kernels
def count_params(model, trainable_only=True):
if trainable_only:
return sum(p.numel() for p in model.parameters() if p.requires_grad)
return sum(p.numel() for p in model.parameters())
def set_plt_style():
plt.rcParams.update(
{"text.usetex": True, "font.family": "serif", "font.serif": ["cm"],}
)
def plot_confusion_matrix(model, test_loader, save_title, device, normalize="all"):
y_true, y_pred = predict(model, test_loader, device)
conf_mat = confusion_matrix(y_true, y_pred, normalize=normalize)
axis_labels = ("Benign", "Malware")
df = pd.DataFrame(conf_mat, index=axis_labels, columns=axis_labels)
plot = sns.heatmap(df, annot=True, cmap="Blues")
plot.figure.savefig(os.path.join("imgs", f"{save_title}_conf_mat.png"), dpi=300)
plt.close(plot.figure)
def plot_roc_curve(models, test_loader, save_title, device):
fig, ax = plt.subplots()
ax.grid(linestyle="--")
ax.set_xlabel("False Positive Rate")
ax.set_ylabel("True Positive Rate")
if isinstance(models, dict):
for label, model in models.items():
fpr, tpr, auc_score = _rates_auc(model, test_loader, device)
ax.plot(fpr, tpr, label=f"{label} ({auc_score:.2f})")
else:
fpr, tpr, auc_score = _rates_auc(models, test_loader, device)
        ax.plot(fpr, tpr, label=f"{save_title} ({auc_score:.2f})")
ax.plot([0, 1], [0, 1], linestyle="--", label="Chance (0.5)")
ax.legend(loc="best")
fig.savefig(os.path.join("imgs", f"{save_title}_roc.png"), dpi=300)
plt.close(fig)
def _rates_auc(model, test_loader, device):
y_true, y_pred = predict(model, test_loader, device, apply_sigmoid=True)
    fpr, tpr, _ = roc_curve(y_true, y_pred)
auc_score = auc(fpr, tpr)
return fpr, tpr, auc_score
@torch.no_grad()
def predict(model, data_loader, device, apply_sigmoid=False, to_numpy=True):
model.eval()
y_true = []
y_pred = []
for inputs, labels in tqdm(data_loader, leave=False):
inputs = inputs.to(device)
outputs = model(inputs)
y_true.append(labels)
y_pred.append(outputs)
y_true = torch.cat(y_true).to(int)
if apply_sigmoid:
y_pred = sigmoid(torch.cat(y_pred))
else:
y_pred = (torch.cat(y_pred) > 0).to(int)
if to_numpy:
y_true = y_true.cpu().numpy()
y_pred = y_pred.cpu().numpy()
assert y_true.shape == y_pred.shape
model.train()
return y_true, y_pred
def get_accuracy(model, data_loader, device):
y_true, y_pred = predict(model, data_loader, device, to_numpy=False)
return 100 * (y_true == y_pred).to(float).mean().item()
def plot_train_history(train_loss_history, val_loss_history, save_title):
fig, ax = plt.subplots()
time_ = range(len(train_loss_history))
ax.set_xlabel("Epochs")
ax.set_ylabel("BCE Loss")
ax.grid(linestyle="--")
ax.plot(time_, train_loss_history, color="blue", label="train loss")
ax.plot(time_, val_loss_history, color="red", label="val loss")
ax.legend(loc="best")
fig.savefig(os.path.join("figures", f"{save_title}_train_history.png"), dpi=300)
plt.close(fig)
def train(
model,
train_loader,
val_loader,
device,
save_title,
lr=0.001,
patience=3,
num_epochs=50,
verbose=True,
):
train_loss_history = []
val_loss_history = []
criterion = torch.nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters(), lr=lr)
monitor = EarlyStopMonitor(patience)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, factor=0.5, patience=patience
)
for epoch in range(1, num_epochs + 1):
model.train()
train_loss = run_epoch(model, train_loader, device, criterion, optimizer)
train_loss_history.append(train_loss)
model.eval()
with torch.no_grad():
val_loss = run_epoch(model, val_loader, device, criterion)
val_loss_history.append(val_loss)
if verbose:
tqdm.write(
f"Epoch [{epoch}/{num_epochs}], "
f"Train Loss: {train_loss:.4f}, "
f"Val Loss: {val_loss:.4f}"
)
scheduler.step(val_loss)
if monitor.step(val_loss):
break
if len(val_loss_history) == 1 or val_loss < val_loss_history[-2]:
torch.save(
model.state_dict(), os.path.join("checkpoints", f"{save_title}.pt"),
)
plot_train_history(train_loss_history, val_loss_history, save_title)
def run_epoch(model, data_loader, device, criterion, optimizer=None):
total_loss = 0
for inputs, labels in tqdm(data_loader, leave=False):
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
loss = criterion(outputs, labels)
if optimizer:
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_loss += loss.item()
return total_loss / len(data_loader)
class EarlyStopMonitor:
def __init__(self, patience, mode="min"):
assert mode in {"min", "max"}, "`mode` must be one of 'min' or 'max'"
self.log = []
self.mode = mode
self.count = 0
self.patience = patience
def step(self, metric):
if not self.log:
self.log.append(metric)
return False
flag = metric > self.log[-1]
if flag == (self.mode == "min"):
self.count += 1
else:
self.count = 0
self.log.append(metric)
return self.count > self.patience
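# Illustrative sketch of the early-stopping behaviour: with mode="min" the
# counter grows on every step whose metric is worse than the previous one and
# resets otherwise; step() returns True once the counter exceeds `patience`.
def _example_early_stopping():
    monitor = EarlyStopMonitor(patience=2)
    losses = [1.0, 0.9, 1.1, 1.2, 1.3, 1.4]
    stops = [monitor.step(loss) for loss in losses]
    # stops == [False, False, False, False, True, True]
    return stops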
|
Harkor421/mono-rnn
|
scraper/make_label.py
|
import csv
import os
def main():
dir = "raw"
with open(os.path.join(dir, "labels.csv"), mode="w") as f:
writer = csv.writer(f, delimiter=",")
for file_name in os.listdir(dir):
is_malware = len(file_name) > 31
writer.writerow([file_name, is_malware])
if __name__ == "__main__":
main()
|
Harkor421/mono-rnn
|
models.py
|
import torch
from torch import nn
from torch.nn import functional as F
class MalConvBase(nn.Module):
def __init__(self, embed_dim, max_len, out_channels, window_size, dropout=0.5):
super(MalConvBase, self).__init__()
self.embed = nn.Embedding(257, embed_dim)
self.dropout = nn.Dropout(dropout)
self.conv = nn.Conv1d(
in_channels=embed_dim,
out_channels=out_channels * 2,
kernel_size=window_size,
stride=window_size,
)
self.fc = nn.Linear(out_channels, 1)
def forward(self, x):
embedding = self.dropout(self.embed(x))
conv_in = embedding.permute(0, 2, 1)
conv_out = self.conv(conv_in)
glu_out = F.glu(conv_out, dim=1)
values, _ = glu_out.max(dim=-1)
output = self.fc(values).squeeze(1)
return output
class MalConvPlus(nn.Module):
def __init__(self, embed_dim, max_len, out_channels, window_size, dropout=0.5):
super(MalConvPlus, self).__init__()
self.tok_embed = nn.Embedding(257, embed_dim)
self.pos_embed = nn.Embedding(max_len, embed_dim)
self.dropout = nn.Dropout(dropout)
self.conv = nn.Conv1d(
in_channels=embed_dim,
out_channels=out_channels * 2,
kernel_size=window_size,
stride=window_size,
)
self.fc = nn.Linear(out_channels, 1)
def forward(self, x):
batch_size, seq_len = x.size(0), x.size(1)
tok_embedding = self.tok_embed(x)
pos = torch.arange(seq_len).unsqueeze(0).repeat(batch_size, 1).to(x.device)
pos_embedding = self.pos_embed(pos)
embedding = self.dropout(tok_embedding + pos_embedding)
conv_in = embedding.permute(0, 2, 1)
conv_out = self.conv(conv_in)
glu_out = F.glu(conv_out, dim=1)
values, _ = glu_out.max(dim=-1)
output = self.fc(values).squeeze(1)
return output
class RCNN(nn.Module):
def __init__(
self,
embed_dim,
out_channels,
window_size,
module,
hidden_size,
num_layers,
bidirectional,
residual,
dropout=0.5,
):
super(RCNN, self).__init__()
assert module.__name__ in {
"RNN",
"GRU",
"LSTM",
}, "`module` must be a `torch.nn` recurrent layer"
self.residual = residual
self.embed = nn.Embedding(257, embed_dim)
self.conv = nn.Conv1d(
in_channels=embed_dim,
out_channels=out_channels,
kernel_size=window_size,
stride=window_size,
)
self.rnn = module(
input_size=out_channels,
hidden_size=hidden_size,
num_layers=num_layers,
bidirectional=bidirectional,
)
self.dropout = nn.Dropout(dropout)
rnn_out_size = (int(bidirectional) + 1) * hidden_size
if residual:
self.fc = nn.Linear(out_channels + rnn_out_size, 1)
else:
self.fc = nn.Linear(rnn_out_size, 1)
def forward(self, x):
embedding = self.dropout(self.embed(x))
conv_in = embedding.permute(0, 2, 1)
conv_out = self.conv(conv_in)
if self.residual:
values, _ = conv_out.max(dim=-1)
conv_out = conv_out.permute(2, 0, 1)
rnn_out, _ = self.rnn(conv_out)
fc_in = rnn_out[-1]
if self.residual:
fc_in = torch.cat((fc_in, values), dim=-1)
output = self.fc(fc_in).squeeze(1)
return output
class AttentionRCNN(nn.Module):
def __init__(
self,
embed_dim,
out_channels,
window_size,
module,
hidden_size,
num_layers,
bidirectional,
attn_size,
residual,
dropout=0.5,
):
super(AttentionRCNN, self).__init__()
assert module.__name__ in {
"RNN",
"GRU",
"LSTM",
}, "`module` must be a `torch.nn` recurrent layer"
self.residual = residual
self.embed = nn.Embedding(257, embed_dim)
self.conv = nn.Conv1d(
in_channels=embed_dim,
out_channels=out_channels,
kernel_size=window_size,
stride=window_size,
)
self.rnn = module(
input_size=out_channels,
hidden_size=hidden_size,
num_layers=num_layers,
bidirectional=bidirectional,
)
rnn_out_size = (int(bidirectional) + 1) * hidden_size
self.local2attn = nn.Linear(rnn_out_size, attn_size)
self.global2attn = nn.Linear(rnn_out_size, attn_size, bias=False)
self.attn_scale = nn.Parameter(
nn.init.kaiming_uniform_(torch.empty(attn_size, 1))
)
self.dropout = nn.Dropout(dropout)
if residual:
self.fc = nn.Linear(out_channels + rnn_out_size, 1)
else:
self.fc = nn.Linear(rnn_out_size, 1)
def forward(self, x):
embedding = self.dropout(self.embed(x))
conv_in = embedding.permute(0, 2, 1)
conv_out = self.conv(conv_in)
if self.residual:
values, _ = conv_out.max(dim=-1)
conv_out = conv_out.permute(2, 0, 1)
rnn_out, _ = self.rnn(conv_out)
global_rnn_out = rnn_out.mean(dim=0)
attention = torch.tanh(
self.local2attn(rnn_out) + self.global2attn(global_rnn_out)
).permute(1, 0, 2)
alpha = F.softmax(attention.matmul(self.attn_scale), dim=-1)
rnn_out = rnn_out.permute(1, 0, 2)
fc_in = (alpha * rnn_out).sum(dim=1)
if self.residual:
fc_in = torch.cat((fc_in, values), dim=-1)
output = self.fc(fc_in).squeeze(1)
return output
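# Illustrative smoke-test sketch; the hyper-parameters below are arbitrary
# placeholders. Each model maps a batch of byte sequences, encoded as integers
# in [0, 256], to one logit per sample.
def _example_forward_pass():
    x = torch.randint(0, 257, (2, 4096))                # batch of 2 "files"
    base = MalConvBase(embed_dim=8, max_len=4096, out_channels=128, window_size=32)
    plus = MalConvPlus(embed_dim=8, max_len=4096, out_channels=128, window_size=32)
    rcnn = RCNN(embed_dim=8, out_channels=128, window_size=32, module=nn.LSTM,
                hidden_size=64, num_layers=1, bidirectional=False, residual=True)
    return base(x).shape, plus(x).shape, rcnn(x).shape  # each is torch.Size([2])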
|
Harkor421/mono-rnn
|
scraper/malshare.py
|
import argparse
import os
import random
from io import BytesIO
from zipfile import BadZipFile, ZipFile
import requests
from bs4 import BeautifulSoup
def construct_href(link):
root, rest = link.split("sample")
action, hash_ = rest.split("detail")
return f"{root}sampleshare{action}getfile{hash_}"
def download(session, href, save_dir):
source = session.get(href, allow_redirects=True)
try:
with ZipFile(BytesIO(source.content)) as f:
f.extractall(path=save_dir, pwd=b"infected")
    except BadZipFile:
        # Assumes the sample hash is the trailing `hash=` query parameter of the href.
        with open(os.path.join(save_dir, href.split("hash=")[-1]), "w+b") as f:
            f.write(source.content)
def main(args):
    # Create the save directory (and any missing parents) up front.
    os.makedirs(args.save_dir, exist_ok=True)
with requests.Session() as session:
credentials = {"api_key": args.api_key or os.getenv("api_key")}
response = session.post("https://malshare.com", credentials)
assert response.status_code == 302
html = session.get("https://malshare.com/search.php?query=YRP/IsPE32").text
soup = BeautifulSoup(html, "html.parser")
tds = soup.find_all("td", {"class": "hash_font sorting_1"})
indices = random.sample(range(len(tds)), args.num_files)
for index in indices:
link = tds[index].find("a")["href"]
href = construct_href(link)
download(session, href, args.save_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Download malware")
parser.add_argument(
"--num_files", type=int, default=1000, help="number of malware to download"
)
parser.add_argument(
"--save_dir", type=str, default="raw/malshare", help="directory to save malware"
)
parser.add_argument("--api_key", type=str, default="", help="malshare api key")
args = parser.parse_args()
main(args)
|
violetguos/intro_machine_learning
|
a1/q1.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 26 13:16:31 2017
@author: vikuo
"""
from sklearn import datasets
import matplotlib.pyplot as plt
import numpy as np
import random
def load_data():
boston = datasets.load_boston()
X = boston.data
#506 row, 13 columns
#print X.shape[0]
y = boston.target #y is the price
#print y.shape[0]
features = boston.feature_names
return X,y,features
def visualize(X, y, features):
plt.figure(figsize=(20, 5))
    feature_count = X.shape[1] #13 features
# i: index
for i in range(feature_count):
plt.subplot(3, 5, i + 1)
#TODO: Plot feature i against y
plt.plot(X[:,i], y, '.')
plt.ylabel("Price")
plt.xlabel(features[i])
plt.tight_layout()
plt.show()
def split_data_8020(X, Y):
#select columns from x, y
xrows = X.shape[1]
chosenSamples = random.sample(range(len(Y)),
len(Y)//5)
t_len = len(Y) - len(chosenSamples)
sample_len = len(chosenSamples)
trainingSetX = np.zeros((t_len, xrows))
testSetX = np.zeros((sample_len, xrows) )
trainingSetY = np.zeros(t_len)
testSetY = np.zeros(sample_len)
ii, ij = 0,0
#need whole numbers to divide, use the operator //
for i in range(len(Y)):
#implement insert xy sample tuple for now
if i not in chosenSamples:
#what = X[i,]
#print "wnat", what
trainingSetX[ii,] = X[i,]
#print "tslit, train X, ", len(trainingSetX)
trainingSetY[ii]= Y[i]#ROW of X
#print "thwaraw " ,X[i,]
ii +=1
elif i in chosenSamples:
testSetX[ij,]=X[i,]
testSetY[ij]=Y[i]
ij +=1
#print trainingSetX #.shape[0], testSetX.shape[0], trainingSetY, testSetY
return trainingSetX, testSetX,\
trainingSetY, testSetY
#def tabulate_weight(w, x):
# for i in range(len(x)):
# for a, b in zip(w,x[i]):
# print "{}\t{}".format(repr(a),repr(b))
def fit_regression(X,Y):
#TODO: implement linear regression
# Remember to use np.linalg.solve instead of inverting!
xtx = np.dot(np.transpose(X), X)
xty = np.dot(np.transpose(X), Y)
w = np.linalg.solve(xtx, xty)
#print type(w)
#Wtabulate_weight(w, X)
return w #w_1
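#Illustrative check of the normal equations used above (toy data only):
#solving (X^T X) w = X^T y recovers the true weights exactly when y is a
#noise-free linear function of X.
def _example_fit_regression():
    X_toy = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])  # bias column + one feature
    w_true = np.array([2.0, 3.0])
    y_toy = np.dot(X_toy, w_true)          # [2, 5, 8]
    w_hat = fit_regression(X_toy, y_toy)   # recovers [2, 3] up to float precision
    return w_hat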
def main():
# Load the data
X, y, features = load_data()
xrows = X.shape[1]
print("Features: {}".format(features))
# Visualize the features
visualize(X, y, features)
#TODO: Split data into train and test
    X = np.concatenate((np.ones((X.shape[0],1)),X),axis=1) #add a constant-one feature so no separate bias term is needed
# Fit regression model
training_x, testing_x, training_y, testing_y = split_data_8020(X, y)
#print "train x ", training_x.shape[1] #shape 0 is 1, shape 1 is 405
#print "train y ", training_y.shape[0] #shape 0 is 405
#print "test x ", test_x.shape[1] #shape 0 is 1, shape 1 is 101
#print "test y ", test_y.shape[0] #shape 0 is 101
w = fit_regression(training_x, training_y)
# Compute fitted values, MSE, etc.
y_hat = np.dot(testing_x, w)
#print "y_hat ", y_hat
#print "y ", y
#Mm
mse = ((y_hat - testing_y) **2).mean()
#print "train mse", train_mse
print "mse", mse
    #another two error measures:
    #mean absolute error and root mean squared error
    mae = np.mean(np.absolute(y_hat - testing_y))
    rmse = np.sqrt(np.mean((y_hat - testing_y) ** 2))
    print "----Two extra error measurements:---"
    print "mean absolute error", mae
    print "root mean squared error", rmse
    #feature selection
print "-----feature ranking----"
for i in range(len(w)):
print features[i], w[i] #"feature", elem
if __name__ == "__main__":
main()
|
violetguos/intro_machine_learning
|
a2/code/q2_0.py
|
'''
Question 2.0 Skeleton Code
Here you should load the data and plot
the means for each of the digit classes.
'''
import data
import numpy as np
import matplotlib.pyplot as plt
#import matplotlib.pyplot.imshow
def mean_i_digit(i_digits):
    '''Returns the 64-pixel mean for one digit class,
    averaged over all of its samples (rather than a hard-coded 700).
    i_digits is an ndarray of shape (n_samples, 64).
    '''
    return np.mean(i_digits, axis=0)
def plot_means(train_data, train_labels):
means = []
for i in range(0, 10):
i_mean_matrix = np.zeros((8,8))
i_digits = data.get_digits_by_label(train_data, train_labels, i)
# Compute mean of class i
        # i_digits has one row per sample and 64 columns (one per pixel)
i_mean = mean_i_digit(i_digits) #imean is 64
#tes_list =i_mean.tolist()
#print len(tes_list)
i_mean_matrix = np.reshape(i_mean, (8,8))
means.append(i_mean_matrix)
#print means
# Plot all means on same axis
all_concat = np.concatenate(means,1)
plt.imshow(all_concat, cmap='gray')
plt.show()
if __name__ == '__main__':
train_data, train_labels, _, _ = data.load_all_data_from_zip('a2digits.zip', 'data')
plot_means(train_data, train_labels)
|
violetguos/intro_machine_learning
|
a2/code/q2_2.py
|
'''
Question 2.2 Skeleton Code
Here you should implement and evaluate the Conditional Gaussian classifier.
'''
import data
import numpy as np
# Import pyplot - plt.imshow is useful!
import matplotlib.pyplot as plt
import json
def mean_i_digit(i_digits):
'''returns the mean for one digit,
avg across 700 samples for 64 pixels
i_digit is ndarray
'''
i_mean = np.zeros(64)
i_sum = np.sum(i_digits, axis = 0)
for i in range(0,64):
i_mean[i]=i_sum[i]/700.0
#print i_mean
return i_mean
def compute_mean_mles(train_data, train_labels):
'''
Compute the mean estimate for each digit class
Should return a numpy array of size (10,64)
The ith row will correspond to the mean estimate for digit class i
train_data: 7000 by 64
train_labels: 7000
'''
means = np.zeros((10, 64))
for i in range(0, 10):
i_mean_matrix = np.zeros((8,8))
i_digits = data.get_digits_by_label(train_data, train_labels, i)
means[i] = mean_i_digit(i_digits) #imean is 64
return means
def cov_vector(v1, v2):
    '''
    computes the sample covariance between vectors v1 and v2
    (divides by n - 1, matching np.cov)
    '''
e_v1 = np.mean(v1)
e_v2 = np.mean(v2)
temp_sum = 0
for i in range(len(v1)):
#print "vqeoiajsiof", v1[i]
temp = (v1[i])*(v2[i]) - e_v1*e_v2
temp_sum +=temp
temp_sum = temp_sum/(1.0 * len(v1) - 1)
return (temp_sum)
def compute_sigma_mles(train_data, train_labels):
'''
Compute the covariance estimate for each digit class
Should return a three dimensional numpy array of shape (10, 64, 64)
consisting of a covariance matrix for each digit class
'''
covariances = np.zeros((10, 64, 64))
# Compute covariances
test_cov = np.zeros((10, 64, 64))
for i in range(0, 10):
i_digits = data.get_digits_by_label(train_data, train_labels, i)
#print "idigit", i_digits[:,i].shape #i digits 700 by 64
#construct 64 by 64
for ii in range(0, 64):
for jj in range(0, 64):
#print "-------------covar----------"
#*this is verified with np cov
i_cov_column = cov_vector(i_digits[:,ii], i_digits[:,jj])
#print i_cov_column
covariances[i][ii][jj] = i_cov_column
        iden_matrix = 0.01 * np.identity(64)
        covariances[i] = covariances[i] + iden_matrix  # regularize so each Sigma_k stays invertible
return covariances
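# A hedged cross-check sketch: np.cov computes the same per-class sample
# covariance in one call (rowvar=False treats columns as variables, and its
# default ddof matches the divide-by-(n-1) above). Note it does not add the
# 0.01*I regularization; illustrative only and not called by main().
def class_covariance_numpy(i_digits):
    return np.cov(i_digits, rowvar=False)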
def plot_cov_diagonal(covariances):
# Plot the diagonal of each covariance matrix side by side
cov_diag_all = []
for i in range(10):
i_mean_matrix = np.zeros((8,8))
cov_diag = np.diag(covariances[i])
log_cov_diag = np.log(cov_diag)
#print "-------------covdiag------"
#print cov_diag.shape
i_mean_matrix = np.reshape(log_cov_diag, (8,8))
cov_diag_all.append(i_mean_matrix)
all_concat = np.concatenate(cov_diag_all,1)
plt.imshow(all_concat, cmap='gray')
plt.show()
def generative_likelihood(digits, means, covariances):
'''
Compute the generative log-likelihood:
log p(x|y,mu,Sigma)
Should return an n x 10 numpy array
'''
n = digits.shape[0]
p_x = np.zeros((n,10))
for i in range(0, n):
for j in range(0, 10):
x = digits
pi_term = (2* np.pi) #-10/2
x_diff_miu = np.subtract(x[i], means[j])
inv_term = np.linalg.inv(covariances[j])
det_term = np.linalg.det(covariances[j])
x_miu_x_sigmak = np.dot(np.transpose(x_diff_miu), inv_term) #MATMUL
#print x_miu_x_sigmak
exp_term = np.dot(x_miu_x_sigmak, x_diff_miu)
p_x[i][j] = -(64 / 2) * np.log(pi_term)\
-0.5*np.log(det_term)\
-0.5*(exp_term)
#np.log(inv_det_root)
#+ np.log(0.1)
#p_x = p_x.T
#print "----------in generative helper"
#print np.exp(p_x)
return p_x
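# A hedged sanity-check sketch: scipy (assumed available, version >= 0.14)
# provides the multivariate Gaussian log-density directly. With the 0.01*I
# regularization above the covariances should be invertible; otherwise pass
# allow_singular=True. Not used by the rest of this file.
def generative_likelihood_scipy(digits, means, covariances):
    from scipy.stats import multivariate_normal
    p_x = np.zeros((digits.shape[0], 10))
    for j in range(10):
        p_x[:, j] = multivariate_normal.logpdf(digits, mean=means[j], cov=covariances[j])
    return p_x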
def conditional_likelihood(digits, means, covariances):
'''
Compute the conditional likelihood:
log p(y|x, mu, Sigma)
This should be a numpy array of shape (n, 10)
Where n is the number of datapoints and 10 corresponds to each digit class
'''
    n = len(digits)
    log_pxy_gen = generative_likelihood(digits, means, covariances)
    log_pyx_cond = np.zeros((n, 10))
    for i in range(0, n):
        p_x_y_ = np.exp(log_pxy_gen[i]) * 0.1  # joint p(x_i, y) over the 10 classes
        p_x_y_sum = np.sum(p_x_y_)             # evidence p(x_i)
        # normalize each row by its own evidence so the log-posteriors are per-example
        log_pyx_cond[i] = log_pxy_gen[i] + np.log(0.1) - np.log(p_x_y_sum)
    return log_pyx_cond
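# A hedged, numerically stable alternative sketch: normalize in log space with
# logsumexp instead of exponentiating and summing. Assumes scipy.special
# exposes logsumexp (older scipy has it under scipy.misc). Illustrative only.
def conditional_likelihood_logsumexp(digits, means, covariances):
    from scipy.special import logsumexp
    log_joint = generative_likelihood(digits, means, covariances) + np.log(0.1)
    return log_joint - logsumexp(log_joint, axis=1)[:, np.newaxis]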
def avg_conditional_likelihood(digits, labels, means, covariances):
#(digits, labels, means, covariances):
'''
Compute the average conditional likelihood over the true class labels
AVG( log p(y_i|x_i, mu, Sigma) )
i.e. the average log likelihood that the model assigns to the correct class label
'''
cond_likelihood = conditional_likelihood(digits, means, covariances)
# Compute as described above and return
n = len(digits)
p_y = 0
avg_p_y = 0
for i in range(0,n):
#cond_label = avg_item.argmin() #most probable, prediction
cond_label = labels[i]
p_y += cond_likelihood[i][int(cond_label)]
avg_p_y = p_y / n
print "-------------in avg cond likelihood--------"
print avg_p_y
return avg_p_y
def classify_data(digits, means, covariances):
'''
Classify new points by taking the most likely posterior class
'''
cond_likelihood = conditional_likelihood(digits, means, covariances)
cond_exp = np.exp(cond_likelihood)
#print "------cond likelihood----- ", cond_likelihood[0]
n = digits.shape[0]
max_class = []
# Compute and return the most likely class
for class_i in cond_exp:
#go through all n digits, pick the max out of 10
#= cond_exp[i,:] #ith row, has 10 digits
        max_class.append(np.argmax(class_i))  # most likely class is the argmax of the posterior
return max_class
def classify_accuracy(predict_label, real_label, n):
accurate_class = 0
for i in range(0,n):
if predict_label[i] == real_label[i]:
accurate_class += 1
print "-------classify accuracy", (1.0 * accurate_class / n)
def main():
train_data, train_labels, test_data, test_labels = data.load_all_data('data')
# Fit the model
means = compute_mean_mles(train_data, train_labels)
covariances = compute_sigma_mles(train_data, train_labels)
print "============Q2.2 Part1 plot of log of Sigma_k diagonal"
plot_cov_diagonal(covariances)
print "============Q2.2 part2 average log likelihood========"
print "===========Train data average log likelihood========="
avg_train = avg_conditional_likelihood(train_data, train_labels, means, covariances)
print "===========Test data average log likelihood ========"
avg_test = avg_conditional_likelihood(test_data, test_labels, means, covariances)
#the final code for classify but need to get everything work now
print "=============Q2.2 part3 prediction and accuracy of each predication======"
print "=============Train data prediction and accuracy========"
train_predict = classify_data(train_data, means, covariances)
n_dim_train = train_labels.shape[0]
classify_accuracy(train_predict, train_labels, n_dim_train)
print "=============Test data prediction and accuracy========="
test_predict = classify_data(test_data, means, covariances)
n_dim_test = test_labels.shape[0]
classify_accuracy(test_predict, test_labels,n_dim_test )
if __name__ == '__main__':
main()
|
violetguos/intro_machine_learning
|
a2/code/q2_1.py
|
<gh_stars>0
'''
Question 2.1 Skeleton Code
Here you should implement and evaluate the k-NN classifier.
'''
import data
import numpy as np
# Import pyplot - plt.imshow is useful!
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
class KNearestNeighbor(object):
'''
K Nearest Neighbor classifier
'''
def __init__(self, train_data, train_labels):
self.train_data = train_data
self.train_norm = (self.train_data**2).sum(axis=1).reshape(-1,1)
self.train_labels = train_labels
def l2_distance(self, test_point):
'''
Compute L2 distance between test point and each training point
Input: test_point is a 1d numpy array
Output: dist is a numpy array containing the distances between the test point and each training point
'''
# Process test point shape
test_point = np.squeeze(test_point)
if test_point.ndim == 1:
test_point = test_point.reshape(1, -1)
assert test_point.shape[1] == self.train_data.shape[1]
# Compute squared distance
#train_norm = (self.train_data**2).sum(axis=1).reshape(-1,1)
test_norm = (test_point**2).sum(axis=1).reshape(1,-1)
dist = self.train_norm + test_norm - 2*self.train_data.dot(test_point.transpose())
return np.squeeze(dist)
def query_knn(self, test_point, k):
'''
Query a single test point using the k-NN algorithm
You should return the digit label provided by the algorithm
'''
labels = [float(i) for i in range(0,10)]
distances = np.array(self.l2_distance(test_point))
k_idx = np.array((distances.argsort()[:k]))
label_count = np.zeros(10)
#index is the label, number is # of instance in k Neighbours
for j in k_idx:
for i in range(len(labels)):
#print "train label j", self.train_labels[j], j
if self.train_labels[j] == labels[i]:
label_count[i] +=1
#print "label count", label_count
        #argmax below breaks ties by picking the first (lowest) label
max_label_idx = label_count.argmax()
#print "mac label", max_label_idx
digit = float(max_label_idx)
return digit
def cross_validation(train_data, train_labels, k_range=np.arange(1,16)):
all_k = []
for k in k_range:
# Loop over folds
# Evaluate k-NN
# ...
kf = KFold(n_splits=10)
k_train_accuracy = []
for train_index, test_index in kf.split(train_data):
x_train, x_test = train_data[train_index], train_data[test_index]
y_train, y_test = train_labels[train_index], train_labels[test_index]
knn_new = KNearestNeighbor(x_train, y_train)
k_train_accuracy.append(classification_accuracy(knn_new ,k, x_test, y_test))
k_accuracy = (1.0 *sum(k_train_accuracy)) / (1.0 *len(k_train_accuracy))
all_k.append(k_accuracy)
print "========== K & average across fold"
for knum in range(0, 15):
print (knum + 1)," & ", all_k[knum]
all_k = np.array(all_k)
    optimal_k = all_k.argmax()  # index into k_range; the best k value itself is k_range[optimal_k]
return optimal_k
def classification_accuracy(knn, k, eval_data, eval_labels):
'''
Evaluate the classification accuracy of knn on the given 'eval_data'
using the labels
'''
knn_labels = []
for col in eval_data:
#col is 64 vector
knn_labels.append((knn.query_knn(col, k)))
#print "knn_labels clasojsaires", type(knn_labels)
cnt_total = len(eval_labels)
cnt_accurate = 0
for j in range(len(eval_labels)):
if eval_labels[j] == knn_labels[j]:
cnt_accurate +=1
return float(cnt_accurate) / float(cnt_total)
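# A one-line equivalent sketch of the accuracy loop above (hypothetical helper,
# not called by main); assumes the labels are plain floats as loaded by
# data.load_all_data.
def classification_accuracy_vectorized(knn, k, eval_data, eval_labels):
    preds = np.array([knn.query_knn(x, k) for x in eval_data])
    return np.mean(preds == np.asarray(eval_labels))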
def main():
train_data, train_labels, test_data, test_labels = data.load_all_data('data')
#print "lenlnelne",len(train_labels) = 7000, labels are floats
knn = KNearestNeighbor(train_data, train_labels)
#===========Q1,2--------#
#k_1_accuracy = classification_accuracy(knn, 1, test_data, test_labels)
#k_15_accuracy = classification_accuracy(knn, 15, train_data, train_labels)
print "=======K, traning accuracy, test accuracy===="
for knum in range(1, 16):
test_acc = classification_accuracy(knn, knum, test_data, test_labels)
train_acc = classification_accuracy(knn, knum, train_data, train_labels)
print knum," & ", train_acc, " & ", test_acc
#print "k 1", k_1_accuracy
#print "k 15.", k_15_accuracy
#-----------------Q3---------------#
opti_k_index = cross_validation(train_data, train_labels)
#k1_accuracy is the highest
k_1_test_accuracy = classification_accuracy(knn, 1, test_data, test_labels)
k_1_train_accuracy = classification_accuracy(knn, 1, train_data, train_labels)
print "k_1_test_accuracy", k_1_test_accuracy
print "k_1_train_accuracy", k_1_train_accuracy
if __name__ == '__main__':
main()
|
violetguos/intro_machine_learning
|
a1/q3.py
|
import numpy as np
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
BATCHES = 50
#####################
# This program compares mini-batch gradient estimates of the linear regression
# loss against the true full-batch gradient (squared distance and cosine
# similarity), and plots the per-weight gradient variance against the batch
# size m on a log-log scale.
#####################
class BatchSampler(object):
'''
A (very) simple wrapper to randomly sample batches without replacement.
You shouldn't need to touch this.
'''
def __init__(self, data, targets, batch_size):
self.num_points = data.shape[0]
self.features = data.shape[1]
self.batch_size = batch_size
self.data = data
self.targets = targets
self.indices = np.arange(self.num_points)
def random_batch_indices(self, m=None):
'''
Get random batch indices without replacement from the dataset.
If m is given the batch will be of size m. Otherwise will default to the class initialized value.
'''
if m is None:
indices = np.random.choice(self.indices, self.batch_size, replace=False)
else:
indices = np.random.choice(self.indices, m, replace=False)
return indices
def get_batch(self, m=None):
'''
Get a random batch without replacement from the dataset.
If m is given the batch will be of size m. Otherwise will default to the class initialized value.
'''
indices = self.random_batch_indices(m)
X_batch = np.take(self.data, indices, 0)
y_batch = self.targets[indices]
return X_batch, y_batch
def load_data_and_init_params():
'''
Load the Boston houses dataset and randomly initialise linear regression weights.
'''
print('------ Loading Boston Houses Dataset ------')
X, y = load_boston(True)
features = X.shape[1]
# Initialize w
w = np.random.randn(features) #w is ndarray
print("Loaded...")
print("Total data points: {0}\nFeature count: {1}".format(X.shape[0], X.shape[1]))
print("Random parameters, w: {0}".format(w))
print('-------------------------------------------\n\n\n')
return X, y, w
def cosine_similarity(vec1, vec2):
'''
Compute the cosine similarity (cos theta) between two vectors.
'''
dot = np.dot(vec1, vec2)
sum1 = np.sqrt(np.dot(vec1, vec1))
sum2 = np.sqrt(np.dot(vec2, vec2))
return dot / (sum1 * sum2)
#TODO: implement linear regression gradient
def lin_reg_gradient(X, y, w):
'''
Compute gradient of linear regression model parameterized by w
'''
xTrans = np.transpose(X)
xtx = np.dot(xTrans, X)
xtxw =np.dot(xtx, w)
xty = np.dot(xTrans, y)
#grad = np.dot(xtxw,xty)
grad = xtxw - xty
return (2*grad)
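# A hedged finite-difference check (hypothetical helper, not called anywhere):
# perturb one weight at a time and compare the numerical gradient of the
# squared-error loss L(w) = ||Xw - y||^2 with the analytic lin_reg_gradient above.
def finite_difference_gradient(X, y, w, eps=1e-6):
    def loss(w_):
        r = np.dot(X, w_) - y
        return np.dot(r, r)
    num_grad = np.zeros_like(w)
    for i in range(len(w)):
        w_plus, w_minus = w.copy(), w.copy()
        w_plus[i] += eps
        w_minus[i] -= eps
        num_grad[i] = (loss(w_plus) - loss(w_minus)) / (2 * eps)
    return num_grad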
def var_grad (x):
len_x = np.size(x)
sum_x = 0
for i in range(len_x):
sum_x += x[i]
avg_x = sum_x /float(len_x)
sum_x2 = 0
for i in range(len(x)):
sum_x2+=(x[i] - avg_x)**2
var_x = sum_x2 / float(len_x)
return var_x
def square_metric(grad, grad_true):
diff = grad - grad_true
dist = (np.array(diff)**2).mean()
return np.sqrt(dist)
#print "average" , (reduce(lambda x, y: x + y, w_j) / len(w_j))
def grad_500(x, y, w, m, k, sampler):
grad_sum =0
batch_sum = 0
batch_avg =0
for i in range(0,k):
X_b, y_b = sampler.get_batch(m)
#print len(X_b)
#for j in range(m):
batch_grad = lin_reg_gradient(X_b, y_b,w)
batch_sum += batch_grad
batch_avg = batch_sum/m
grad_sum +=batch_avg
#print "batch_grad ", batch_grad
#print "grad", grad_sum
b_grad = grad_sum / k
#print "b_grad", b_grad
return b_grad
def grad_var500(x, y, w, m, k, sampler):
grad_sum =0
batch_sum = 0
batch_sum_list = []
batch_avg =0
var_list = []
grad_2d = []
var_j = 0
for i in range(0, k): #500 iters
X_b, y_b = sampler.get_batch(m)
#print len(X_b)
#for j in range(m):
batch_grad = lin_reg_gradient(X_b, y_b,w) #have 13
num = int(np.shape(batch_grad)[0])
grad_2d.append(batch_grad)
#batch_sum = np.sum(batch_grad)
#batch_sum_list.append(batch_sum)
#batch_avg = batch_sum/m
for j in range(0, num):
for i in range(0, k):
batch_sum += grad_2d[i][j]
batch_avg = batch_sum/float(k)
for i in range(0,k):
var_j += (grad_2d[i][j] - batch_avg)*(grad_2d[i][j] - batch_avg)
var_ret = var_j / float(k)
var_list.append(var_ret)
#print "batch_grad ", batch_grad
#print "grad", grad_sum
#b_grad = grad_sum / k
#print "b_grad", b_grad
return np.array(var_list)
def grad_real(x, y, w):
real_grad = lin_reg_gradient(x, y, w)
#print "real grad ", real_grad
#grad_sum +=batch_grad
#b_grad = grad_sum / k
#print batch_grad
return real_grad
def plot_log(m, sigma):
print "----plotint var---"
for i in range(13): #np.size(sigma):
print np.shape(sigma)
plt.plot(np.log(m), np.log(sigma[i]))
#plt.yscale('log')
plt.show()
def main():
    # Load data and randomly initialise weights
X, y, w = load_data_and_init_params()
# Create a batch sampler to generate random batches from data
batch_sampler = BatchSampler(X, y, BATCHES)
# Example usage
#for K = 500
k = 500
m = 50
batch_grad = grad_500(X, y, w, m, k, batch_sampler)
print "batch grad", batch_grad
#print "final avg,", batch_grad
true_grad = grad_real(X, y, w)
#compute diff
diff_sq = square_metric(batch_grad, true_grad)
diff_cos = cosine_similarity(batch_grad, true_grad)
print "diff_sq", diff_sq
print "diff cos", diff_cos
#varience
b_m_grad = []
sigma = []
for m1 in range(1,401):
#X_b, y_b = batch_sampler.get_batch(m1)
b_m = grad_var500(X, y, w, m1, k, batch_sampler)
#print "sig per", sigma_per
#sigma_per = var_grad(b_m)
sigma.append(b_m)
print ("sigma")
sigma = np.array(sigma)
sigma_reshape = np.transpose(sigma)
sigma_reshape = np.flip(sigma_reshape, 1)
#print ("sigma len ", len(sigma)) 400
print ("sigma len 1", len(sigma_reshape[0])) #13
m_plot= np.arange(1,401)
print type(m_plot)
plot_log(m_plot, sigma_reshape)
#reinit m for plotting
#print "sigma", sigma
#square Diff = 79165708.6263
#cosine similarity, diif_cos = 0.999995845102
if __name__ == '__main__':
main()
|
violetguos/intro_machine_learning
|
a3/q2.py
|
import numpy as np
from sklearn.datasets import fetch_mldata
import matplotlib.pyplot as plt
import math
np.random.seed(1847)
class BatchSampler(object):
'''
A (very) simple wrapper to randomly sample batches without replacement.
You shouldn't need to touch this.
'''
def __init__(self, data, targets, batch_size):
self.num_points = data.shape[0]
self.features = data.shape[1]
self.batch_size = batch_size
self.data = data
self.targets = targets
self.indices = np.arange(self.num_points)
def random_batch_indices(self, m=None):
'''
Get random batch indices without replacement from the dataset.
If m is given the batch will be of size m. Otherwise will default to the class initialized value.
'''
if m is None:
indices = np.random.choice(self.indices, self.batch_size, replace=False)
else:
indices = np.random.choice(self.indices, m, replace=False)
return indices
def get_batch(self, m=None):
'''
Get a random batch without replacement from the dataset.
If m is given the batch will be of size m. Otherwise will default to the class initialized value.
'''
indices = self.random_batch_indices(m)
X_batch = np.take(self.data, indices, 0)
y_batch = self.targets[indices]
return X_batch, y_batch
class GDOptimizer(object):
'''
A gradient descent optimizer with momentum
lr - learning rate
beta - momentum hyperparameter
'''
def __init__(self, lr, beta=0.0, vel = 0.0):
self.lr = lr
self.beta = beta
self.vel = vel
def update_params(self, params, grad):
# Update parameters using GD with momentum and return
# the updated parameters
#print "val", self.vel
v_t_plus = self.beta * self.vel
#print "vt", v_t_plus
v_t_plus = v_t_plus + grad
params = params - (self.lr * v_t_plus)
self.vel = v_t_plus
return params
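# A minimal usage sketch of the momentum update above (hypothetical values,
# not called anywhere): each call applies v <- beta*v + grad, params <- params - lr*v.
def _gd_momentum_demo():
    opt = GDOptimizer(lr=0.1, beta=0.9)
    w = 5.0
    for _ in range(3):
        w = opt.update_params(w, 2.0 * w)  # gradient of f(w) = w**2
    return w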
class SVM(object):
'''
A Support Vector Machine
'''
def __init__(self, c, feature_count):
self.c = c
self.w = np.random.normal(0.0, 0.1, feature_count)
self.b = 0
#self.feature_count = feature_count
def hinge_loss(self, X, y):
'''
Compute the hinge-loss for input data X (shape (n, m)) with target y (shape (n,)).
Returns a length-n vector containing the hinge-loss per data point.
'''
# Implement hinge loss
#print self.w.shape (784,)
#print X.shape (100, 784)
#print y.shape (100)
wt = np.transpose(self.w)
#print self.w
wtx = np.dot(X, self.w)
        wtx_plus_c = wtx + self.b  # scores w.x + b, shape (n,)
#print wtx_plus_c.shape
n = X.shape[0]
l_hinge = np.zeros(n)
for i in range(n):
            l_hinge[i] = max(1 - y[i] * wtx_plus_c[i], 0)  # hinge: max(1 - y*(w.x + b), 0)
return l_hinge
def grad(self, X, y):
'''
Compute the gradient of the SVM objective for input data X (shape (n, m))
with target y (shape (n,))
Returns the gradient with respect to the SVM parameters (shape (m,)).
'''
# Compute (sub-)gradient of SVM objective
n, m = X.shape
xt= np.transpose(X)
sum_vec = np.dot(xt, y)
grad = self.w
c_over_n = self.c * 1.0 / n
c_over_n_arr = [c_over_n * 1.0] * m
c_over_n_arr[0] = 1 #grad[0]
c_over_n_arr = np.asarray(c_over_n_arr)
reg_vec = np.multiply(sum_vec, c_over_n_arr)
grad = grad - reg_vec #/ n
return (grad)
def classify(self, X):
'''
Classify new input data matrix (shape (n,m)).
Returns the predicted class labels (shape (n,))
'''
# Classify points as +1 or -1
#w is shape m
n, m = X.shape #(784, 2757)
xt = np.transpose(X)
#print xt
xtw = np.dot(X, self.w)
#print("######self b ", self.b)
y = xtw +self.b
#print "y_ classify ", y
#print y[0]
res = np.zeros(n)
for i in range(n):
if y[i] > 0:
res[i] = 1.0
else:
res[i] = -1.0
return res
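# A hedged sketch of the textbook SVM (sub)gradient, shown only for comparison
# with SVM.grad above (which uses a simplified update): the regularizer
# contributes w, and each margin-violating point (y * w.x < 1) contributes
# -C/n * y_i * x_i. Hypothetical helper, not called anywhere.
def svm_subgradient_sketch(w, X, y, c):
    n = X.shape[0]
    violating = y * np.dot(X, w) < 1
    return w - (c / float(n)) * np.dot(X[violating].T, y[violating])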
def load_data():
'''
Load MNIST data (4 and 9 only) and split into train and test
'''
mnist = fetch_mldata('MNIST original', data_home='./data')
label_4 = (mnist.target == 4)
label_9 = (mnist.target == 9)
data_4, targets_4 = mnist.data[label_4], np.ones(np.sum(label_4))
data_9, targets_9 = mnist.data[label_9], -np.ones(np.sum(label_9))
data = np.concatenate([data_4, data_9], 0)
data = data / 255.0
targets = np.concatenate([targets_4, targets_9], 0)
permuted = np.random.permutation(data.shape[0])
train_size = int(np.floor(data.shape[0] * 0.8))
train_data, train_targets = data[permuted[:train_size]], targets[permuted[:train_size]]
test_data, test_targets = data[permuted[train_size:]], targets[permuted[train_size:]]
print("Data Loaded")
print("Train size: {}".format(train_size))
print("Test size: {}".format(data.shape[0] - train_size))
print("-------------------------------")
return train_data, train_targets, test_data, test_targets
def optimize_test_function(optimizer, w_init=10.0, steps=200):
'''
Optimize the simple quadratic test function and return the parameter history.
'''
def func(x):
return 0.01 * x * x
def func_grad(x):
return 0.02 * x
w = w_init
w_history = [w_init]
for i in range(steps):
# Optimize and update the history
grad = func_grad(w)
w = optimizer.update_params(w, grad)
w_history.append(w)
return w_history
def optimize_svm(train_data, train_targets, penalty, optimizer, batchsize, iters):
'''
Optimize the SVM with the given hyperparameters. Return the trained SVM.
SVM weights can be updated using the attribute 'w'. i.e. 'svm.w = updated_weights'
penalty is penalty = C in the equation
'''
#sample, each penalty
n, m = train_data.shape
svm = SVM(penalty, m)
w_init = np.sum(svm.w)
#print w_init
batch_sampler = BatchSampler(train_data, train_targets, batchsize)
for i in range(iters):
grad_estimate = 0
for j in range(int(n/batchsize)):
batch_train, batch_targets = batch_sampler.get_batch()
svm_grad = svm.grad(batch_train, batch_targets)
grad_estimate += (optimizer.update_params(svm.w, svm_grad))# + hinge_loss #+ h_loss))
svm.w = grad_estimate/(n/batchsize)
svm.b = -penalty * 1.0/m * np.sum(train_targets)#(batch_targets)
return svm
def plot_w(w):
i_mean_matrix = np.reshape(w, (28,28))
plt.imshow(i_mean_matrix, cmap='gray')
plt.show()
#plt.plot(w)
#plt.show()
def accuracy_func(res, targets):
'''
simple accuracy calculation
'''
n = len(res) #targets.shape[0]
accurate = 0
for i in range(n):
#print i, j
if res[i] == targets[i]:
#print i, j
accurate = accurate + 1
return 1.0*(accurate)/n
def hinge_avg(hinge):
return np.mean(hinge)
def q2_3_ans1(accu):
print "test accuracy ", accu
def q2_3_ans2(accu):
print "train accuracy ", accu
if __name__ == '__main__':
gd1 = GDOptimizer(1, 0)
opt_test_1 = optimize_test_function(gd1)
gd2 = GDOptimizer(1, 0.9)
opt_test_2 = optimize_test_function(gd2)
print "========Q2.1 start =============="
print "=====opt test beta = 0===="
plt.plot(opt_test_1, label = 'beta = 0 ')
#plt.show()
print "======opt test beta = 0.9====="
plt.plot(opt_test_2, label = 'beta = 0.9')
plt.title("SGD test")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
print "=======Q2.1 end ==============="
print
print "==========Q2.2 and 2.3 SVM ==========="
gd1 = GDOptimizer(0.05, 0, 0)
train_data, train_targets, test_data, test_targets = load_data()
    #The bias term is handled via svm.b rather than by appending a constant-one
    #column here (note: np.insert returns a new array, so the original calls as
    #written would not have modified train_data/test_data in place anyway).
    n_train, m_train = train_data.shape
    n_test, m_test = test_data.shape
penalty = 1
res = optimize_svm(train_data, train_targets, penalty, gd1, 100, 500)
pred_train = res.classify(train_data)
pred_test = res.classify(test_data)
print "======= SVM , momentum = 0 ======="
#print "weight, ", res.w
print "svm hinge loss train ", hinge_avg(res.hinge_loss(train_data, train_targets))
print "svm hinge loss test ", hinge_avg(res.hinge_loss(test_data, test_targets))
q2_3_ans1(accuracy_func(pred_train, train_targets))
q2_3_ans2(accuracy_func(pred_test, test_targets))
print "plot W, momemtum = 0"
plot_w(res.w)
print "======= SVM, momentum = 0.1 ======="
gd2 = GDOptimizer(0.05, 0.1, 0)
res2 = optimize_svm(train_data, train_targets, penalty, gd2, 100, 500)
pred_test2 = res2.classify(test_data)
pred_train2 = res2.classify(train_data)
#print "weight with momentum ", res2.w
print "svm hinge loss train ", hinge_avg(res2.hinge_loss(train_data, train_targets))
print "svm hinge loss test ", hinge_avg(res2.hinge_loss(test_data, test_targets))
q2_3_ans1(accuracy_func(pred_train2, train_targets))
q2_3_ans2(accuracy_func(pred_test2, test_targets))
print "plot W, momemtum = 0.1"
plot_w(res2.w)
|
violetguos/intro_machine_learning
|
a2/code/q2_3.py
|
<reponame>violetguos/intro_machine_learning
'''
Question 2.3 Skeleton Code
Here you should implement and evaluate the Naive Bayes classifier.
'''
import data
import numpy as np
# Import pyplot - plt.imshow is useful!
import matplotlib.pyplot as plt
def binarize_data(pixel_values):
'''
Binarize the data by thresholding around 0.5
'''
return np.where(pixel_values > 0.5, 1.0, 0.0)
def compute_parameters(train_data, train_labels):
'''
Compute the eta MAP estimate/MLE with augmented data
You should return a numpy array of shape (10, 64)
where the ith row corresponds to the ith digit class.
'''
#make a hash table list, i is label, nc is total count
eta = np.zeros((10, 64))
nc = np.zeros((10, 64))
    #for each class k and each pixel, count how many of the 700 binarized samples are 1
    #then apply the Beta(2,2) prior (add-one smoothing on both counts)
for i in range(0, 10):
i_digits = data.get_digits_by_label(train_data, train_labels, i)
for j in range(0, 700):
for k in range(0, 64):
if i_digits[j][k] == 1:
nc[i][k] +=1
#calculate beta(2,2)
for i in range(0, 10):
for j in range(0, 64):
            eta[i][j] = 1.0 * (nc[i][j] + 2 - 1) / (700 + 2 + 2 - 2)  # MAP with Beta(2,2): (N1 + a - 1)/(N + a + b - 2)
#print "nc_list", (nc)
#print eta
return eta
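# A vectorized sketch of the same Beta(2,2) MAP estimate: for each class,
# eta_kj = (count of ones + 1) / (number of samples + 2). Matches the loops
# above when every class has exactly 700 samples. Illustrative only.
def compute_parameters_vectorized(train_data, train_labels):
    eta = np.zeros((10, 64))
    for i in range(10):
        i_digits = data.get_digits_by_label(train_data, train_labels, i)
        eta[i] = (np.sum(i_digits, axis=0) + 1.0) / (i_digits.shape[0] + 2.0)
    return eta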
def plot_images(class_images):
'''
Plot each of the images corresponding to each class side by side in grayscale
'''
img_matrix = []
for i in range(10):
img_i = class_images[i]
i_matrix = np.zeros((8,8))
i_matrix = np.reshape(img_i, (8,8))
img_matrix.append(i_matrix)
all_concat = np.concatenate(img_matrix,1)
plt.imshow(all_concat, cmap='gray')
plt.show()
def generate_new_data(eta):
'''
Sample a new data point from your generative distribution p(x|y,theta) for
each value of y in the range 0...10
Plot these values
'''
generated_data = np.zeros((10, 64))
for i in range(0, 10):
for j in range(0, 64):
            # sample each pixel from Bernoulli(eta[i][j]) so this is a draw from
            # p(x|y, eta) rather than a threshold of the mean image
            generated_data[i][j] = np.random.binomial(1, eta[i][j])
plot_images(generated_data)
def generative_likelihood(bin_digits, eta):
'''
Compute the generative log-likelihood:
log p(x|y, eta)
Should return an n x 10 numpy array
'''
n = bin_digits.shape[0]
log_p_x = np.zeros((n, 10))
for i in range(0,n):
for j in range(0, 10):
w0c = 0
for k in range(0,64):
nkj = (eta[j][k]) **(bin_digits[i][k])
one_min_nkj = (1 -eta[j][k]) **(1 - bin_digits[i][k])
w0c += (np.log(nkj) + np.log(one_min_nkj))
log_p_x[i][j] = w0c
#print log_p_x
return log_p_x
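# A hedged vectorized equivalent of the triple loop above, using matrix
# products over the Bernoulli log-likelihood terms. Illustrative only.
def generative_likelihood_vectorized(bin_digits, eta):
    return np.dot(bin_digits, np.log(eta).T) + np.dot(1.0 - bin_digits, np.log(1.0 - eta).T)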
def conditional_likelihood(bin_digits, eta):
'''
Compute the conditional likelihood:
log p(y|x, eta)
This should be a numpy array of shape (n, 10)
Where n is the number of datapoints and 10 corresponds to each digit class
'''
n = bin_digits.shape[0]
#print "n",n, " ", bin_digits.shape[1]
p_y_x= generative_likelihood(bin_digits, eta)
#P(y = c | x , theta) = 0.1 * p(x| y = c)
'''
for i in range(0, n):
for j in range(0, 10):
w0c = 0
wcj = 0
for k in range(0, 64):
wcj += bin_digits[i][k] * np.log((eta[j][k])/(1- eta[j][k]))
w0c += np.log(1- eta[j][k])
'''
bc = np.log(0.1)
#print "-------in cond likelihood"
#print "-----before add"
#print p_y_x
p_y_x += bc
#print p_y_x
return p_y_x
def avg_conditional_likelihood(bin_digits, labels, eta):
'''
Compute the average conditional likelihood over the true class labels
AVG( log p(y_i|x_i, eta) )
i.e. the average log likelihood that the model assigns to the correct class label
'''
cond_likelihood = conditional_likelihood(bin_digits, eta)
# Compute as described above and return
n = len(bin_digits)
p_y = 0
avg_p_y = 0
for i in range(0,n):
avg_item = (cond_likelihood[i,:])
#cond_label = avg_item.argmin() #most probable, prediction
cond_label = labels[i]
p_y += cond_likelihood[i][int(cond_label)]
avg_p_y = p_y / n
print "-------------in avg cond likelihood--------"
print avg_p_y
return avg_p_y
def classify_data(bin_digits, eta):
'''
Classify new points by taking the most likely posterior class
'''
cond_likelihood = conditional_likelihood(bin_digits, eta)
n = bin_digits.shape[0]
new_points = np.zeros(n)
for i in range(0, n):
#print cond_likelihood[i]
test = cond_likelihood[i]
new_points[i] =np.argmax(test)
return new_points
def classify_accuracy(predict_label, real_label):
n = real_label.shape[0]
accurate_class = 0
for i in range(0,n):
if predict_label[i] == real_label[i]:
accurate_class += 1
print "-------classify accuracy", (1.0 * accurate_class / n)
def main():
train_data, train_labels, test_data, test_labels = data.load_all_data('data')
train_data, test_data = binarize_data(train_data), binarize_data(test_data)
# Fit the model
eta = compute_parameters(train_data, train_labels)
#Q2=------new images------
# Evaluation
print "===========Q2.3========="
print "eta image"
plot_images(eta)
print "new sample image"
generate_new_data(eta)
print "---------Q 2.3.2---------"
train_predict = classify_data(train_data, eta)
test_predict = classify_data(test_data, eta)
print "---------avg likelihood----------"
avg_train = avg_conditional_likelihood(train_data, train_labels, eta)
avg_test = avg_conditional_likelihood(test_data, test_labels, eta)
print "---------Q 2.3.6 Predication accuracy----"
train_acc = classify_accuracy(train_predict, train_labels)
print "train accuracy", train_acc
test_acc = classify_accuracy(test_predict, test_labels)
print "test accuracy", test_acc
if __name__ == '__main__':
main()
|
violetguos/intro_machine_learning
|
a3/q1.py
|
<gh_stars>0
'''
Question 1 Skeleton Code
'''
import sklearn
import numpy as np
from sklearn.model_selection import KFold
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.naive_bayes import BernoulliNB
import sklearn.neighbors
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report,confusion_matrix
from sklearn.feature_selection import SelectFromModel
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.cluster import KMeans
from sklearn import tree
from pprint import pprint
def load_data():
# import and filter data
newsgroups_train = fetch_20newsgroups(subset='train',remove=('headers', 'footers', 'quotes'))
newsgroups_test = fetch_20newsgroups(subset='test',remove=('headers', 'footers', 'quotes'))
    categories = newsgroups_train.target_names
    return newsgroups_train, newsgroups_test, categories
def bow_features(train_data, test_data):
# Bag-of-words representation
bow_vectorize = CountVectorizer()
bow_train = bow_vectorize.fit_transform(train_data.data) #bag-of-word features for training data
bow_test = bow_vectorize.transform(test_data.data)
feature_names = bow_vectorize.get_feature_names() #converts feature index to the word it represents.
shape = bow_train.shape
print('{} train data points.'.format(shape[0]))
print('{} feature dimension.'.format(shape[1]))
print('Most common word in training set is "{}"'.format(feature_names[bow_train.sum(axis=0).argmax()]))
return bow_train, bow_test, feature_names
def tf_idf_features(train_data, test_data):
    # TF-IDF representation
    tf_idf_vectorize = TfidfVectorizer()
    tf_idf_train = tf_idf_vectorize.fit_transform(train_data.data) #tf-idf features for training data
feature_names = tf_idf_vectorize.get_feature_names() #converts feature index to the word it represents.
tf_idf_test = tf_idf_vectorize.transform(test_data.data)
return tf_idf_train, tf_idf_test, feature_names
def confusion_mat(true_labels, predict_labels):
#number of unique labels
#unique_true_labels = set(true_labels)
conf = np.zeros((20,20))
#count number of labels for each of the 20 categories
#for i in range(20):
print "******************NOTICE ME***************"
print len(true_labels)
for j in range(len(predict_labels)):
curr_true_class = true_labels[j]
#for i in range(len(predict_labels)):
curr_pred_class = predict_labels[j]#0 to 19
conf[int(curr_pred_class)][int(curr_true_class)] +=1
return conf
def most_confused_class(conf_mat):
conf_max = 0
for i in range(20):
for j in range(20):
if i != j:
if conf_mat[i][j] > conf_max:
ci = i
cj = j
conf_max = conf_mat[i][j]
return ci, cj #np.unravel_index(conf_mat.argmax(), conf_mat.shape)
def bnb_baseline(bow_train, train_labels, bow_test, test_labels):
# training the baseline model
binary_train = (bow_train>0).astype(int)
binary_test = (bow_test>0).astype(int)
model = BernoulliNB()
model.fit(binary_train, train_labels)
#evaluate the baseline model
train_pred = model.predict(binary_train)
print('BernoulliNB baseline train accuracy = {}'.format((train_pred == train_labels).mean()))
test_pred = model.predict(binary_test)
print('BernoulliNB baseline test accuracy = {}'.format((test_pred == test_labels).mean()))
return model
def svm_cross_val(X_train, y_train, X_test, y_test):
rand_states = [0]# [0, 10, 20, 30, 40, 50]
all_accuracy = []
for rand in rand_states:
# Loop over folds
# Evaluate SVMs
# ...
kf = KFold(n_splits=10)
fold_test_accuracy = []
for train_index, test_index in kf.split(X_train):
x_train_fold, x_test_fold = X_train[train_index], X_train[test_index]
y_train_fold, y_test_fold = y_train[train_index], y_train[test_index]
svm_res = svm_news(x_train_fold, y_train_fold, x_test_fold, y_test_fold, rand)
fold_test_accuracy.append(svm_res)
fold_accuracy = (1.0 *sum(fold_test_accuracy)) / (1.0 *len(fold_test_accuracy))
all_accuracy.append(fold_accuracy)
all_accuracy = np.array(all_accuracy)
optimal_rand = all_accuracy.argmax()
print "Cross Validate result: rand state = ", optimal_rand
#use the optimal, get the confusion matrix
def svm_news(X_train, y_train, X_test, y_test, rand_, y_names=None, confusion=False):
'''
predicting using SVM
'''
print "======================"
print "SVM algorithm, hyper random state = ", rand_
clf = LinearSVC(random_state=rand_)
#clf = sklearn.neighbors.KNeighborsClassifier(n_neighbors)#, weights=weights)
clf.fit(X_train, y_train)
train_pred = clf.predict(X_train)
print('svm train accuracy = {}'.format((train_pred == y_train).mean()))
    #print('svm train confusion matrix')  # no train confusion matrix is printed; see the test matrix below
test_pred = clf.predict(X_test)
test_conf = confusion_mat(y_test, test_pred)
test_accuracy = (test_pred == y_test).mean()
print('svm test accuracy = {}'.format((test_pred == y_test).mean()))
    print('svm test confusion matrix')
#pprint(test_conf.tolist())
print test_conf
ci, cj = most_confused_class(test_conf)
#print "u est shape", y_names.shape
print "most confused classes = ", ci, y_names[ci], cj, y_names[cj]
#for latex
#for i in range(20):
# for j in range(20):
# if j < 19:
# print test_conf[i][j], '&',
# else:
# print test_conf[i][j], '\\\\'
return test_accuracy
def rand_forest_cross_val(X_train, y_train, X_test, y_test):
num_est_arr = [10, 30, 50, 80, 100, 120, 150]
best_est = [150]
all_accuracy = []
for num_est in num_est_arr:
# Loop over folds
        # Evaluate random forests
# ...
kf = KFold(n_splits=10)
fold_test_accuracy = []
for train_index, test_index in kf.split(X_train):
x_train_fold, x_test_fold = X_train[train_index], X_train[test_index]
y_train_fold, y_test_fold = y_train[train_index], y_train[test_index]
rand_forest_res = rand_forest_news(x_train_fold, y_train_fold, x_test_fold, y_test_fold, num_est)
fold_test_accuracy.append(rand_forest_res)
fold_accuracy = (1.0 *sum(fold_test_accuracy)) / (1.0 *len(fold_test_accuracy))
all_accuracy.append(fold_accuracy)
all_accuracy = np.array(all_accuracy)
optimal_rand = all_accuracy.argmax()
print "Cross Validate result: rand state = ", num_est_arr[optimal_rand]
def rand_forest_news(X_train, y_train, X_test, y_test, n_estimate, y_names=None, confusion=False):
clf = RandomForestClassifier(n_estimators= n_estimate)
clf.fit(X_train, y_train)
#evaluate accuracy
print "=============="
print "Random forest ensamble algorithm"
print "Fold with num_estimators = ",n_estimate
train_pred = clf.predict(X_train)
print('rand forest baseline train accuracy = {}'.format((train_pred == y_train).mean()))
test_pred = clf.predict(X_test)
print('rand forest baseline test accuracy = {}'.format((test_pred == y_test).mean()))
test_accuracy = (test_pred == y_test).mean()
return test_accuracy
def nn_news_cross_val(X_train, y_train, X_test, y_test):
nn_layers = {
        'Single hidden layer (10 units)': MLPClassifier(hidden_layer_sizes=(10,)),
'hidden layer: (20, 10)':MLPClassifier(hidden_layer_sizes=(20,10 )),
#'hidden layer: (1, 2, 1)': MLPClassifier(hidden_layer_sizes=(1, 2, 1)),
'hidden layer: (5, 10, 5)':MLPClassifier(hidden_layer_sizes=(5, 10, 5)),
'hidden layer: (10, 20, 10)': MLPClassifier(hidden_layer_sizes=(10, 20, 10)),
'hidden layer: (15, 25, 15)': MLPClassifier(hidden_layer_sizes=(15, 25, 15)),
}
all_accuracy = []
for cls_name, cls in nn_layers.items():
# Loop over folds
# Evaluate NNs
print "NN fold with layer ",cls_name
kf = KFold(n_splits=10)
fold_test_accuracy = []
for train_index, test_index in kf.split(X_train):
x_train_fold, x_test_fold = X_train[train_index], X_train[test_index]
y_train_fold, y_test_fold = y_train[train_index], y_train[test_index]
nn_res = nn_news(cls, x_train_fold, y_train_fold, x_test_fold, y_test_fold)
fold_test_accuracy.append(nn_res)
fold_accuracy = (1.0 *sum(fold_test_accuracy)) / (1.0 *len(fold_test_accuracy))
all_accuracy.append(fold_accuracy)
all_accuracy = np.array(all_accuracy)
optimal_rand = all_accuracy.argmax()
print "Cross Validate result: nn layer = ", nn_layers.items()[optimal_rand][0]
def nn_news(cls, X_train, y_train, X_test, y_test, y_names=None, confusion=False):
cls.fit(X_train,y_train)
predictions = cls.predict(X_test)
train_pred = cls.predict(X_train)
print "======================="
print('nn train accuracy = {}'.format((train_pred == y_train).mean()))
test_pred = cls.predict(X_test)
print('nn test accuracy = {}'.format((test_pred == y_test).mean()))
test_accuracy = (test_pred == y_test).mean()
return test_accuracy
##############Methods that did not work well#####################
'''
def decision_tree_news(X_train, y_train, X_test, y_test,k_, feature_sel = True, y_names=None, confusion=False):
clf = tree.DecisionTreeClassifier(criterion = "gini")
#clf = tree.DecisionTreeRegressor()
if feature_sel:
ch2 = SelectKBest(chi2, k=k_)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
clf = clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
print(sklearn.metrics.accuracy_score(y_test,predictions))
'''
if __name__ == '__main__':
#==============================================================================
#NOTE: categories would return a list =
# ['alt.atheism',
# 'comp.graphics',
# 'comp.os.ms-windows.misc',
# 'comp.sys.ibm.pc.hardware',
# 'comp.sys.mac.hardware',
# 'comp.windows.x',
# 'misc.forsale',
# 'rec.autos',
# 'rec.motorcycles',
# 'rec.sport.baseball',
# 'rec.sport.hockey',
# 'sci.crypt',
# 'sci.electronics',
# 'sci.med',
# 'sci.space',
# 'soc.religion.christian',
# 'talk.politics.guns',
# 'talk.politics.mideast',
# 'talk.politics.misc',
# 'talk.religion.misc']
    #ONLY FOR DEBUGGING purposes!
#==============================================================================
train_data, test_data, categories_20 = load_data()
train_bow, test_bow, feature_names = bow_features(train_data, test_data)
bnb_model = bnb_baseline(train_bow, train_data.target, test_bow, test_data.target)
train_tf, test_tf, feature_tf_names = tf_idf_features(train_data, test_data)
#TOP 3 algorithms
#SVM is the best
svm_cross_val(train_tf, train_data.target, test_tf, test_data.target)
rand_forest_cross_val(train_tf, train_data.target, test_tf, test_data.target)
nn_news_cross_val(train_tf, train_data.target, test_tf, test_data.target)
    single_nn = MLPClassifier(hidden_layer_sizes=(10,))
#final result with The picked hyperparameters
svm_news(train_tf, train_data.target, test_tf, test_data.target, 0 , categories_20, confusion=False)
nn_news(single_nn, train_tf, train_data.target, test_tf, test_data.target)
rand_forest_news(train_tf, train_data.target, test_tf, test_data.target, 150)
|
violetguos/intro_machine_learning
|
a1/q2.py
|
<reponame>violetguos/intro_machine_learning<filename>a1/q2.py
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 12 20:39:09 2017
"""
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_boston
np.random.seed(0)
from scipy.misc import logsumexp
######################################################
# This is a program that performs k-fold validation   #
# and Locally reweighted least squares #
######################################################
# load boston housing prices dataset
boston = load_boston()
x = boston['data']
N = x.shape[0]
x = np.concatenate((np.ones((506,1)),x),axis=1) #add constant one feature - no bias needed
d = x.shape[1]
y = boston['target']
idx = np.random.permutation(range(N))
#helper function
def l2(A,B):
'''
Input: A is a Nxd matrix
B is a Mxd matirx
Output: dist is a NxM matrix where dist[i,j] is the square norm between A[i,:] and B[j,:]
i.e. dist[i,j] = ||A[i,:]-B[j,:]||^2
'''
A_norm = (A**2).sum(axis=1).reshape(A.shape[0],1)
B_norm = (B**2).sum(axis=1).reshape(1,B.shape[0])
dist = A_norm+B_norm-2*A.dot(B.transpose())
return dist
#helper function
def run_on_fold(x_test, y_test, x_train, y_train, taus):
'''
Input: x_test is the N_test x d design matrix
y_test is the N_test x 1 targets vector
x_train is the N_train x d design matrix
y_train is the N_train x 1 targets vector
taus is a vector of tau values to evaluate
output: losses a vector of average losses one for each tau value
'''
N_test = x_test.shape[0]
losses = np.zeros(taus.shape)
for j,tau in enumerate(taus):
predictions = np.array([LRLS(x_test[i,:].reshape(d,1),x_train,y_train, tau) \
for i in range(N_test)])
losses[j] = ((predictions.flatten()-y_test.flatten())**2).mean()
print("Running on 1 of k folds")
return losses
def LRLS(test_datum,x_train,y_train, tau,lam=1e-5):
'''
Input: test_datum is a dx1 test vector
x_train is the N_train x d design matrix
y_train is the N_train x 1 targets vector
tau is the local reweighting parameter
lam is the regularization parameter
output is y_hat the prediction on test_datum
'''
## TODO
a_ii_denom_arr = [] #store each j
#a_ii_denom_sum = np.matrix([]) #store dist matrix N by d
a_ii_denom = [] #store the log sum over j
a_ii = [] #the Array that stores each exp over exp sum
x_x_dist = l2(np.transpose(test_datum), x_train) #N_train by d matrix
#print ("x x dist. ", x_x_dist.shape())
rows = x_x_dist.shape[0] #1
cols = x_x_dist.shape[1] #304
#print("row", rows)
#print("cols", cols)
#sum over the column
for j in range(0, cols):
#append all the column values
a_ii_denom_arr.append(- x_x_dist[0][j]/ (2 * tau**2))
a_ii_denom_arr = np.array(a_ii_denom_arr)
a_ii_denom_log = logsumexp(a_ii_denom_arr)
a_ii_denom.append(np.exp(a_ii_denom_log))
a_ii_denom = np.array(a_ii_denom)
for j in range(0, cols):
a_ii_nom = np.exp(- x_x_dist[0][j]/ (2 * tau**2))
a_ii.append(a_ii_nom / a_ii_denom)
a_ii = np.array(a_ii)
Aii = np.diagflat(a_ii) #A must be N by N
    # w* = (X^T A X + lambda*I)^(-1) X^T A y, solved via (X^T A X + lambda*I) w = X^T A y
lam_i = lam * np.identity(len(x_train[1])) #lambda times I
#print("lami", np.shape(lam_i))
#x transpose * a* x + lambda I
#compute x_t times a first
xta = np.dot(np.transpose(x_train), Aii)
xtax_i = np.dot(xta, x_train) + lam_i
#x transpose times A * Y
xtay = np.dot(xta, y_train )
w = np.linalg.solve(xtax_i, xtay)
y_hat = np.dot(w, test_datum)
return y_hat
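# A hedged vectorized sketch of the A_ii weights built above: work directly in
# log space over the whole distance row and exponentiate once, using the
# logsumexp already imported in this file. Hypothetical helper, not called by LRLS.
def lrls_weights_sketch(test_datum, x_train, tau):
    dist = l2(np.transpose(test_datum), x_train)[0]   # shape (N_train,)
    logw = -dist / (2.0 * tau ** 2)
    a_ii = np.exp(logw - logsumexp(logw))             # weights sum to 1
    return np.diagflat(a_ii)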
def partition_k(x, y, num, i):
'''
returns x_train, x_test, y_train, y_test
'''
x_test = x[(i*num):((i+1)*num):,] #select the test bit
A= x[0:(i*num),:]
B = x[((i+1)*num): ,: ]
if len(A) ==0:
x_train = B
else:
x_train = np.concatenate((A, B), axis =0) #select the rest 304x14
y_test = y[(i*num):((i+1)*num)] #select elems from array
y_train = np.concatenate([y[0:(i)*num], y[(i+1)*num:]]) #select the rest 304
return x_test, y_test, x_train, y_train
def run_k_fold(x,y,taus,k):
'''
Input: x is the N x d design matrix
y is the N x 1 targets vector
taus is a vector of tau values to evaluate
K in the number of folds
output is losses a vector of k-fold cross validation losses one for each tau value
'''
## TODO
N = len(y)
num_per_fold = N//k #floor division
losses =[]
for i in range(0, k):
x_test, y_test,x_train, y_train = partition_k(x,y,num_per_fold, i)
per_losses = run_on_fold(x_test, y_test, x_train, y_train, taus)
losses.append(per_losses)
return np.array(losses)
#def average_ith(losses):
# rows = len(losses[1]) #should be 5
# cols = len(losses[0])
# sum_list = []
# for i in range(0, cols):
# sum1 = 0
# for j in range(0, rows):
# sum1 +=losses[i][j]
# sum1 = sum1/rows
# sum_list.append(sum1)
#
# return np.array(sum_list)
def average_loss_per_tau(losses):
'''
Average loss of a given tau value
'''
avg_list = []
for i in range(len(losses[0])):
sum_tau = 0
for j in range(0, 5):
sum_tau += losses[j][i]
avg = sum_tau / float(5)
avg_list.append(avg)
return avg_list
if __name__ == "__main__":
    # In this exercise we fix lambda (hard coded to 1e-5) and only vary the tau value.
# Feel free to play with lambda as well if you wish
print ("--------Loading and Computing--------------")
taus = np.logspace(1,3,400)
losses = run_k_fold(x,y,taus,k=5)
for i in range(0,5):
plt.plot(taus, losses[i])
plt.ylabel("losses")
plt.xlabel("taus")
plt.show()
loss_avg = average_loss_per_tau(losses)
plt.plot(taus, loss_avg)
plt.show()
print("min loss = {}".format(np.array(loss_avg).min()))
|
catusf/tudienanhviet
|
bin/tab2opf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Script for conversion of Stardict tabfile (<header>\t<definition>
# per line) into the OPF file for MobiPocket Dictionary
#
# For usage of dictionary convert it by:
# (wine) mobigen.exe DICTIONARY.opf
# or now...
# kindlegen DICTIONARY.opf
#
# MobiPocket Reader at: www.mobipocket.com for platforms:
# PalmOs, Windows Mobile, Symbian (Series 60, Series 80, 90, UIQ), Psion, Blackberry, Franklin, iLiad (by iRex), BenQ-Siemens, Pepper Pad..
# http://www.mobipocket.com/en/DownloadSoft/DownloadManualInstall.asp
# mobigen.exe available at:
# http://www.mobipocket.com/soft/prcgen/mobigen.zip
#
# Copyright (C) 2007 - <NAME> (www.klokan.cz)
# Copyright (C) 2015 - <NAME> (github.com/apeyser)
#
#
# Version history:
# 0.1 (19.7.2007) Initial version
# 0.2 (2/2015) Rework removing encoding, runs on python3
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
# VERSION
VERSION = "0.2"
import sys
import os
import argparse
from itertools import islice, count, groupby
from contextlib import contextmanager
import importlib
# Hand-made table from PloneTool.py
mapping_custom_1 = {
138: 's', 142: 'z', 154: 's', 158: 'z', 159: 'Y' }
# UnicodeData.txt does not contain normalization of Greek letters.
mapping_greek = {
912: 'i', 913: 'A', 914: 'B', 915: 'G', 916: 'D', 917: 'E', 918: 'Z',
919: 'I', 920: 'TH', 921: 'I', 922: 'K', 923: 'L', 924: 'M', 925: 'N',
926: 'KS', 927: 'O', 928: 'P', 929: 'R', 931: 'S', 932: 'T', 933: 'Y',
934: 'F', 936: 'PS', 937: 'O', 938: 'I', 939: 'Y', 940: 'a', 941: 'e',
943: 'i', 944: 'y', 945: 'a', 946: 'b', 947: 'g', 948: 'd', 949: 'e',
950: 'z', 951: 'i', 952: 'th', 953: 'i', 954: 'k', 955: 'l', 956: 'm',
957: 'n', 958: 'ks', 959: 'o', 960: 'p', 961: 'r', 962: 's', 963: 's',
964: 't', 965: 'y', 966: 'f', 968: 'ps', 969: 'o', 970: 'i', 971: 'y',
972: 'o', 973: 'y' }
# This may be specific to German...
mapping_two_chars = {
140 : 'O', 156: 'o', 196: 'A', 246: 'o', 252: 'u', 214: 'O',
228 : 'a', 220: 'U', 223: 's', 230: 'e', 198: 'E' }
mapping_latin_chars = {
192 : 'A', 193 : 'A', 194 : 'A', 195 : 'a', 197 : 'A', 199 : 'C', 200 : 'E',
201 : 'E', 202 : 'E', 203 : 'E', 204 : 'I', 205 : 'I', 206 : 'I', 207 : 'I',
208 : 'D', 209 : 'N', 210 : 'O', 211 : 'O', 212 : 'O', 213 : 'O', 215 : 'x',
216 : 'O', 217 : 'U', 218 : 'U', 219 : 'U', 221 : 'Y', 224 : 'a', 225 : 'a',
226 : 'a', 227 : 'a', 229 : 'a', 231 : 'c', 232 : 'e', 233 : 'e', 234 : 'e',
235 : 'e', 236 : 'i', 237 : 'i', 238 : 'i', 239 : 'i', 240 : 'd', 241 : 'n',
242 : 'o', 243 : 'o', 244 : 'o', 245 : 'o', 248 : 'o', 249 : 'u', 250 : 'u',
251 : 'u', 253 : 'y', 255 : 'y' }
# Feel free to add new user-defined mapping. Don't forget to update mapping dict
# with your dict.
mapping = {}
mapping.update(mapping_custom_1)
mapping.update(mapping_greek)
mapping.update(mapping_two_chars)
mapping.update(mapping_latin_chars)
inflections = {}
# Stop with the encoding -- it's broken anyhow
# in the kindles and undefined.
def normalizeLetter(ch):
try: ch = mapping[ch]
except KeyError: pass
return ch
def normalizeUnicode(text):
"""
Reduce some characters to something else
"""
return ''.join(normalizeLetter(c) for c in text)
# Args:
# --verbose
# --module: module to load and attempt to extract getdef, getkey & mapping
# --source: source language code (en by default)
# --target: target language code (en by default)
# file: the tab delimited file to read
def parseargs():
    if len(sys.argv) < 2:
print("tab2opf (Stardict->MobiPocket)")
print("------------------------------")
print("Version: %s" % VERSION)
print("Copyright (C) 2007 - <NAME>")
print()
print("Usage: python tab2opf.py [-utf] DICTIONARY.tab")
print()
print("ERROR: You have to specify a .tab file")
sys.exit(1)
parser = argparse.ArgumentParser("tab2opf")
parser.add_argument("-v", "--verbose", help="make verbose",
action="store_true")
parser.add_argument("-m", "--module",
help="Import module for mapping, getkey, getdef")
parser.add_argument("-i", "--inflection", help="Path to inflection file")
parser.add_argument("-s", "--source", default="en", help="Source language")
parser.add_argument("-t", "--target", default="en", help="Target language")
parser.add_argument("file", help="tab file to input")
return parser.parse_args()
def loadmember(mod, attr, dfault):
if hasattr(mod, attr):
print("Loading {} from {}".format(attr, mod.__name__))
globals()[attr] = getattr(mod, attr)
else: globals()[attr] = dfault
def importmod():
global MODULE
if MODULE is None: mod = None
else:
mod = importlib.import_module(MODULE)
print("Loading methods from: {}".format(mod.__file__))
loadmember(mod, 'getkey', lambda key: key)
loadmember(mod, 'getdef', lambda dfn: dfn)
loadmember(mod, 'mapping', {})
# loadmember(mod, 'getInflections', lambda key: key) # Get inflections
args = parseargs()
VERBOSE = args.verbose
FILENAME = args.file
MODULE = args.module
INFLECT = args.inflection
INLANG = args.source
OUTLANG = args.target
importmod()
# add a single [term, definition]
# to defs[key]
# r is a tab split line
def readkey(r, defs):
try: term, defn = r.split('\t',1)
except ValueError:
print("Bad line: '{}'".format(r))
raise
term = term.strip()
defn = getdef(defn)
# defn = defn.replace("\\\\","\\").\
# replace(">", "\\>").\
# replace("<", "\\<").\
# replace("\\n","<br/>\n").\
# strip()
nkey = normalizeUnicode(term)
key = getkey(nkey)
key = key.\
replace('"', "'").\
replace('<', '\\<').\
replace('>', '\\>').\
lower().strip()
nkey = nkey.\
replace('"', "'").\
replace('<', '\\<').\
replace('>', '\\>').\
lower().strip()
if key == '':
raise Exception("Missing key {}".format(term))
if defn == '':
raise Exception("Missing definition {}".format(term))
if VERBOSE: print(key, ":", term)
ndef = [term, defn, key == nkey]
if key in defs: defs[key].append(ndef)
else: defs[key] = [ndef]
# Skip empty lines and lines that only have a comment
def inclline(s):
s = s.lstrip()
return len(s) != 0 and s[0] != '#'
# Open the file containing inflections
# with format: key \t inflections (separated by '|' character)
# for instance:
# sorrow sorrowed|sorrows|sorrowing
#
def readinflections():
if VERBOSE: print("Reading {}".format(INFLECT))
if not INFLECT:
print('No inflection file.')
return None
with open(INFLECT,'r', encoding='utf-8') as fr:
inflections = {}
for l in fr.readlines():
[key, text] = l.strip().split('\t')
items = text.split('|')
inflections[key] = items
print('**** No of inflections: %i' % len(inflections))
return inflections
# Iterate over FILENAME, reading lines of
# term {tab} definition
# skips empty lines and commented out lines
#
def readkeys():
if VERBOSE: print("Reading {}".format(FILENAME))
with open(FILENAME,'r', encoding='utf-8') as fr:
defns = {}
for r in filter(inclline, fr):
readkey(r, defns)
return defns
# Write to key file {name}{n}.html
# put the body inside the context manager
# The onclick here gives a kindlegen warning
# but appears to be necessary to actually
# have a lookup dictionary
@contextmanager
def writekeyfile(name, i):
fname = "{}{}.html".format(name, i)
if VERBOSE: print("Key file: {}".format(fname))
with open(fname, 'w', encoding='utf-8') as to:
to.write("""<?xml version="1.0" encoding="utf-8"?>
<html xmlns:idx="www.mobipocket.com" xmlns:mbp="www.mobipocket.com" xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<mbp:pagebreak/>
<mbp:frameset>
<mbp:slave-frame display="bottom" device="all" breadth="auto" leftmargin="0" rightmargin="0" bottommargin="0" topmargin="0">
<div align="center" bgcolor="yellow"/>
<a onclick="index_search()">Dictionary Search</a>
</div>
</mbp:slave-frame>
<mbp:pagebreak/>
""")
try: yield to
finally:
to.write("""
</mbp:frameset>
</body>
</html>
""")
# Order definitions by keys, then by whether the key
# matches the original term, then by length of term
# then alphabetically
def keyf(defn):
term = defn[0]
if defn[2]: l = 0
else: l = len(term)
return l, term
# Write into to the key, definition pairs
# key -> [[term, defn, key==term]]
def writekey(to, key, defn):
terms = iter(sorted(defn, key=keyf))
for term, g in groupby(terms, key=lambda d: d[0]):
# Build string for inflections, if any
infs = inflections.get(term, None)
if not infs:
infstring = ''
else:
itemstext = ''
for item in infs:
itemstext += r' <idx:iform value="{item}" />'.format(item = item) + '\n'
infstring = '''
<idx:infl>
{itemstext} </idx:infl>'''.format(itemstext = itemstext)
to.write(
"""
<idx:entry name="word" scriptable="yes">
<h2>
<idx:orth value="{key}">{term}{infstring}
</idx:orth>
</h2>
""".format(term=term, key=key, infstring=infstring))
to.write('; '.join(ndefn for _, ndefn, _ in g))
to.write(
"""
</idx:entry>
"""
)
if VERBOSE: print(key)
# Write all the keys, where defns is a map of
# key --> [[term, defn, key==term]...]
# and name is the basename
# The files are split so that there are no more than
# 10,000 keys written to each file (why?? I dunno)
#
# Returns the number of files.
def writekeys(defns, name):
keyit = iter(sorted(defns))
for j in count():
with writekeyfile(name, j) as to:
keys = list(islice(keyit, 10000))
if len(keys) == 0: break
for key in keys:
writekey(to, key, defns[key])
return j+1
# After writing keys, the opf that references all the key files
# is constructed.
# openopf wraps the contents of writeopf
#
@contextmanager
def openopf(ndicts, name):
fname = "%s.opf" % name
if VERBOSE: print("Opf: {}".format(fname))
with open(fname, 'w') as to:
to.write("""<?xml version="1.0"?><!DOCTYPE package SYSTEM "oeb1.ent">
<!-- the command line instruction 'prcgen dictionary.opf' will produce the dictionary.prc file in the same folder-->
<!-- the command line instruction 'mobigen dictionary.opf' will produce the dictionary.mobi file in the same folder-->
<package unique-identifier="uid" xmlns:dc="Dublin Core">
<metadata>
<dc-metadata>
<dc:Identifier id="uid">{name}</dc:Identifier>
<!-- Title of the document -->
<dc:Title><h2>{name}</h2></dc:Title>
<dc:Language>EN</dc:Language>
</dc-metadata>
<x-metadata>
<output encoding="utf-8" flatten-dynamic-dir="yes"/>
<DictionaryInLanguage>{source}</DictionaryInLanguage>
<DictionaryOutLanguage>{target}</DictionaryOutLanguage>
</x-metadata>
</metadata>
<!-- list of all the files needed to produce the .prc file -->
<manifest>
""".format(name=name, source=INLANG, target=OUTLANG))
yield to
to.write("""
<tours/>
<guide> <reference type="search" title="Dictionary Search" onclick= "index_search()"/> </guide>
</package>
"""
)
# Write the opf that describes all the key files
def writeopf(ndicts, name):
with openopf(ndicts, name) as to:
for i in range(ndicts):
to.write(
""" <item id="dictionary{ndict}" href="{name}{ndict}.html" media-type="text/x-oeb1-document"/>
""".format(ndict=i, name=name))
to.write("""
</manifest>
<!-- list of the html files in the correct order -->
<spine>
"""
)
for i in range(ndicts):
to.write("""
<itemref idref="dictionary{ndict}"/>
""".format(ndict=i))
to.write("""
</spine>
""")
######################################################
# main
######################################################
print("Reading keys")
defns = readkeys()
inflections = readinflections()
name = os.path.splitext(os.path.basename(FILENAME))[0]
print("Writing keys")
ndicts = writekeys(defns, name)
keys = defns.keys()
print("Writing opf")
writeopf(ndicts, name)
|
catusf/tudienanhviet
|
bin/create_english_inflections.py
|
<gh_stars>10-100
# from nltk.corpus import words
from pattern.en import lexeme #conjugate, lemma,
from pattern.en import pluralize
# List of common English words
wordlist = set(open("../misc/354984si.ngl").read().split())
keys = set(open("english_keys.txt", encoding='utf-8').read().split())
def getInflections(key):
inflections=set()
# print('"%s"' % key)
if key.isalpha():
try:
try:
lexeme(key)
except:
pass
            inflections.update(lexeme(key)) # lexeme() returns a list of inflected forms; merge them into the set
inflections.add(pluralize(key)) # add plural inflections
inflections.intersection_update(wordlist)
print(inflections)
except:
pass
# print("Unexpected error")
return inflections
keyfile = open("english_inflections.txt", "w", encoding='utf-8')
try:
print(lexeme('be'))
except:
pass
print(lexeme('be'))
print(lexeme('conclusion'))
print(lexeme('harlot'))
print(pluralize('be'))
print(pluralize('conclusion'))
print(pluralize('harlot'))
for k in keys:
# print(lexeme(k))
# print(pluralize(k))
inflections=set()
# print('"%s"' % key)
inflections.update(lexeme(k)) # get all lexem inflections of words
inflections.add(pluralize(k)) # add plural inflections
if k in inflections:
inflections.remove(k)
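    # keep only inflected forms that appear in the common English word list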
inflections.intersection_update(wordlist)
# print(inflections)
if len(inflections):
keyfile.write('%s\t%s\n' % (k, '|'.join(inflections)))
keyfile.close()
|
Candygoblen123/roboco
|
ytthings.py
|
import googleapiclient.discovery
import googleapiclient.errors
from pathlib import Path
class apiAsker:
def __init__(self):
self.youtube = googleapiclient.discovery.build("youtube", "v3", developerKey=Path('youtubeapikey.txt').read_text())
def listId(self, id: str):
return self.youtube.channels().list(part="snippet", id=id).execute()
if __name__ == "__main__":
theThing = apiAsker()
print(theThing.listId('UCRUULstZRWS1lDvJBzHnkXA'))
|
Candygoblen123/roboco
|
roboco.py
|
<reponame>Candygoblen123/roboco
import asyncio
from asyncio.tasks import sleep
import json
import re
from typing import Dict, Set, Union, List
import discord
import discord_slash
from util import *
client = discord.Client(intents=discord.Intents.all(), activity=discord.Game(name='Send all complaints to yoyoyonono#5582'))
timestamp_match = re.compile(r'\d\d:\d\d:\d\d|\d\d:\d\d')
slash = discord_slash.SlashCommand(client, sync_commands=True)
kalm_moments: discord.TextChannel
slash_command_guilds = [599331416773230593]
onii_chan: str
help_file: str
with open("README.md", "r") as fin:
help_file_list = fin.read().splitlines()
for i, v in enumerate(help_file_list):
if v == "## Commmands":
help_file_list = help_file_list[i:]
break
help_file = "\n".join(x for x in help_file_list)
def save_pin_roles(new_pin_roles):
global pin_roles
pin_roles = new_pin_roles
with open("roles.txt", "w") as fout:
json.dump(list(pin_roles), fout)
def add_pin_roles(new_pin_roles):
global pin_roles
pin_roles.add(new_pin_roles)
with open("roles.txt", "w") as fout:
json.dump(list(pin_roles), fout)
def remove_pin_roles(new_pin_roles):
global pin_roles
pin_roles.remove(new_pin_roles)
with open("roles.txt", "w") as fout:
json.dump(list(pin_roles), fout)
def save_invisible_channels(new_invisible):
global invisible_channels
invisible_channels = new_invisible
with open("channels.txt", "w") as fout:
json.dump(list(invisible_channels), fout)
def add_invisible_channels(new_invisible):
global invisible_channels
invisible_channels.add(new_invisible)
with open("channels.txt", "w") as fout:
json.dump(list(invisible_channels), fout)
def remove_invisible_channels(new_invisible):
global invisible_channels
invisible_channels.remove(new_invisible)
with open("channels.txt", "w") as fout:
json.dump(list(invisible_channels), fout)
async def wait_delete(message: discord.Message, time: float = 1):
await sleep(time)
await message.delete()
@client.event
async def on_ready():
global kalm_moments
print("We have logged in as", client.user)
kalm_moments = client.get_channel(887839202174181416)
@register_command("queryc")
async def on_queryc(message: discord.Message, message_content: str):
await message.channel.send(
"Channels the bot can't see: "
+ str([message.guild.get_channel(x).name for x in invisible_channels])
)
@slash.slash(name="queryc",
description="See list of channels invilible to the bot.",
guild_ids=slash_command_guilds)
async def on_slash_queryc(ctx: discord_slash.SlashContext):
await ctx.send(
"Channels the bot can't see: "
+ str([ctx.guild.get_channel(x).name for x in invisible_channels])
)
@register_command("query")
async def on_query(message: discord.Message, message_content: str):
await message.channel.send(
"Roles who can pin: " + str([message.guild.get_role(x).name for x in pin_roles])
)
@slash.slash(name="query",
description="See list of roles that are able to pin messages.",
guild_ids=slash_command_guilds)
async def on_slash_query(ctx: discord_slash.SlashContext):
await ctx.send(
"Roles who can pin: " + str([ctx.guild.get_role(x).name for x in pin_roles])
)
@register_command("help")
async def on_help(message: discord.Message, message_content: str):
await message.channel.send(f"{help_file}")
@slash.slash(name="help",
description="Take your best guess.",
guild_ids=slash_command_guilds)
async def on_slash_help(ctx: discord_slash.SlashContext):
await ctx.send(f"{help_file}")
@register_command("forcopy")
async def on_forcopy(message: discord.Message, message_content: str):
await message.channel.send(f"ids: {' '.join(map(str, pin_roles))}")
@slash.slash(name="forcopy",
description="Get role ids that are able to ping, for copying into a set.",
guild_ids=slash_command_guilds)
async def on_slash_forcopy(ctx: discord_slash.SlashContext):
await ctx.send(f"ids: {' '.join(map(str, pin_roles))}")
@register_command("pinset")
@needs_contributor
async def on_pingset(message: discord.Message, message_content: str):
save_pin_roles(
{int("".join(filter(str.isdigit, x))) for x in message_content[9:].split(" ")}
)
@slash.slash(name="pinset",
description="Gives a role permission to pin messages. Requires the @Contributor role.",
options=[
discord_slash.utils.manage_commands.create_option(
name="role",
description="The role that you want to add to the approved role list.",
option_type=8,
required=True
)
], guild_ids=slash_command_guilds)
async def on_slash_pingset(ctx: discord_slash.SlashContext, role: discord.Role):
if await is_contributor(ctx.author):
add_pin_roles(role.id)
await ctx.send("Pin permission granted.")
else:
await ctx.send("This action requires elevated privileges. Nice try tho.")
@slash.slash(name="pinremove",
description="Revokes a role's permission to pin messages. Requires the @Contributor role.",
options=[
discord_slash.utils.manage_commands.create_option(
name="role",
description="The role that you want to remove from the approved role list.",
option_type=8,
required=True
)
], guild_ids=slash_command_guilds)
async def on_slash_pingremove(ctx: discord_slash.SlashContext, role: discord.Role):
if await is_contributor(ctx.author):
remove_pin_roles(role.id)
await ctx.send("Pin permission removed.")
else:
await ctx.send("This action requires elevated privileges. Nice try tho.")
@register_command("pinsetid")
@needs_contributor
async def on_set(message: discord.Message, message_content: str):
save_pin_roles({int(x) for x in message_content[4:].split(" ")})
@slash.slash(name="pinsetid",
description="Gives a role permission to pin messages. Uses the role's ID. Requires the @Contributor role.",
options=[
discord_slash.utils.manage_commands.create_option(
name="roleid",
description="The ID of role that you want to add to the approved role list.",
option_type=3,
required=True
)
], guild_ids=slash_command_guilds)
async def on_slash_set(ctx: discord_slash.SlashContext, roleid: str):
if await is_contributor(ctx.author):
add_pin_roles(int(roleid))
await ctx.send("Pin permission granted.")
else:
await ctx.send("This action requires elevated privileges. Nice try tho.")
@slash.slash(name="pinremoveid",
description="Removes a role's permission to pin messages. Uses the role's ID. Requires the @Contributor role.",
options=[
discord_slash.utils.manage_commands.create_option(
name="roleid",
description="The ID of role that you want to remove from the approved role list.",
option_type=3,
required=True
)
], guild_ids=slash_command_guilds)
async def on_slash_remove(ctx: discord_slash.SlashContext, roleid: str):
if await is_contributor(ctx.author):
remove_pin_roles(int(roleid))
await ctx.send("Pin permission removed.")
else:
await ctx.send("This action requires elevated privileges. Nice try tho.")
@register_command("channelblock")
@needs_contributor
async def on_channelm(message: discord.Message, message_content: str):
save_invisible_channels(
{
int("".join(y for y in x if y.isdigit()))
for x in message_content[10:].split(" ")
}
)
@slash.slash(name="channelblock",
description="Makes a channel invisible to the bot. Requires the @Contributor role.",
options=[
discord_slash.utils.manage_commands.create_option(
name="channel",
description="The channel you want to block.",
option_type=7,
required=True
)
], guild_ids=slash_command_guilds)
async def on_slash_channelm(ctx: discord_slash.SlashContext, channel: discord.TextChannel):
if await is_contributor(ctx.author):
add_invisible_channels(channel.id)
await ctx.send("Added channel to the block list.")
else:
await ctx.send("This action requires elevated privileges. Nice try tho.")
@slash.slash(name="channelunblock",
description="Makes an invisible channel visible again to the bot. Requires the @Contributor role.",
options=[
discord_slash.utils.manage_commands.create_option(
name="channel",
description="The channel you want to unblock.",
option_type=7,
required=True
)
], guild_ids=slash_command_guilds)
async def on_slash_rm_channelm(ctx, channel):
if await is_contributor(ctx.author):
remove_invisible_channels(channel.id)
await ctx.send("Removed channel from the block list.")
else:
await ctx.send("This action requires elevated privileges. Nice try tho.")
@slash.slash(name="channelidblock",
description="Makes a channel invisible to the bot. Uses the channel's ID. Requires the @Contributor role.",
options=[
discord_slash.utils.manage_commands.create_option(
name="channelid",
description="The ID of the channel you want to block.",
option_type=3,
required=True
)
], guild_ids=slash_command_guilds)
async def on_slash_rm_channel(ctx: discord_slash.SlashContext, channelid: str):
if await is_contributor(ctx.author):
add_invisible_channels(int(channelid))
await ctx.send("Added channel to the block list.")
else:
await ctx.send("This action requires elevated privileges. Nice try tho.")
@slash.slash(name="channelidunblock",
description="Makes an invisible channel visible again to the bot, uses ID. Requires the @Contributor role.",
options=[
discord_slash.utils.manage_commands.create_option(
name="channelid",
description="The id of the channel you want to unblock.",
option_type=7,
required=True
)
], guild_ids=slash_command_guilds)
async def on_slash_id_rm_channel(ctx: discord_slash.SlashContext, channelid: str):
if await is_contributor(ctx.author):
remove_invisible_channels(int(channelid))
await ctx.send("Removed channel from the block list.")
else:
await ctx.send("This action requires elevated privileges. Nice try tho.")
@register_command("channelidblock")
@needs_contributor
async def on_channel(message: discord.Message, message_content: str):
save_invisible_channels(set(map(int, message_content[8:].split(" "))))
@register_command("bean")
async def on_bean(message: discord.Message, message_content: str):
await message.channel.send(f"{message_content[5:]} has been beaned")
@slash.slash(name="bean",
description="Beans a user.",
options=[
discord_slash.utils.manage_commands.create_option(
name="user",
description="The user you wish to bean.",
option_type=6,
required=True
)
],
guild_ids=slash_command_guilds)
async def on_slash_bean(ctx: discord_slash.SlashContext, user: discord.User):
await ctx.send(f"{user.mention} has been beaned.")
@register_command(None)
async def on_default(message: discord.Message):
await message.channel.send("syntax error")
@client.event
async def on_message(message: discord.Message):
if (
message.author == client.user
or message.channel.id in invisible_channels
or not message.content.startswith(".rbc")
):
return
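    # drop the ".rbc " prefix, then dispatch to every callback registered for the matched command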
message_content = message.content[5:]
print(message_content)
command = command_starts_with(message_content)
await asyncio.gather(
*(
callback(message=message, message_content=message_content)
for callback in commands[command]
)
)
@client.event
async def on_reaction_add(reaction: discord.Reaction, user: discord.User):
global kalm_moments
print(reaction.emoji)
if reaction.emoji == "📌":
if not reaction.message.channel.is_nsfw():
if await any_reaction_pinners(reaction):
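                # skip messages that are already on the pin board: each board embed stores the source jump_url as its author url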
if not any((x.embeds[0].author.url if len(x.embeds) > 0 else None) == reaction.message.jump_url for x in await kalm_moments.history().flatten()):
send_embed = discord.Embed(timestamp=reaction.message.created_at)
if not reaction.message.reference:
send_embed.set_author(
name=reaction.message.author.display_name,
url=reaction.message.jump_url,
icon_url=reaction.message.author.avatar_url,
)
send_embed.add_field(
name=f"#{reaction.message.channel.name}",
value=f"[{reaction.message.content}]({reaction.message.jump_url})",
inline=False,
)
else:
send_embed.set_author(
name="multiple people",
url=reaction.message.jump_url,
)
send_embed.add_field(
name=f"#{reaction.message.channel.name}",
value="multiple messages",
inline=False,
)
await add_replies_to_embed(send_embed, reaction.message, 1, reaction.message.channel)
for x in reversed(reaction.message.attachments):
if x.filename.lower().endswith(
(".jpg", ".jpeg", ".png", ".gif", ".gifv")
):
send_embed.set_image(url=x.url)
await kalm_moments.send(embed=send_embed)
message_embed = discord.Embed()
message_embed.set_author(
name=client.user.name,
icon_url=client.user.avatar_url,
)
message_embed.add_field(
name="📌",
value=f"{(await first_pinner(reaction)).display_name} has pinned a [message]({reaction.message.jump_url}) to #{kalm_moments.name}.",
inline=False,
)
await reaction.message.channel.send(embed=message_embed)
else:
await reaction.message.channel.send(
"You don't have the proper role to pin that message"
)
else:
await reaction.message.channel.send("no pinning in nsfw channels. bad")
elif reaction.emoji == "📍":
if not reaction.message.channel.is_nsfw():
if await any_reaction_pinners(reaction):
if not any((x.embeds[0].author.url if len(x.embeds) > 0 else None) == reaction.message.jump_url for x in await kalm_moments.history().flatten()):
send_embed = discord.Embed(timestamp=reaction.message.created_at)
send_embed.set_author(
name=reaction.message.author.display_name,
url=reaction.message.jump_url,
icon_url=reaction.message.author.avatar_url,
)
send_embed.add_field(
name=f"#{reaction.message.channel.name}",
value=f"[{reaction.message.content}]({reaction.message.jump_url})",
inline=False,
)
for x in reversed(reaction.message.attachments):
if x.filename.lower().endswith(
(".jpg", ".jpeg", ".png", ".gif", ".gifv")
):
send_embed.set_image(url=x.url)
await kalm_moments.send(embed=send_embed)
message_embed = discord.Embed()
message_embed.set_author(
name=client.user.name,
icon_url=client.user.avatar_url,
)
message_embed.add_field(
name="📍",
value=f"{(await first_pinner(reaction)).display_name} has pinned a [message]({reaction.message.jump_url}) to #{kalm_moments.name}.",
inline=False,
)
await reaction.message.channel.send(embed=message_embed)
else:
await reaction.message.channel.send(
"You don't have the proper role to pin that message"
)
else:
await reaction.message.channel.send("no pinning in nsfw channels. bad")
async def add_replies_to_embed(embed: discord.Embed, message: discord.Message, depth: int, channel: discord.TextChannel):
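    # walk up the reply chain, adding the oldest messages first; stop at depth 24 since Discord embeds allow at most 25 fields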
if not message or depth > 24:
return
if message.reference:
await add_replies_to_embed(embed, await channel.fetch_message(message.reference.message_id), depth+1, channel)
embed.add_field(
name=message.author.display_name,
value=f"[{message.content}]({message.jump_url})",
inline=False,
)
async def any_reaction_pinners(reaction: discord.Reaction) -> bool:
return any((user_has_pin(x)) for x in (await reaction.users().flatten()))
async def first_pinner(reaction: discord.Reaction) -> discord.Member:
return next((x for x in (await reaction.users().flatten()) if user_has_pin(x)))
def user_has_pin(user: discord.Member) -> bool:
return any(y.id in pin_roles for y in user.roles)
if __name__ == "__main__":
with open("roles.txt", "r") as fin:
pin_roles: Set[int] = set(json.load(fin))
with open("channels.txt", "r") as fin:
invisible_channels: Set[int] = set(json.load(fin))
with open("clientsecret.txt", "r") as fin:
client.run(fin.read())
|
Unprocessable/pandas
|
pandas/tests/series/test_diff.py
|
<reponame>Unprocessable/pandas
from pandas import (
Series
)
from numpy import nan
def test_diff():
data = Series([0,-1,-2,-3,-4,-3,-2,-1,0,-1,-1,0,-1,-2,-3,-2,0])
filtered = data.between(-2,0, inclusive = True)
diff_boolean = filtered.diff()
expected_boolean = Series([nan, False, False, True, False, False, True, False, False, False, False, False, False, False, True, True, False])
assert diff_boolean.equals(expected_boolean)
diff_data = data.diff()
expected_data = Series([nan, -1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, -1.0, 0.0, 1.0, -1.0, -1.0, -1.0, 1.0, 2.0])
assert diff_data.equals(expected_data)
|
SunDoge/L-GCN
|
utils/__init__.py
|
import pandas as pd
from .dictionary import Dictionary, CharDictionary
import pickle
from typing import Dict, List
import torch
import os
import numpy as np
# from torchpie.config import config
MULTIPLE_CHOICE_TASKS = ['action', 'transition']
def load_csv_from_dataset(task: str, split: str) -> pd.DataFrame:
return pd.read_csv(f'data/dataset/{split}_{task}_question.csv', sep='\t')
def load_dictionary(config, task: str, level: str) -> Dictionary:
cache_path = config.get_string('cache_path')
if level == 'word':
return Dictionary.load_from_file(f'{cache_path}/{task}_dictionary.pkl')
elif level == 'char':
return CharDictionary.load_from_file(f'{cache_path}/{task}_char_dictionary.pkl')
def load_answer_dict() -> Dict[str, int]:
with open('cache/frameqa_answer_dict.pkl', 'rb') as f:
return pickle.load(f)
def get_vocab_size(config, task: str, level: str = 'word') -> int:
dictionary = load_dictionary(config, task, level)
return len(dictionary.idx2word)
def batch_to_gpu(batch: List[torch.Tensor]) -> List[torch.Tensor]:
new_batch = [x.cuda(non_blocking=True) for x in batch]
return new_batch
def load_pretrained_character_embedding(path='data/glove.840B.300d-char.txt') -> (Dict[str, int], torch.Tensor):
cached_path = os.path.join('.vector_cache', 'glove.840B.300d-char.txt.pt')
if os.path.exists(cached_path):
print('Cache exists')
return torch.load(cached_path)
char2index = dict()
data = []
with open(path, 'r') as f:
for index, line in enumerate(f.readlines()):
line_split = line.strip().split()
vec = np.array(line_split[1:], dtype=np.float32)
char = line_split[0]
char2index[char] = index
data.append(vec)
data = np.array(data)
data = torch.from_numpy(data)
print('Build cache')
torch.save([char2index, data], cached_path)
return char2index, data
@torch.no_grad()
def count_correct(pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
pred_id = pred.argmax(dim=1)
corrects = pred_id.eq(target).sum()
return corrects
@torch.no_grad()
def accuracy(pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
num_corrects = count_correct(pred, target)
acc = num_corrects.float() * 100. / target.shape[0]
return acc
|
SunDoge/L-GCN
|
scripts/build_tgif_cache.py
|
<gh_stars>10-100
import argparse
import os
import pickle
from typing import Dict, Optional
import pandas as pd
import torch
from torchtext import vocab
from tqdm import tqdm
from utils import load_csv_from_dataset
from utils.dictionary import Dictionary, CharDictionary
from utils.const import IGNORE_INDEX
from typing import Union
MULTIPLE_CHOICE_TASKS = ['action', 'transition']
ALL_TASKS = ['action', 'transition', 'count', 'frameqa']
# class Args(TypedArgs):
# all_tasks = ['action', 'transition', 'count', 'frameqa']
# def __init__(self):
# parser = argparse.ArgumentParser()
# self.tasks = parser.add_argument(
# '-t',
# '--tasks',
# nargs=argparse.ONE_OR_MORE,
# choices=Args.all_tasks,
# default=Args.all_tasks,
# )
# self.output_path: Union[str, argparse.Action] = parser.add_argument(
# '-o',
# '--output-path',
# default='cache'
# )
# self.parse_args_from(parser)
parser = argparse.ArgumentParser()
parser.add_argument(
    '-t', '--tasks', nargs=argparse.ONE_OR_MORE,
choices=ALL_TASKS,
default=ALL_TASKS,
help='which subtask to preprocess'
)
parser.add_argument(
'-o', '--output-path',
default='cache/tgif'
)
def load_csv(path: str) -> pd.DataFrame:
return pd.read_csv(path, sep='\t')
def build_answer_dict(args: argparse.Namespace) -> Dict[str, int]:
answer_dict_path = os.path.join(
args.output_path, 'frameqa_answer_dict.pkl')
# If cache exists, use cache
if os.path.exists(answer_dict_path):
print(f'{answer_dict_path} exists, load cache')
with open(answer_dict_path, 'rb') as f:
return pickle.load(f)
print(f'{answer_dict_path} not exists, build cache')
# Must be train split
df = load_csv_from_dataset('frameqa', 'Train')
all_answers = df['answer']
answers = list(set(all_answers))
answer_dict = {answer: i for i, answer in enumerate(answers)}
with open(answer_dict_path, 'wb') as f:
pickle.dump(answer_dict, f)
return answer_dict
def build_pretrained_embedding(args: argparse.Namespace, task: str, dictionary: Dictionary, vector: vocab.Vocab):
embedding_path = os.path.join(args.output_path, f'{task}_embedding.pt')
if os.path.exists(embedding_path):
print(f'{embedding_path} exists, return')
return
print(f'{embedding_path} not exists, build')
word2idx = dictionary.word2idx
weights = torch.zeros(len(word2idx), 300)
for word, index in word2idx.items():
weight = vector[word]
weights[index] = weight
torch.save(weights, embedding_path)
def process_open_ended(args: argparse.Namespace, task: str, dictionary: Dictionary, char_dictionary: CharDictionary,
answer_dict: Dict[str, int] = None):
def process(split: str):
print(f'processing {task} {split}')
data = []
df = load_csv_from_dataset(task, split)
for index, row in tqdm(df.iterrows(), total=len(df)):
question = row['question']
answer = row['answer']
gif_name = row['gif_name']
question_ids = dictionary.tokenize(question)
question_chars = char_dictionary.tokenize(question)
if task == 'frameqa':
answer_id = IGNORE_INDEX
if answer in answer_dict:
answer_id = answer_dict[answer]
else:
# https://github.com/YunseokJANG/tgif-qa/blob/master/code/gifqa/data_util/tgif.py#L561
answer_id = max(float(answer), 1.0)
# answer_id = float(answer)
data.append({
'question': question,
'answer': answer,
'question_ids': question_ids,
'question_chars': question_chars,
'answer_id': answer_id,
'gif_name': gif_name
})
filename = os.path.join(
args.output_path, f'{split}_{task}_question.pkl')
with open(filename, 'wb') as f:
pickle.dump(data, f)
process('Train')
process('Test')
def build_dictionary(args: argparse.Namespace, task: str) -> Dictionary:
dictionary_path = os.path.join(args.output_path, f'{task}_dictionary.pkl')
if os.path.exists(dictionary_path):
print(f'{dictionary_path} exists, load cache')
return Dictionary.load_from_file(dictionary_path)
dictionary = Dictionary()
def build(split: str):
df = load_csv_from_dataset(task, split)
for question in df['question']:
dictionary.tokenize(
question, add_word=True, extra_dict=glove.stoi if split == 'Test' else None)
if task in MULTIPLE_CHOICE_TASKS:
for answer_key in ['a1', 'a2', 'a3', 'a4', 'a5']:
for answer in df[answer_key]:
dictionary.tokenize(
answer, add_word=True, extra_dict=glove.stoi if split == 'Test' else None)
build('Train')
build('Test')
dictionary.dump_to_file(dictionary_path)
return dictionary
def build_char_dictionary(args: argparse.Namespace, task: str) -> Dictionary:
dictionary_path = os.path.join(
args.output_path, f'{task}_char_dictionary.pkl')
if os.path.exists(dictionary_path):
print(f'{dictionary_path} exists, load cache')
return CharDictionary.load_from_file(dictionary_path)
dictionary = CharDictionary()
def build(split: str):
df = load_csv_from_dataset(task, split)
for question in df['question']:
dictionary.tokenize(
question, add_word=True, extra_dict=glove.stoi if split == 'Test' else None)
if task in MULTIPLE_CHOICE_TASKS:
for answer_key in ['a1', 'a2', 'a3', 'a4', 'a5']:
for answer in df[answer_key]:
dictionary.tokenize(
answer, add_word=True, extra_dict=glove.stoi if split == 'Test' else None)
build('Train')
build('Test')
dictionary.dump_to_file(dictionary_path)
return dictionary
def process_multiple_choice(args: argparse.Namespace, task: str, dictionary: Dictionary, char_dictionary: CharDictionary):
def process(split: str):
print(f'processing {task} {split}')
data = []
df = load_csv_from_dataset(task, split)
for index, row in tqdm(df.iterrows(), total=len(df)):
question = row['question']
            answer_keys = ['a1', 'a2', 'a3', 'a4', 'a5']
answers = [row[key] for key in answer_keys]
gif_name = row['gif_name']
answer_id = int(row['answer'])
question_ids = dictionary.tokenize(question)
question_chars = char_dictionary.tokenize(question)
answer_ids = [dictionary.tokenize(answer)
for answer in answers]
answer_chars = [char_dictionary.tokenize(
answer) for answer in answers]
data.append({
'question': question,
'answers': answers,
'question_ids': question_ids,
'question_chars': question_chars,
'answer_ids': answer_ids,
'answer_chars': answer_chars,
'answer_id': answer_id,
'gif_name': gif_name
})
filename = os.path.join(
args.output_path, f'{split}_{task}_question.pkl')
with open(filename, 'wb') as f:
pickle.dump(data, f)
process('Train')
process('Test')
if __name__ == "__main__":
# args = Args()
args = parser.parse_args()
os.makedirs(args.output_path, exist_ok=True)
glove = vocab.GloVe()
if 'frameqa' in args.tasks:
answer_dict = build_answer_dict(args)
dictionary = build_dictionary(args, 'frameqa')
char_dictionary = build_char_dictionary(args, 'frameqa')
build_pretrained_embedding(args, 'frameqa', dictionary, glove)
process_open_ended(args, 'frameqa', dictionary,
char_dictionary, answer_dict=answer_dict)
if 'count' in args.tasks:
dictionary = build_dictionary(args, 'count')
char_dictionary = build_char_dictionary(args, 'count')
build_pretrained_embedding(args, 'count', dictionary, glove)
process_open_ended(args, 'count', dictionary, char_dictionary)
if 'action' in args.tasks:
dictionary = build_dictionary(args, 'action')
char_dictionary = build_char_dictionary(args, 'action')
build_pretrained_embedding(args, 'action', dictionary, glove)
process_multiple_choice(args, 'action', dictionary, char_dictionary)
if 'transition' in args.tasks:
dictionary = build_dictionary(args, 'transition')
char_dictionary = build_char_dictionary(args, 'transition')
build_pretrained_embedding(args, 'transition', dictionary, glove)
process_multiple_choice(
args, 'transition', dictionary, char_dictionary)
|
SunDoge/L-GCN
|
utils/logging.py
|
import sys
import logging
from typing import Optional
import os
def set_default_logger(experiment_path: Optional[str], debug=False):
log_format = '%(asctime)s|%(levelname)-8s| %(message)s'
formatter = logging.Formatter(log_format)
handlers = []
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(formatter)
handlers.append(console_handler)
if experiment_path is not None:
if not os.path.exists(experiment_path):
os.makedirs(experiment_path)
filename = os.path.join(experiment_path, 'experiment.log')
file_handler = logging.FileHandler(filename)
file_handler.setFormatter(formatter)
handlers.append(file_handler)
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(
handlers=handlers,
level=level
)
|
SunDoge/L-GCN
|
scripts/split_n_parts.py
|
<reponame>SunDoge/L-GCN<filename>scripts/split_n_parts.py
import os
import numpy as np
from typing import List
from utils.io import dump_pickle
import argparse
from pathlib import Path
# FRAME_PATH = '/mnt/dataset/tgif-qa/frames/'
parser = argparse.ArgumentParser()
parser.add_argument(
'-f', '--frame-path', default='data/tgif/frames',
help='path to frames'
)
parser.add_argument(
'-o', '--output', default='data/tgif/frame_splits/', type=Path,
help='path to save the splited pickle file'
)
parser.add_argument(
'-n', '--num-parts', default=6, type=int,
help='split into N parts'
)
def get_all_gifs(frame_path: str):
gifs = os.listdir(frame_path)
gif_paths = [os.path.join(frame_path, gif) for gif in gifs]
return gif_paths
def split_n_parts(gifs: List[str], n: int = 4):
parts = np.array_split(gifs, n)
return parts
if __name__ == "__main__":
args = parser.parse_args()
gifs = get_all_gifs(args.frame_path)
# Split into N parts
parts = split_n_parts(gifs, args.num_parts)
for i, part in enumerate(parts):
dump_pickle(list(part), args.output / f'split{i}.pkl')
|
SunDoge/L-GCN
|
utils/config.py
|
from pyhocon import ConfigFactory
from arguments import args
config = ConfigFactory.parse_file(args.config)
|
SunDoge/L-GCN
|
utils/data.py
|
<filename>utils/data.py
import os
import pickle
from glob import glob
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms as T
from pprint import pprint
from tqdm import tqdm
class VideoDataset(Dataset):
def __init__(self, root: str, extension='*.jpg'):
self.root = root
self.extension = extension
self.videos = os.listdir(root)
def by_index(path: str):
basename = os.path.basename(path)
index = os.path.splitext(basename)[0]
return int(index)
self.video_dict = dict()
for video in tqdm(self.videos):
self.video_dict[video] = sorted(
glob(os.path.join(root, video, extension)),
key=by_index
)
self.samples = list()
self.indices = dict()
index = 0
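        # for each video, remember the offset and frame count of its frames within the flat samples list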
for key, value in self.video_dict.items():
self.samples.extend(value)
num_frames = len(value)
self.indices[key] = [index, num_frames]
index += num_frames
self.transform = T.Compose([
T.Resize((224, 224), interpolation=Image.BICUBIC),
# T.Resize((300, 300), interpolation=Image.BICUBIC),
T.ToTensor(),
T.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
])
print('self.video_dict')
pprint(self.video_dict[video])
def __getitem__(self, index: int):
sample = self.samples[index]
img = Image.open(sample).convert('RGB')
img = self.transform(img)
return img
def __len__(self):
return len(self.samples)
def save_indices(self, path: str):
with open(os.path.join(path, 'indices.pkl'), 'wb') as f:
pickle.dump(self.indices, f)
|
SunDoge/L-GCN
|
utils/io.py
|
import json
import pickle
from PIL import Image, features
# from torchpie.logging import logger
import logging
logger = logging.getLogger(__name__)
logger.info(f'pillow version: {Image.PILLOW_VERSION}')
logger.info(
'Using jpeg-turbo: {}'.format(features.check_feature('libjpeg_turbo')))
def load_json(filename: str):
with open(filename, 'r') as f:
return json.load(f)
def dump_json(obj, filename: str):
with open(filename, 'w') as f:
json.dump(obj, f)
def load_pickle(filename: str):
with open(filename, 'rb') as f:
return pickle.load(f)
def dump_pickle(obj, filename: str):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
def load_image(path: str) -> Image.Image:
return Image.open(path).convert('RGB')
|
SunDoge/L-GCN
|
scripts/merge_bboxes.py
|
import torch
# from typed_args import TypedArgs
import argparse
import os
from typing import Dict, List, NewType, Tuple
from tqdm import tqdm
from numpy.lib.format import open_memmap
import numpy as np
from utils.npy_file import NpyFile
from utils.io import dump_pickle
# class Args(TypedArgs):
# def __init__(self):
# parser = argparse.ArgumentParser()
# self.bboxes = parser.add_argument(
# '--bboxes'
# )
# self.output = parser.add_argument(
# '-o', '--output'
# )
# self.num_bboxes: int = parser.add_argument(
# '-n', '--num-bboxes', type=int, default=10
# )
# self.parse_args_from(parser)
parser = argparse.ArgumentParser()
parser.add_argument('--bboxes', help='path to bboxes')
parser.add_argument('-o', '--output', help='output path')
parser.add_argument('-n', '--num-bboxes', type=int, default=5,
help='use N bboxes, 5 is enough, 10 for ablation study')
def load_bboxes(args: argparse.Namespace) -> List[NpyFile]:
splits = os.listdir(args.bboxes)
splits = sorted(splits)
print(splits)
fps = []
for split in tqdm(splits):
fp = NpyFile(os.path.join(args.bboxes, split))
fps.append(fp)
return fps
def get_new_indices(fp: NpyFile, index: int) -> Dict[str, Tuple[int, int]]:
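    # shift each per-split start offset by this split's position in the merged data.npy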
indices = fp.indices
for k in indices.keys():
indices[k][0] += index
return indices
def count_frames(fps: List[NpyFile]) -> int:
res = 0
for fp in fps:
res += len(fp.data)
return res
def main(args):
os.makedirs(args.output, exist_ok=True)
fps = load_bboxes(args)
total_frames = count_frames(fps)
print('total_frames:', total_frames)
new_indices = dict()
new_fp = open_memmap(
os.path.join(args.output, 'data.npy'),
mode='w+',
dtype=np.float32,
shape=(total_frames, 10, 2048)
)
index = 0
for fp in tqdm(fps):
length = len(fp.data)
new_fp[index: index + length] = fp.data
new_indices.update(get_new_indices(fp, index))
index += length
del new_fp
dump_pickle(new_indices, os.path.join(args.output, 'indices.pkl'))
if __name__ == "__main__":
# args = Args()
args = parser.parse_args()
main(args)
|
SunDoge/L-GCN
|
model/rnn.py
|
<gh_stars>10-100
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
class RNNEncoder(nn.Module):
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bidirectional=True,
rnn=nn.LSTM,
dropout=0
):
super().__init__()
self.num_directions = 2 if bidirectional else 1
self.rnn: nn.LSTM = rnn(
input_size,
hidden_size,
num_layers=num_layers,
batch_first=True,
bidirectional=bidirectional,
dropout=dropout
)
def forward(self, input, lengths):
sorted_input, sorted_lengths, reverse_indices = self._sort_batch(
input, lengths)
packed_input = pack_padded_sequence(
sorted_input, sorted_lengths, batch_first=True)
self.rnn.flatten_parameters()
output, hidden = self.rnn(packed_input)
output, lengths = pad_packed_sequence(output, batch_first=True)
output = output[reverse_indices]
if isinstance(self.rnn, nn.LSTM):
hidden = hidden[0]
hidden = hidden[-self.num_directions:]
hidden = hidden.transpose(0, 1).contiguous()
hidden = hidden.view(hidden.size(0), -1)
hidden = hidden[reverse_indices]
return output, hidden
def _sort_batch(self, input: torch.Tensor, lengths: torch.Tensor):
        sorted_lengths, indices = lengths.sort(dim=0, descending=True)
sorted_input = input[indices]
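        # build the inverse permutation: scatter writes position i into slot indices[i], so output[reverse_indices] restores the original batch order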
reverse_indices = indices.scatter(
0, indices, torch.arange(len(indices), device=lengths.device)
)
        return sorted_input, sorted_lengths, reverse_indices
class RNNEncoderOld(nn.Module):
"""A RNN wrapper handles variable length inputs, always set batch_first=True.
Supports LSTM, GRU and RNN. Tested with PyTorch 0.3 and 0.4
"""
def __init__(self, word_embedding_size, hidden_size, bidirectional=True,
dropout_p=0, n_layers=1, rnn_type="lstm", return_hidden=True, return_outputs=True):
super(RNNEncoderOld, self).__init__()
"""
:param word_embedding_size: rnn input size
:param hidden_size: rnn output size
:param dropout_p: between rnn layers, only useful when n_layer >= 2
"""
self.rnn_type = rnn_type
self.n_dirs = 2 if bidirectional else 1
# - add return_hidden keyword arg to reduce computation if hidden is not needed.
self.return_hidden = return_hidden
self.return_outputs = return_outputs
self.rnn = getattr(nn, rnn_type.upper())(word_embedding_size, hidden_size, n_layers,
batch_first=True,
bidirectional=bidirectional,
dropout=dropout_p)
def sort_batch(self, seq, lengths):
sorted_lengths, perm_idx = lengths.sort(0, descending=True)
reverse_indices = [0] * len(perm_idx)
for i in range(len(perm_idx)):
reverse_indices[perm_idx[i]] = i
sorted_seq = seq[perm_idx]
return sorted_seq, list(sorted_lengths), reverse_indices
def forward(self, inputs, lengths):
"""
inputs, sorted_inputs -> (B, T, D)
lengths -> (B, )
outputs -> (B, T, n_dirs * D)
hidden -> (n_layers * n_dirs, B, D) -> (B, n_dirs * D) keep the last layer
- add total_length in pad_packed_sequence for compatiblity with nn.DataParallel, --remove it
"""
assert len(inputs) == len(lengths)
sorted_inputs, sorted_lengths, reverse_indices = self.sort_batch(
inputs, lengths)
packed_inputs = pack_padded_sequence(
sorted_inputs, sorted_lengths, batch_first=True)
outputs, hidden = self.rnn(packed_inputs)
if self.return_outputs:
# outputs, lengths = pad_packed_sequence(outputs, batch_first=True, total_length=int(max(lengths)))
outputs, lengths = pad_packed_sequence(outputs, batch_first=True)
outputs = outputs[reverse_indices]
else:
outputs = None
if self.return_hidden: #
if self.rnn_type.lower() == "lstm":
hidden = hidden[0]
hidden = hidden[-self.n_dirs:, :, :]
hidden = hidden.transpose(0, 1).contiguous()
hidden = hidden.view(hidden.size(0), -1)
hidden = hidden[reverse_indices]
else:
hidden = None
return outputs, hidden
def max_along_time(outputs, lengths):
""" Get maximum responses from RNN outputs along time axis
:param outputs: (B, T, D)
:param lengths: (B, )
:return: (B, D)
"""
outputs = [outputs[i, :int(lengths[i]), :].max(dim=0)[0]
for i in range(len(lengths))]
return torch.stack(outputs, dim=0)
|
SunDoge/L-GCN
|
train.py
|
import logging
import math
import os
import pickle
import random
from pprint import pprint
from typing import Dict
import numpy as np
import pandas as pd
# import ipdb
import torch
import torch.nn.functional as F
from pyhocon import ConfigFactory
from termcolor import colored
from torch import nn, optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import utils
from arguments import args
from dataset import get_dataloader
from model import get_model
from utils import MULTIPLE_CHOICE_TASKS, accuracy, count_correct
# from torchpie.config import config
# from torchpie.environment import args, experiment_path
# from torchpie.logging import logger
# from torchpie.meters import AverageMeter
# from torchpie.parallel import FakeObj
# from torchpie.utils.checkpoint import save_checkpoint
from utils.checkpoint import save_checkpoint
from utils.config import config
from utils.dictionary import CharDictionary, Dictionary
from utils.io import load_pickle
from utils.logging import set_default_logger
from utils.meters import AverageMeter
logger = logging.getLogger(__name__)
def train(model: nn.Module, loader: DataLoader, criterion: nn.Module, optimzier: optim.Optimizer, epoch: int):
loader_length = len(loader)
losses = AverageMeter('Loss')
if TASK in utils.MULTIPLE_CHOICE_TASKS or TASK in ['frameqa', 'youtube2text']:
result = AverageMeter('Acc')
else:
result = AverageMeter('MSE')
model.train()
# for i, data in enumerate(loader):
for i, data in enumerate(tqdm(loader)):
data = utils.batch_to_gpu(data)
(
question, question_length, question_chars,
a1, a1_length, a1_chars,
a2, a2_length, a2_chars,
a3, a3_length, a3_chars,
a4, a4_length, a4_chars,
a5, a5_length, a5_chars,
features, c3d_features, bbox_features, bbox,
answer
) = data
if config.get_bool('abc.is_multiple_choice'):
answer = torch.zeros_like(answer)
out = model(
question, question_length, question_chars,
a1, a1_length, a1_chars,
a2, a2_length, a2_chars,
a3, a3_length, a3_chars,
a4, a4_length, a4_chars,
a5, a5_length, a5_chars,
features, c3d_features, bbox_features, bbox
)
loss: torch.Tensor = criterion(out, answer)
optimzier.zero_grad()
loss.backward()
optimzier.step()
compute_score(losses, result, out, answer, loss)
# logger.info(
# f'Train Epoch [{epoch}][{i}/{loader_length}]\t'
# f'{result}%\t{losses}'
# )
if args.debug:
break
writer.add_scalar(f'Train/{losses.name}', losses.avg, epoch)
writer.add_scalar(f'Train/{result.name}', result.avg, epoch)
@torch.no_grad()
def test(model: nn.Module, loader: DataLoader, criterion: nn.Module, epoch: int) -> float:
loader_length = len(loader)
losses = AverageMeter('Loss')
if TASK in utils.MULTIPLE_CHOICE_TASKS or TASK in ['frameqa', 'youtube2text']:
result = AverageMeter('Acc')
else:
result = AverageMeter('MSE')
type_meters = dict()
if TASK == 'youtube2text':
youtube2text_meters: Dict[int, AverageMeter] = dict()
for qtype_id, qtype in youtube2text_qtype_dict.items():
youtube2text_meters[qtype_id] = AverageMeter(qtype, fmt=':.3f')
youtube2text_meters['other'] = AverageMeter('other', fmt=':.3f')
model.eval()
final_out = []
for i, data in enumerate(tqdm(loader)):
data = utils.batch_to_gpu(data)
(
question, question_length, question_chars,
a1, a1_length, a1_chars,
a2, a2_length, a2_chars,
a3, a3_length, a3_chars,
a4, a4_length, a4_chars,
a5, a5_length, a5_chars,
features, c3d_features, bbox_features, bbox,
answer
) = data
if config.get_bool('abc.is_multiple_choice'):
answer = torch.zeros_like(answer)
out = model(
question, question_length, question_chars,
a1, a1_length, a1_chars,
a2, a2_length, a2_chars,
a3, a3_length, a3_chars,
a4, a4_length, a4_chars,
a5, a5_length, a5_chars,
features, c3d_features, bbox_features, bbox
)
loss: torch.Tensor = criterion(out, answer)
compute_score(losses, result, out, answer, loss)
if TASK == 'youtube2text':
corrects = out.argmax(dim=1).eq(answer)
qtype_ids = question[:, 0]
all_corrects = corrects.sum()
all_questions = len(question)
for qtype_id in youtube2text_qtype_dict.keys():
qtype_meter = youtube2text_meters[qtype_id]
current_qtype = qtype_ids.eq(qtype_id)
num_questions = current_qtype.sum()
if num_questions > 0:
currect_qtype_corrects = (
corrects & current_qtype).sum()
qtype_meter.update(
currect_qtype_corrects.float() / num_questions,
num_questions
)
all_corrects -= currect_qtype_corrects
all_questions -= num_questions
if all_questions > 0:
youtube2text_meters['other'].update(
all_corrects.float() / all_questions, all_questions)
if args.debug:
break
writer.add_scalar(f'Test/{losses.name}', losses.avg, epoch)
writer.add_scalar(f'Test/{result.name}', result.avg, epoch)
if TASK == 'youtube2text':
avg_per_class = 0
for meter in youtube2text_meters.values():
logger.info(f'Test Epoch [{epoch}] {meter}, n={meter.count}')
avg_per_class += meter.avg
avg_per_class /= 3
logger.info(f'Test Epoch [{epoch}], Avg. Per-class: {avg_per_class}')
for meter in youtube2text_meters.values():
type_meters[meter.name] = meter.avg.item()
return result.avg, type_meters
@torch.no_grad()
def compute_score(losses: AverageMeter, result: AverageMeter, out: torch.Tensor, answer: torch.Tensor,
loss: torch.Tensor):
batch_size = answer.shape[0]
if TASK in utils.MULTIPLE_CHOICE_TASKS or TASK in ['frameqa', 'youtube2text']:
acc = accuracy(out, answer)
result.update(acc.item(), batch_size)
elif TASK == 'count':
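        # undo the (answer - 1) / 10 target scaling applied by the count criterion before rounding and clamping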
out = out * 10. + 1.
mse = F.mse_loss(out.round().clamp(1., 10.), answer.clamp(1., 10.))
result.update(mse.item(), batch_size)
if TASK in MULTIPLE_CHOICE_TASKS or config.get_bool('abc.is_multiple_choice'):
losses.update(loss.item() / batch_size, batch_size)
else:
losses.update(loss.item(), batch_size)
def main():
best_result = math.inf if TASK == 'count' else 0.0
best_type_meters = dict()
train_loader, test_loader = get_dataloader(config, logger)
num_classes = 1
if TASK == 'frameqa':
answer_dict = utils.load_answer_dict()
num_classes = len(answer_dict)
if TASK == 'youtube2text':
if config.get_bool('abc.is_multiple_choice'):
num_classes = 1
else:
num_classes = 1000
logger.info(f'Num classes: {num_classes}')
vocab_size = utils.get_vocab_size(config, TASK, level='word')
char_vocab_size = utils.get_vocab_size(config, TASK, level='char')
model = get_model(vocab_size, char_vocab_size, num_classes)
model = model.cuda()
if TASK in MULTIPLE_CHOICE_TASKS:
criterion = nn.CrossEntropyLoss(reduction='sum')
elif TASK == 'count':
inner_criterion = nn.MSELoss()
def criterion(input, target):
target = (target - 1.) / 10.
return inner_criterion(input, target)
# criterion = nn.SmoothL1Loss()
elif TASK in ['frameqa']:
criterion = nn.CrossEntropyLoss()
elif TASK == 'youtube2text':
if config.get_bool('abc.is_multiple_choice'):
criterion = nn.CrossEntropyLoss(reduction='sum')
else:
criterion = nn.CrossEntropyLoss()
optimizer_type = config.get_string('optimizer')
if optimizer_type == 'adam':
optimizer = optim.Adam(
model.parameters(), lr=config.get_float('adam.lr'))
else:
raise Exception(f'Unknow optimizer: {optimizer_type}')
start_epoch = 1
end_epoch = config.get_int('num_epochs')
for epoch in range(start_epoch, end_epoch + 1):
logger.info(f'Epoch [{epoch}/{end_epoch}] start')
train(model, train_loader, criterion, optimizer, epoch)
current_result, current_type_meters = test(
model, test_loader, criterion, epoch)
logger.info(f'Epoch [{epoch}/{end_epoch}] end')
if args.debug:
break
is_best = False
if TASK == 'count':
if current_result < best_result:
is_best = True
best_result = current_result
else:
if current_result > best_result:
is_best = True
best_result = current_result
best_type_meters = current_type_meters
logger.info(
colored("Current best result: {:.2f}, Exp path: {}".format(best_result, args.experiment_path), "red"))
logger.info(best_type_meters)
save_checkpoint({
'arch': config.get_string('arch'),
'task': TASK,
'state_dict': model.state_dict(),
'epoch': epoch + 1,
'best_result': best_result,
'optimizer': optimizer.state_dict(),
'best_type_meters': best_type_meters,
}, is_best=is_best, folder=args.experiment_path)
if TASK == 'count':
logger.info(f'Best MSE: {best_result}')
else:
logger.info(f'Best Acc: {best_result}')
def fix_seed(config):
seed = config.get_int('seed')
logger.info(f'Set seed={seed}')
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
if __name__ == "__main__":
set_default_logger(args.experiment_path, debug=args.debug)
# config = ConfigFactory.parse_file(args.config)
fix_seed(config)
pprint(config)
TASK = config.get_string('task')
best_meters = dict()
if TASK == 'youtube2text':
youtube2text_dictionary = Dictionary.load_from_file(
os.path.join(
config.get_string('cache_path'), 'youtube2text_dictionary.pkl'
)
)
youtube2text_qtype_dict = dict()
for qtype in ['what', 'who']:
qtype_id = youtube2text_dictionary.word2idx[qtype]
youtube2text_qtype_dict[qtype_id] = qtype
if args.experiment_path is not None:
writer = SummaryWriter(log_dir=args.experiment_path)
else:
# writer: SummaryWriter = FakeObj()
raise Exception('No exp path for tensorboard')
main()
writer.close()
|
SunDoge/L-GCN
|
utils/npy_file.py
|
import math
import os
import pickle
from functools import lru_cache
import numpy as np
from numpy.lib.format import open_memmap
from typing import Tuple, List, Dict
class NpyFile:
def __init__(self, name: str, mode='r'):
self.name = name
self.index_path = os.path.join(name, 'indices.pkl')
self.data_path = os.path.join(name, 'data.npy')
with open(self.index_path, 'rb') as f:
self.indices = pickle.load(f)
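        # indices maps each video name to its (start row, frame count) slice inside data.npy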
self.data = np.load(self.data_path, mmap_mode=mode)
def __getitem__(self, name: str) -> np.memmap:
index, length = self.indices[name]
return self.data[index: index + length]
def __len__(self):
return len(self.indices)
class NpyFileBuilder:
def __init__(self, name: str, shape: Tuple[int], dtype=np.float32, exist_ok: bool = False):
os.makedirs(name, exist_ok=exist_ok)
self.name = name
self.index = 0
self.fp = open_memmap(
            os.path.join(name, 'data.npy'),  # write into the directory created above, matching the data.npy that NpyFile reads back
mode='w+',
shape=shape,
dtype=dtype
)
self.indices = dict()
def insert(self, key: str, value: np.ndarray):
length = len(value)
self.indices[key] = [self.index, length]
self.fp[self.index: self.index + length] = value
self.index += length
def compress_symmetric(data: np.ndarray) -> np.ndarray:
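    # a symmetric (d, d) matrix is fully described by its upper triangle: d * (d + 1) / 2 values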
assert np.allclose(data, data.T)
flat = data[np.triu_indices(data.shape[0])]
return flat
def get_dim_from_length(flat: np.ndarray) -> int:
'''
(sqrt(1 + 8 * L) - 1) / 2
'''
@lru_cache(maxsize=256)
def impl(length: int) -> int:
return int(math.sqrt(length * 8 + 1) - 1) // 2
return impl(len(flat))
def decompress_symmetric(flat: np.ndarray) -> np.ndarray:
dim = get_dim_from_length(flat)
data: np.ndarray = np.zeros((dim, dim), dtype=flat.dtype)
data[np.triu_indices(dim)] = flat
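    # mirror the upper triangle into the lower half; subtract the diagonal once since the sum counts it twice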
data = data + data.T - np.diag(data.diagonal())
return data
|
SunDoge/L-GCN
|
scripts/extract_resnet152_features_with_bboxes.py
|
<gh_stars>10-100
import argparse
import os
import pickle
from glob import glob
from pprint import pprint
import numpy as np
import torch
import torchvision.transforms as T
from numpy.lib.format import open_memmap
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision.datasets import ImageFolder
from torchvision.models.resnet import ResNet, resnet152
from torchvision.models.vgg import VGG, vgg16_bn
from tqdm import tqdm
# from typed_args import TypedArgs
import json
from torchvision.ops import roi_align
from collections import defaultdict
from typing import List, Tuple, Dict
from torch import nn
from io import BytesIO
from utils.io import load_pickle
# class Args(TypedArgs):
# def __init__(self):
# parser = argparse.ArgumentParser()
# self.input_dir = parser.add_argument('-i', '--input-dir')
# self.output_dir = parser.add_argument(
# '-o', '--output-dir', default='data/resnet152_layer4_features')
# self.batch_size = parser.add_argument(
# '-b', '--batch-size', type=int, default=512
# )
# self.num_workers = parser.add_argument(
# '-n', '--num-workers', type=int, default=4
# )
# self.part = parser.add_argument(
# '-p', '--part'
# )
# self.num_boxes = parser.add_argument(
# '--num-boxes', type=int
# )
# self.arch = parser.add_argument(
# '-a', '--arch', help='vgg16, c3d = 4096, resnet = 2048', default='resnet152'
# )
# self.frame_path = parser.add_argument(
# '-f', '--frame-path', help='MSVD'
# )
# self.parse_args_from(parser)
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input-dir')
parser.add_argument(
'-o', '--output-dir', default='data/resnet152_layer4_features'
)
parser.add_argument(
'-b', '--batch-size', type=int, default=512
)
parser.add_argument(
'-n', '--num-workers', type=int, default=4
)
parser.add_argument(
'-p', '--part', help='path to pt file'
)
parser.add_argument(
'--num-boxes', type=int
)
parser.add_argument(
'-a', '--arch', help='vgg16, c3d = 4096, resnet = 2048', default='resnet152'
)
parser.add_argument(
'-f', '--frame-path', help='MSVD only'
)
parser.add_argument(
'--msvd', action='store_true', help='MSVD uses different frame naming strategy'
)
class VideoDataset(Dataset):
def __init__(self, args, keys: Dict[str, int], extension='*.jpg'):
self.args = args
self.root = args.input_dir
self.part = args.part
self.extension = extension
# with open(self.part, 'rb') as f:
# self.videos = torch.load(BytesIO(f.read()))
self.video_dict = defaultdict(list)
if args.arch == 'resnet152':
for gif_name, num_frames in keys.items():
for i in range(num_frames):
self.video_dict[gif_name].append(
os.path.join(self.root, gif_name, f'{i}.jpg')
)
elif args.arch == 'vgg16':
samples: List[str] = load_pickle(args.frame_path)
videos = []
for sample in tqdm(samples):
gif_name = sample.split('/')[-1]
videos.append(gif_name)
num_frames = len(os.listdir(sample))
selected_frames = np.linspace(
0, num_frames, 20 + 2)[1:20 + 1].astype(np.int) + 1
for n in selected_frames:
if args.msvd:
frame_path = os.path.join(
sample, f'{n + 1:06}.jpg') # For MSVD-QA
else:
frame_path = os.path.join(
sample, f'{n}.jpg') # For TGIF-QA
self.video_dict[gif_name].append(
# os.path.join(sample, f'{n}.jpg') # For TGIF-QA
# os.path.join(sample, f'{n + 1:06}.jpg') # For MSVD-QA
frame_path
)
self.samples = list()
self.indices = dict()
index = 0
for key, value in self.video_dict.items():
self.samples.extend(value)
num_frames = len(value)
self.indices[key] = [index, num_frames]
index += num_frames
self.transform = T.Compose([
T.Resize((224, 224), interpolation=Image.BICUBIC),
T.ToTensor(),
T.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
])
print('self.video_dict')
pprint(self.video_dict[gif_name])
# del self.videos
def __getitem__(self, index: int):
sample = self.samples[index]
img = Image.open(sample).convert('RGB')
img = self.transform(img)
return img
def __len__(self):
return len(self.samples)
def save_indices(self, path: str):
with open(os.path.join(path, 'indices.pkl'), 'wb') as f:
pickle.dump(self.indices, f)
# def collate_fn(batch: List[Tuple[torch.Tensor, torch.Tensor]]) -> (torch.Tensor, List[torch.Tensor]):
# images, boxes = zip(*batch)
# images = torch.stack(images)
# return images, boxes
def get_model(args):
if args.arch == 'resnet152':
def my_forward(self: ResNet, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
# x = self.avgpool(x)
# x = torch.flatten(x, 1)
# x = self.fc(x)
return x
model = resnet152(pretrained=True).cuda()
model.forward = my_forward.__get__(model, ResNet)
model.eval()
elif args.arch == 'vgg16':
full_model = vgg16_bn(pretrained=True).cuda()
full_model.eval()
model = full_model.features
else:
raise Exception
return model
def get_bbox(args):
bboxes: Dict[str, List[Dict[str, torch.Tensor]]] = torch.load(args.part)
keys = dict()
box_list = list()
for gif_name, frames in tqdm(bboxes.items()):
keys[gif_name] = len(frames)
for frame in frames:
bbox = frame['bbox']
new_boxes = torch.zeros((args.num_boxes, 4))
N, _ = bbox.shape
N = min(args.num_boxes, N)
# Resize to 7x7
new_boxes[:N] = bbox[:N] * 7.
box_list.append(new_boxes)
return box_list, keys
# class RoiModel(nn.Module):
# def __init__(self):
# super().__init__()
# self.resnet = get_model()
# def forward(self, images: torch.Tensor, boxes: List[torch.Tensor]):
# output = self.resnet(images)
# output = roi_align(output, boxes, (1, 1))
# output = output
# return output
def get_dataloader(args, keys: Dict[str, int]):
dataset = VideoDataset(args, keys)
loader = DataLoader(
dataset,
batch_size=args.batch_size,
num_workers=args.num_workers,
shuffle=False,
pin_memory=True,
# collate_fn=collate_fn
)
return loader
def extract_features(args):
model = get_model(args)
# model = RoiModel()
# model = nn.DataParallel(model)
# model = model.cuda()
# model.eval()
bboxes, keys = get_bbox(args)
loader = get_dataloader(args, keys)
N = len(loader.dataset)
# out_channels = 2048 if args.arch == 'resnet152' else 512
if args.arch == 'resnet152':
out_channels = 2048
elif args.arch == 'vgg16':
out_channels = 512
fp = open_memmap(
os.path.join(args.output_dir, 'data.npy'),
mode='w+',
dtype=np.float32,
shape=(N, args.num_boxes, out_channels)
)
with torch.no_grad():
for i, images in tqdm(enumerate(loader), total=len(loader)):
images = images.cuda()
output = model(images)
current_batch_size = images.shape[0]
current_index = i * args.batch_size
current_boxes = bboxes[current_index: current_index +
current_batch_size]
current_boxes = [b.cuda() for b in current_boxes]
output = roi_align(output, current_boxes, (1, 1))
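            # roi_align yields one (C, 1, 1) feature per box, stacked over all images in the mini-batch; reshaped below to (batch, num_boxes, C)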
# index = i * args.batch_size
# import ipdb; ipdb.set_trace()
fp[current_index: current_index + current_batch_size] = output.view(
current_batch_size, args.num_boxes, out_channels).cpu().numpy()
print(fp[N - 1])
del fp
loader.dataset.save_indices(args.output_dir)
if __name__ == "__main__":
# args = Args()
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
extract_features(args)
|
SunDoge/L-GCN
|
scripts/extract_resnet152_features.py
|
<gh_stars>10-100
import argparse
import os
import pickle
from glob import glob
from pprint import pprint
import numpy as np
import torch
import torchvision.transforms as T
from numpy.lib.format import open_memmap
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision.datasets import ImageFolder
from torchvision.models.resnet import ResNet, resnet152, resnet101
from torchvision.models.vgg import VGG, vgg16_bn
from tqdm import tqdm
# from typed_args import TypedArgs
from utils.data import VideoDataset
# from maskrcnn_benchmark.structures.bounding_box import BoxList
# class Args(TypedArgs):
# def __init__(self):
# parser = argparse.ArgumentParser()
# self.input_dir = parser.add_argument('-i', '--input-dir')
# self.output_dir = parser.add_argument(
# '-o', '--output-dir', default='data/resnet152_layer4_features')
# self.batch_size = parser.add_argument(
# '-b', '--batch-size', type=int, default=512
# )
# self.num_workers = parser.add_argument(
# '-n', '--num-workers', type=int, default=4
# )
# self.parse_args_from(parser)
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input-dir', help='path to tgif frames')
parser.add_argument('-o', '--output-dir', default='data/tgif/resnet152_pool5_features',
help='path to save the output features')
parser.add_argument('-b', '--batch-size', type=int, default=512)
parser.add_argument('-n', '--num-workers', type=int, default=4)
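# Example invocation (illustrative only; the input/output paths are hypothetical):
#   python scripts/extract_resnet152_features.py \
#       -i data/tgif/frames -o data/tgif/resnet152_pool5_features -b 256 -n 8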
class MyDataset:
def __init__(self):
pass
def get_model():
def my_forward(self: ResNet, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
# x = self.fc(x)
return x
# model = resnet152(pretrained=True).cuda()
model = resnet101(pretrained=True).cuda()
model.forward = my_forward.__get__(model, ResNet)
model.eval()
return model
def get_dataloader(args):
dataset = VideoDataset(args.input_dir)
loader = DataLoader(
dataset,
batch_size=args.batch_size,
num_workers=args.num_workers,
shuffle=False,
pin_memory=True
)
return loader
def extract_features(args):
model = get_model()
loader = get_dataloader(args)
N = len(loader.dataset)
fp = open_memmap(
os.path.join(args.output_dir, 'data.npy'),
mode='w+',
dtype=np.float32,
shape=(N, 2048)
)
with torch.no_grad():
for i, images in tqdm(enumerate(loader), total=len(loader)):
images = images.cuda()
output = model(images)
current_batch_size = images.shape[0]
index = i * args.batch_size
fp[index: index + current_batch_size] = output.cpu().numpy()
print(fp[N-1])
del fp
loader.dataset.save_indices(args.output_dir)
if __name__ == "__main__":
# args = Args()
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
extract_features(args)
|
SunDoge/L-GCN
|
dataset/qa_pair.py
|
from typing import List
class QAPair:
def __init__(self, task: str):
self.task = task
def from_row(self, row):
self.gif_name: str = row.gif_name
self.question: str = row.question.lower()
if self.task in ['action', 'trans']:
self.answers: List[str] = [
row.a0, row.a1, row.a2, row.a3, row.a4
]
self.answer_index: int = row.answer
else:
self.answer_index: str = row.answer
|
SunDoge/L-GCN
|
utils/checkpoint.py
|
import os
import torch
import shutil
def save_checkpoint(state: dict, is_best=False, folder='', filename='checkpoint.pth.tar'):
filename = os.path.join(folder, filename)
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, os.path.join(folder, 'model_best.pth.tar'))
|
SunDoge/L-GCN
|
arguments.py
|
<reponame>SunDoge/L-GCN
import argparse
import sys
import time
__ALL__ = ['args']
# class Args(TypedArgs):
# def __init__(self):
# parser = argparse.ArgumentParser()
# self.experiment_path: Optional[str] = parser.add_argument(
# '-e',
# '--experiment-path',
# type=str,
# nargs='?',
# default='!default',
# required=False,
# help='path to save your experiment'
# )
# self.config: Optional[str] = parser.add_argument(
# '-c',
# '--config',
# type=str,
# nargs='?',
# required=False,
# help='path to config files'
# )
# self.debug: bool = parser.add_argument(
# '-d',
# '--debug',
# action='store_true',
# help='debug mode'
# )
# self.resume: str = parser.add_argument(
# '-r',
# '--resume',
# type=str,
# help='resume an experiment'
# )
# # This arg is for distributed
# self.local_rank: int = parser.add_argument(
# '--local_rank',
# default=0,
# type=int
# )
# self.parse_known_args_from(parser)
# self.create_experiment_path()
# def create_experiment_path(self):
# timestamp = get_timestamp()
# if self.experiment_path is None:
# if self.debug:
# experiment_path = os.path.join(
# 'output', '{}_debug'.format(timestamp))
# else:
# experiment_path = os.path.join('output', timestamp)
# else:
# # No experiment path
# if self.experiment_path == '!default':
# experiment_path = None
# else:
# experiment_path = self.experiment_path
# if experiment_path is not None and self.local_rank == 0:
# try:
# os.makedirs(experiment_path)
# except Exception as e:
# if not F.ask_remove_older_experiment_path(experiment_path):
# raise e
# self.experiment_path = experiment_path
def get_timestamp(fmt: str = '%Y%m%d_%H%M%S') -> str:
timestamp = time.strftime(fmt, time.localtime())
return timestamp
def create_experiment_path(self):
timestamp = get_timestamp()
if self.experiment_path is None:
if self.debug:
experiment_path = os.path.join(
'output', '{}_debug'.format(timestamp))
else:
experiment_path = os.path.join('output', timestamp)
else:
# No experiment path
if self.experiment_path == '!default':
experiment_path = None
else:
experiment_path = self.experiment_path
if experiment_path is not None and self.local_rank == 0:
try:
os.makedirs(experiment_path)
except Exception as e:
if not F.ask_remove_older_experiment_path(experiment_path):
raise e
self.experiment_path = experiment_path
parser = argparse.ArgumentParser()
parser.add_argument(
'-e',
'--experiment-path',
type=str,
nargs='?',
default='!default',
required=False,
help='path to save your experiment'
)
parser.add_argument(
'-c',
'--config',
type=str,
nargs='?',
required=False,
help='path to config files'
)
parser.add_argument(
'-d',
'--debug',
action='store_true',
help='debug mode'
)
parser.add_argument(
'-r',
'--resume',
type=str,
help='resume an experiment'
)
parser.add_argument(
'--local_rank',
default=0,
type=int
)
# TODO: add any remaining arguments here
args = parser.parse_args()
|
SunDoge/L-GCN
|
dataset/tgifqa_dataset.py
|
<reponame>SunDoge/L-GCN
import os
import pickle
import random
from logging import Logger
import ipdb
import numpy as np
import torch
from pyhocon import ConfigTree
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, get_worker_info
from typing import List
from utils.npy_file import NpyFile
class VQAFeatureDataset(Dataset):
def __init__(self, opt: ConfigTree, logger: Logger, split: str):
self.split = split
self.logger = logger
self.task = opt.get_string('task')
self.num_frames = opt.get_int('num_frames')
self.question_path = os.path.join(
opt.get_string('question_path'),
f'{split.capitalize()}_{self.task}_question.pkl'
)
self.feature_path = opt.get_string('feature_path')
self.use_bbox_features = opt.get_bool('use_bbox_features')
self.c3d_feature_path = opt.get_string('c3d_feature_path')
if self.use_bbox_features:
logger.info('Using bbox features!')
self.bbox_features_path = opt.get_string('bbox_feature_path')
self.bbox_features = NpyFile(self.bbox_features_path)
# self.label_path = opt.get_string('label_path')
# self.labels = NpyFile(self.label_path)
# self.score_path = opt.get_string('score_path')
# self.scores = NpyFile(self.score_path)
self.bbox_path = opt.get_string('bbox_path')
self.bbox = NpyFile(self.bbox_path)
self.num_bbox = opt.get_int('num_bbox')
if self.c3d_feature_path is not None:
self.c3d_features = NpyFile(self.c3d_feature_path)
self.frame_range = np.arange(self.num_frames)
self.samples = self._load_questions()
self.features = self._load_feature()
def __getitem__(self, index: int):
sample = self.samples[index]
question = sample['question_ids']
question_length = len(question)
answer_id = sample['answer_id']
gif_name = sample['gif_name']
question_chars = sample['question_chars']
features = self.features[gif_name]
num_frames = features.shape[0]
if self.c3d_feature_path is not None:
c3d_features = self.c3d_features[gif_name]
num_frames = min(c3d_features.shape[0], num_frames)
else:
c3d_features = torch.zeros(1)
feature_indices = self._resample(num_frames)
features = features[feature_indices]
if self.c3d_feature_path is not None:
c3d_features = c3d_features[feature_indices]
c3d_features = torch.from_numpy(c3d_features)
if self.task in ['action', 'transition']:
answers = [torch.LongTensor(answer)
for answer in sample['answer_ids']]
answer_chars = sample['answer_chars']
else:
answers = [torch.LongTensor([0]) for _ in range(5)]
answer_chars = [torch.LongTensor([0]) for _ in range(5)]
answer_lengths = [len(answer) for answer in answers]
features = torch.from_numpy(features)
question = torch.LongTensor(question)
question_chars = torch.LongTensor(question_chars)
if self.use_bbox_features:
bbox_features = self.bbox_features[gif_name][feature_indices, :self.num_bbox]
bbox = self.bbox[gif_name][feature_indices, :self.num_bbox]
# scores = self.scores[gif_name][feature_indices, :self.num_bbox]
# labels = self.labels[gif_name][feature_indices, :self.num_bbox]
bbox_features = torch.from_numpy(bbox_features)
bbox = torch.from_numpy(bbox)
# scores = torch.from_numpy(scores)
# labels = torch.from_numpy(labels)
else:
bbox_features = torch.FloatTensor([0])
bbox = torch.FloatTensor([0])
# scores = torch.FloatTensor([0])
# labels = torch.LongTensor([0])
return (
question, question_length, question_chars,
answers[0], answer_lengths[0], answer_chars[0],
answers[1], answer_lengths[1], answer_chars[1],
answers[2], answer_lengths[2], answer_chars[2],
answers[3], answer_lengths[3], answer_chars[3],
answers[4], answer_lengths[4], answer_chars[4],
features, c3d_features, bbox_features, bbox,
answer_id
)
def __len__(self):
return len(self.samples)
def _resample(self, n: int) -> np.ndarray:
gap = n / self.num_frames
new_range = gap * self.frame_range
if self.split == 'train' and n > self.num_frames:
new_range += random.random() * gap
new_range = new_range.astype(np.int64)
return new_range
def _load_questions(self):
self.logger.info(f'loading questions from {self.question_path}')
with open(self.question_path, 'rb') as f:
return pickle.load(f)
def _load_feature(self):
return NpyFile(self.feature_path)
    def _load_bbox_features(self):
        # NOTE: unused, unfinished helper; bbox features are read via NpyFile in __init__
        splits = os.listdir(self.bbox_features_path)
def cat_into_shared_memory(batch: List[torch.Tensor]):
out = None
elem = batch[0]
if get_worker_info() is not None:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = elem.storage()._new_shared(numel)
out = elem.new(storage)
return torch.stack(batch, 0, out=out)
def collate_fn(batch):
(
question, question_length, question_chars,
a1, a1_length, a1_chars,
a2, a2_length, a2_chars,
a3, a3_length, a3_chars,
a4, a4_length, a4_chars,
a5, a5_length, a5_chars,
features, c3d_features, bbox_features, bbox,
answer
) = zip(*batch)
question = pad_sequence(question, batch_first=True)
question_length = torch.LongTensor(question_length)
question_chars = pad_sequence(question_chars, batch_first=True)
a1 = pad_sequence(a1, batch_first=True)
a1_length = torch.LongTensor(a1_length)
a1_chars = pad_sequence(a1_chars, batch_first=True)
a2 = pad_sequence(a2, batch_first=True)
a2_length = torch.LongTensor(a2_length)
a2_chars = pad_sequence(a2_chars, batch_first=True)
a3 = pad_sequence(a3, batch_first=True)
a3_length = torch.LongTensor(a3_length)
a3_chars = pad_sequence(a3_chars, batch_first=True)
a4 = pad_sequence(a4, batch_first=True)
a4_length = torch.LongTensor(a4_length)
a4_chars = pad_sequence(a4_chars, batch_first=True)
a5 = pad_sequence(a5, batch_first=True)
a5_length = torch.LongTensor(a5_length)
a5_chars = pad_sequence(a5_chars, batch_first=True)
features = cat_into_shared_memory(features)
c3d_features = cat_into_shared_memory(c3d_features)
bbox_features = cat_into_shared_memory(bbox_features)
bbox = cat_into_shared_memory(bbox)
# scores = cat_into_shared_memory(scores)
# labels = cat_into_shared_memory(labels)
answer = torch.tensor(answer)
return (
question, question_length, question_chars,
a1, a1_length, a1_chars,
a2, a2_length, a2_chars,
a3, a3_length, a3_chars,
a4, a4_length, a4_chars,
a5, a5_length, a5_chars,
features, c3d_features, bbox_features, bbox,
answer
)
def get_dataset(opt: ConfigTree, logger: Logger) -> (Dataset, Dataset):
train_set = VQAFeatureDataset(opt, logger, 'train')
test_set = VQAFeatureDataset(opt, logger, 'test')
return train_set, test_set
|
SunDoge/L-GCN
|
utils/const.py
|
<reponame>SunDoge/L-GCN
MSVD_QTYPES = [
'what', 'who', 'how', 'when', 'where'
]
MSVD_QTYPE_DICT = {k: v for v, k in enumerate(MSVD_QTYPES)}
# ingore index for crossentropy
IGNORE_INDEX = -100
|
SunDoge/L-GCN
|
dataset/__init__.py
|
from logging import Logger
from pyhocon import ConfigTree
from torch.utils.data import DataLoader
# from torchpie.environment import args
from arguments import args
from .tgifqa_dataset import collate_fn
def get_dataloader(opt: ConfigTree, logger: Logger) -> (DataLoader, DataLoader):
if opt.get_string('task') in ['msvd', 'youtube2text']:
from .msvd_dataset import get_dataset
else:
from .tgifqa_dataset import get_dataset
train_set, test_set = get_dataset(opt.get_config('dataset'), logger)
train_loader = DataLoader(
train_set,
batch_size=opt.get_int('batch_size'),
shuffle=True,
num_workers=0 if args.debug else opt.get_int('num_workers'),
pin_memory=True,
collate_fn=collate_fn
)
test_loader = DataLoader(
test_set,
batch_size=opt.get_int('batch_size'),
shuffle=False,
num_workers=0 if args.debug else opt.get_int('num_workers'),
pin_memory=True,
collate_fn=collate_fn
)
return train_loader, test_loader
|
SunDoge/L-GCN
|
scripts/merge_box_scores_and_labels.py
|
import torch
from typed_args import TypedArgs, add_argument
import argparse
import os
from typing import Dict, List, NewType, Optional
from tqdm import tqdm
from numpy.lib.format import open_memmap
import numpy as np
from utils.io import dump_pickle
from dataclasses import dataclass
SplitData = NewType('SplitData', Dict[str, List[Dict[str, torch.Tensor]]])
# class Args(TypedArgs):
# def __init__(self):
# parser = argparse.ArgumentParser()
# self.bboxes = parser.add_argument(
# '--bboxes'
# )
# self.output = parser.add_argument(
# '-o', '--output'
# )
# self.num_bboxes: int = parser.add_argument(
# '-n', '--num-bboxes', type=int, default=10
# )
# self.parse_args_from(parser)
@dataclass
class Args(TypedArgs):
bboxes: str = add_argument(
'--bboxes',
help='folder containing all split{n}.pt'
)
output: str = add_argument(
'-o', '--output',
help='output dir'
)
num_bboxes: int = add_argument(
'-n', '--num-bboxes', default=5,
help='10 for ablation study, 5 is enough'
)
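# Example invocation (illustrative only; the paths are hypothetical):
#   python scripts/merge_box_scores_and_labels.py --bboxes data/tgif/bbox_splits -o data/tgif/bbox -n 5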
def load_bboxes(args: Args) -> List[SplitData]:
splits = os.listdir(args.bboxes)
# splits.remove('split5.pt.bak')
splits = sorted(splits)
print(splits)
for split in tqdm(splits):
data = torch.load(os.path.join(args.bboxes, split))
yield data
def count_frames(args: Args):
num_frames = 0
for data in load_bboxes(args):
for k, v in data.items():
num_frames += len(v)
print(f'total frames: {num_frames}')
return num_frames
def main(args: Args):
os.makedirs(args.output, exist_ok=True)
num_frames = count_frames(args)
data = load_bboxes(args)
# scores = dict()
# labels = dict()
# fp_scores = open_memmap(
# os.path.join(args.output, 'scores.npy'),
# mode='w+',
# dtype=np.float32,
# shape=(num_frames, args.num_bboxes)
# )
# fp_labels = open_memmap(
# os.path.join(args.output, 'labels.npy'),
# mode='w+',
# dtype=np.int64,
# shape=(num_frames, args.num_bboxes)
# )
# We don't need scores and labels
fp_bbox = open_memmap(
os.path.join(args.output, 'data.npy'),
mode='w+',
dtype=np.float32,
shape=(num_frames, args.num_bboxes, 4)
)
indices = dict()
index = 0
for split_data in data:
for key, value in tqdm(split_data.items()):
score_list = []
label_list = []
bbox_list = []
num_frames = len(value)
for frame in value:
frame_labels = frame['labels']
frame_scores = frame['scores']
frame_bbox = frame['bbox']
N = frame_labels.shape[0]
N = min(N, args.num_bboxes)
# print(frame_labels.shape)
# print(frame_scores.shape)
# exit()
new_labels = torch.empty(
(args.num_bboxes,), dtype=frame_labels.dtype).fill_(-1)
new_scores = torch.zeros((args.num_bboxes,))
new_bbox = torch.zeros((args.num_bboxes, 4))
new_labels[:N] = frame_labels[:N]
new_scores[:N] = frame_scores[:N]
new_bbox[:N] = frame_bbox[:N]
label_list.append(new_labels)
score_list.append(new_scores)
bbox_list.append(new_bbox)
labels = torch.stack(label_list).numpy()
scores = torch.stack(score_list).numpy()
bbox = torch.stack(bbox_list).numpy()
indices[key] = [index, num_frames]
# fp_scores[index: index + num_frames] = scores
# fp_labels[index: index + num_frames] = labels
fp_bbox[index: index + num_frames] = bbox
index += num_frames
# torch.save(scores, 'scores.pt')
# torch.save(labels, 'labels.pt')
del fp_bbox
# del fp_labels
# del fp_scores
dump_pickle(indices, os.path.join(args.output, 'indices.pkl'))
if __name__ == "__main__":
args = Args()
main(args)
|
SunDoge/L-GCN
|
model/embedding.py
|
<gh_stars>10-100
import torch
import torch.nn.functional as F
from torch import nn
# https://github.com/BangLiu/QANet-PyTorch/blob/master/model/QANet.py
class Highway(nn.Module):
def __init__(self, layer_num, size):
super().__init__()
self.n = layer_num
self.linear = nn.ModuleList(
[nn.Linear(size, size) for _ in range(self.n)])
self.gate = nn.ModuleList([nn.Linear(size, size)
for _ in range(self.n)])
def forward(self, x):
        # x: shape [batch_size, length, hidden_size]
dropout = 0.1
for i in range(self.n):
gate = torch.sigmoid(self.gate[i](x))
nonlinear = self.linear[i](x)
nonlinear = F.dropout(nonlinear, p=dropout, training=self.training)
x = gate * nonlinear + (1 - gate) * x
return x
class Embedding(nn.Module):
def __init__(self, wemb_dim, cemb_dim, d_model,
dropout_w=0.1, dropout_c=0.05):
super().__init__()
self.conv2d = nn.Conv2d(
cemb_dim, d_model, kernel_size=(1, 5), padding=0, bias=True)
nn.init.kaiming_normal_(self.conv2d.weight, nonlinearity='relu')
self.conv1d = nn.Linear(wemb_dim + d_model, d_model, bias=False)
self.high = Highway(2, d_model)
self.dropout_w = dropout_w
self.dropout_c = dropout_c
def forward(self, ch_emb, wd_emb):
# [B, Tw, Tc, C] -> [B, C, Tw, Tc]
ch_emb = ch_emb.permute(0, 3, 1, 2)
ch_emb = F.dropout(ch_emb, p=self.dropout_c, training=self.training)
ch_emb = self.conv2d(ch_emb)
ch_emb = F.relu(ch_emb)
ch_emb, _ = torch.max(ch_emb, dim=3)
wd_emb = F.dropout(wd_emb, p=self.dropout_w, training=self.training)
# wd_emb = wd_emb.transpose(1, 2)
ch_emb = ch_emb.transpose(1, 2)
emb = torch.cat([ch_emb, wd_emb], dim=2)
emb = self.conv1d(emb)
emb = self.high(emb)
return emb
|
SunDoge/L-GCN
|
model/__init__.py
|
from torch import nn
from pyhocon import ConfigTree
from .lgcn import LGCN
# from torchpie.config import config
# from torchpie.logging import logger
from utils.config import config
import logging
logger = logging.getLogger(__name__)
def get_model(
vocab_size: int,
char_vocab_size: int,
num_classes: int
) -> nn.Module:
arch = config.get_string('arch')
logger.info(f'Using model: {arch}')
if arch == 'abc':
opt = config.get_config('abc')
opt.put('vocab_size', vocab_size)
opt.put('num_classes', num_classes)
opt.put('char_vocab_size', char_vocab_size)
model = LGCN(opt)
else:
raise Exception(f'No such arch: {arch}')
return model
|
SunDoge/L-GCN
|
model/lgcn.py
|
<reponame>SunDoge/L-GCN
import torch
import torch.nn.functional as F
from pyhocon import ConfigTree
from torch import nn
# from torchpie.config import config
# from torchpie.logging import logger
from utils.config import config
import logging
logger = logging.getLogger(__name__)
from .attention import BidafAttn
from .rnn import RNNEncoder
from .embedding import Embedding
from .gcn import GCN
from .PosEmbed import positionalencoding1d
import ipdb
class LGCN(nn.Module):
multiple_choice_tasks = ['action', 'transition']
def __init__(self, opt: ConfigTree):
super().__init__()
self.opt = opt
self.vocab_size = opt.get_int('vocab_size')
self.char_vocab_size = opt.get_int('char_vocab_size')
self.hidden_size = opt.get_int('hidden_size')
self.video_channels = opt.get_int('video_channels')
self.c3d_channels = opt.get_int('c3d_channels')
self.position_dim = 128
self.num_classes = opt.get_int('num_classes')
self.task = opt.get_string('task')
self.num_frames = opt.get_int('num_frames')
self.pooling = opt.get_string('pooling')
self.use_char_embedding = opt.get_bool('character_embedding')
self.use_gcn = opt.get_bool('use_gcn')
self.use_c3d = opt.get_bool('use_c3d')
self.use_bbox = opt.get_bool('use_bbox')
self.use_bboxPos = opt.get_bool('use_bboxPos')
self.use_framePos = opt.get_bool('use_framePos')
self.use_image = opt.get_bool('use_image')
self.use_boxFC = opt.get_bool('use_boxFC')
self.use_boxLSTM = opt.get_bool('use_boxLSTM')
self.num_box = opt.get_int('num_bbox')
self.node_dim = opt.get_int('gcn.node_dim')
self.is_multiple_choice = self.task in self.multiple_choice_tasks or opt.get_bool(
'is_multiple_choice')
logger.warning(f'self.is_multiple_choice: {self.is_multiple_choice}')
logger.warning(f'Using {self.num_box} boxes!')
if 'embedding_path' not in opt:
self.embedding = nn.Embedding(self.vocab_size, 300)
logger.info('Init embedding randomly.')
else:
embedding_path = opt.get_string('embedding_path')
self.embedding = nn.Embedding.from_pretrained(
torch.load(embedding_path), freeze=False
)
logger.info(f'Using pretrained embedding: {embedding_path}')
if self.use_char_embedding:
self.char_embedding = nn.Embedding(self.char_vocab_size, 64)
self.mix_embedding = Embedding(300, 64, 300)
logger.info('Using char embedding!')
self.out_features = self.hidden_size * 2
if self.use_bbox:
node_dim = self.node_dim
node_dim += self.position_dim if self.use_bboxPos else 0
node_dim += self.position_dim if self.use_framePos else 0
self.gcn_fc = nn.Sequential(
nn.Linear(node_dim, self.out_features),
nn.ELU(inplace=True),
)
if self.use_bboxPos:
self.bbox_fc = nn.Sequential(
nn.Conv2d(4, 64, kernel_size=1),
nn.ReLU(),
nn.BatchNorm2d(64),
# nn.Dropout(0.5),
nn.Conv2d(64, 128, kernel_size=1),
nn.BatchNorm2d(128),
nn.ReLU(),
# nn.Dropout(0.5)
)
logger.info("Using bboxPos")
if self.use_framePos:
self.framePos = positionalencoding1d(128, self.num_frames)
self.framePos = self.framePos.unsqueeze(
1).expand(-1, self.num_box, -1).cuda()
logger.info("Using framePos")
if self.use_gcn:
logger.info('Init GCN')
self.gcn = GCN(
self.out_features,
self.out_features,
self.out_features,
0.5,
opt.get_list('gcn.mode'),
True,
opt.get_int('gcn.num_layers'),
ST_n_next=opt.get_int('gcn.ST_n_next')
)
else:
logger.warning('Use bbox only')
if self.use_boxFC:
self.boxFC = nn.Sequential(
nn.Linear(self.out_features, self.out_features),
nn.ELU(inplace=True),
)
if self.use_boxLSTM:
self.boxLSTM = nn.LSTM(self.out_features, int(self.out_features/2),
num_layers=1,
batch_first=True,
bidirectional=True,
dropout=0)
if self.use_c3d:
logger.warning('Use c3d')
self.c3d_fc = nn.Sequential(
nn.Conv1d(self.c3d_channels,
self.out_features, 3, padding=1),
nn.ELU(inplace=True)
)
self.num_streams = sum([self.use_image, self.use_bbox, self.use_c3d])
self.merge = nn.Sequential(
nn.Linear(self.out_features * self.num_streams, self.out_features),
nn.ELU(inplace=True)
)
self.lstm_raw = RNNEncoder(
300, self.hidden_size, bidirectional=True, num_layers=1, rnn=nn.LSTM)
self.video_fc = nn.Sequential(
nn.Conv1d(self.video_channels, self.out_features, 3, padding=1),
nn.ELU(inplace=True)
)
self.attention = BidafAttn(None, method='dot')
if self.is_multiple_choice:
self.lstm_input_size = self.out_features * 5
else:
self.lstm_input_size = self.out_features * 3
self.lstm_mature = RNNEncoder(
self.lstm_input_size,
self.out_features,
bidirectional=True,
num_layers=1,
rnn=nn.LSTM
)
self.classifier = nn.Sequential(
nn.Linear(self.out_features * 2, self.num_classes)
)
def forward(
self,
question, question_length, question_chars,
a1, a1_length, a1_chars,
a2, a2_length, a2_chars,
a3, a3_length, a3_chars,
a4, a4_length, a4_chars,
a5, a5_length, a5_chars,
features, c3d_features, bbox_features, bbox
):
# import ipdb; ipdb.set_trace()
# B, T, N, _ = bbox_features.shape
# assert N == self.num_box
B = question.shape[0]
if self.use_bbox:
video_length = torch.tensor(
[self.num_frames * self.num_box] * B, dtype=torch.long)
else:
video_length = torch.tensor(
[self.num_frames] * B, dtype=torch.long)
question_embedding = self.embedding(question)
if self.use_char_embedding:
question_chars = self.char_embedding(question_chars)
question_embedding = self.mix_embedding(
question_chars, question_embedding)
if self.is_multiple_choice:
a1_embedding = self.embedding(a1)
a2_embedding = self.embedding(a2)
a3_embedding = self.embedding(a3)
a4_embedding = self.embedding(a4)
a5_embedding = self.embedding(a5)
if self.use_char_embedding:
a1_chars = self.char_embedding(a1_chars)
a2_chars = self.char_embedding(a2_chars)
a3_chars = self.char_embedding(a3_chars)
a4_chars = self.char_embedding(a4_chars)
a5_chars = self.char_embedding(a5_chars)
a1_embedding = self.mix_embedding(a1_chars, a1_embedding)
a2_embedding = self.mix_embedding(a2_chars, a2_embedding)
a3_embedding = self.mix_embedding(a3_chars, a3_embedding)
a4_embedding = self.mix_embedding(a4_chars, a4_embedding)
a5_embedding = self.mix_embedding(a5_chars, a5_embedding)
raw_out_question, _ = self.lstm_raw(
question_embedding, question_length)
if self.is_multiple_choice:
raw_out_a1, _ = self.lstm_raw(a1_embedding, a1_length)
raw_out_a2, _ = self.lstm_raw(a2_embedding, a2_length)
raw_out_a3, _ = self.lstm_raw(a3_embedding, a3_length)
raw_out_a4, _ = self.lstm_raw(a4_embedding, a4_length)
raw_out_a5, _ = self.lstm_raw(a5_embedding, a5_length)
video_embedding = self.video_fc(
features.transpose(1, 2)).transpose(1, 2)
if self.use_bbox:
video_embedding = video_embedding.unsqueeze(
2).expand(-1, -1, self.num_box, -1).reshape(B, -1, self.out_features)
streams = []
if self.use_image:
streams.append(video_embedding)
if self.use_c3d:
c3d_embedding = self.c3d_fc(
c3d_features.transpose(1, 2)
).transpose(1, 2)
if self.use_bbox:
c3d_embedding = c3d_embedding.unsqueeze(
2).expand(-1, -1, self.num_box, -1).reshape(B, -1, self.out_features)
streams.append(c3d_embedding)
if self.use_bbox:
"""bboxPos and framePos"""
if self.use_bboxPos:
bbox_pos = self.bbox_fc(bbox.permute(
0, 3, 1, 2)).permute(0, 2, 3, 1)
bbox_features = torch.cat(
[bbox_features, bbox_pos], dim=-1)
if self.use_framePos:
framePos = self.framePos.unsqueeze(0).expand(B, -1, -1, -1)
bbox_features = torch.cat(
[bbox_features, framePos], dim=-1)
bbox_features = self.gcn_fc(bbox_features)
bbox_features = bbox_features.view(B, -1, self.out_features)
if self.use_gcn:
bbox_features = self.gcn(bbox_features, video_length, bbox)
if self.use_boxFC:
bbox_features = self.boxFC(bbox_features)
if self.use_boxLSTM:
bbox_features, _ = self.boxLSTM(bbox_features)
streams.append(bbox_features)
assert len(streams) != 0
streams = torch.cat(streams, dim=-1)
video_embedding = self.merge(streams)
u_q, _ = self.attention(
video_embedding, video_length, raw_out_question, question_length)
if self.is_multiple_choice:
u_a1, _ = self.attention(
video_embedding, video_length, raw_out_a1, a1_length)
u_a2, _ = self.attention(
video_embedding, video_length, raw_out_a2, a2_length)
u_a3, _ = self.attention(
video_embedding, video_length, raw_out_a3, a3_length)
u_a4, _ = self.attention(
video_embedding, video_length, raw_out_a4, a4_length)
u_a5, _ = self.attention(
video_embedding, video_length, raw_out_a5, a5_length)
concat_a1 = torch.cat(
[video_embedding, u_a1, u_q, u_a1 * video_embedding, u_q * video_embedding], dim=-1
)
concat_a2 = torch.cat(
[video_embedding, u_a2, u_q, u_a2 * video_embedding, u_q * video_embedding], dim=-1
)
concat_a3 = torch.cat(
[video_embedding, u_a3, u_q, u_a3 * video_embedding, u_q * video_embedding], dim=-1
)
concat_a4 = torch.cat(
[video_embedding, u_a4, u_q, u_a4 * video_embedding, u_q * video_embedding], dim=-1
)
concat_a5 = torch.cat(
[video_embedding, u_a5, u_q, u_a5 * video_embedding, u_q * video_embedding], dim=-1
)
mature_out_a1, _ = self.lstm_mature(concat_a1, video_length)
mature_out_a2, _ = self.lstm_mature(concat_a2, video_length)
mature_out_a3, _ = self.lstm_mature(concat_a3, video_length)
mature_out_a4, _ = self.lstm_mature(concat_a4, video_length)
mature_out_a5, _ = self.lstm_mature(concat_a5, video_length)
            mature_maxout_a1 = self._pooling(mature_out_a1, keepdim=True)
            mature_maxout_a2 = self._pooling(mature_out_a2, keepdim=True)
            mature_maxout_a3 = self._pooling(mature_out_a3, keepdim=True)
            mature_maxout_a4 = self._pooling(mature_out_a4, keepdim=True)
            mature_maxout_a5 = self._pooling(mature_out_a5, keepdim=True)
            mature_answers = torch.cat(
                [mature_maxout_a1, mature_maxout_a2, mature_maxout_a3,
                 mature_maxout_a4, mature_maxout_a5],
                dim=1
            )
out = self.classifier(mature_answers)
out = out.squeeze()
return out
else:
concat_q = torch.cat(
[video_embedding, u_q, video_embedding * u_q], dim=-1
)
mature_out_q, _ = self.lstm_mature(concat_q, video_length)
mature_maxout_q = self._pooling(mature_out_q, keepdim=False)
out = self.classifier(mature_maxout_q)
if self.task == 'count':
# out = out.round().clamp(1, 10).squeeze()
# out = torch.sigmoid(out)
out = out.squeeze()
return out
def _pooling(self, x: torch.Tensor, keepdim=False) -> torch.Tensor:
if self.pooling == 'max':
out, _ = x.max(1, keepdim=keepdim)
elif self.pooling == 'mean':
out = x.mean(1, keepdim=keepdim)
elif self.pooling == 'sum':
out = x.sum(1, keepdim=keepdim)
else:
raise Exception(f'No such pooling: {self.pooling}')
return out
|
SunDoge/L-GCN
|
utils/dictionary.py
|
import re
import pickle
from typing import Dict, List
import spacy
import numpy as np
import torch
class Dictionary:
def __init__(self, word2idx=None, idx2word=None):
if word2idx is None:
word2idx = {'<pad>': 0, '<unk>': 1, '<eos>': 2}
if idx2word is None:
idx2word = list(word2idx.keys())
self.word2idx = word2idx
self.idx2word = idx2word
self.nlp = spacy.load('en')
def _tokenize(self, sentence: str) -> List[str]:
sentence = sentence.lower()
sentence = sentence.replace('.', ' . ')
doc = self.nlp.tokenizer(sentence)
tokens = [token.text for token in doc if not token.is_space]
tokens.append('<eos>')
return tokens
def tokenize(self, sentence: str, add_word: bool = False, extra_dict: Dict[str, int] = None):
# print(self.word2idx)
# sentence = sentence.lower()
# sentence = sentence.replace(',', '').replace(
# '?', '').replace('\'s', ' \'s')
# words = sentence.split()
# words = self.nlp.tokenizer(sentence)
words = self._tokenize(sentence)
tokens = []
if add_word:
for w in words:
if extra_dict is None:
tokens.append(self.add_word(w))
else:
if w in extra_dict:
tokens.append(self.add_word(w))
else:
for w in words:
# print(words)
# w = w.lower()
if w in self.word2idx:
tokens.append(self.word2idx[w])
else:
tokens.append(self.word2idx['<unk>'])
return tokens
def dump_to_file(self, path):
with open(path, 'wb') as f:
pickle.dump([self.word2idx, self.idx2word], f)
print('dictionary dumped to %s' % path)
@classmethod
def load_from_file(cls, path):
print('loading dictionary from %s' % path)
with open(path, 'rb') as f:
word2idx, idx2word = pickle.load(f)
d = cls(word2idx, idx2word)
return d
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
class CharDictionary(Dictionary):
def __init__(self, word2idx=None, idx2word=None, max_length=16):
if word2idx is None:
word2idx = {'<pad>': 0, '<oov>': 1}
if idx2word is None:
idx2word = list(word2idx.keys())
self.max_length = max_length
super().__init__(word2idx=word2idx, idx2word=idx2word)
def tokenize(self, sentence, add_word=False, extra_dict=None):
words = self._tokenize(sentence)
tokens = torch.zeros(len(words), self.max_length, dtype=torch.long)
if add_word:
for w in words:
if extra_dict is None:
# tokens.append(self.add_word(w))
for c in w[:self.max_length]:
self.add_word(c)
else:
if w in extra_dict:
# tokens.append(self.add_word(w))
for c in w[:self.max_length]:
self.add_word(c)
else:
for wi, w in enumerate(words):
# print(words)
for ci, c in enumerate(w):
if ci < self.max_length:
                        if c in self.word2idx:
                            tokens[wi, ci] = self.word2idx[c]
                        else:
                            # the char dictionary's fallback token is '<oov>' (it has no '<unk>' entry)
                            tokens[wi, ci] = self.word2idx['<oov>']
return tokens
def clean_str(string, downcase=True):
"""
Currently we don't use it.
Tokenization/string cleaning for strings.
Taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`(_____)]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower() if downcase else string.strip()
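# Illustrative example, derived from the substitutions above (not part of the
# original module):
#   clean_str("I don't know, really!")  ->  "i do n't know , really !"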
|
SunDoge/L-GCN
|
scripts/extract_bboxes_with_maskrcnn.py
|
import argparse
import os
from collections import defaultdict
from typing import Dict, List
import ipdb
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from torch import nn
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from yacs.config import CfgNode
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.image_list import ImageList, to_image_list
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from utils.data import VideoDataset
from utils.io import dump_pickle, load_image, load_pickle
import numpy as np
# MSVD = True
# class Args(TypedArgs):
# def __init__(self):
# parser = argparse.ArgumentParser()
# self.frame_path = parser.add_argument(
# '-f', '--frame-path',
# )
# self.output = parser.add_argument(
# '-o', '--output'
# )
# self.batch_size = parser.add_argument(
# '-b', '--batch-size', type=int, default=16
# )
# self.num_workers = parser.add_argument(
# '-n', '--num-workers', type=int, default=4
# )
# self.parse_args_from(parser)
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--frame-path', help='path to frames')
parser.add_argument('-o', '--output', help='path to output pt file')
parser.add_argument('-b', '--batch-size', default=16)
parser.add_argument('-n', '--num-workers', type=int, default=4)
parser.add_argument('--msvd', action='store_true', help='for MSVD-QA')
parser.add_argument(
'-c', '--config', help='path to e2e_mask_rcnn_R_101_FPN_1x_caffe2.yaml')
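# Example invocation (illustrative only; the paths are hypothetical):
#   python scripts/extract_bboxes_with_maskrcnn.py -f data/tgif/frame_dirs.pkl -o data/tgif/bboxes/split0.pt \
#       -c configs/caffe2/e2e_mask_rcnn_R_101_FPN_1x_caffe2.yaml -b 16 -n 4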
class Resize(object):
def __init__(self, min_size, max_size):
self.min_size = min_size
self.max_size = max_size
# modified from torchvision to add support for max size
def get_size(self, image_size):
w, h = image_size
size = self.min_size
max_size = self.max_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(
round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
def __call__(self, image):
size = self.get_size(image.size)
image = F.resize(image, size)
return image
class FrameDataset(Dataset):
def __init__(self, cfg: CfgNode, samples: List[str]):
self.cfg = cfg
self.transform = self.build_transform()
self.samples: List[str] = samples
def __len__(self):
return len(self.samples)
def __getitem__(self, index):
sample = self.samples[index]
image = load_image(sample)
image = self.transform(image)
return image
def build_transform(self):
cfg = self.cfg
to_bgr = T.Lambda(lambda x: x[[2, 1, 0]] * 255)
normalize = T.Normalize(
cfg.INPUT.PIXEL_MEAN,
cfg.INPUT.PIXEL_STD
)
transform = T.Compose([
Resize(cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST),
T.ToTensor(),
to_bgr,
normalize
])
return transform
# def set_samples(self, samples):
# self.samples = samples
class GIFDataset(Dataset):
def __init__(self, cfg: CfgNode, args):
self.cfg = cfg
self.args = args
self.samples: List[str] = load_pickle(args.frame_path)
self.video_dict: Dict[str, List[str]] = defaultdict(list)
self.videos = []
for sample in tqdm(self.samples):
gif_name = sample.split('/')[-1]
self.videos.append(gif_name)
num_frames = len(os.listdir(sample))
# if MSVD:
if args.msvd:
                selected_frames = np.linspace(
                    0, num_frames, 20 + 2)[1:20 + 1].astype(int) + 1
for n in selected_frames:
self.video_dict[gif_name].append(
# os.path.join(sample, f'{n}.jpg') # For TGIF-QA
os.path.join(sample, f'{n + 1:06}.jpg') # For MSVD-QA
)
else:
for n in range(num_frames):
self.video_dict[gif_name].append(
os.path.join(sample, f'{n}.jpg') # For TGIF-QA
# os.path.join(sample, f'{n + 1:06}.jpg') # For MSVD-QA
)
# self.frame_dataset = FrameDataset(cfg)
def __getitem__(self, index):
gif_name = self.videos[index]
# self.frame_dataset.set_samples(self.video_dict[gif_name])
dataset = FrameDataset(self.cfg, self.video_dict[gif_name])
loader = DataLoader(
dataset,
batch_size=self.args.batch_size,
shuffle=False,
num_workers=self.args.num_workers,
pin_memory=True,
collate_fn=collate_fn
)
return loader, gif_name
def __len__(self):
return len(self.samples)
def collate_fn(batch: List[torch.Tensor]) -> ImageList:
return to_image_list(batch, size_divisible=cfg.DATALOADER.SIZE_DIVISIBILITY)
class Extractor:
def __init__(self, cfg: CfgNode, args):
self.args = args
self.cfg = cfg.clone()
self.model: nn.Module = build_detection_model(cfg)
self.model.eval()
self.device = torch.device(cfg.MODEL.DEVICE)
self.model.to(self.device)
# Load weight
save_dir = cfg.OUTPUT_DIR
checkpointer = DetectronCheckpointer(
cfg, self.model, save_dir=save_dir
)
_ = checkpointer.load(cfg.MODEL.WEIGHT)
self.cpu_device = torch.device('cpu')
        # Keep all results
self.confidence_threshold = 0.
self.datasets = GIFDataset(cfg, args)
self.results: Dict[str, List] = defaultdict(list)
@torch.no_grad()
def compute_predictions(self, image_list: ImageList) -> List[BoxList]:
image_list = image_list.to(self.device)
predictions = self.model(image_list)
return predictions
def run_once(self):
for dataset, gif_name in self.datasets:
# ipdb.set_trace()
loader = DataLoader(
dataset,
batch_size=self.args.batch_size,
shuffle=False,
num_workers=0,
pin_memory=True,
collate_fn=collate_fn
)
for image_list in loader:
predictions = self.compute_predictions(image_list)
self.save_predictions(gif_name, predictions)
ipdb.set_trace()
break
break
self.dump_result()
def run(self):
dataset_loader = DataLoader(
self.datasets,
batch_size=1,
collate_fn=lambda x: x[0]
)
for loader, gif_name in tqdm(dataset_loader):
# ipdb.set_trace()
for image_list in tqdm(loader):
predictions = self.compute_predictions(image_list)
self.save_predictions(gif_name, predictions)
self.dump_result()
def save_predictions(self, gif_name: str, predictions: List[BoxList]):
# preds = [
# {
# 'bbox': pred.bbox,
# 'scores': pred.scores,
# 'labels': pred.labels,
# }
# for pred in predictions
# ]
for pred in predictions:
pred: BoxList = pred.to(self.cpu_device).resize((1, 1))
self.results[gif_name].append(
{
'bbox': pred.bbox,
'scores': pred.get_field('scores'),
'labels': pred.get_field('labels'),
}
)
def dump_result(self):
torch.save(self.results, self.args.output)
def load_config(config_file='/home/huangdeng/Code/python/maskrcnn/maskrcnn-benchmark/configs/caffe2/e2e_mask_rcnn_R_101_FPN_1x_caffe2.yaml') -> CfgNode:
cfg.merge_from_file(config_file)
cfg.merge_from_list(["MODEL.MASK_ON", False])
return cfg
def main(args):
cfg = load_config(args.config)
extractor = Extractor(cfg, args)
# extractor.run_once()
extractor.run()
if __name__ == "__main__":
# args = Args()
args = parser.parse_args()
main(args)
|
flatsurf/ipyvue-async
|
setup.py
|
<reponame>flatsurf/ipyvue-async
# ******************************************************************************
# Copyright (c) 2021-2022 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ******************************************************************************
from __future__ import print_function
from setuptools import setup, find_packages
import os
from os.path import join as pjoin
from jupyter_packaging import (
create_cmdclass,
install_npm,
ensure_targets,
combine_commands,
)
here = os.path.dirname(os.path.abspath(__file__))
name = 'ipyvue-async'
LONG_DESCRIPTION = 'Asynchronous communication channels between Vue components in Jupyter and Python'
js_dir = pjoin(here, 'js')
# Representative files that should exist after a successful build
jstargets = [
pjoin(js_dir, 'dist', 'index.js'),
]
data_files_spec = [
('share/jupyter/nbextensions/ipyvue-async', 'ipyvue_async/nbextension', '*.*'),
('share/jupyter/labextensions/ipyvue-async', 'ipyvue_async/labextension', "**"),
("share/jupyter/labextensions/ipyvue-async", '.', "install.json"),
('etc/jupyter/nbconfig/notebook.d', '.', 'ipyvue-async.json'),
]
cmdclass = create_cmdclass('jsdeps', data_files_spec=data_files_spec)
cmdclass['jsdeps'] = combine_commands(
install_npm(js_dir, npm=['yarn'], build_cmd='build:prod'), ensure_targets(jstargets),
)
setup_args = dict(
name=name,
version="0.1.1",
description='Asynchronous communication channels between Vue components in Jupyter and Python',
long_description=LONG_DESCRIPTION,
include_package_data=True,
install_requires=[
'jupyter-ui-poll>=0.2.1,<0.3',
'ipyvue>=1.5.0',
],
packages=find_packages(),
zip_safe=False,
cmdclass=cmdclass,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/flatsurf/ipyvue-async',
keywords=[
'ipython',
'jupyter',
'widgets',
],
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: IPython',
"License :: OSI Approved :: MIT License",
'Intended Audience :: Developers',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
setup(**setup_args)
|
flatsurf/ipyvue-async
|
ipyvue_async/__init__.py
|
<reponame>flatsurf/ipyvue-async
#*******************************************************************************
# Copyright (c) 2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#*******************************************************************************
from .comm_widget import CommWidget
version_info = (0, 1, 1)
__version__ = "0.1.1"
def _jupyter_labextension_paths():
r"""
Called by JupyterLab to find out which JavaScript assets need to be copied.
"""
# The command `jupyter labextension build` creates a package in
# labextension/, see jupyterlab.outputDir in js/package.json
# These files are copied to extensions/ipyvue-async/ in
# JupyterLab when this package is pip-installed.
return [{
'src': 'labextension',
'dest': 'ipyvue-async',
}]
def _jupyter_nbextension_paths():
r"""
Called by Jupyter Notebook to find out which JavaScript assets need to be copied.
"""
# The command `yarn build:prod` creates JavaScript assets in nbextension/.
# These files need to be copied to the nbextensions/ipyvue-async/
# directory in Jupyter Notebook. The entrypoint in these files is
# extension.js.
return [{
'section': 'notebook',
'src': 'nbextension',
'dest': 'ipyvue-async',
'require': 'ipyvue-async/extension'
}]
|
flatsurf/ipyvue-async
|
ipyvue_async/force_load.py
|
#*******************************************************************************
# Copyright (c) 2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#*******************************************************************************
from traitlets import Unicode
from ipywidgets import DOMWidget
class ForceLoad(DOMWidget):
r"""
ipyvue-async includes this widget to force the `activate()` function in the
JavaScript part of ipyvue-async to run.
We need this to make sure that the `<comm/>` component gets
registered with Vue.js before any Vue code gets rendered by ipyvue.
"""
_model_name = Unicode('ForceLoadModel').tag(sync=True)
_model_module = Unicode('ipyvue-async').tag(sync=True)
_model_module_version = Unicode('^1.0.0').tag(sync=True)
force_load = ForceLoad()
|
flatsurf/ipyvue-async
|
ipyvue_async/comm_widget.py
|
#*******************************************************************************
# Copyright (c) 2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#*******************************************************************************
import asyncio
from ipywidgets.widgets.widget import widget_serialization
from ipywidgets import DOMWidget
from traitlets import Unicode, Any
from ipyvue_async.force_load import force_load
class CommWidget(DOMWidget):
r"""
A widget that has a direct Comm channel to each of its representations in the frontend.
Normally, widgets use the traitlet mechanism to talk between the
JavaScript frontend and the Python backend. While this is great for many
    applications, this kind of communication is too slow for real-time
    streaming of measurement data in practice. Comms are also easily saturated
    when using the traitlets mechanism, which makes it hard to keep everything
    responsive. Using a Comm directly allows us to have better control over
these communication channels. However, actually directly using a Comm can
be a bit tedious, so this class provides a widget with a wrapper, namely a
`Client` for each frontend widget that is attached to this widget.
"""
__force = Any(force_load, read_only=True).tag(sync=True, **widget_serialization)
target = Unicode().tag(sync=True)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.target = f"{self.model_id}-comm-widget"
# The currently registered clients, i.e., widget outputs in the frontend.
self._channels = set()
# A map of command names to callbacks.
self._commands = {
'register': self._register_client
}
# Start accepting connections from the frontend.
self._register_comm_target()
self._display_callbacks.register_callback(self._create_channel)
def _create_channel(self, *args, **kwargs):
from ipyvue_async.channel import Channel
self._channels.add(Channel(self.log))
async def call(self, target, endpoint, *args):
for channel in self._channels:
await channel.message("call", {
"target": target,
"endpoint": endpoint,
"args": args,
})
async def poll(self, coroutine):
future = asyncio.ensure_future(coroutine)
events = 1
delay = .001
import jupyter_ui_poll
async with jupyter_ui_poll.ui_events() as poll:
while not future.done():
await poll(events)
events = min(events + 1, 64)
await asyncio.sleep(delay)
# Wait for at most 250ms, the reaction time of most
# people, https://stackoverflow.com/a/44755058/812379.
delay = min(2*delay, .25)
return await future
async def query(self, target, endpoint, *args, return_when=asyncio.ALL_COMPLETED):
queries = [channel.query({
"target": target,
"endpoint": endpoint,
"args": args}) for channel in self._channels]
if not queries:
raise ValueError("Cannot query when there is nothing displayed in the frontend yet.")
results = [asyncio.get_running_loop().create_task(query) for query in queries]
done, pending = await asyncio.wait(results, return_when=return_when)
for awaitable in pending:
awaitable.cancel()
results = [result.result() for result in done]
if return_when == asyncio.FIRST_COMPLETED:
assert len(results) >= 1
return results[0]
else:
return results
def _receive(self, message):
r"""
Handle an incoming ``message``.
"""
self.log.debug(f'CommWidget received message: {message}')
try:
data = message.get("content", {}).get("data", {})
command = data.get("command", None)
if command not in self._commands:
raise NotImplementedError(f"Unsupported command {command}")
self._commands[command](data)
except Exception as e:
self.log.error(e)
def _register_client(self, data):
r"""
Called when the frontend sends a 'register' message.
Open a comm in the opposite direction to this specific widget and wrap
it in a client object.
        Note that clients are currently never deregistered. This is usually not
a big issue since all connections are essentially blocking and so
inactive clients do not consume bandwidth to the frontend.
"""
target = data["target"]
self.log.debug(f"Registering client for {target}")
from ipykernel.comm import Comm
comm = Comm(target, {})
for channel in self._channels:
if not channel.is_connected():
channel.connect(comm)
return
# This happens when a notebook is loaded with cells already
# executed, e.g., when refreshing in the browser or opening the
# same notebook twice.
# There is no hope to reconnect this frontend to an existing
# (orphaned) channel so we create a new one.
self._create_channel()
return self._register_client(data)
def _register_comm_target(self):
r"""
Register a name that the frontend can connect to with a Comm.
"""
def configure_comm(comm, open_msg):
r"""
Called when the initial message is received from the frontend.
"""
@comm.on_msg
def _recv(msg):
r"""
Called when any following message is received from the frontend.
"""
self._receive(msg)
# When doctesting, there is no kernel attached to the comm.
# Maybe there is a proper way to mock things, but here we just ignore
# this to make tests (in downstream projects) pass.
# See https://stackoverflow.com/a/22734497/812379
import sys
if '_pytest.doctest' in sys.modules:
return
if hasattr(sys.modules['__main__'], '_SpoofOut'):
return
if sys.modules['__main__'].__dict__.get('__file__', '').endswith('/pytest'):
return
self.comm.kernel.comm_manager.register_target(self.target, configure_comm)
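# Minimal usage sketch (illustrative only, not part of the original module).
# A concrete widget subclasses CommWidget and pairs it with a Vue component
# that registers itself through the <comm/> component; the widget, target and
# endpoint names below are hypothetical.
#
#     class PlotWidget(CommWidget):
#         pass
#
#     widget = PlotWidget()
#     # display(widget); once a view has rendered in the frontend:
#     # viewport = await widget.query("plot", "viewport")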
|
flatsurf/ipyvue-async
|
ipyvue_async/channel.py
|
<reponame>flatsurf/ipyvue-async<filename>ipyvue_async/channel.py
#*******************************************************************************
# Copyright (c) 2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#*******************************************************************************
import asyncio
class Channel:
r"""
A channel from the server to a single frontend widget.
"""
def __init__(self, log):
self._comm = asyncio.get_running_loop().create_future()
self._log = log
self._queries = {}
self._commands = {
"callback": self._callback,
# TODO: Add heartbeat to destroy channels that don't react anymore.
}
def is_connected(self):
return self._comm.done()
def connect(self, comm):
if self.is_connected():
raise Exception("Cannot reconnect already connected channel.")
self._comm.set_result([comm])
@comm.on_msg
def _recv(msg):
self._receive(msg)
async def message(self, action, data):
r"""
Send ``data`` to the frontend.
        Raises an exception if the client has not sent an ACK for the previous
        data.
"""
message = {
"action": action,
"data": data
}
self._log.debug(f"Sending message to client: {message}")
(await self._comm)[0].send(message)
def _receive(self, message):
r"""
        Handle an incoming ``message``.
"""
self._log.debug(f"Backend received message: {message}")
try:
data = message.get("content", {}).get("data", {})
command = data.get("command", None)
if command not in self._commands:
raise NotImplementedError(f"Unsupported command {command}")
self._commands[command](data.get("data", {}))
except Exception as e:
self._log.error(e)
def _callback(self, data):
identifier = data["identifier"]
if identifier not in self._queries:
raise ValueError(f"No pending callback for {identifier}")
if "value" in data:
value = data["value"]
self._queries[identifier]._future.set_result(value)
elif "error" in data:
error = data["error"]
self._queries[identifier]._future.set_exception(Exception(error))
del self._queries[identifier]
async def query(self, data):
class Query:
def __init__(self):
import uuid
self._identifier = uuid.uuid1().hex
import asyncio
self._future = asyncio.get_running_loop().create_future()
query = Query()
self._queries[query._identifier] = query
try:
await self.message("query", {
"identifier": query._identifier,
"data": data
})
return await query._future
except asyncio.CancelledError:
await self.message("cancel", {
"identifier": query._identifier,
            })
raise
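# A minimal usage sketch (not part of the package API): ``comm`` and ``log``
# are assumed to be supplied by the hosting widget, and ``{"method": "ping"}``
# is an illustrative payload only.
async def _example_roundtrip(comm, log):
    channel = Channel(log)
    channel.connect(comm)  # wires comm.on_msg up to this channel
    # Resolves once the frontend answers with a "callback" message for the
    # query's identifier; raises if the frontend reports an error instead.
    return await channel.query({"method": "ping"})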
|
summerysaturn/archive
|
Midi-to-VirtualPiano/Midi to VirtualPiano.py
|
import sys
keys = ["1", "!", "2", "\"", "3", "4", "$", "5", "%", "6", "^", "7", "8", "*", "9", "(", "0", "q", "Q", "w", "W", "e", "E", "r", "t", "T", "y", "Y", "u", "i", "I", "o", "O", "p", "P", "a", "s", "S", "d", "D", "f", "g", "G", "h", "H", "j", "J", "k", "l", "L", "z", "Z", "x", "c", "C", "v", "V", "b", "B", "n", "m"]
keysHex = ["20", "21", "22", "23", "24", "26", "27", "28", "29", "2A", "2B", "2C", "2E", "2F", "30", "31", "32", "34", "35", "36", "37", "38", "39", "3A", "3C", "3D", "3E", "3F", "40", "42", "43", "44", "45", "46", "47", "48", "4A", "4B", "4C", "4D", "4E", "50", "51", "52", "53", "54", "55", "56", "58", "59", "5A", "5B", "5C", "5E", "5F", "60", "61", "62", "63", "64", "66"]
def HexToPiano (n):
if n in keysHex:
return keys[keysHex.index(n)]
else:
return ""
def BytesToStr (*vals: int):
tempBytes = []
for val in vals:
tempBytes.append(data[val])
return "".join(map(chr, tempBytes))
def BytesToInt (*vals: int):
tempBytes = bytearray()
for val in vals:
tempBytes.append(data[val])
return int.from_bytes(tempBytes, byteorder='big')
def GetByte(val: int):
temp = format(data[val], '02X')
return temp
FirstByteDict = {'8':3,'9':3,'A':3,'B':3,'C':2,'D':2,'E':3}
def FirstByteSkip (val: str):
if val in FirstByteDict:
return FirstByteDict[val]
else:
return 0
SecondByteDict = {'00':5,'20':4,'2F':0,'51':6,'54':8,'58':7,'59':5}
def SecondByteSkip (val: str):
if val in SecondByteDict:
return SecondByteDict[val]
else:
return 1
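# Quick sanity checks for the lookup tables above (a sketch, not part of the
# conversion logic): MIDI note 0x3C (middle C) maps to "t" in the
# VirtualPiano layout, and notes outside the 61-key range map to "".
assert HexToPiano("3C") == "t"
assert HexToPiano("FF") == ""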
#open file
checkedArg = False
while True:
#check to see args for filename, otherwise ask user
if len(sys.argv) == 1 or checkedArg:
filename = input("filename: ")
else:
filename = sys.argv[1]
checkedArg = True
# continue if filetype isn't .mid
if ".mid" not in filename:
continue
# continue if file doesn't exist
try:
input_file = open(filename,"rb")
except Exception:
continue
# create byte data from file
data = bytearray(input_file.read())
# check to see if midi file has a header, if not, continue
if BytesToStr(0, 1, 2, 3) != "MThd":
continue
break
# 0 = single track, no need to scan for
# 1 = multiple track
# 2 = multiple songs
trackFormat = BytesToInt(8,9)
if trackFormat == 2:
input("Midi file has multiple songs, output may be produced incorrectly. Press enter to continue.")
if (trackFormat == 0):
#single track
trackLength = BytesToInt(18,19,20,21)
print(trackLength)
if (trackFormat == 1):
input("todo, multi-track currently unsupported")
output = ""
i = 23
#timeSinceLastNote = 0
while True:
fn = GetByte(i)[0]
sn = GetByte(i)[1]
sb = GetByte(i+1)
#do switches, refer to evernote
if (fn == "9"):
print("found note")
key = HexToPiano(format(data[i+1], '02X'))
output += key
print(key)
#timeSinceLastNote = 0
#input()
if (fn == "F"):
if (sn == "0" or sn == "7"):
#print("skipped", str(2 + BytesToInt(i+1)))
i += 2 + BytesToInt(i+1)
else:
if (sb in ["01","02","03","04","05","06","07","7F"]):
#print("skipped", str(2 + BytesToInt(i+2)))
i += 2 + BytesToInt(i+2)
else:
#print("skipped", str(SecondByteSkip(sb)))
i += SecondByteSkip(sb)
else:
#print("skipped", str(FirstByteSkip(fn)))
i += FirstByteSkip(fn)
#print(fn, sn, sb)
if (fn == "F" and sn == "F" and sb == "2F"):
print("eof")
break
#timeSinceLastNote += BytesToInt(i+1)
i += 1
print(output)
|
summerysaturn/archive
|
BMP-Corruptor/image-corrupt.py
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ File Handling ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import math, random
def GoTo(linenum):
global line
line = linenum
def convert_size(size_bytes):
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
while True:
#open files
target = str(input("image file (example: 'target.bmp') >"))
if (target == ""):
target = "target"
input_file = open(target + ".bmp","rb")
target = str(input("output file (example: 'output.bmp') >"))
if (target == ""):
target = "output"
output_file = open(target + ".bmp","wb")
#create byte data
data = bytearray()
data.extend(input_file.read())
offset_hex = bytes([data[10]]) + bytes([data[11]]) + bytes([data[12]]) + bytes([data[13]])
offset = int.from_bytes(offset_hex, byteorder='little')
#offset = data[10] # start of file
#x = 15,14,13,12
#y = 19,18,17,16
img_x_hex = bytes([data[21]]) + bytes([data[20]]) + bytes([data[19]]) + bytes([data[18]])
img_y_hex = bytes([data[25]]) + bytes([data[24]]) + bytes([data[23]]) + bytes([data[22]])
img_x = int.from_bytes(img_x_hex, byteorder='big')
img_y = int.from_bytes(img_y_hex, byteorder='big')
print(str(img_x) + " by " + str(img_y))
#print(str(img_x * img_y) + " pixels")
print(convert_size(len(data)))
print(str(offset) + " offset")
print("predicted size " + convert_size(offset + (img_y + img_x*img_y) * 3))
proceed = input("proceed? (y/n) >")
if (proceed == "y") or (proceed == ""):
break
else:
input_file.close()
output_file.close()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Main Section ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
offset_y = 0
print("working...")
shift = 0
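# Corruption scheme implemented below: while walking the pixel data, each
# pixel has roughly a 1-in-100,000 chance of bumping a running ``shift``;
# every pixel is then overwritten with the BGR triplet ``shift * 2`` bytes
# further along, or zeroed once that source would run past the buffer.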
for xIndex in range (0,img_x):
for yIndex in range (0,img_y):
#b = data[offset + (y*3) + x + 0]
#g = data[offset + (y*3) + x + 1]
#r = data[offset + (y*3) + x + 2]
x = xIndex * 3
y = yIndex * 3
xy = (offset + (y + x*img_y))
if (random.randint(0,100000) > 99999):
shift = shift + random.randint(100,20000)
if (xy + 2 + shift*2 < len(data)):
data[xy + 0] = data[xy + 0 + shift * 2]
data[xy + 1] = data[xy + 1 + shift * 2]
data[xy + 2] = data[xy + 2 + shift * 2]
else:
data[xy + 0] = 0
data[xy + 1] = 0
data[xy + 2] = 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Output and Close ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
output_file.write(bytes(data))
input_file.close()
output_file.close()
input("done")
GoTo(1)
|
Jibanprakash/tensorflow
|
tensorflow/contrib/data/python/kernel_tests/reader_dataset_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
import numpy as np
from tensorflow.contrib.data.python.kernel_tests import dataset_serialization_test_base
from tensorflow.contrib.data.python.ops import readers
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import python_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class TextLineDatasetTestBase(test.TestCase):
def _lineText(self, f, l):
return compat.as_bytes("%d: %d" % (f, l))
def _createFiles(self,
num_files,
num_lines,
crlf=False,
compression_type=None):
filenames = []
for i in range(num_files):
fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
filenames.append(fn)
contents = []
for j in range(num_lines):
contents.append(self._lineText(i, j))
        # Always include a newline after the record unless it is the last
        # record of the file; the first file (i == 0) additionally keeps its
        # trailing newline.
if j + 1 != num_lines or i == 0:
contents.append(b"\r\n" if crlf else b"\n")
contents = b"".join(contents)
if not compression_type:
with open(fn, "wb") as f:
f.write(contents)
elif compression_type == "GZIP":
with gzip.GzipFile(fn, "wb") as f:
f.write(contents)
elif compression_type == "ZLIB":
contents = zlib.compress(contents)
with open(fn, "wb") as f:
f.write(contents)
else:
raise ValueError("Unsupported compression_type", compression_type)
return filenames
class TextLineDatasetSerializationTest(
TextLineDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self, test_filenames, compression_type=None):
return core_readers.TextLineDataset(
test_filenames, compression_type=compression_type, buffer_size=10)
def testTextLineCore(self):
compression_types = [None, "GZIP", "ZLIB"]
num_files = 5
lines_per_file = 5
num_outputs = num_files * lines_per_file
for compression_type in compression_types:
test_filenames = self._createFiles(
num_files,
lines_per_file,
crlf=True,
compression_type=compression_type)
# pylint: disable=cell-var-from-loop
self.run_core_tests(
lambda: self._build_iterator_graph(test_filenames, compression_type),
lambda: self._build_iterator_graph(test_filenames), num_outputs)
# pylint: enable=cell-var-from-loop
class FixedLengthRecordReaderTestBase(test.TestCase):
def setUp(self):
super(FixedLengthRecordReaderTestBase, self).setUp()
self._num_files = 2
self._num_records = 7
self._header_bytes = 5
self._record_bytes = 3
self._footer_bytes = 2
def _record(self, f, r):
return compat.as_bytes(str(f * 2 + r) * self._record_bytes)
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
for j in range(self._num_records):
f.write(self._record(i, j))
f.write(b"F" * self._footer_bytes)
return filenames
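# For reference, each fixture file written above is 5 header bytes ("H"),
# 7 records of 3 bytes each, and 2 footer bytes ("F"), i.e.
# 5 + 7 * 3 + 2 = 28 bytes per file.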
class FixedLengthRecordDatasetSerializationTest(
FixedLengthRecordReaderTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self, num_epochs, compression_type=None):
filenames = self._createFiles()
return core_readers.FixedLengthRecordDataset(
filenames, self._record_bytes, self._header_bytes,
self._footer_bytes).repeat(num_epochs)
def testFixedLengthRecordCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(lambda: self._build_iterator_graph(num_epochs),
lambda: self._build_iterator_graph(num_epochs * 2),
num_outputs)
class TFRecordDatasetTestBase(test.TestCase):
def setUp(self):
super(TFRecordDatasetTestBase, self).setUp()
self._num_files = 2
self._num_records = 7
self.test_filenames = self._createFiles()
self.filenames = array_ops.placeholder(dtypes.string, shape=[None])
self.num_epochs = array_ops.placeholder_with_default(
constant_op.constant(1, dtypes.int64), shape=[])
self.compression_type = array_ops.placeholder_with_default("", shape=[])
self.batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = core_readers.TFRecordDataset(
self.filenames, self.compression_type).repeat(self.num_epochs)
batch_dataset = repeat_dataset.batch(self.batch_size)
iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)
self.init_op = iterator.make_initializer(repeat_dataset)
self.init_batch_op = iterator.make_initializer(batch_dataset)
self.get_next = iterator.get_next()
def _record(self, f, r):
return compat.as_bytes("Record %d of file %d" % (r, f))
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._record(i, j))
writer.close()
return filenames
class TFRecordDatasetSerializationTest(
TFRecordDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self,
num_epochs,
batch_size=1,
compression_type=None,
buffer_size=None):
filenames = self._createFiles()
    if compression_type == "ZLIB":
zlib_files = []
for i, fn in enumerate(filenames):
with open(fn, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
with open(zfn, "wb") as f:
f.write(cdata)
zlib_files.append(zfn)
filenames = zlib_files
    elif compression_type == "GZIP":
gzip_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
with gzip.GzipFile(gzfn, "wb") as gzf:
gzf.write(f.read())
gzip_files.append(gzfn)
filenames = gzip_files
return core_readers.TFRecordDataset(
filenames, compression_type,
buffer_size=buffer_size).repeat(num_epochs).batch(batch_size)
def testTFRecordWithoutBufferCore(self):
num_epochs = 5
batch_size = num_epochs
num_outputs = num_epochs * self._num_files * self._num_records // batch_size
# pylint: disable=g-long-lambda
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, batch_size,
buffer_size=0),
lambda: self._build_iterator_graph(num_epochs * 2, batch_size),
num_outputs)
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, buffer_size=0), None,
num_outputs * batch_size)
# pylint: enable=g-long-lambda
def testTFRecordWithBufferCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(lambda: self._build_iterator_graph(num_epochs),
lambda: self._build_iterator_graph(num_epochs * 2),
num_outputs)
def testTFRecordWithCompressionCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, compression_type="ZLIB"),
lambda: self._build_iterator_graph(num_epochs * 2), num_outputs)
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, compression_type="GZIP"),
lambda: self._build_iterator_graph(num_epochs * 2), num_outputs)
def _interleave(iterators, cycle_length):
pending_iterators = iterators
open_iterators = []
num_open = 0
for i in range(cycle_length):
if pending_iterators:
open_iterators.append(pending_iterators.pop(0))
num_open += 1
while num_open:
for i in range(min(cycle_length, len(open_iterators))):
if open_iterators[i] is None:
continue
try:
yield next(open_iterators[i])
except StopIteration:
if pending_iterators:
open_iterators[i] = pending_iterators.pop(0)
else:
open_iterators[i] = None
num_open -= 1
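# A worked example of the round-robin interleave above (for illustration):
#   list(_interleave([iter([0, 1]), iter([10, 11]), iter([20])], 2))
#     == [0, 10, 1, 11, 20]
# i.e. with cycle_length=2 the third stream only starts once one of the two
# open slots is exhausted.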
class ReadBatchFeaturesTest(test.TestCase):
def setUp(self):
super(ReadBatchFeaturesTest, self).setUp()
self._num_files = 2
self._num_records = 7
self.test_filenames = self._createFiles()
def _read_batch_features(self,
filenames,
num_epochs,
batch_size,
reader_num_threads=1,
parser_num_threads=1,
shuffle=False,
shuffle_seed=None,
drop_final_batch=False):
self.filenames = filenames
self.num_epochs = num_epochs
self.batch_size = batch_size
return readers.make_batched_features_dataset(
file_pattern=self.filenames,
batch_size=self.batch_size,
features={
"file": parsing_ops.FixedLenFeature([], dtypes.int64),
"record": parsing_ops.FixedLenFeature([], dtypes.int64),
"keywords": parsing_ops.VarLenFeature(dtypes.string)
},
reader=core_readers.TFRecordDataset,
num_epochs=self.num_epochs,
shuffle=shuffle,
shuffle_seed=shuffle_seed,
reader_num_threads=reader_num_threads,
parser_num_threads=parser_num_threads,
drop_final_batch=drop_final_batch).make_one_shot_iterator(
).get_next()
def _record(self, f, r):
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
"file":
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[f])),
"record":
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[r])),
"keywords":
feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=self._get_keywords(f, r)))
}))
return example.SerializeToString()
def _get_keywords(self, f, r):
num_keywords = 1 + (f + r) % 2
keywords = []
for index in range(num_keywords):
keywords.append(compat.as_bytes("keyword%d" % index))
return keywords
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._record(i, j))
writer.close()
return filenames
def _run_actual_batch(self, outputs, sess):
file_op = outputs["file"]
keywords_indices_op = outputs["keywords"].indices
keywords_values_op = outputs["keywords"].values
keywords_dense_shape_op = outputs["keywords"].dense_shape
record_op = outputs["record"]
return sess.run([
file_op, keywords_indices_op, keywords_values_op,
keywords_dense_shape_op, record_op
])
def _next_actual_batch(self, sess):
return self._run_actual_batch(self.outputs, sess)
def _next_expected_batch(self,
file_indices,
batch_size,
num_epochs,
cycle_length=1):
def _next_record(file_indices):
for j in file_indices:
for i in range(self._num_records):
yield j, i
def _next_record_interleaved(file_indices, cycle_length):
return _interleave([_next_record([i]) for i in file_indices],
cycle_length)
file_batch = []
keywords_batch_indices = []
keywords_batch_values = []
keywords_batch_max_len = 0
record_batch = []
batch_index = 0
for _ in range(num_epochs):
if cycle_length == 1:
next_records = _next_record(file_indices)
else:
next_records = _next_record_interleaved(file_indices, cycle_length)
for record in next_records:
f = record[0]
r = record[1]
file_batch.append(f)
record_batch.append(r)
keywords = self._get_keywords(f, r)
keywords_batch_values.extend(keywords)
keywords_batch_indices.extend(
[[batch_index, i] for i in range(len(keywords))])
batch_index += 1
keywords_batch_max_len = max(keywords_batch_max_len, len(keywords))
if len(file_batch) == batch_size:
yield [
file_batch, keywords_batch_indices, keywords_batch_values,
[batch_size, keywords_batch_max_len], record_batch
]
file_batch = []
keywords_batch_indices = []
keywords_batch_values = []
keywords_batch_max_len = 0
record_batch = []
batch_index = 0
if file_batch:
yield [
file_batch, keywords_batch_indices, keywords_batch_values,
[len(file_batch), keywords_batch_max_len], record_batch
]
def _verify_records(self,
sess,
batch_size,
file_index=None,
num_epochs=1,
interleave_cycle_length=1):
if file_index is not None:
file_indices = [file_index]
else:
file_indices = range(self._num_files)
for expected_batch in self._next_expected_batch(
file_indices, batch_size, num_epochs, interleave_cycle_length):
actual_batch = self._next_actual_batch(sess)
for i in range(len(expected_batch)):
self.assertAllEqual(expected_batch[i], actual_batch[i])
def testRead(self):
for batch_size in [1, 2]:
for num_epochs in [1, 10]:
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Basic test: read from file 0.
self.outputs = self._read_batch_features(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size)
self._verify_records(sess, batch_size, 0, num_epochs=num_epochs)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(sess)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Basic test: read from file 1.
self.outputs = self._read_batch_features(
filenames=self.test_filenames[1],
num_epochs=num_epochs,
batch_size=batch_size)
self._verify_records(sess, batch_size, 1, num_epochs=num_epochs)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(sess)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Basic test: read from both files.
self.outputs = self._read_batch_features(
filenames=self.test_filenames,
num_epochs=num_epochs,
batch_size=batch_size)
self._verify_records(sess, batch_size, num_epochs=num_epochs)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(sess)
def testReadWithEquivalentDataset(self):
features = {
"file": parsing_ops.FixedLenFeature([], dtypes.int64),
"record": parsing_ops.FixedLenFeature([], dtypes.int64),
}
dataset = (
core_readers.TFRecordDataset(self.test_filenames)
.map(lambda x: parsing_ops.parse_single_example(x, features))
.repeat(10).batch(2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
next_element = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for file_batch, _, _, _, record_batch in self._next_expected_batch(
range(self._num_files), 2, 10):
actual_batch = sess.run(next_element)
self.assertAllEqual(file_batch, actual_batch["file"])
self.assertAllEqual(record_batch, actual_batch["record"])
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testReadWithFusedShuffleRepeatDataset(self):
num_epochs = 5
total_records = num_epochs * self._num_records
for batch_size in [1, 2]:
# Test that shuffling with same seed produces the same result.
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
outputs1 = self._read_batch_features(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size,
shuffle=True,
shuffle_seed=5)
outputs2 = self._read_batch_features(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size,
shuffle=True,
shuffle_seed=5)
for _ in range(total_records // batch_size):
batch1 = self._run_actual_batch(outputs1, sess)
batch2 = self._run_actual_batch(outputs2, sess)
for i in range(len(batch1)):
self.assertAllEqual(batch1[i], batch2[i])
# Test that shuffling with different seeds produces a different order.
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
outputs1 = self._read_batch_features(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size,
shuffle=True,
shuffle_seed=5)
outputs2 = self._read_batch_features(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size,
shuffle=True,
shuffle_seed=15)
all_equal = True
for _ in range(total_records // batch_size):
batch1 = self._run_actual_batch(outputs1, sess)
batch2 = self._run_actual_batch(outputs2, sess)
for i in range(len(batch1)):
all_equal = all_equal and np.array_equal(batch1[i], batch2[i])
self.assertFalse(all_equal)
def testParallelReadersAndParsers(self):
num_epochs = 5
for batch_size in [1, 2]:
for reader_num_threads in [2, 4]:
for parser_num_threads in [2, 4]:
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
self.outputs = self._read_batch_features(
filenames=self.test_filenames,
num_epochs=num_epochs,
batch_size=batch_size,
reader_num_threads=reader_num_threads,
parser_num_threads=parser_num_threads)
self._verify_records(
sess,
batch_size,
num_epochs=num_epochs,
interleave_cycle_length=reader_num_threads)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(sess)
def testDropFinalBatch(self):
for batch_size in [1, 2]:
for num_epochs in [1, 10]:
with ops.Graph().as_default():
# Basic test: read from file 0.
self.outputs = self._read_batch_features(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size,
drop_final_batch=True)
for _, tensor in self.outputs.items():
if isinstance(tensor, ops.Tensor): # Guard against SparseTensor.
self.assertEqual(tensor.shape[0], batch_size)
class MakeCsvDatasetTest(test.TestCase):
COLUMN_TYPES = [
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64, dtypes.string
]
COLUMNS = ["col%d" % i for i in range(len(COLUMN_TYPES))]
DEFAULT_VALS = [[], [], [], [], ["NULL"]]
DEFAULTS = [
constant_op.constant([], dtype=dtypes.int32),
constant_op.constant([], dtype=dtypes.int64),
constant_op.constant([], dtype=dtypes.float32),
constant_op.constant([], dtype=dtypes.float64),
constant_op.constant(["NULL"], dtype=dtypes.string)
]
LABEL = COLUMNS[0]
def setUp(self):
super(MakeCsvDatasetTest, self).setUp()
self._num_files = 2
self._num_records = 11
self._test_filenames = self._create_files()
def _csv_values(self, fileno, recordno):
return [
fileno,
recordno,
fileno * recordno * 0.5,
fileno * recordno + 0.5,
"record %d" % recordno if recordno % 2 == 1 else "",
]
def _write_file(self, filename, rows):
for i in range(len(rows)):
if isinstance(rows[i], list):
rows[i] = ",".join(str(v) if v is not None else "" for v in rows[i])
fn = os.path.join(self.get_temp_dir(), filename)
f = open(fn, "w")
f.write("\n".join(rows))
f.close()
return fn
def _create_file(self, fileno, header=True):
rows = []
if header:
rows.append(self.COLUMNS)
for recno in range(self._num_records):
rows.append(self._csv_values(fileno, recno))
return self._write_file("csv_file%d.csv" % fileno, rows)
def _create_files(self):
filenames = []
for i in range(self._num_files):
filenames.append(self._create_file(i))
return filenames
def _make_csv_dataset(
self,
filenames,
defaults,
column_names=COLUMNS,
label_name=LABEL,
select_cols=None,
batch_size=1,
num_epochs=1,
shuffle=False,
shuffle_seed=None,
header=True,
na_value="",
):
return readers.make_csv_dataset(
filenames,
batch_size=batch_size,
column_names=column_names,
column_defaults=defaults,
label_name=label_name,
num_epochs=num_epochs,
shuffle=shuffle,
shuffle_seed=shuffle_seed,
header=header,
na_value=na_value,
select_columns=select_cols,
)
def _next_actual_batch(self, file_indices, batch_size, num_epochs, defaults):
features = {col: list() for col in self.COLUMNS}
for _ in range(num_epochs):
for i in file_indices:
for j in range(self._num_records):
values = self._csv_values(i, j)
for n, v in enumerate(values):
if v == "": # pylint: disable=g-explicit-bool-comparison
values[n] = defaults[n][0]
values[-1] = values[-1].encode("utf-8")
# Regroup lists by column instead of row
for n, col in enumerate(self.COLUMNS):
features[col].append(values[n])
if len(list(features.values())[0]) == batch_size:
yield features
features = {col: list() for col in self.COLUMNS}
def _run_actual_batch(self, outputs, sess):
features, labels = sess.run(outputs)
batch = [features[k] for k in self.COLUMNS if k != self.LABEL]
batch.append(labels)
return batch
def _verify_records(
self,
sess,
dataset,
file_indices,
defaults=tuple(DEFAULT_VALS),
label_name=LABEL,
batch_size=1,
num_epochs=1,
):
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
for expected_features in self._next_actual_batch(file_indices, batch_size,
num_epochs, defaults):
actual_features = sess.run(get_next)
if label_name is not None:
expected_labels = expected_features.pop(label_name)
# Compare labels
self.assertAllEqual(expected_labels, actual_features[1])
actual_features = actual_features[0] # Extract features dict from tuple
for k in expected_features.keys():
# Compare features
self.assertAllEqual(expected_features[k], actual_features[k])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMakeCSVDataset(self):
defaults = self.DEFAULTS
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Basic test: read from file 0.
dataset = self._make_csv_dataset(self._test_filenames[0], defaults)
self._verify_records(sess, dataset, [0])
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Basic test: read from file 1.
dataset = self._make_csv_dataset(self._test_filenames[1], defaults)
self._verify_records(sess, dataset, [1])
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Read from both files.
dataset = self._make_csv_dataset(self._test_filenames, defaults)
self._verify_records(sess, dataset, range(self._num_files))
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Read from both files. Exercise the `batch` and `num_epochs` parameters
# of make_csv_dataset and make sure they work.
dataset = self._make_csv_dataset(
self._test_filenames, defaults, batch_size=2, num_epochs=10)
self._verify_records(
sess, dataset, range(self._num_files), batch_size=2, num_epochs=10)
def testMakeCSVDataset_withBadColumns(self):
"""Tests that exception is raised when input is malformed.
"""
dupe_columns = self.COLUMNS[:-1] + self.COLUMNS[:1]
defaults = self.DEFAULTS
# Duplicate column names
with self.assertRaises(ValueError):
self._make_csv_dataset(
self._test_filenames, defaults, column_names=dupe_columns)
# Label key not one of column names
with self.assertRaises(ValueError):
self._make_csv_dataset(
self._test_filenames, defaults, label_name="not_a_real_label")
def testMakeCSVDataset_withNoLabel(self):
"""Tests that CSV datasets can be created when no label is specified.
"""
defaults = self.DEFAULTS
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Read from both files. Make sure this works with no label key supplied.
dataset = self._make_csv_dataset(
self._test_filenames,
defaults,
batch_size=2,
num_epochs=10,
label_name=None)
self._verify_records(
sess,
dataset,
range(self._num_files),
batch_size=2,
num_epochs=10,
label_name=None)
def testMakeCSVDataset_withNoHeader(self):
"""Tests that datasets can be created from CSV files with no header line.
"""
defaults = self.DEFAULTS
file_without_header = self._create_file(
len(self._test_filenames), header=False)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = self._make_csv_dataset(
file_without_header,
defaults,
batch_size=2,
num_epochs=10,
header=False,
)
self._verify_records(
sess,
dataset,
[len(self._test_filenames)],
batch_size=2,
num_epochs=10,
)
def testMakeCSVDataset_withTypes(self):
"""Tests that defaults can be a dtype instead of a Tensor for required vals.
"""
defaults = [d for d in self.COLUMN_TYPES[:-1]]
defaults.append(constant_op.constant(["NULL"], dtype=dtypes.string))
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = self._make_csv_dataset(self._test_filenames, defaults)
self._verify_records(sess, dataset, range(self._num_files))
def testMakeCSVDataset_withNoColNames(self):
"""Tests that datasets can be created when column names are not specified.
In that case, we should infer the column names from the header lines.
"""
defaults = self.DEFAULTS
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Read from both files. Exercise the `batch` and `num_epochs` parameters
# of make_csv_dataset and make sure they work.
dataset = self._make_csv_dataset(
self._test_filenames,
defaults,
column_names=None,
batch_size=2,
num_epochs=10)
self._verify_records(
sess, dataset, range(self._num_files), batch_size=2, num_epochs=10)
def testMakeCSVDataset_withTypeInferenceMismatch(self):
# Test that error is thrown when num fields doesn't match columns
with self.assertRaises(ValueError):
self._make_csv_dataset(
self._test_filenames,
column_names=self.COLUMNS + ["extra_name"],
defaults=None,
batch_size=2,
num_epochs=10)
def testMakeCSVDataset_withTypeInference(self):
"""Tests that datasets can be created when no defaults are specified.
In that case, we should infer the types from the first N records.
"""
# Test that it works with standard test files (with header, etc)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = self._make_csv_dataset(
self._test_filenames, defaults=None, batch_size=2, num_epochs=10)
self._verify_records(
sess,
dataset,
range(self._num_files),
batch_size=2,
num_epochs=10,
defaults=[[], [], [], [], [""]])
def testMakeCSVDataset_withTypeInferenceTricky(self):
# Test on a deliberately tricky file (type changes as we read more rows, and
# there are null values)
fn = os.path.join(self.get_temp_dir(), "file.csv")
expected_dtypes = [
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float32,
dtypes.string, dtypes.string
]
col_names = ["col%d" % i for i in range(len(expected_dtypes))]
rows = [[None, None, None, "NAN", "",
"a"], [1, 2**31 + 1, 2**64, 123, "NAN", ""],
['"123"', 2, 2**64, 123.4, "NAN", '"cd,efg"']]
expected = [[0, 0, 0, 0, "", "a"], [1, 2**31 + 1, 2**64, 123, "", ""],
[123, 2, 2**64, 123.4, "", "cd,efg"]]
for row in expected:
row[-1] = row[-1].encode("utf-8") # py3 expects byte strings
row[-2] = row[-2].encode("utf-8") # py3 expects byte strings
self._write_file("file.csv", [col_names] + rows)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = self._make_csv_dataset(
fn,
defaults=None,
column_names=None,
label_name=None,
na_value="NAN",
)
features = dataset.make_one_shot_iterator().get_next()
# Check that types match
for i in range(len(expected_dtypes)):
print(features["col%d" % i].dtype, expected_dtypes[i])
assert features["col%d" % i].dtype == expected_dtypes[i]
for i in range(len(rows)):
assert sess.run(features) == dict(zip(col_names, expected[i]))
def testMakeCSVDataset_withTypeInferenceAllTypes(self):
# Test that we make the correct inference for all types with fallthrough
fn = os.path.join(self.get_temp_dir(), "file.csv")
expected_dtypes = [
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64,
dtypes.string, dtypes.string
]
col_names = ["col%d" % i for i in range(len(expected_dtypes))]
rows = [[1, 2**31 + 1, 1.0, 4e40, "abc", ""]]
expected = [[
1, 2**31 + 1, 1.0, 4e40, "abc".encode("utf-8"), "".encode("utf-8")
]]
self._write_file("file.csv", [col_names] + rows)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = self._make_csv_dataset(
fn,
defaults=None,
column_names=None,
label_name=None,
na_value="NAN",
)
features = dataset.make_one_shot_iterator().get_next()
# Check that types match
for i in range(len(expected_dtypes)):
self.assertAllEqual(features["col%d" % i].dtype, expected_dtypes[i])
for i in range(len(rows)):
self.assertAllEqual(
sess.run(features), dict(zip(col_names, expected[i])))
def testMakeCSVDataset_withSelectColsError(self):
data = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
col_names = ["col%d" % i for i in range(5)]
fn = self._write_file("file.csv", [col_names] + data)
with self.assertRaises(ValueError):
# Mismatch in number of defaults and number of columns selected,
# should raise an error
self._make_csv_dataset(
fn,
defaults=[[0]] * 5,
column_names=col_names,
label_name=None,
select_cols=[1, 3])
with self.assertRaises(ValueError):
# Invalid column name should raise an error
self._make_csv_dataset(
fn,
defaults=[[0]],
column_names=col_names,
label_name=None,
select_cols=["invalid_col_name"])
def testMakeCSVDataset_withSelectCols(self):
data = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
col_names = ["col%d" % i for i in range(5)]
fn = self._write_file("file.csv", [col_names] + data)
# If select_cols is specified, should only yield a subset of columns
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = self._make_csv_dataset(
fn,
defaults=[[0], [0]],
column_names=col_names,
label_name=None,
select_cols=[1, 3])
expected = [[1, 3], [6, 8]]
features = dataset.make_one_shot_iterator().get_next()
for i in range(len(data)):
self.assertAllEqual(
sess.run(features),
dict(zip([col_names[1], col_names[3]], expected[i])))
# Can still do default inference with select_cols
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = self._make_csv_dataset(
fn,
defaults=None,
column_names=col_names,
label_name=None,
select_cols=[1, 3])
expected = [[1, 3], [6, 8]]
features = dataset.make_one_shot_iterator().get_next()
for i in range(len(data)):
self.assertAllEqual(
sess.run(features),
dict(zip([col_names[1], col_names[3]], expected[i])))
# Can still do column name inference
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = self._make_csv_dataset(
fn,
defaults=None,
column_names=None,
label_name=None,
select_cols=[1, 3])
expected = [[1, 3], [6, 8]]
features = dataset.make_one_shot_iterator().get_next()
for i in range(len(data)):
self.assertAllEqual(
sess.run(features),
dict(zip([col_names[1], col_names[3]], expected[i])))
# Can specify column names instead of indices
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = self._make_csv_dataset(
fn,
defaults=None,
column_names=None,
label_name=None,
select_cols=[col_names[1], col_names[3]])
expected = [[1, 3], [6, 8]]
features = dataset.make_one_shot_iterator().get_next()
for i in range(len(data)):
self.assertAllEqual(
sess.run(features),
dict(zip([col_names[1], col_names[3]], expected[i])))
def testMakeCSVDataset_withShuffle(self):
total_records = self._num_files * self._num_records
defaults = self.DEFAULTS
for batch_size in [1, 2]:
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Test that shuffling with the same seed produces the same result
dataset1 = self._make_csv_dataset(
self._test_filenames,
defaults,
batch_size=batch_size,
shuffle=True,
shuffle_seed=5)
dataset2 = self._make_csv_dataset(
self._test_filenames,
defaults,
batch_size=batch_size,
shuffle=True,
shuffle_seed=5)
outputs1 = dataset1.make_one_shot_iterator().get_next()
outputs2 = dataset2.make_one_shot_iterator().get_next()
for _ in range(total_records // batch_size):
batch1 = self._run_actual_batch(outputs1, sess)
batch2 = self._run_actual_batch(outputs2, sess)
for i in range(len(batch1)):
self.assertAllEqual(batch1[i], batch2[i])
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Test that shuffling with a different seed produces different results
dataset1 = self._make_csv_dataset(
self._test_filenames,
defaults,
batch_size=batch_size,
shuffle=True,
shuffle_seed=5)
dataset2 = self._make_csv_dataset(
self._test_filenames,
defaults,
batch_size=batch_size,
shuffle=True,
shuffle_seed=6)
outputs1 = dataset1.make_one_shot_iterator().get_next()
outputs2 = dataset2.make_one_shot_iterator().get_next()
all_equal = False
for _ in range(total_records // batch_size):
batch1 = self._run_actual_batch(outputs1, sess)
batch2 = self._run_actual_batch(outputs2, sess)
for i in range(len(batch1)):
all_equal = all_equal and np.array_equal(batch1[i], batch2[i])
self.assertFalse(all_equal)
class MakeTFRecordDatasetTest(TFRecordDatasetTestBase):
def _next_expected_batch(self,
file_indices,
batch_size,
num_epochs,
cycle_length,
drop_final_batch,
use_parser_fn):
def _next_record(file_indices):
for j in file_indices:
for i in range(self._num_records):
yield j, i
def _next_record_interleaved(file_indices, cycle_length):
return _interleave([_next_record([i]) for i in file_indices],
cycle_length)
record_batch = []
batch_index = 0
for _ in range(num_epochs):
if cycle_length == 1:
next_records = _next_record(file_indices)
else:
next_records = _next_record_interleaved(file_indices, cycle_length)
for f, r in next_records:
record = self._record(f, r)
if use_parser_fn:
record = record[1:]
record_batch.append(record)
batch_index += 1
if len(record_batch) == batch_size:
yield record_batch
record_batch = []
batch_index = 0
if record_batch and not drop_final_batch:
yield record_batch
def _verify_records(self,
sess,
outputs,
batch_size,
file_index,
num_epochs,
interleave_cycle_length,
drop_final_batch,
use_parser_fn):
if file_index is not None:
file_indices = [file_index]
else:
file_indices = range(self._num_files)
for expected_batch in self._next_expected_batch(
file_indices, batch_size, num_epochs, interleave_cycle_length,
drop_final_batch, use_parser_fn):
actual_batch = sess.run(outputs)
self.assertAllEqual(expected_batch, actual_batch)
def _read_test(self, batch_size, num_epochs, file_index=None,
num_parallel_reads=1, drop_final_batch=False, parser_fn=False):
if file_index is None:
file_pattern = self.test_filenames
else:
file_pattern = self.test_filenames[file_index]
if parser_fn:
fn = lambda x: string_ops.substr(x, 1, 999)
else:
fn = None
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
outputs = readers.make_tf_record_dataset(
file_pattern=file_pattern,
num_epochs=num_epochs,
batch_size=batch_size,
parser_fn=fn,
num_parallel_reads=num_parallel_reads,
drop_final_batch=drop_final_batch,
shuffle=False).make_one_shot_iterator().get_next()
self._verify_records(
sess, outputs, batch_size, file_index, num_epochs=num_epochs,
interleave_cycle_length=num_parallel_reads,
drop_final_batch=drop_final_batch, use_parser_fn=parser_fn)
with self.assertRaises(errors.OutOfRangeError):
sess.run(outputs)
def testRead(self):
for batch_size in [1, 2]:
for num_epochs in [1, 3]:
# Basic test: read from file 0.
self._read_test(batch_size, num_epochs, 0)
# Basic test: read from file 1.
self._read_test(batch_size, num_epochs, 1)
# Basic test: read from both files.
self._read_test(batch_size, num_epochs)
# Basic test: read from both files, with parallel reads.
self._read_test(batch_size, num_epochs, num_parallel_reads=8)
def testDropFinalBatch(self):
for batch_size in [1, 2, 10]:
for num_epochs in [1, 3]:
# Read from file 0.
self._read_test(batch_size, num_epochs, 0, drop_final_batch=True)
# Read from both files.
self._read_test(batch_size, num_epochs, drop_final_batch=True)
# Read from both files, with parallel reads.
self._read_test(batch_size, num_epochs, num_parallel_reads=8,
drop_final_batch=True)
def testParserFn(self):
for batch_size in [1, 2]:
for num_epochs in [1, 3]:
for drop_final_batch in [False, True]:
self._read_test(batch_size, num_epochs, parser_fn=True,
drop_final_batch=drop_final_batch)
self._read_test(batch_size, num_epochs, num_parallel_reads=8,
parser_fn=True, drop_final_batch=drop_final_batch)
def _shuffle_test(self, batch_size, num_epochs, num_parallel_reads=1,
seed=None):
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = readers.make_tf_record_dataset(
file_pattern=self.test_filenames,
num_epochs=num_epochs,
batch_size=batch_size,
num_parallel_reads=num_parallel_reads,
shuffle=True,
shuffle_seed=seed)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
sess.run(iterator.initializer)
first_batches = []
try:
while True:
first_batches.append(sess.run(next_element))
except errors.OutOfRangeError:
pass
sess.run(iterator.initializer)
second_batches = []
try:
while True:
second_batches.append(sess.run(next_element))
except errors.OutOfRangeError:
pass
self.assertEqual(len(first_batches), len(second_batches))
if seed is not None:
# if you set a seed, should get the same results
for i in range(len(first_batches)):
self.assertAllEqual(first_batches[i], second_batches[i])
expected = []
for f in range(self._num_files):
for r in range(self._num_records):
expected.extend([self._record(f, r)] * num_epochs)
for batches in (first_batches, second_batches):
actual = []
for b in batches:
actual.extend(b)
self.assertAllEqual(sorted(expected), sorted(actual))
def testShuffle(self):
for batch_size in [1, 2]:
for num_epochs in [1, 3]:
for num_parallel_reads in [1, 2]:
# Test that all expected elements are produced
self._shuffle_test(batch_size, num_epochs, num_parallel_reads)
# Test that elements are produced in a consistent order if
# you specify a seed.
self._shuffle_test(batch_size, num_epochs, num_parallel_reads,
seed=21345)
if __name__ == "__main__":
test.main()
|
Jibanprakash/tensorflow
|
tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loss scaling optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
class LossScaleOptimizer(optimizer.Optimizer):
"""An optimizer that applies loss scaling in backprop.
This class is useful for mixed precision training on GPUs (or other potential
accelerators), which is an approach to improve compute throughput without loss
of model quality.
  The common configuration of mixed precision models is the following:
  * Variables are kept in high precision (e.g. float32).
  * Computations are done in lower precision (e.g. float16); variables are
    cast to lower precision before they're used.
  * In training, final gradients are cast back to variable precision and
    applied.
Because computations happen in lower precision, gradients in the backprop pass
might underflow in the smaller dynamic range, causing a model to converge at a
suboptimal level. This optimizer multiplies the loss by a factor before
backprop starts to prevent underflow. Before gradients are applied, they are
  cast to higher precision and down-scaled by the same factor, so
mathematically the variable updates are no different from regular
same-precision training.
See [Nvidia's manual on mixed precision training](
https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html)
for more details.
  To use the loss scale optimizer, one only needs to choose a loss scale
  strategy and wrap a regular optimizer. See examples below.
```
loss = loss_fn()
opt = tf.AdamOptimizer(learning_rate=...)
# Choose a loss scale manager which decides how to pick the right loss scale
# throughout the training process.
  loss_scale_manager = tf.contrib.mixed_precision.FixedLossScaleManager(5000)
# Wraps the original optimizer in a LossScaleOptimizer.
loss_scale_optimizer = LossScaleOptimizer(opt, loss_scale_manager)
# Call minimize() on the loss scale optimizer.
train_op = loss_scale_optimizer.minimize(loss)
```
  If gradient clipping is applied, one can call
  `loss_scale_optimizer.compute_gradients()` and
  `loss_scale_optimizer.apply_gradients()` separately.
Notice the following way of using LossScaleOptimizer is not intended. Always
use `loss_scale_optimizer.compute_gradients()` to compute gradients instead of
`tf.gradients()` if doing mixed precision training.
```
# The following is a wrong way to use LossScaleOptimizer along with
# tf.gradients().
# Always use loss_scale_optimizer.compute_gradients() to compute grads, or
# loss scale is not correctly applied.
grads = tf.gradients(loss, ...)
# Do some custom grad clipping.
grads = clip_grads(grads, ...)
  loss_scale_optimizer.apply_gradients(grads_and_vars)
```
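  The intended pattern keeps both calls on the loss scale optimizer (a sketch;
  `clip_grads` stands in for whatever clipping logic is used):
  ```
  grads_and_vars = loss_scale_optimizer.compute_gradients(loss)
  grads_and_vars = clip_grads(grads_and_vars)
  train_op = loss_scale_optimizer.apply_gradients(grads_and_vars)
  ```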
"""
def __init__(self, opt, loss_scale_manager):
"""Construct a loss scaling optimizer.
Args:
opt: The actual optimizer that will be used to compute and apply the
gradients. Must be an implementation of the @{tf.train.Optimizer}
interface.
loss_scale_manager: A LossScaleManager object.
"""
self._opt = opt
self._loss_scale_manager = loss_scale_manager
def compute_gradients(self,
loss,
var_list=None,
gate_gradients=optimizer.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None):
"""Compute gradients. See base class @{tf.train.Optimizer}."""
loss_scale = self._loss_scale_manager.get_loss_scale()
if context.executing_eagerly():
def scaled_loss():
loss_val = loss()
return loss_val * math_ops.cast(loss_scale, loss_val.dtype.base_dtype)
else:
if callable(loss):
loss_val = loss()
else:
loss_val = loss
scaled_loss = loss_val * math_ops.cast(loss_scale,
loss_val.dtype.base_dtype)
grads_and_vars = self._opt.compute_gradients(
scaled_loss,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
return self._down_scale(grads_and_vars, loss_scale)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients. See base class @{tf.train.Optimizer}."""
grads = [g for (g, _) in grads_and_vars]
is_finite_grad = []
for g in grads:
is_finite_grad.append(math_ops.reduce_all(gen_math_ops.is_finite(g)))
is_overall_finite = math_ops.reduce_all(is_finite_grad)
# Only update gradients when all grads are finite.
def true_apply_gradients_fn():
return self._opt.apply_gradients(grads_and_vars, global_step, name)
update_vars = control_flow_ops.cond(
is_overall_finite, true_apply_gradients_fn, gen_control_flow_ops.no_op)
    # Let the loss scale manager adjust the loss scale depending on whether
    # all gradients were finite.
return control_flow_ops.group(
update_vars,
self._loss_scale_manager.update_loss_scale(is_overall_finite))
def _down_scale(self, grads_vars, loss_scale):
# Down scale grads by the loss_scale.
gv = []
inv_loss_scale = gen_math_ops.reciprocal(loss_scale)
for g, v in grads_vars:
if g is not None:
gv.append((g * math_ops.cast(inv_loss_scale, g.dtype.base_dtype), v))
else:
gv.append((g, v))
return gv
|
Jibanprakash/tensorflow
|
tensorflow/python/keras/losses.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
"""Built-in loss functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.metrics.mean_squared_error',
'keras.losses.mean_squared_error')
def mean_squared_error(y_true, y_pred):
return K.mean(math_ops.square(y_pred - y_true), axis=-1)
@tf_export('keras.metrics.mean_absolute_error',
'keras.losses.mean_absolute_error')
def mean_absolute_error(y_true, y_pred):
return K.mean(math_ops.abs(y_pred - y_true), axis=-1)
@tf_export('keras.metrics.mean_absolute_percentage_error',
'keras.losses.mean_absolute_percentage_error')
def mean_absolute_percentage_error(y_true, y_pred):
diff = math_ops.abs(
(y_true - y_pred) / K.clip(math_ops.abs(y_true), K.epsilon(), None))
return 100. * K.mean(diff, axis=-1)
@tf_export('keras.metrics.mean_squared_logarithmic_error',
'keras.losses.mean_squared_logarithmic_error')
def mean_squared_logarithmic_error(y_true, y_pred):
first_log = math_ops.log(K.clip(y_pred, K.epsilon(), None) + 1.)
second_log = math_ops.log(K.clip(y_true, K.epsilon(), None) + 1.)
return K.mean(math_ops.square(first_log - second_log), axis=-1)
@tf_export('keras.metrics.squared_hinge', 'keras.losses.squared_hinge')
def squared_hinge(y_true, y_pred):
return K.mean(
math_ops.square(math_ops.maximum(1. - y_true * y_pred, 0.)), axis=-1)
@tf_export('keras.metrics.hinge', 'keras.losses.hinge')
def hinge(y_true, y_pred):
return K.mean(math_ops.maximum(1. - y_true * y_pred, 0.), axis=-1)
@tf_export('keras.losses.categorical_hinge')
def categorical_hinge(y_true, y_pred):
pos = math_ops.reduce_sum(y_true * y_pred, axis=-1)
neg = math_ops.reduce_max((1. - y_true) * y_pred, axis=-1)
return math_ops.maximum(0., neg - pos + 1.)
@tf_export('keras.losses.logcosh')
def logcosh(y_true, y_pred):
"""Logarithm of the hyperbolic cosine of the prediction error.
`log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and
to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly
like the mean squared error, but will not be so strongly affected by the
occasional wildly incorrect prediction.
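  The implementation relies on the identity
  `log(cosh(x)) = x + softplus(-2x) - log(2)`, which stays numerically stable
  for large `|x|` where `cosh(x)` itself would overflow.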
Arguments:
y_true: tensor of true targets.
y_pred: tensor of predicted targets.
Returns:
Tensor with one scalar loss entry per sample.
"""
def _logcosh(x):
return x + nn.softplus(-2. * x) - math_ops.log(2.)
return K.mean(_logcosh(y_pred - y_true), axis=-1)
@tf_export('keras.metrics.categorical_crossentropy',
'keras.losses.categorical_crossentropy')
def categorical_crossentropy(y_true, y_pred):
return K.categorical_crossentropy(y_true, y_pred)
@tf_export('keras.metrics.sparse_categorical_crossentropy',
'keras.losses.sparse_categorical_crossentropy')
def sparse_categorical_crossentropy(y_true, y_pred):
return K.sparse_categorical_crossentropy(y_true, y_pred)
@tf_export('keras.metrics.binary_crossentropy',
'keras.losses.binary_crossentropy')
def binary_crossentropy(y_true, y_pred):
return K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1)
@tf_export('keras.metrics.kullback_leibler_divergence',
'keras.losses.kullback_leibler_divergence')
def kullback_leibler_divergence(y_true, y_pred):
y_true = K.clip(y_true, K.epsilon(), 1)
y_pred = K.clip(y_pred, K.epsilon(), 1)
return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)
@tf_export('keras.metrics.poisson', 'keras.losses.poisson')
def poisson(y_true, y_pred):
return K.mean(y_pred - y_true * math_ops.log(y_pred + K.epsilon()), axis=-1)
@tf_export('keras.metrics.cosine_proximity', 'keras.losses.cosine_proximity')
def cosine_proximity(y_true, y_pred):
y_true = nn.l2_normalize(y_true, axis=-1)
y_pred = nn.l2_normalize(y_pred, axis=-1)
return -math_ops.reduce_sum(y_true * y_pred, axis=-1)
# Aliases.
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
kld = KLD = kullback_leibler_divergence
cosine = cosine_proximity
@tf_export('keras.losses.serialize')
def serialize(loss):
return serialize_keras_object(loss)
@tf_export('keras.losses.deserialize')
def deserialize(name, custom_objects=None):
return deserialize_keras_object(
name,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='loss function')
@tf_export('keras.losses.get')
def get(identifier):
if identifier is None:
return None
if isinstance(identifier, six.string_types):
identifier = str(identifier)
return deserialize(identifier)
if isinstance(identifier, dict):
return deserialize(identifier)
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret '
'loss function identifier:', identifier)
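# Example dispatch behaviour (illustrative only): strings resolve through this
# module's globals, so aliases such as 'mse' work; dicts go through full
# deserialization; callables pass straight through; and None returns None.
#   get('mse') is mean_squared_error
#   get(logcosh) is logcosh
#   get(None) is None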
|
Jibanprakash/tensorflow
|
tensorflow/contrib/lite/python/lite_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib.lite.python import lite
from tensorflow.contrib.lite.python import lite_constants
from tensorflow.contrib.lite.python.interpreter import Interpreter
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.training.training_util import write_graph
class FromSessionTest(test_util.TensorFlowTestCase):
def testFloat(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testQuantization(self):
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(
sess, [in_tensor_1, in_tensor_2], [out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {
'inputA': (0., 1.),
'inputB': (0., 1.)
} # mean, std_dev
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.),
input_details[0]['quantization']) # scale, zero_point
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.uint8, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((1., 0.),
input_details[1]['quantization']) # scale, zero_point
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertTrue(output_details[0]['quantization'][0] > 0) # scale
def testQuantizationInvalid(self):
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(
sess, [in_tensor_1, in_tensor_2], [out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'inputA': (0., 1.)} # mean, std_dev
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'Quantization input stats are not available for input tensors '
'\'inputB\'.', str(error.exception))
def testBatchSizeInvalid(self):
in_tensor = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test invalid shape. None after 1st dimension.
in_tensor = array_ops.placeholder(
shape=[1, None, 16, 3], dtype=dtypes.float32)
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'None is only supported in the 1st dimension. Tensor '
'\'Placeholder_1:0\' has invalid shape \'[1, None, 16, 3]\'.',
str(error.exception))
def testBatchSizeValid(self):
in_tensor = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testFreezeGraph(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
var = variable_scope.get_variable(
'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + var
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testGraphviz(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
converter.output_format = lite_constants.GRAPHVIZ_DOT
graphviz_output = converter.convert()
self.assertTrue(graphviz_output)
def testInferenceInputType(self):
in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3], dtype=dtypes.uint8)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
converter.inference_input_type = lite_constants.QUANTIZED_UINT8
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])
def testDefaultRangesStats(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev
converter.default_ranges_stats = (0, 6) # min, max
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertTrue(output_details[0]['quantization'][0] > 0) # scale
class FromFlatbufferFile(test_util.TensorFlowTestCase):
def testFloat(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testFloatWithShapesArray(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(
graph_def_file, ['Placeholder'], ['add'],
input_shapes={'Placeholder': [1, 16, 16, 3]})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
def testFreezeGraph(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
var = variable_scope.get_variable(
'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + var
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
# Ensure the graph with variables cannot be converted.
with self.assertRaises(ValueError) as error:
lite.TocoConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual('Please freeze the graph using freeze_graph.py',
str(error.exception))
def testPbtxt(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pbtxt')
write_graph(sess.graph_def, '', graph_def_file, True)
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testInvalidFile(self):
graph_def_file = os.path.join(self.get_temp_dir(), 'invalid_file')
with gfile.Open(graph_def_file, 'wb') as temp_file:
temp_file.write('bad data')
temp_file.flush()
# Attempts to convert the invalid model.
with self.assertRaises(ValueError) as error:
lite.TocoConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual(
'Unable to parse input file \'{}\'.'.format(graph_def_file),
str(error.exception))
class FromSavedModelTest(test_util.TensorFlowTestCase):
def _createSavedModel(self, shape):
"""Create a simple SavedModel."""
saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
with session.Session() as sess:
in_tensor_1 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name='inputB')
in_tensor_2 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name='inputA')
out_tensor = in_tensor_1 + in_tensor_2
inputs = {'x': in_tensor_1, 'y': in_tensor_2}
outputs = {'z': out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
def testSimpleModel(self):
"""Test a SavedModel."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testNoneBatchSize(self):
"""Test a SavedModel, with None in input tensor's shape."""
saved_model_dir = self._createSavedModel(shape=[None, 16, 16, 3])
converter = lite.TocoConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testOrderInputArrays(self):
"""Test a SavedModel ordering of input arrays."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
converter = lite.TocoConverter.from_saved_model(
saved_model_dir, input_arrays=['inputB', 'inputA'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testSubsetInputArrays(self):
"""Test a SavedModel with a subset of the input array names of the model."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
# Check case where input shape is given.
converter = lite.TocoConverter.from_saved_model(
saved_model_dir,
input_arrays=['inputA'],
input_shapes={'inputA': [1, 16, 16, 3]})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check case where input shape is None.
converter = lite.TocoConverter.from_saved_model(
saved_model_dir, input_arrays=['inputA'], input_shapes={'inputA': None})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
if __name__ == '__main__':
test.main()
|
Jibanprakash/tensorflow
|
tensorflow/contrib/distribute/python/combinations.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Facilities for creating multiple test combinations.
Here is an example of testing various optimizers in Eager and Graph mode:
class AdditionExample(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(mode=["graph", "eager"],
optimizer=[AdamOptimizer(),
GradientDescentOptimizer()]))
def testOptimizer(self, optimizer):
... f(optimizer)...
This will run `testOptimizer` 4 times with the specified optimizers: 2 in
Eager and 2 in Graph mode.
The test will be provided with arguments that match the arguments of combine
by name. It is necessary to request all arguments, except for `mode`, which is
optional.
`combine()` function is available for creating a cross product of various
options. `times()` function exists for creating a product of N `combine()`-ed
results. See below.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import sys
import types
import unittest
from absl.testing import parameterized
import six
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import one_device_strategy
from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.contrib.optimizer_v2 import adam as adam_v2
from tensorflow.contrib.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.training import adam
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import tf_inspect
GPU_TEST = "test_gpu" in sys.argv[0]
TPU_TEST = "test_tpu" in sys.argv[0]
def generate(combinations):
"""A decorator for generating test cases of a test method or a test class.
Args:
combinations: a list of dictionaries created using combine() and times().
Restrictions:
-- the "mode" argument can be either "eager" or "graph". It's "graph" by
default.
-- arguments of the test method must match by name to get the corresponding
value of the combination. Tests must accept all arguments except the
"mode", "required_tpu" and "required_gpus".
-- "distribution" argument is special and optional. It is meant for passing
      instances of DistributionStrategy. Each instance is to be passed via
`NamedDistribution`. If using "distribution", "required_gpus" and
"required_tpu" should be specified via the NamedDistribution instance,
rather than as separate arguments.
-- "required_tpu" argument is special and optional. If not `None`, then the
test will be skipped if TPUs aren't available.
-- "required_gpus" argument is special and optional. If not `None`, then the
test will be skipped if the specified number of GPUs aren't available.
Returns:
a decorator that will cause the test method or the test class to be run
under the specified conditions.
Raises:
ValueError - if "mode" argument wasn't either "eager" or "graph" or if other
arguments were not accepted by the test method.
"""
def decorator(test_method_or_class):
"""The decorator to be returned."""
# Generate good test names that can be used with --test_filter.
named_combinations = []
for combination in combinations:
# We use OrderedDicts in `combine()` and `times()` to ensure stable
# order of keys in each dictionary.
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format(
"".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) + [("testcase_name",
"_test{}".format(name))]))
if isinstance(test_method_or_class, type):
class_object = test_method_or_class
class_object._test_method_ids = test_method_ids = {}
for name, test_method in six.iteritems(class_object.__dict__.copy()):
if (name.startswith(unittest.TestLoader.testMethodPrefix) and
isinstance(test_method, types.FunctionType)):
delattr(class_object, name)
methods = {}
parameterized._update_class_dict_for_param_test_case(
class_object.__name__, methods, test_method_ids, name,
parameterized._ParameterizedTestIter(
_augment_with_special_arguments(test_method),
named_combinations, parameterized._NAMED, name))
for method_name, method in six.iteritems(methods):
setattr(class_object, method_name, method)
return class_object
else:
test_method = _augment_with_special_arguments(test_method_or_class)
return parameterized.named_parameters(*named_combinations)(test_method)
return decorator
def _augment_with_special_arguments(test_method):
def decorated(self, **kwargs):
"""A wrapped test method that treats some arguments in a special way."""
mode = kwargs.pop("mode", "graph")
distribution = kwargs.pop("distribution", None)
required_tpu = kwargs.pop("required_tpu", False)
required_gpus = kwargs.pop("required_gpus", None)
if distribution:
assert required_gpus is None, (
"Do not use `required_gpus` and `distribution` together.")
assert required_tpu is False, (
"Do not use `required_tpu` and `distribution` together.")
kwargs["distribution"] = distribution.strategy
required_gpus = distribution.required_gpus
required_tpu = distribution.required_tpu
if required_tpu and not TPU_TEST:
self.skipTest("Test requires a TPU, but it's not available.")
if not required_tpu and TPU_TEST:
self.skipTest("Test that doesn't require a TPU.")
if not required_gpus:
if GPU_TEST:
self.skipTest("Test that doesn't require GPUs.")
elif context.num_gpus() < required_gpus:
self.skipTest(
"{} GPUs are not available for this test. {} GPUs are available".
format(required_gpus, context.num_gpus()))
# At this point, `kwargs` doesn't have `required_gpus` or `required_tpu`
# that the user might have specified. `kwargs` still has `mode`, which
# the test is allowed to accept or ignore.
requested_arguments = tf_inspect.getfullargspec(test_method).args
missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
set(requested_arguments + ["mode"]))
if missing_arguments:
raise ValueError("The test is missing arguments {} .".format(
missing_arguments))
kwargs_to_pass = {}
for arg in requested_arguments:
if arg == "self":
kwargs_to_pass[arg] = self
else:
kwargs_to_pass[arg] = kwargs[arg]
if mode == "eager":
with ops.Graph().as_default(), context.eager_mode():
test_method(**kwargs_to_pass)
elif mode == "graph":
with ops.Graph().as_default(), context.graph_mode():
test_method(**kwargs_to_pass)
else:
raise ValueError(
"'mode' has to be either 'eager' or 'graph' and not {}".format(
mode))
return decorated
def combine(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]`
or `option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
if not kwargs:
return [OrderedDict()]
sort_by_key = lambda k: k[0][0]
kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
first = list(kwargs.items())[0]
rest = dict(list(kwargs.items())[1:])
rest_combined = combine(**rest)
key = first[0]
values = first[1]
if not isinstance(values, list):
values = [values]
return [
OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
for v in values
for combined in rest_combined
]
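# Editor's note: an illustrative example of `combine`; it is not part of the
# original module. Keys are sorted and a bare (non-list) value is treated as a
# single possibility:
#
#   combine(mode=['graph', 'eager'], required_gpus=1) ==
#       [OrderedDict([('mode', 'graph'), ('required_gpus', 1)]),
#        OrderedDict([('mode', 'eager'), ('required_gpus', 1)])]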
def times(*combined):
"""Generate a product of N sets of combinations.
times(combine(a=[1,2]), combine(b=[3,4])) == combine(a=[1,2], b=[3,4])
Args:
*combined: N lists of dictionaries that specify combinations.
Returns:
a list of dictionaries for each combination.
Raises:
ValueError: if some of the inputs have overlapping keys.
"""
assert combined
if len(combined) == 1:
return combined[0]
first = combined[0]
rest_combined = times(*combined[1:])
combined_results = []
for a in first:
for b in rest_combined:
if set(a.keys()).intersection(set(b.keys())):
raise ValueError("Keys need to not overlap: {} vs {}".format(
a.keys(), b.keys()))
combined_results.append(OrderedDict(list(a.items()) + list(b.items())))
return combined_results
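# Editor's note: illustrative only, not part of the original module. Because
# `combine` and `times` both return plain lists of OrderedDicts, alternatives
# can be concatenated with `+` and then crossed with further options:
#
#   graph_only = combine(mode='graph', required_gpus=[0, 1])
#   eager_only = combine(mode='eager', required_gpus=[0, 1])
#   both = graph_only + eager_only
#   crossed = times(both, combine(use_callable_loss=[True, False]))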
class NamedObject(object):
"""A class that translates an object into a good test name."""
def __init__(self, name, obj):
self._name = name
self._obj = obj
def __getattr__(self, name):
return getattr(self._obj, name)
def __call__(self, *args, **kwargs):
return self._obj(*args, **kwargs)
def __repr__(self):
return self._name
class NamedDistribution(object):
"""Translates DistributionStrategy and its data into a good name."""
def __init__(self, name, distribution, required_gpus=None,
required_tpu=False):
self._distribution = distribution
self._name = name
self._required_gpus = required_gpus
self._required_tpu = required_tpu
def __repr__(self):
return self._name
@property
def strategy(self):
return self._distribution
@property
def required_gpus(self):
return self._required_gpus
@property
def required_tpu(self):
return self._required_tpu
default_strategy = NamedDistribution(
"Default",
distribute_lib._default_distribution_strategy, # pylint: disable=protected-access
required_gpus=None)
one_device_strategy = NamedDistribution(
"OneDeviceCPU", one_device_strategy.OneDeviceStrategy("/cpu:0"),
required_gpus=None)
tpu_strategy_single_iteration = NamedDistribution(
"TPUSingleIteration",
tpu_strategy.TPUStrategy(iterations_per_step=1),
required_tpu=True)
tpu_strategy = NamedDistribution(
"TPU", tpu_strategy.TPUStrategy(), required_tpu=True)
# Note that we disable prefetching for testing since prefetching makes
# the input non-deterministic.
mirrored_strategy_with_gpu_and_cpu = NamedDistribution(
"MirroredCPUAndGPU",
mirrored_strategy.MirroredStrategy(
["/gpu:0", "/cpu:0"], prefetch_on_device=False),
required_gpus=1)
mirrored_strategy_with_two_gpus = NamedDistribution(
"Mirrored2GPUs",
mirrored_strategy.MirroredStrategy(
["/gpu:0", "/gpu:1"], prefetch_on_device=False),
required_gpus=2)
adam_optimizer_v1_fn = NamedObject(
"AdamV1", lambda: adam.AdamOptimizer(0.2, epsilon=1))
gradient_descent_optimizer_v1_fn = NamedObject(
"GradientDescentV1", lambda: gradient_descent.GradientDescentOptimizer(0.2))
adam_optimizer_v2_fn = NamedObject(
"AdamV2", lambda: adam_v2.AdamOptimizer(0.2, epsilon=1))
gradient_descent_optimizer_v2_fn = NamedObject(
"GradientDescentV2",
lambda: gradient_descent_v2.GradientDescentOptimizer(0.2))
graph_and_eager_modes = ["graph", "eager"]
def distributions_and_v1_optimizers():
"""A common set of combination with DistributionStrategies and Optimizers."""
return combine(
distribution=[
one_device_strategy, mirrored_strategy_with_gpu_and_cpu,
mirrored_strategy_with_two_gpus
],
optimizer_fn=[adam_optimizer_v1_fn, gradient_descent_optimizer_v1_fn])
def distributions_and_v2_optimizers():
"""DistributionStrategies and V2 Optimizers."""
return combine(
distribution=[
one_device_strategy, mirrored_strategy_with_gpu_and_cpu,
mirrored_strategy_with_two_gpus
],
optimizer_fn=[adam_optimizer_v2_fn, gradient_descent_optimizer_v2_fn])
|
Jibanprakash/tensorflow
|
tensorflow/contrib/lite/python/convert_saved_model.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to convert SavedModel to frozen GraphDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.lite.python.convert import tensor_name
from tensorflow.contrib.saved_model.python.saved_model import reader
from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
from tensorflow.core.framework import types_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import graph_util as tf_graph_util
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import loader
def _log_tensor_details(tensor_info):
"""Log tensor details: name, shape, and type."""
for key in tensor_info:
val = tensor_info[key]
dtype = types_pb2.DataType.Name(val.dtype)
if val.tensor_shape.unknown_rank:
shape = "unknown_rank"
else:
dims = [str(dim.size) for dim in val.tensor_shape.dim]
shape = "({})".format(", ".join(dims))
logging.info("Tensor's key in saved_model's tensor_map: %s", key)
logging.info(" tensor name: %s, shape: %s, type: %s", val.name, shape,
dtype)
def _get_meta_graph_def(saved_model_dir, tag_set):
"""Validate saved_model and extract MetaGraphDef.
Args:
saved_model_dir: saved_model path to convert.
tag_set: Set of tag(s) of the MetaGraphDef to load.
Returns:
The meta_graph_def used for tflite conversion.
Raises:
ValueError: No valid MetaGraphDef for given tag_set.
"""
saved_model = reader.read_saved_model(saved_model_dir)
tag_sets = []
result_meta_graph_def = None
for meta_graph_def in saved_model.meta_graphs:
meta_graph_tag_set = set(meta_graph_def.meta_info_def.tags)
tag_sets.append(meta_graph_tag_set)
if meta_graph_tag_set == tag_set:
result_meta_graph_def = meta_graph_def
logging.info("The given saved_model contains the following tags: %s",
tag_sets)
if result_meta_graph_def is not None:
return result_meta_graph_def
else:
raise ValueError("No valid MetaGraphDef for this tag_set '{}'. Possible "
"values are '{}'. ".format(tag_set, tag_sets))
def _get_signature_def(meta_graph, signature_key):
"""Get the signature def from meta_graph with given signature_key.
Args:
meta_graph: meta_graph_def.
signature_key: signature_def in the meta_graph_def.
Returns:
The signature_def used for tflite conversion.
Raises:
ValueError: Given signature_key is not valid for this meta_graph.
"""
signature_def_map = meta_graph.signature_def
signature_def_keys = set(signature_def_map.keys())
logging.info(
"The given SavedModel MetaGraphDef contains SignatureDefs with the "
"following keys: %s", signature_def_keys)
if signature_key not in signature_def_keys:
raise ValueError("No '{}' in the SavedModel\'s SignatureDefs. Possible "
"values are '{}'.".format(signature_key,
",".join(signature_def_keys)))
signature_def = signature_def_utils.get_signature_def_by_key(
meta_graph, signature_key)
return signature_def
def _get_inputs_outputs(signature_def):
"""Get inputs and outputs from SignatureDef.
Args:
signature_def: SignatureDef in the meta_graph_def for conversion.
Returns:
The inputs and outputs in the graph for conversion.
"""
inputs_tensor_info = signature_def.inputs
outputs_tensor_info = signature_def.outputs
logging.info("input tensors info: ")
_log_tensor_details(inputs_tensor_info)
logging.info("output tensors info: ")
_log_tensor_details(outputs_tensor_info)
def gather_names(tensor_info):
return [tensor_info[key].name for key in tensor_info]
inputs = gather_names(inputs_tensor_info)
outputs = gather_names(outputs_tensor_info)
return inputs, outputs
def _get_tensors(graph, signature_def_tensor_names=None,
user_tensor_names=None):
"""Gets the tensors associated with the tensor names.
  Either signature_def_tensor_names or user_tensor_names should be provided. If
  the user provides tensor names, the tensors associated with those names are
  returned. Otherwise, the tensors associated with the names in the SignatureDef
  are returned.
Args:
graph: GraphDef representing graph.
signature_def_tensor_names: Tensor names stored in either the inputs or
outputs of a SignatureDef. (default None)
user_tensor_names: Tensor names provided by the user. (default None)
Returns:
List of tensors.
Raises:
ValueError:
signature_def_tensors and user_tensor_names are undefined or empty.
user_tensor_names are not valid.
"""
tensors = []
if user_tensor_names:
# Sort the tensor names.
user_tensor_names = sorted(user_tensor_names)
tensors = get_tensors_from_tensor_names(graph, user_tensor_names)
elif signature_def_tensor_names:
tensors = [
graph.get_tensor_by_name(name)
for name in sorted(signature_def_tensor_names)
]
else:
# Throw ValueError if signature_def_tensors and user_tensor_names are both
# either undefined or empty.
raise ValueError(
"Specify either signature_def_tensor_names or user_tensor_names")
return tensors
def get_tensors_from_tensor_names(graph, tensor_names):
"""Gets the Tensors associated with the `tensor_names` in the provided graph.
Args:
graph: TensorFlow Graph.
tensor_names: List of strings that represent names of tensors in the graph.
Returns:
A list of Tensor objects in the same order the names are provided.
Raises:
ValueError:
tensor_names contains an invalid tensor name.
"""
# Get the list of all of the tensors.
tensor_name_to_tensor = {
tensor_name(tensor): tensor for op in graph.get_operations()
for tensor in op.values()
}
# Get the tensors associated with tensor_names.
tensors = []
invalid_tensors = []
for name in tensor_names:
tensor = tensor_name_to_tensor.get(name)
if tensor is None:
invalid_tensors.append(name)
else:
tensors.append(tensor)
# Throw ValueError if any user input names are not valid tensors.
if invalid_tensors:
raise ValueError("Invalid tensors '{}' were found.".format(
",".join(invalid_tensors)))
return tensors
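# Editor's note: an illustrative call, not part of the original module. For the
# graphs built in lite_test.py above, input and output tensors can be fetched by
# the op-style names the converter accepts (no ':0' suffix):
#
#   tensors = get_tensors_from_tensor_names(sess.graph, ['Placeholder', 'add'])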
def set_tensor_shapes(tensors, shapes):
"""Sets Tensor shape for each tensor if the shape is defined.
Args:
tensors: TensorFlow ops.Tensor.
shapes: Dict of strings representing input tensor names to list of
      integers representing input shapes (e.g., {"foo": [1, 16, 16, 3]}).
"""
if shapes:
for tensor in tensors:
shape = shapes.get(tensor.name)
if shape is not None:
tensor.set_shape(shapes[tensor.name])
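# Editor's note: an illustrative call, not part of the original module. Shapes
# are looked up by each tensor's `.name`, and tensors without a matching entry
# (or with a None entry) are left untouched:
#
#   set_tensor_shapes(in_tensors, {in_tensors[0].name: [1, 16, 16, 3]})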
def freeze_saved_model(saved_model_dir, input_arrays, input_shapes,
output_arrays, tag_set, signature_key):
"""Converts a SavedModel to a frozen graph.
Args:
saved_model_dir: SavedModel directory to convert.
input_arrays: List of input tensors to freeze graph with. Uses input arrays
from SignatureDef when none are provided.
input_shapes: Dict of strings representing input tensor names to list of
      integers representing input shapes (e.g., {"foo": [1, 16, 16, 3]}).
      Automatically determined when an input shape is None (e.g., {"foo": None}).
output_arrays: List of output tensors to freeze graph with. Uses output
arrays from SignatureDef when none are provided.
tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present.
signature_key: Key identifying SignatureDef containing inputs and outputs.
Returns:
frozen_graph_def: Frozen GraphDef.
in_tensors: List of input tensors for the graph.
out_tensors: List of output tensors for the graph.
Raises:
ValueError:
SavedModel doesn't contain a MetaGraphDef identified by tag_set.
signature_key is not in the MetaGraphDef.
input_shapes does not match the length of input_arrays.
input_arrays or output_arrays are not valid.
"""
# Read SignatureDef.
meta_graph = _get_meta_graph_def(saved_model_dir, tag_set)
signature_def = _get_signature_def(meta_graph, signature_key)
inputs, outputs = _get_inputs_outputs(signature_def)
graph = ops.Graph()
with session.Session(graph=graph) as sess:
# TODO(nupurgarg): Throw ValueError if SavedModel has assets/ directory.
loader.load(sess, meta_graph.meta_info_def.tags, saved_model_dir)
# Gets input and output tensors.
# TODO(zhixianyan): Use TFLite supported Op list to filter outputs.
in_tensors = _get_tensors(graph, inputs, input_arrays)
out_tensors = _get_tensors(graph, outputs, output_arrays)
set_tensor_shapes(in_tensors, input_shapes)
output_names = [node.split(":")[0] for node in outputs]
frozen_graph_def = tf_graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), output_names)
return frozen_graph_def, in_tensors, out_tensors
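# Editor's note: an illustrative call, not part of the original module, assuming
# a SavedModel exported with the default serving tag and signature (these are the
# literal values of tag_constants.SERVING and
# signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY):
#
#   graph_def, in_tensors, out_tensors = freeze_saved_model(
#       saved_model_dir='/tmp/simple_savedmodel',
#       input_arrays=None, input_shapes=None, output_arrays=None,
#       tag_set=set(['serve']), signature_key='serving_default')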
|
Jibanprakash/tensorflow
|
tensorflow/contrib/eager/python/examples/l2hmc/l2hmc_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests l2hmc fit to 2D strongly correlated Gaussian executed eagerly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy.random as npr
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.l2hmc import l2hmc
def get_default_hparams():
return tf.contrib.training.HParams(
x_dim=2,
n_samples=200,
n_steps=10,
eps=.1,
n_iters=5,
learning_rate=.001,
n_warmup_iters=1)
class L2hmcTest(tf.test.TestCase):
"""Unit tests for l2hmc in both eager and graph mode."""
def testComputeLoss(self):
"""Testing function l2hmc.compute_loss in both graph and eager mode."""
# Eager mode testing
hparams = get_default_hparams()
dynamics = l2hmc.Dynamics(
x_dim=hparams.x_dim,
loglikelihood_fn=l2hmc.get_scg_energy_fn(),
n_steps=hparams.n_steps,
eps=hparams.eps)
samples = tf.random_normal(shape=[hparams.n_samples, hparams.x_dim])
loss, x_out = l2hmc.compute_loss(samples, dynamics)
# Check shape and numerical stability
self.assertEqual(x_out.shape, samples.shape)
self.assertEqual(loss.shape, [])
self.assertAllClose(loss.numpy(), loss.numpy(), rtol=1e-5)
# Graph mode testing
with tf.Graph().as_default():
dynamics = l2hmc.Dynamics(
x_dim=hparams.x_dim,
loglikelihood_fn=l2hmc.get_scg_energy_fn(),
n_steps=hparams.n_steps,
eps=hparams.eps)
x = tf.placeholder(tf.float32, shape=[None, hparams.x_dim])
loss, x_out = l2hmc.compute_loss(x, dynamics)
samples = npr.normal(size=[hparams.n_samples, hparams.x_dim])
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
loss_np, x_out_np = sess.run([loss, x_out], feed_dict={x: samples})
# Check shape and numerical stability
self.assertEqual(x_out_np.shape, samples.shape)
self.assertEqual(loss_np.shape, ())
self.assertAllClose(loss_np, loss_np, rtol=1e-5)
class L2hmcBenchmark(tf.test.Benchmark):
"""Eager and graph benchmarks for l2hmc."""
def benchmarkEagerL2hmc(self):
"""Benchmark Eager performance."""
hparams = get_default_hparams()
dynamics = l2hmc.Dynamics(
x_dim=hparams.x_dim,
loglikelihood_fn=l2hmc.get_scg_energy_fn(),
n_steps=hparams.n_steps,
eps=hparams.eps)
# TODO(lxuechen): Add learning rate decay
optimizer = tf.train.AdamOptimizer(learning_rate=hparams.learning_rate)
# Warmup to reduce initialization effect when timing
l2hmc.warmup(dynamics, optimizer, n_iters=hparams.n_warmup_iters)
# Time
start_time = time.time()
l2hmc.fit(
dynamics,
optimizer,
n_samples=hparams.n_samples,
n_iters=hparams.n_iters)
wall_time = time.time() - start_time
examples_per_sec = hparams.n_samples / wall_time
self.report_benchmark(
name="eager_train_%s" % ("gpu" if tfe.num_gpus() > 0 else "cpu"),
iters=hparams.n_iters,
extras={"examples_per_sec": examples_per_sec},
wall_time=wall_time)
def benchmarkGraphL2hmc(self):
"""Benchmark Graph performance."""
hparams = get_default_hparams()
with tf.Graph().as_default():
dynamics = l2hmc.Dynamics(
x_dim=hparams.x_dim,
loglikelihood_fn=l2hmc.get_scg_energy_fn(),
n_steps=hparams.n_steps,
eps=hparams.eps)
x = tf.placeholder(tf.float32, shape=[None, hparams.x_dim])
loss, x_out = l2hmc.compute_loss(x, dynamics)
global_step = tf.Variable(0., name="global_step", trainable=False)
learning_rate = tf.train.exponential_decay(
hparams.learning_rate, global_step, 1000, 0.96, staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss, global_step=global_step)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Warmup to reduce initialization effect when timing
samples = npr.normal(size=[hparams.n_samples, hparams.x_dim])
for _ in range(hparams.n_warmup_iters):
samples, _, _, _ = sess.run(
[x_out, loss, train_op, learning_rate], feed_dict={x: samples})
# Time
start_time = time.time()
for _ in range(hparams.n_iters):
samples, _, _, _ = sess.run(
[x_out, loss, train_op, learning_rate], feed_dict={x: samples})
wall_time = time.time() - start_time
examples_per_sec = hparams.n_samples / wall_time
self.report_benchmark(
name="graph_train_%s" % ("gpu"
if tf.test.is_gpu_available() else "cpu"),
iters=hparams.n_iters,
extras={"examples_per_sec": examples_per_sec},
wall_time=wall_time)
if __name__ == "__main__":
tf.enable_eager_execution()
tf.test.main()
|
Jibanprakash/tensorflow
|
tensorflow/python/training/device_util_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for device utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import device_util
class DeviceUtilTest(test.TestCase):
def testCurrentDeviceWithGlobalGraph(self):
with ops.device("/cpu:0"):
self.assertEqual(device_util.current(), "/device:CPU:0")
with ops.device("/job:worker"):
with ops.device("/cpu:0"):
self.assertEqual(device_util.current(), "/job:worker/device:CPU:0")
with ops.device("/cpu:0"):
with ops.device("/gpu:0"):
self.assertEqual(device_util.current(), "/device:GPU:0")
def testCurrentDeviceWithNonGlobalGraph(self):
with ops.Graph().as_default():
with ops.device("/cpu:0"):
self.assertEqual(device_util.current(), "/device:CPU:0")
def testCurrentDeviceWithEager(self):
with context.eager_mode():
with ops.device("/cpu:0"):
self.assertEqual(device_util.current(),
"/job:localhost/replica:0/task:0/device:CPU:0")
def testCanonicalizeWithoutDefaultDevice(self):
self.assertEqual(
device_util.canonicalize("/cpu:0"),
"/job:localhost/replica:0/task:0/device:CPU:0")
self.assertEqual(
device_util.canonicalize("/job:worker/cpu:0"),
"/job:worker/replica:0/task:0/device:CPU:0")
self.assertEqual(
device_util.canonicalize("/job:worker/task:1/cpu:0"),
"/job:worker/replica:0/task:1/device:CPU:0")
def testCanonicalizeWithDefaultDevice(self):
self.assertEqual(
device_util.canonicalize("/job:worker/task:1/cpu:0", default="/gpu:0"),
"/job:worker/replica:0/task:1/device:CPU:0")
self.assertEqual(
device_util.canonicalize("/job:worker/task:1", default="/gpu:0"),
"/job:worker/replica:0/task:1/device:GPU:0")
self.assertEqual(
device_util.canonicalize("/cpu:0", default="/job:worker"),
"/job:worker/replica:0/task:0/device:CPU:0")
def testResolveWithDeviceScope(self):
with ops.device("/gpu:0"):
self.assertEqual(
device_util.resolve("/job:worker/task:1/cpu:0"),
"/job:worker/replica:0/task:1/device:CPU:0")
self.assertEqual(
device_util.resolve("/job:worker/task:1"),
"/job:worker/replica:0/task:1/device:GPU:0")
with ops.device("/job:worker"):
self.assertEqual(
device_util.resolve("/cpu:0"),
"/job:worker/replica:0/task:0/device:CPU:0")
if __name__ == "__main__":
test.main()
|
Jibanprakash/tensorflow
|
tensorflow/contrib/distribute/python/cross_tower_utils.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_tower_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
from tensorflow.contrib import nccl
from tensorflow.contrib.distribute.python import values as value_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
def aggregate_gradients_using_nccl(tower_grads):
"""Aggregate gradients using nccl allreduce."""
agg_all_g_and_v = []
for single_g_and_v in zip(*tower_grads):
single_grads = [g for g, _ in single_g_and_v]
agg_grads = nccl.all_sum(single_grads)
agg_all_g_and_v.append(
[(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v
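# Editor's note: an illustrative shape of the data flow, not part of the
# original module. `tower_grads[tower][var_idx]` is a (gradient, variable) pair;
# the result keeps the same nesting, but every gradient is the nccl all_sum of
# the per-tower gradients, placed on that tower's device:
#
#   tower_grads = [[(g0, v0)], [(g1, v1)]]      # two towers, one variable
#   agg = aggregate_gradients_using_nccl(tower_grads)
#   # agg[0] contains (g0 + g1 on tower 0's device, v0);
#   # agg[1] contains the copy on tower 1's device, paired with v1.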
def aggregate_gradients_using_hierarchical_copy(avail_devices, tower_grads):
"""Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all towers and the variable is chosen from the first tower.
"""
# This only works for DGX-1 type of machine topology
# Device peer to peer matrix
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
agg_grads = []
num_devices = len(avail_devices)
# In the special case of DGX-1 machine topology, the two groups have equal
# size.
group_size = num_devices // 2
for i, single_grads in enumerate(zip(*tower_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Aggregate the first group.
group_0_device_grads = single_grads[group_0_begin:
group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
group_0_device_grads, False, False)
# Aggregate the second group.
group_1_device_grads = single_grads[group_1_begin:
group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
group_1_device_grads, False, False)
# Aggregate between the groups.
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
[group_0_agg_grads, group_1_agg_grads], False, False)
# Broadcast the result back into the root of each group.
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append(
[(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single tower, and the number of pairs
equals the number of towers.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
    The tuple ((average_gradient, variable), has_nan_or_inf) where the gradient
    has been averaged (or summed, when `use_mean` is False) across all towers.
    The variable is chosen from the first tower. has_nan_or_inf indicates
    whether any of the gradients contain NaN or Inf values.
"""
grads = [g for g, _ in grad_and_vars]
grad = math_ops.add_n(grads)
if use_mean and len(grads) > 1:
grad = array_ops.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
    has_nan_or_inf = math_ops.logical_not(
        math_ops.reduce_all(math_ops.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
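# Editor's note: an illustrative call, not part of the original module. With two
# towers contributing gradients for the same variable:
#
#   (avg_grad, var), has_nan_or_inf = aggregate_single_gradient_using_copy(
#       [(g0, v0), (g1, v1)], use_mean=True, check_inf_nan=False)
#   # avg_grad == (g0 + g1) / 2, var is v0, has_nan_or_inf is None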
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists, it will be returned as multiple
ranges.
Returns:
(ranges, singles) where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
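# Editor's note: an illustrative example, not part of the original module:
#
#   extract_ranges([0, 1, 2, 6, 8, 9])
#   # returns ([[0, 2], [8, 9]], [6])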
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
"""Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one tower.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
"""
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with ops.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with ops.device(g.device):
members.append(array_ops.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with ops.device(members[0].device):
return array_ops.concat(members, 0)
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with ops.device(gv[0][0].device):
with ops.name_scope('unpack'):
splits = array_ops.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
def pack_small_tensors(tower_grads, max_bytes=0, max_group=0):
"""Concatenate small gradient tensors together for reduction.
Args:
tower_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small.
max_group: Int giving max number of small tensors that may be
concatenated into one new tensor.
Returns:
new_tower_grads, packing where new_tower_grads is identical to
tower_grads except that all feasible small_tensors have been removed
from their places and concatenated into larger tensors that are
now in the front of the list for each tower, and packing contains
the data necessary to restore the tower_grads structure.
Look through the first tower for gradients of the same type (float),
and small size, that are all sequential. For each such group,
replace by a new tensor that is a flattened concatenation. Note
that the corresponding variable will be absent, which doesn't matter
because it isn't used during all-reduce.
Requires:
Every gv_list in towers must have isomorphic structure including identical
tensor sizes and types.
"""
small_indices = []
large_indices = []
for idx, (g, _) in enumerate(tower_grads[0]):
if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes:
small_indices.append(idx)
else:
large_indices.append(idx)
small_ranges, small_singles = extract_ranges(
small_indices, range_size_limit=max_group)
large_indices = sorted(large_indices + small_singles)
num_gv = len(tower_grads[0])
packing = {}
if small_ranges:
new_tower_grads = []
for dev_idx, gv_list in enumerate(tower_grads):
assert len(gv_list) == num_gv
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_tower_grads.append(new_gv_list)
return new_tower_grads, packing
else:
return tower_grads, None
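# Illustrative sketch (hypothetical sizes): with max_bytes=256 and max_group=16,
# float32 gradients of 10, 20, 5000, 8 and 12 elements give small_indices
# [0, 1, 3, 4] (4 bytes * elements <= 256), extract_ranges yields [[0, 1], [3, 4]],
# and each tower's list becomes [packed(0..1), packed(3..4), grad_2]. The returned
# packing dict holds the metadata unpack_small_tensors needs to restore the layout.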
def unpack_small_tensors(tower_grads, packing):
"""Undo the structure alterations to tower_grads done by pack_small_tensors.
Args:
tower_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to tower_grads.
Returns:
new_tower_grads: identical to tower_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
"""
if not packing:
return tower_grads
new_tower_grads = []
num_devices = len(tower_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(tower_grads):
gv_list = list(gv_list)
new_gv_list = gv_list[num_packed:]
for i in xrange(0, num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_tower_grads.append(new_gv_list)
return new_tower_grads
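# Illustrative sketch: continuing the example above with 2 towers and 4 packing
# keys, num_packed = 4 // 2 = 2, so the first two entries of each tower's list are
# expanded via unpack_grad_tuple and the recovered (grad, var) pairs are inserted
# back at the indices recorded in packing, reproducing the original ordering.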
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
"""Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
if any(isinstance(v, ops.IndexedSlices) for v in values):
return gradients_impl._AggregateIndexedSlicesGradients(values) # pylint: disable=protected-access
else:
return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
if isinstance(value, ops.IndexedSlices):
value = gradients_impl._HandleNestedIndexedSlices(value) # pylint: disable=protected-access
return ops.IndexedSlices(
value.values / n, value.indices, value.dense_shape)
else:
return value / n
def copy_tensor_or_indexed_slices_to_device(value, device):
with ops.device(device):
if isinstance(value, ops.IndexedSlices):
copied_values = array_ops.identity(value.values)
copied_indices = array_ops.identity(value.indices)
copied_shape = array_ops.identity(value.dense_shape)
result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
else:
result = array_ops.identity(value)
return result
def contains_indexed_slices(value):
"""Check whether the value is `IndexedSlices` or contains `IndexedSlices`."""
if isinstance(value, ops.IndexedSlices):
return True
elif isinstance(value, (list, tuple)) and value:
return any(contains_indexed_slices(v) for v in value)
elif isinstance(value, value_lib.DistributedValues):
return contains_indexed_slices(list(value._index.values())) # pylint: disable=protected-access
elif isinstance(value, value_lib.MapOutput):
return contains_indexed_slices(value.get())
else:
return False
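# Illustrative note: contains_indexed_slices returns True for an ops.IndexedSlices
# value or for a list/tuple that holds one anywhere inside it (it also looks into
# DistributedValues and MapOutput wrappers); a plain dense tensor or an empty list
# returns False.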
|
artudi54/programs-integrator
|
programs_integrator/desktoputils/__init__.py
|
<reponame>artudi54/programs-integrator
from programs_integrator.desktoputils.StructureMaker import *
|
artudi54/programs-integrator
|
programs_integrator/config/__init__.py
|
from programs_integrator.config.Config import *
from programs_integrator.config.ApplicationDir import *
|
artudi54/programs-integrator
|
programs_integrator/ProgramsIntegrator.py
|
import sys
import signal
import pathlib
import pkg_resources
import dbus
import dbus.bus
import dbus.service
import dbus.mainloop.glib
from PySide2 import QtWidgets
from PySide2 import QtCore
from PySide2 import QtGui
from programs_integrator.desktoputils import StructureMaker
from programs_integrator.config import Config
from programs_integrator import user
DBUS_NAME = "com.ProgramsIntegrator"
DBUS_OBJECT_NAME = "/ProgramsIntegrator"
class ProgramsIntegratorWorker(QtCore.QObject):
def __init__(self):
super().__init__()
self.configuration = Config()
self.structure_maker = StructureMaker(self.configuration)
self.timer = QtCore.QTimer(self)
self.config_dialog = None
self.timer.timeout.connect(self.structure_maker.update)
self.timer.setInterval(1000 * self.configuration.update_delay)
self.configuration.write_config()
def start(self):
self.configuration.print()
self.structure_maker.update()
self.timer.start()
def show_window(self):
if self.config_dialog is not None:
self.config_dialog.raise_()
self.config_dialog.activateWindow()
return
self.config_dialog = user.ConfigDialog(self.configuration)
self.config_dialog.setAttribute(QtCore.Qt.WidgetAttribute.WA_DeleteOnClose)
self.config_dialog.destroyed.connect(self._handle_dialog_closed)
self.config_dialog.update_requested.connect(self.structure_maker.update)
self.config_dialog.config_changed.connect(self._handle_config_changed)
self.config_dialog.show()
@QtCore.Slot()
def _handle_dialog_closed(self):
self.config_dialog = None
@QtCore.Slot()
def _handle_config_changed(self):
self.timer.setInterval(1000 * self.configuration.update_delay)
self.configuration.write_config()
class ProgramsIntegrator(dbus.service.Object):
def __init__(self, session_bus):
dbus.service.Object.__init__(self, session_bus, DBUS_OBJECT_NAME)
self.worker = ProgramsIntegratorWorker()
@dbus.service.method(DBUS_NAME, in_signature='', out_signature='')
def show_window(self):
self.worker.show_window()
def start(self):
self.worker.start()
def handle_exception(exc):
print(str(pathlib.Path(sys.argv[0]).name) + ": " + str(exc), file=sys.stderr)
return 1
def run():
try:
signal.signal(signal.SIGINT, signal.SIG_DFL)
application = QtWidgets.QApplication(sys.argv)
application.setQuitOnLastWindowClosed(False)
icon_path = pkg_resources.resource_filename(__name__, "ProgramsIntegrator.png")
application.setWindowIcon(QtGui.QIcon(icon_path))
dbus_loop = dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
session_bus = dbus.SessionBus()
if session_bus.request_name(DBUS_NAME) != dbus.bus.REQUEST_NAME_REPLY_PRIMARY_OWNER:
raise RuntimeError("application already running")
programs_integrator = ProgramsIntegrator(session_bus)
programs_integrator.start()
return application.exec_()
except Exception as exc:
return handle_exception(exc)
def notify():
try:
session_bus = dbus.SessionBus()
if session_bus.request_name(DBUS_NAME) == dbus.bus.REQUEST_NAME_REPLY_PRIMARY_OWNER:
raise RuntimeError("programs-integrator is not running")
remote_object = session_bus.get_object(DBUS_NAME, DBUS_OBJECT_NAME)
programs_integrator = dbus.Interface(remote_object, DBUS_NAME)
programs_integrator.show_window()
except Exception as exc:
return handle_exception(exc)
|
artudi54/programs-integrator
|
programs_integrator/config/UserApplicationDir.py
|
<reponame>artudi54/programs-integrator<gh_stars>1-10
from programs_integrator.config.ApplicationDir import ApplicationDir
import pathlib
class UserApplicationDir(ApplicationDir):
def __init__(self, home_path: pathlib.Path):
super().__init__()
self.name = "UserApplications"
self.path = home_path / ".local" / "share" / "applications"
|
artudi54/programs-integrator
|
setup.py
|
#!/usr/bin/env python3
from setuptools import setup, find_packages
setup(
name="programs-integrator",
version="0.2",
author="<NAME>",
author_email="<EMAIL>",
description="Daemon program for dynamically generating 'Programs' directory",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://github.com/artudi54/programs-integrator",
packages=find_packages(),
package_data={"": ["*.ui", "*.png"]},
scripts=["bin/programs-integrator", "bin/programs-integrator-ctl"],
data_files=[("share/icons", ["resources/share/icons/programs-integrator.svg"]),
("share/applications", ["resources/share/applications/programs-integrator.desktop"]),
("share/systemd/user", ["resources/share/systemd/user/programs-integrator.service"])],
install_requires=open('requirements.txt').read().splitlines(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux"],
)
|
artudi54/programs-integrator
|
programs_integrator/user/__init__.py
|
<filename>programs_integrator/user/__init__.py
from programs_integrator.user.ConfigDialog import ConfigDialog
|
artudi54/programs-integrator
|
programs_integrator/desktoputils/DesktopEntry.py
|
<filename>programs_integrator/desktoputils/DesktopEntry.py
import pathlib
import configparser
class DesktopEntry:
    def __init__(self, path):
self.path = path
self.filename = self.path.name
self.name = DesktopEntry._read_name(self.path)
def is_valid(self):
return self.name is not None
def make_filename(self, append_extension=True, use_original_filename=True):
if not self.is_valid():
return None
name = self.name
if use_original_filename:
name = self.path.stem
if append_extension:
name += self.path.suffix
return name
@staticmethod
def _read_name(path):
section_desktop_entry = "Desktop Entry"
key_name = "Name"
try:
parser = configparser.ConfigParser()
parser.read(path)
if section_desktop_entry in parser:
desktop_entry = parser[section_desktop_entry]
if key_name in desktop_entry:
return desktop_entry[key_name]
return None
except (configparser.Error, OSError):
return None
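# Illustrative sketch (hypothetical file): given /usr/share/applications/firefox.desktop
# containing a "[Desktop Entry]" section with "Name=Firefox", _read_name returns
# "Firefox"; a file without that section or without a Name key yields None, and
# is_valid() is then False for the entry.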
|
artudi54/programs-integrator
|
programs_integrator/__init__.py
|
from programs_integrator.ProgramsIntegrator import run, notify
|
artudi54/programs-integrator
|
programs_integrator/user/ConfigDialog.py
|
import pkg_resources
import sortedcontainers
import numpy
from PySide2 import QtWidgets
from PySide2 import QtCore
from programs_integrator.user.SelfUiLoader import SelfUiLoader
class ConfigDialog(QtWidgets.QDialog):
config_changed = QtCore.Signal()
update_requested = QtCore.Signal()
def __init__(self, configuration):
super().__init__()
ui_file_path = pkg_resources.resource_filename(__name__, "ConfigDialog.ui")
SelfUiLoader(self).load(str(ui_file_path))
self.configuration = configuration
self._original_excluded_text = "\n".join(self.configuration.excluded_desktop_entries)
self._original_title = self.windowTitle()
self._unsaved_title = '*' + self._original_title
self._fill_widgets()
self._connect_signals()
def _connect_signals(self):
self.ok_button.clicked.connect(self._accept_dialog)
self.apply_button.clicked.connect(self._apply_config)
self.cancel_button.clicked.connect(self.close)
self.update_button.clicked.connect(self.update_requested)
self.update_delay_input.textChanged.connect(self._update_window_name)
self.append_extension_button.toggled.connect(self._update_window_name)
self.use_original_filename_button.toggled.connect(self._update_window_name)
self.excluded_desktop_entries_input.textChanged.connect(self._update_window_name)
def _fill_widgets(self):
self.update_delay_input.setText(str(self.configuration.update_delay))
self.append_extension_button.setChecked(self.configuration.append_extension)
self.use_original_filename_button.setChecked(self.configuration.use_original_filename)
self.excluded_desktop_entries_input.setPlainText("\n".join(self.configuration.excluded_desktop_entries))
for i in range(len(self.configuration.application_dirs)):
name_item = QtWidgets.QTableWidgetItem(self.configuration.application_dirs[i].name)
path_item = QtWidgets.QTableWidgetItem(str(self.configuration.application_dirs[i].path))
self.application_directories_table.insertRow(i)
self.application_directories_table.setItem(i, 0, name_item)
self.application_directories_table.setItem(i, 1, path_item)
@QtCore.Slot()
def _update_window_name(self):
if (self.update_delay_input.text() != str(self.configuration.update_delay) or
self.append_extension_button.isChecked() != self.configuration.append_extension or
self.use_original_filename_button.isChecked() != self.configuration.use_original_filename or
self.excluded_desktop_entries_input.toPlainText() != self._original_excluded_text):
self.setWindowTitle(self._unsaved_title)
else:
self.setWindowTitle(self._original_title)
@QtCore.Slot()
def _apply_config(self):
try:
update_delay = int(self.update_delay_input.text())
if update_delay <= 0 or (update_delay * 1000) > numpy.iinfo(numpy.int32).max:
raise ValueError()
self.configuration.update_delay = update_delay
except ValueError:
QtWidgets.QMessageBox.warning(self, "Invalid input", "Entered update delay is invalid or too big")
return False
self.configuration.append_extension = self.append_extension_button.isChecked()
self.configuration.use_original_filename = self.use_original_filename_button.isChecked()
self.configuration.excluded_desktop_entries =\
sortedcontainers.SortedSet(filter(None, self.excluded_desktop_entries_input.toPlainText().splitlines()))
self.config_changed.emit()
self.setWindowTitle(self._original_title)
return True
@QtCore.Slot()
def _accept_dialog(self):
if self._apply_config():
self.accept()
|
artudi54/programs-integrator
|
programs_integrator/config/Config.py
|
import os
import sys
import contextlib
import pathlib
import configparser
import distutils.util
import numpy
import sortedcontainers
from programs_integrator.config.ApplicationDir import ApplicationDir
from programs_integrator.config.UserApplicationDir import UserApplicationDir
class Config:
_SECTION_CONFIG = "Config"
_KEY_APPEND_EXTENSION = "AppendExtension"
_KEY_USE_ORIGINAL_FILENAME = "UseOriginalFilename"
_KEY_UPDATE_DELAY = "UpdateDelay"
class User:
def __init__(self):
self.home_path = pathlib.Path.home()
self.autostart_path = self.home_path / ".config" / "autostart"
self.config_dir = self.home_path / ".config" / "programs-integrator"
self.config_file = self.config_dir / "config.ini"
self.excluded_file = self.config_dir / "excluded.txt"
def __init__(self):
self.user = Config.User()
self.application_dirs = Config._read_application_dirs(self.user.home_path)
self.append_extension = True
self.use_original_filename = True
self.update_delay = 10
self.excluded_desktop_entries = set()
if not self.user.config_dir.exists():
self.user.config_dir.mkdir(parents=True, exist_ok=True)
if not self.user.config_file.exists():
self.user.config_file.touch(exist_ok=True)
if not self.user.excluded_file.exists():
self.user.excluded_file.touch(exist_ok=True)
self.read_config()
@staticmethod
def _read_application_dirs(home_path):
application_dirs = []
xdg_dirs_str = os.environ["XDG_DATA_DIRS"]
xdg_dirs = xdg_dirs_str.split(":")
for xdg_dir in xdg_dirs:
application_dir = ApplicationDir(xdg_dir)
if application_dir.exists():
application_dirs.append(application_dir)
user_application_dir = UserApplicationDir(home_path)
if user_application_dir.exists():
application_dirs.append(user_application_dir)
return application_dirs
def read_config(self):
parser = configparser.ConfigParser()
parser.optionxform = str
parser.read(self.user.config_file)
if Config._SECTION_CONFIG in parser:
config_section = parser[Config._SECTION_CONFIG]
if Config._KEY_APPEND_EXTENSION in config_section:
with contextlib.suppress(ValueError):
self.append_extension = bool(distutils.util.strtobool(config_section[Config._KEY_APPEND_EXTENSION]))
if Config._KEY_USE_ORIGINAL_FILENAME in config_section:
with contextlib.suppress(ValueError):
self.use_original_filename = bool(distutils.util.strtobool(config_section[Config._KEY_USE_ORIGINAL_FILENAME]))
if Config._KEY_UPDATE_DELAY in config_section:
with contextlib.suppress(ValueError):
update_delay = int(config_section[Config._KEY_UPDATE_DELAY])
if update_delay <= 0 or (update_delay * 1000) > numpy.iinfo(numpy.int32).max:
raise ValueError()
self.update_delay = update_delay
self.excluded_desktop_entries =\
sortedcontainers.SortedSet(filter(None, open(self.user.excluded_file).read().splitlines()))
def write_config(self):
parser = configparser.ConfigParser()
parser.optionxform = str
parser[Config._SECTION_CONFIG] = {
Config._KEY_APPEND_EXTENSION: str(self.append_extension),
Config._KEY_USE_ORIGINAL_FILENAME: str(self.use_original_filename),
Config._KEY_UPDATE_DELAY: str(self.update_delay)
}
with self.user.config_file.open("w") as config_file:
parser.write(config_file)
with self.user.excluded_file.open("w") as excluded_file:
for entry in self.excluded_desktop_entries:
excluded_file.write(entry)
excluded_file.write("\n")
def update_application_dirs(self):
self.application_dirs = Config._read_application_dirs(self.user.home_path)
def print(self, file=sys.stdout):
print("Config:", file=file)
print("\t" + Config._KEY_APPEND_EXTENSION + "=" + str(self.append_extension), file=file)
print("\t" + Config._KEY_USE_ORIGINAL_FILENAME + "=" + str(self.use_original_filename), file=file)
print("\t" + Config._KEY_UPDATE_DELAY + "=" + str(self.update_delay), file=file)
print("\tApplicationDirs:")
for application_dir in self.application_dirs:
print("\t\t" + application_dir.name + ":", str(application_dir.path), file=file)
print("\tExcludedDesktopEntries:")
for excluded_entry in self.excluded_desktop_entries:
print("\t\t" + excluded_entry)
print(file=file)
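# Illustrative sketch of the generated ~/.config/programs-integrator/config.ini
# (values shown are the defaults above, not necessarily a real user's file):
#   [Config]
#   AppendExtension = True
#   UseOriginalFilename = True
#   UpdateDelay = 10
# excluded.txt holds one excluded desktop-entry filename per line.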
|
artudi54/programs-integrator
|
programs_integrator/user/SelfUiLoader.py
|
from PySide2 import QtUiTools
class SelfUiLoader(QtUiTools.QUiLoader):
def __init__(self, widget):
QtUiTools.QUiLoader.__init__(self, widget)
self.widget = widget
def createWidget(self, class_name, parent=None, name=''):
if parent is None and self.widget:
return self.widget
else:
widget = QtUiTools.QUiLoader.createWidget(self, class_name, parent, name)
if self.widget:
setattr(self.widget, name, widget)
return widget
|
artudi54/programs-integrator
|
programs_integrator/desktoputils/StructureMaker.py
|
import os
import sys
from programs_integrator.desktoputils.DesktopEntry import DesktopEntry
class StructureMaker:
class Programs:
def __init__(self, configuration):
self.path = configuration.user.home_path / "Programs"
self.autostart_path = self.path / "Autostart"
self.application_dirs_path = self.path / "ApplicationDirs"
def __init__(self, configuration):
self.configuration = configuration
self.programs = StructureMaker.Programs(self.configuration)
self.create_directories()
def create_directories(self):
if not self.programs.path.exists():
try:
self.programs.path.mkdir(exist_ok=True)
except OSError as exc:
print("Warning: " + str(exc), file=sys.stderr)
if not self.programs.autostart_path.exists():
try:
os.symlink(self.configuration.user.autostart_path, self.programs.autostart_path)
except OSError as exc:
print("Warning: " + str(exc), file=sys.stderr)
elif self.programs.autostart_path.resolve() != self.configuration.user.autostart_path:
try:
os.remove(self.programs.autostart_path)
os.symlink(self.configuration.user.autostart_path, self.programs.autostart_path)
except OSError as exc:
print("Warning: " + str(exc), file=sys.stderr)
if not self.programs.application_dirs_path.exists():
try:
self.programs.application_dirs_path.mkdir(exist_ok=True)
except OSError as exc:
print("Warning: " + str(exc), file=sys.stderr)
def update(self):
self.create_directories()
self.configuration.update_application_dirs()
self.update_application_dirs()
self.update_desktop_entries()
def update_application_dirs(self):
entries_dict = StructureMaker._directory_symlinks_dict(self.programs.application_dirs_path)
if entries_dict is None:
print("Error: updating application directories failed")
return
for application_dir in self.configuration.application_dirs:
if application_dir.name in entries_dict:
symlink_path = entries_dict[application_dir.name]
if symlink_path != application_dir.path:
try:
os.remove(self.programs.application_dirs_path / application_dir.name)
os.symlink(application_dir.path, self.programs.application_dirs_path / application_dir.name)
except OSError as exc:
print("Waring: " + str(exc), file=sys.stderr)
entries_dict.pop(application_dir.name)
else:
try:
os.symlink(application_dir.path, self.programs.application_dirs_path / application_dir.name)
except OSError as exc:
print("Waring: " + str(exc), file=sys.stderr)
for entry in entries_dict:
try:
os.remove(self.programs.application_dirs_path / entry)
except OSError as exc:
print("Waring: " + str(exc), file=sys.stderr)
def update_desktop_entries(self):
entries_dict = StructureMaker._directory_symlinks_dict(self.programs.path)
if entries_dict is None:
print("Error: updating desktop entries failed", file=sys.stderr)
return
desktop_entries = self._list_desktop_entries()
for desktop_entry in desktop_entries:
if desktop_entry.filename in self.configuration.excluded_desktop_entries:
continue
filename = desktop_entry.make_filename(self.configuration.append_extension,
self.configuration.use_original_filename)
if filename in entries_dict:
symlink_path = entries_dict[filename]
if symlink_path != desktop_entry.path:
try:
os.remove(self.programs.path / filename)
os.symlink(desktop_entry.path, self.programs.path / filename)
except OSError as exc:
print("Warning: " + str(exc), file=sys.stderr)
entries_dict.pop(filename)
else:
try:
os.symlink(desktop_entry.path, self.programs.path / filename)
except OSError as exc:
print("Warning: " + str(exc), file=sys.stderr)
for (entry, path) in entries_dict.items():
if not path.is_dir():
try:
os.remove(self.programs.path / entry)
except OSError as exc:
print("Warning: " + str(exc), file=sys.stderr)
@staticmethod
def _directory_symlinks_dict(directory):
try:
entries = os.listdir(directory)
entries = [entry for entry in entries if (directory / entry).is_symlink()]
return dict((entry, (directory / entry).resolve()) for entry in entries)
except OSError as exc:
print("Warning: " + str(exc), file=sys.stderr)
return None
def _list_desktop_entries(self):
desktop_entries = []
for application_dir in self.configuration.application_dirs:
try:
files = os.listdir(application_dir.path)
except OSError as exc:
print("Waring: " + str(exc), file=sys.stderr)
continue
for file in files:
file_path = application_dir.path / file
if file_path in self.configuration.excluded_desktop_entries:
continue
desktop_entry = DesktopEntry(application_dir.path / file)
if desktop_entry.is_valid():
desktop_entries.append(desktop_entry)
return desktop_entries
|
artudi54/programs-integrator
|
programs_integrator/config/ApplicationDir.py
|
<filename>programs_integrator/config/ApplicationDir.py
import pathlib
class ApplicationDir:
_MAPPING = {
"/usr/share": "SystemApplications",
"/usr/local/share": "LocalSystemApplications",
"/var/lib/snapd/desktop": "SnapApplications"
}
def __init__(self, path: pathlib.Path = None):
if path is None:
self.name = None
self.path = None
return
path = str(path)
if path.endswith("/"):
path = path[:-1]
self.name = ApplicationDir._make_name(path)
self.path = ApplicationDir._make_path(path)
def exists(self):
return self.path is not None and self.path.exists()
@staticmethod
def _make_name(path):
if path in ApplicationDir._MAPPING:
return ApplicationDir._MAPPING[path]
parts = pathlib.Path(path).parts
name = parts[-1]
if parts[-1] == 'share':
name = parts[-2]
name = name[0].upper() + name[1:]
return name + "Applications"
@staticmethod
def _make_path(path):
return pathlib.Path(path) / "applications"
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Exercicios/Mundo2/Repetições em Python (while)/python_056.py
|
'''
Create a program that reads integers from the keyboard. The program only stops when the user types the value 999, which is the stop condition. At the end, show how many numbers were typed and what their sum is (not counting the flag value).
'''
# Iniciar as variaveis
n = s = 0
contador = 0
# Laço while para pedir somar e ver quantos numeros tem
while True:
n = int(input('Digite um numero: '))
if n == 999:
# Quebra do laço while, quando se digita 999
break
s += n
contador += 1
# Usando f string para formatar o print
print(f'A soma vale {s}.')
print(f'Foram digitados {contador}', end='')
print(' números.' if contador > 1 else ' número.')
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Test/aula20a.py
|
def soma(a, b):
print(f'A = {a} e B = {b}.')
s = a + b
print(f'A soma de A + B = {s}.')
# Programa Principal
soma(a=4, b=5)
soma(b=8, a=9)
soma(2, 1)
# Empacotamento
def contador(* num):
for valor in num:
print(f'{valor} ', end=' ')
print('FIM!')
tam = len(num)
print(f'O total de números é {tam}!')
contador(2, 1, 7)
contador(8, 0)
contador(4, 4, 7, 6, 2)
# Trabalhando com listas
def dobra(lst):
pos = 0
while pos < len(lst):
lst[pos] *= 2
pos += 1
valores = [6, 3, 9, 1, 0, 2]
dobra(valores)
print(valores)
# Desempacotamento
def soma(* valores):
s = 0
for num in valores:
s += num
print(f'Somando os valores {valores} temos {s}')
soma(5, 2)
soma(2, 9, 4)
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Exercicios/Mundo2/Repetições em Python (for)/python_044.py
|
<reponame>jbauermanncode/Curso_Em_Video_Python<gh_stars>0
'''
Write a program that computes the sum of all the odd numbers that are multiples of three in the interval from 1 to 500.
'''
# Acumulador para fazer a soma total
s = 0
# Contador para ver quantos números foram somados
c = 0
# for loop over the odd numbers from 1 to 500, picking out the multiples of 3
for i in range(1, 501, 2):
if i % 3 == 0:
c = c + 1
s = s + i
print('A soma de todos os multiplos de 3 ímpares é {}, e foram contados {} números.'.format(s, c))
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Exercicios/Mundo1/Utilizando Módulos/python_018.py
|
<reponame>jbauermanncode/Curso_Em_Video_Python
'''
Write a program that reads any angle and shows on screen the sine, cosine and tangent of that angle.
'''
# Importar math library
from math import radians, sin, cos, tan
# Leia o ângulo
angulo = float(input('Digite o valor de um ângulo: '))
# Calcular o seno
seno = sin(radians(angulo))
# Calcular o cosseno
cosseno = cos(radians(angulo))
# Calcular a tangente
tangente = tan(radians(angulo))
# Imprimir na tela
print('O ângulo de {} tem o SENO de {:.2f}.'.format(angulo, seno))
print('O ângulo de {} tem o COSSENO de {:.2f}.'.format(angulo, cosseno))
print('O ângulo de {} tem a TANGENTE de {:.2f}.'.format(angulo, tangente))
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Test/aula10b.py
|
<gh_stars>0
# Ler duas notas
n1 = float(input('Digite a primeira nota: '))
n2 = float(input('Digite a segunda nota: '))
# Calcular a média das notas
m = (n1 + n2) / 2
# Imprimir a média
print('A sua média foi {:.1f}.'.format(m))
# Analisar a condição da média
if m >= 6.0:
print('Sua média foi boa! PARABÉNS')
else:
print('Sua média foi ruim! ESTUDE MAIS!')
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Exercicios/Mundo1/Utilizando Módulos/python_017.py
|
<reponame>jbauermanncode/Curso_Em_Video_Python
'''
Write a program that reads the lengths of the opposite and adjacent legs of a right triangle, then computes and shows the length of the hypotenuse.
'''
# Importar math
from math import hypot
# Leia os catetos
co= float(input('Valor do cateto oposto: '))
ca= float(input('Valor do cateto adjacente: '))
# Calcular Hipotenusa
h = hypot(co, ca)
# Imprimir a hipotenusa
print('O valor da hipotenusa é {:.2f}' .format(h))
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Exercicios/Mundo3/Funções em Python/python_084.py
|
'''
Write a program with a notas() function that can receive several student grades and returns a dictionary with the following information:
- Number of grades
- The highest grade
- The lowest grade
- The class average
- The status (optional)
'''
def notas(*n, sit = False):
r = dict()
r['total'] = len(n)
r['maior'] = max(n)
r['menor'] = min(n)
r['média'] = sum(n)/len(n)
if r['média'] >= 7:
r['situação'] = 'BOA'
elif r['média'] >= 5:
r['situação'] = 'RAZOÁVEL'
else:
r['situação'] = 'RUIM'
return r
# Programa Principal
resposta = notas(5.5, 2.5, 8.5, sit = True)
print(resposta)
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Exercicios/Mundo3/Tuplas em Python/python_060.py
|
'''
Develop a program that reads four values from the keyboard and stores them in a tuple. At the end, show:
A) How many times the value 9 appeared.
B) In which position the first value 3 was typed.
C) Which of the numbers were even.
'''
# Fazer uma Tupla ler 4 números
n = (int(input('Digite um número: ')),
int(input('Digite outro número: ')),
int(input('Digite um número: ')),
int(input('Digite um número: ')))
print(f'Você digitou os valores {n}.')
# Contar as vezes que aparece o 9
if 9 in n:
print(f'O numero 9 aparece {n.count(9)} vezes')
else:
    print('O valor 9 não foi digitado')
# Posição do numero 3
if 3 in n:
print(f'O numero 3 apareceu na {n.index(3)+1}ª posição.')
else:
print('O valor 3 não foi digitado')
# Para saber os numeros pares
print('Os valores pares digitados foram: ', end='')
pares = [par for par in n if par % 2 == 0]
if len(pares) > 0:
    for par in pares:
        print(par, end=' ')
    print()
else:
    print('Nenhum número par foi digitado.')
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Exercicios/Mundo1/Condições em Python (if..else)/python_028.py
|
<reponame>jbauermanncode/Curso_Em_Video_Python
'''
Write a program that makes the computer "think" of an integer between 0 and 5 and asks the user to try to guess which number the computer chose. The program must print on screen whether the user won or lost.
'''
# Importar função randint do módulo random
from random import randint
# Número que o computador pensa
computador = randint(0 , 5)
print('-=-'*20)
print('Olá! Pensei em um número de 0 a 5. Consegue adivinhar?')
print('-=-'*20)
# A tentativa do jogador de adivinhar
jogador = int(input('Em que número eu pensei? '))
print('###'*20)
# Estrutura Condicional if/else
if jogador == computador:
print('____PARABÉNS! Você VENCEU!!!____')
else:
print('____HAHAHA! Você PERDEU!!!____')
# Imprimir o número que o computador pensou
print('Eu pensei no número {}'.format(computador))
print('###'*20)
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Exercicios/Mundo2/Condições em Python (if..elif)/python_038.py
|
<reponame>jbauermanncode/Curso_Em_Video_Python
'''
Write a program that reads two integers and compares them, showing one of these messages on screen:
- The first value is greater
- The second value is greater
- There is no greater value, the two are equal
'''
# Ler dois números inteiros
n1 = int(input('Informe o primeiro número: '))
n2 = int(input('Informe o segundo número: '))
# Operadores Lógicos
n1_maior = n1 > n2
n2_maior = n2 > n1
# Estrutura Condicional if, elif, else.
if n1_maior:
print('O número {} é o maior!'.format(n1))
elif n2_maior:
print('O número {} é o maior!'.format(n2))
else:
print('Os números são iguais!')
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Test/aula14a.py
|
<reponame>jbauermanncode/Curso_Em_Video_Python<gh_stars>0
for c in range(1, 10):
print(c)
print('FIM')
w = 1
while w < 10:
print(w)
w = w +1
print('FIM')
for i in range(1, 5):
n = int(input('Digite um número: '))
print('FIM')
num = 1
while num != 0:
num = int(input('Digite um número: '))
print('FIM')
r = 'S'
while r == 'S':
number = int(input('Digite um número: '))
r = str(input('Continuar? [S/N]: ')).upper()
print('FIM')
numero = 1
par = impar = 0
while numero != 0:
numero = int(input('Digite um número: '))
if numero != 0:
if numero % 2 == 0:
par +=1
else:
impar += 1
print('Você digitou {} números pares, e {} números ímpares.'.format(par, impar))
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Test/aula13.a.py
|
# Estrutura de Repetição for
# Dando oi 6 vezes
for c in range(0, 6):
print('OI!!!')
print('FIM!!!')
print()
# Contar de 1 a 6
for c in range(1, 7):
print(c)
print('FIM')
print()
# Contagem regressiva a partir de 6
for c in range(6, -1, -1):
print(c)
print('FIM')
# Contar de 2 em 2
for c in range(0, 7, 2):
print(c)
print('FIM')
print()
# Fazer uma contagem a partir de um número digitado
n = int(input('Digite um número: '))
for c in range(0, n+1):
print(c)
print('FIM')
print()
# Ler o inicio, o passo e o fim
i = int(input('Inicio: '))
p = int(input('Passo: '))
f = int(input('Fim: '))
for c in range(i, f+1, p):
print(c)
print('FIM')
print()
# Ler um número n vezes e fazer a soma
s = 0
for c in range(0, 4):
n = int(input('Digite um número: '))
s += n
print('A soma dos valores é {}.'.format(s))
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Exercicios/Mundo1/Tipos Primitivos e Saída de Dados/python_007.py
|
<reponame>jbauermanncode/Curso_Em_Video_Python
'''
Develop a program that reads a student's two grades, then computes and shows their average.
'''
# Ler notas
n1 = float(input('Digite uma nota: '))
n2 = float(input('Digite outra nota: '))
# Calcular média
m = float((n1 + n2)/2)
# Imprimir média
print('Nota da Prova: {:.1f}'.format(n1))
print('Nota do Trabalho: {:.1f}'.format(n2))
print('------------------------------------')
print('Média: {:.1f}'.format(m))
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Exercicios/Mundo2/Condições em Python (if..elif)/python_039.py
|
'''
Write a program that reads a young person's birth year and, according to their age, reports whether they still have to enlist for military service, whether now is exactly the time to enlist, or whether the enlistment deadline has already passed. The program must also show how many years are left or how many years past the deadline they are.
'''
# Importar datatime para atualizar o ano
from datetime import date
# Ler o ano de nascimento
nascimento = int(input('Em que ano você nasceu? '))
# Calcular a idade
atual = date.today().year
idade = atual - nascimento
print('A sua idade é {} anos.'.format(idade))
# Estrutura Condicional if, elif, else
if idade == 18:
print('Você deve se alistar!')
elif idade < 18:
print('Você deve se alistar daqui {} anos!'.format(18 - idade))
else:
print('Você se alistou, ou deveria ter se alistado a {} anos!'.format(idade - 18))
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Exercicios/Mundo2/Repetições em Python (for)/python_043.py
|
'''
Create a program that shows on screen all the even numbers in the interval between 1 and 50.
'''
# Laço for para descobrir os numeros pares
for i in range(1 + 1, 51, 2):# 1 + 1 para a contagem começar a partir de 2
print(i)
print('Esses são os números pares de 1 a 50.')
print()
print('**'*40)
print()
# Outra maneira de fazer
for i in range(1 , 51):
print('.', end= '')
if i % 2 == 0:
print(i, end=' ')
print('Acabou')
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Test/aula17.py
|
<filename>Python_Test/aula17.py
print('Lista 1')
num = [2, 5, 9, 1]
print(num)
print('*'*40)
print('Lista 2')
num[2] = 3
num.append(7)
print(num)
num.sort()
print(num)
print('*'*40)
print('Lista 3')
num.sort(reverse=True)
print(num)
num.insert(2, 0)
print(num)
print(f'Essa lista tem {len(num)} elementos.')
print('*'*40)
print('Lista 4')
num.pop()
num.pop(2)
print(num)
num.insert(2, 2)
print(num)
num.remove(2)
print(num)
if 4 in num:
num.remove(4)
print(num)
else:
print('Esse elemento não existe na lista.')
print('*'*60)
print()
valores = []
valores.append(5)
valores.append(9)
valores.append(4)
for v in valores:
print(f'{v}...', end=' ')
# Para pegar os valores e a posição deles
for c, v in enumerate(valores):
print(f'\nNa posição {c} encontra- se o valor {v}!')
print('Cheguei ao final da lista.')
print('*'*60)
print()
print('Lista 5')
# Ler valores pelo teclado e adicionar a lista
valores = list()
for cont in range(0, 5):
valores.append(int(input('Digite um valor: ')))
for c, v in enumerate(valores):
print(f'Na posição {c} encontra- se o valor {v}!')
print('Cheguei ao final da lista.')
print('*'*60)
print()
print('Lista 6')
a = [2, 3, 4, 7]
print(a)
# Para b fazer uma cópia de a
b = a[:]
# Para b igualar a a
#b = a
b[2] = 8
print(f'Lista A: {a}')
print(f'Lista B: {b}')
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Exercicios/Mundo1/Tipos Primitivos e Saída de Dados/python_014.py
|
'''
Write a program that converts a temperature typed in Celsius to Fahrenheit.
'''
# Ler Temperatura
c = float(input('Informe a temperatura em °C: '))
# Converter para °F
f = ((9 * c) / 5) + 32
# Imprimir na tela
print('A temperatura de {}°C corresponde a {}°F!'.format(c, f))
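# Worked example: c = 25.0 gives f = ((9 * 25.0) / 5) + 32 = 77.0.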
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Exercicios/Mundo1/Utilizando Módulos/python_020.py
|
<reponame>jbauermanncode/Curso_Em_Video_Python<filename>Python_Exercicios/Mundo1/Utilizando Módulos/python_020.py
'''
Randomly draw the presentation order for the students' assignments. Write a program that reads the names of the four students and shows the drawn order.
'''
# Importar Biblioteca Random
from random import shuffle
# Leia os 4 nomes
n1 = str(input('Digite um nome: '))
n2 = str(input('Digite um nome: '))
n3 = str(input('Digite um nome: '))
n4 = str(input('Digite um nome: '))
# Lista de alunos
lista = [n1, n2, n3, n4]
# Embaralhar a lista
shuffle(lista)
# Imprimir
print('A ordem de apresentação é: {}.'.format(lista))
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Exercicios/Mundo1/Tipos Primitivos e Saída de Dados/python_006.py
|
<reponame>jbauermanncode/Curso_Em_Video_Python<filename>Python_Exercicios/Mundo1/Tipos Primitivos e Saída de Dados/python_006.py
'''
Create an algorithm that reads a number and shows its double, triple and square root.
'''
# Leia um número inteiro
n = int(input('Digite um valor: '))
# Mostrar o dobro
d = n * 2
# Mostrar o triplo com outra maneira
t = n * 3
# Mostrar a raiz quadrada
rq = float(n ** (1/2))
print('Qual o dobro, o triplo e a raiz quadrada de {}?'.format(n))
print('{} é o dobro, {} é o triplo e {:.2f} é a raiz quadrada.'.format(d, t, rq))
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Test/aula12a.py
|
<filename>Python_Test/aula12a.py
nome = str(input('Qual o seu nome: '))
if nome == 'Josué':
print('Que nome bonito!')
elif nome == 'Rosicléia' or nome=='Astolfo':
print('Que nome feio!')
elif nome == 'Maria' or nome == 'João' or nome == 'Pedro' or nome == 'Ana':
print('Seu nome é bem popular!')
elif nome == 'Kevin' or nome == 'Kelly' or nome == 'Cristhian':
print('Seu nome tem origem estrangeira!')
elif nome in ('<NAME>'):
print('Que belo nome feminino você tem!')
else:
    print('Seu nome é bem normal!')
print('Tenha um bom dia, {}!'.format(nome))
|
jbauermanncode/Curso_Em_Video_Python
|
Python_Exercicios/Mundo3/Listas em Python/python_069.py
|
'''
Write a program that reads the name and weight of several people, storing everything in a list. At the end, show:
A) How many people were registered.
B) A listing of the heaviest people.
C) A listing of the lightest people.
'''
pessoas = list()
inf = list()
cont = 0
maior = menor = 0
# Fazer um laço while para ler nome de varias pessoas e colocar uma lista dentro da outra
while True:
inf.append(str(input('Nome: ')))
cont += 1
inf.append(int(input('Peso: ')))
# Para saber as pessoas mais pesadas e mais leves
if len(pessoas) == 0:
maior = menor = inf[1]
else:
if inf[1] > maior:
maior = inf[1]
if inf[1] < menor:
menor = inf[1]
# Adicionar a lista inf na lista pessoas
pessoas.append(inf[:])
# Limpar depois de cadastrar
inf.clear()
# Pedir se quer continuar ou Não
resp = str(input('Deseja continuar? Se sim digite S, se não digite N: '))
if resp in 'nN':
break
print('-='*30)
print()
print(f'Os dados cadastrados foram {pessoas}.')
# Pode usar um contador ou um len da lista pessoas
print(f'Ao todo você cadastrou {cont} pessoas.')
print(f'Ao todo você cadastrou {len(pessoas)} pessoas.', )
#-------------------------------------------------
# Descobrindo quais são as pessoas mais leves e pesadas
print(f'O maior peso foi de {maior}kg.', end= ' ')
for p in pessoas:
if p[1] == maior:
print(f'[{p[0]}]', end='')
print()
print(f'O menor peso foi de {menor}kg.', end= ' ')
for p in pessoas:
if p[1] == menor:
print(f'[{p[0]}]', end='')
print()
|