repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
pegasus-isi/pegasus-cycles | src/pegasus_cycles/_gldas.py | <gh_stars>0
# -*- coding: utf-8 -*-
import csv
import io
from pathlib import Path
import numpy as np
from netCDF4 import Dataset
def iterlocations(location_file):
    """Yield (latitude, longitude) string pairs from a CSV location file.

    The file must contain ``LATITUDE`` and ``LONGITUDE`` columns
    (X is longitude, Y is latitude).
    """
    with Path(location_file).open("r") as handle:
        for record in csv.DictReader(handle):
            yield record["LATITUDE"], record["LONGITUDE"]
def closest(lat, lon, elevation):
    """Return the Cycles weather file name for the GLDAS grid cell
    nearest to (lat, lon).

    lat, lon  -- target coordinates (strings or numbers; coerced to float)
    elevation -- path to the GLDAS elevation netCDF file

    Returns a name of the form "met<LAT><N|S>x<LON><E|W>.weather",
    e.g. "met7.25Nx28.75E.weather".
    """
    # Use the context manager so the netCDF handle is closed (the original
    # leaked it).  The original also read GLDAS_elevation into a local that
    # was never used; that read has been dropped.
    with Dataset(elevation, "r") as nc:
        best_y = (np.abs(nc.variables["lat"][:] - float(lat))).argmin()
        best_x = (np.abs(nc.variables["lon"][:] - float(lon))).argmin()
        grid_lat = nc["lat"][best_y]
        grid_lon = nc["lon"][best_x]
    # Hemisphere suffix: negative latitude -> S, negative longitude -> W.
    if grid_lat < 0.0:
        lat_str = "%.2fS" % (abs(grid_lat))
    else:
        lat_str = "%.2fN" % (abs(grid_lat))
    if grid_lon < 0.0:
        lon_str = "%.2fW" % (abs(grid_lon))
    else:
        lon_str = "%.2fE" % (abs(grid_lon))
    fname = "met" + lat_str + "x" + lon_str + ".weather"
    return fname
|
pegasus-isi/pegasus-cycles | dax_generator.py | #!/usr/bin/env python3
import os
import pegasus_cycles
from pegasus_cycles.__main__ import *
from string import Template
def _generate_tc():
with open("tc.template") as t_tc_file:
src = Template(t_tc_file.read())
tc_data = {
"work_dir": os.getcwd()
}
result = src.substitute(tc_data)
with open("tc.txt", "w") as f:
f.write(result)
if __name__ == '__main__':
    # NOTE(review): `logging` is not imported in this file; it is only
    # reachable through the star import from pegasus_cycles.__main__
    # (which imports logging at module level) — confirm, or add an
    # explicit `import logging` here.
    logging.info("Create transformation catalog")
    # Write tc.txt, then build and emit the workflow DAX.
    _generate_tc()
    dax()
|
pegasus-isi/pegasus-cycles | bin/cycles-wrapper.py | <filename>bin/cycles-wrapper.py
#!/usr/bin/env python3
"""Cycles Executor."""
import argparse
import csv
import logging
import os
import shutil
import subprocess
import sys
from string import Template
log = logging.getLogger()
def _generate_inputs(
    prefix,
    start_year,
    end_year,
    baseline,
    fertilizer_increase,
    unique_id,
    crop,
    start_planting_date,
    end_planting_date,
    planting_date_fixed,
    fertilizer_rate,
    weed_fraction,
    forcing,
    weather_file,
    reinit_file,
    crop_file,
    soil_file,
    template_weed,
    template_ctrl,
    template_op,
    params_file,
    **kwargs):
    """Generate the Cycles input deck for one simulation run.

    Writes the run-parameters CSV (params_file), creates ./input/ and
    copies the weather/crop/soil (and optional reinit) files into it,
    then renders the control (.ctrl) and operation (.operation) files
    from their templates.

    NOTE(review): baseline / fertilizer_increase arrive as the *strings*
    "True"/"False" from the CLI; all comparisons below are string
    comparisons.
    """
    # write params file
    with open(params_file, 'w', newline='') as csvfile:
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow([
            'unique_id',
            'crop',
            'location',
            'start_planting_date',
            'end_planting_date',
            'planting_date_fixed',
            'nitrogen_rate',
            'weed_fraction',
            'forcing',
            'season_file'
        ])
        # increased-fertilizer runs record a 10% higher rate
        fertilization_rate = fertilizer_rate if fertilizer_increase == "False" else float(fertilizer_rate) * 1.1
        csvwriter.writerow([
            unique_id,
            crop,
            # derive a human-readable location from the metXXXxYYY.weather name
            weather_file.replace('.weather', '').replace('met', '').replace('x', ' x ').replace('N', ' North').replace('E', ' East'),
            start_planting_date,
            end_planting_date,
            planting_date_fixed,
            fertilization_rate,
            weed_fraction,
            forcing,
            "cycles_season-" + unique_id + ".dat"
        ])
    os.mkdir("input")
    ctrl_file = prefix + "cycles_" + unique_id + ".ctrl"
    op_file = prefix + "cycles_" + unique_id + ".operation"
    shutil.copyfile(weather_file, "./input/" + weather_file)
    shutil.copyfile(crop_file, "./input/" + crop_file)
    shutil.copyfile(soil_file, "./input/" + soil_file)
    if reinit_file:
        # non-baseline runs restart from the baseline run's reinit state
        shutil.copyfile(reinit_file, "./input/cycles.reinit")
    # process CTRL file
    with open(template_ctrl) as t_ctrl_file:
        src = Template(t_ctrl_file.read())
        ctrl_data = {
            "start_year": start_year,
            "end_year": end_year,
            "rotation_size": 1,
            "crop_file": crop_file,
            "operation_file": op_file,
            "soil_file": soil_file,
            "weather_file": weather_file,
            # baseline runs write reinit state (0); other runs read it (1)
            "reinit": 0 if baseline == "True" else 1,
        }
        result = src.substitute(ctrl_data)
        with open("./input/" + ctrl_file, "w") as f:
            f.write(result)
    # process Operation file
    operation_contents = ""
    with open(template_op) as t_op_file:
        src = Template(t_op_file.read())
        op_data = {
            "year_count": 1,
            "crop_name": crop,
            # fertilize 10 days before planting starts, till 20 days after
            "fertilization_date": int(start_planting_date) - 10,
            "fertilization_rate": fertilizer_rate,
            "start_planting_date": start_planting_date,
            "end_planting_date": end_planting_date,
            "tillage_date": int(start_planting_date) + 20,
        }
        if fertilizer_increase == "True":
            op_data["fertilization_rate"] = float(op_data["fertilization_rate"]) * 1.1
        if baseline == "True":
            # baseline uses a fixed planting date and fertilization rate
            op_data["start_planting_date"] = 100
            op_data["fertilization_rate"] = 156.25
        result = src.substitute(op_data)
        operation_contents += result + "\n"
    # handling weeds
    if float(weed_fraction) > 0:
        with open(template_weed) as t_wd_file:
            wd_src = Template(t_wd_file.read())
            wd_data = {
                "year_count": 1,
                # weeds emerge one week after planting starts
                "weed_planting_date": int(start_planting_date) + 7,
                "weed_fraction": weed_fraction
            }
            wd_result = wd_src.substitute(wd_data)
            operation_contents += wd_result + "\n"
    # writing operations file
    with open("./input/" + op_file, "w") as f:
        f.write(operation_contents)
def _launch(prefix, baseline, unique_id, **kwargs):
cmd = "Cycles -s -l 1 " + prefix + "cycles_" + unique_id if baseline == "True" else "Cycles " + prefix + "cycles_" + unique_id
try:
output = subprocess.check_output(
cmd, stderr=subprocess.STDOUT, shell=True, universal_newlines=True)
except subprocess.CalledProcessError as exc:
print("Status : FAIL", exc.returncode, exc.output)
exit(1)
else:
print("Output: \n{}\n".format(output))
def _prepare_outputs(prefix, baseline, fertilizer_increase, unique_id, crop, **kwargs):
shutil.copyfile("./output/" + prefix + "cycles_" + unique_id + "/annualSoilProfileC.dat", prefix + "cycles_soilProfile-" + unique_id + ".dat")
shutil.copyfile("./output/" + prefix + "cycles_" + unique_id + "/annualSOM.dat", prefix + "cycles_som-" + unique_id + ".dat")
shutil.copyfile("./output/" + prefix + "cycles_" + unique_id + "/N.dat", prefix + "cycles_nitrogen-" + unique_id + ".dat")
shutil.copyfile("./output/" + prefix + "cycles_" + unique_id + "/season.dat", prefix + "cycles_season-" + unique_id + ".dat")
shutil.copyfile("./output/" + prefix + "cycles_" + unique_id + "/summary.dat", prefix + "cycles_summary-" + unique_id + ".dat")
shutil.copyfile("./output/" + prefix + "cycles_" + unique_id + "/weather.dat", prefix + "cycles_weatherOutput-" + unique_id + ".dat")
shutil.copyfile("./output/" + prefix + "cycles_" + unique_id + "/water.dat", prefix + "cycles_water-" + unique_id + ".dat")
shutil.copyfile("./output/" + prefix + "cycles_" + unique_id + "/" + crop + ".dat", prefix + "cycles_crop-" + unique_id + ".dat")
if baseline == "True":
shutil.copyfile("./output/" + prefix + "cycles_" + unique_id + "/reinit.dat", prefix + "cycles_reinit-" + unique_id + ".dat")
# generate zip for input/output folder
outputs_folder = crop.lower() + "_" + prefix + "cycles_" + unique_id
os.mkdir(outputs_folder)
shutil.move("input", outputs_folder + "/input")
shutil.move("output", outputs_folder + "/output")
shutil.make_archive(prefix + "cycles_outputs-" + unique_id, 'zip', outputs_folder)
def _main():
    """Parse CLI arguments and run the Cycles pipeline: generate the
    input deck, launch the simulation, collect and archive outputs."""
    parser = argparse.ArgumentParser(
        description="Cycles executor."
    )
    parser.add_argument("--start-year", dest="start_year", default=2000, help="Simulation start year")
    parser.add_argument("--end-year", dest="end_year", default=2017, help="Simulation end year")
    parser.add_argument("-b", "--baseline", dest="baseline", default=False, help="Whether this is a baseline execution")
    parser.add_argument("-x", "--fertilizer-increase", dest="fertilizer_increase", default=False, help="Whether this is an execution with increased fertilizer")
    parser.add_argument("-i", "--id", dest="unique_id", default=None, help="Unique ID")
    parser.add_argument("-c", "--crop", dest="crop", default="Maize", help="Crop name")
    parser.add_argument("-s", "--start-planting-date", dest="start_planting_date", default=100, help="Start planting date")
    parser.add_argument("-e", "--end-planting-date", dest="end_planting_date", default=149, help="End planting date")
    parser.add_argument("-p", "--planting-date-fixed", dest="planting_date_fixed", default=True, help="Whether the planting data is fixed")
    parser.add_argument("-n", "--fertilizer-rate", dest="fertilizer_rate", default=0.00, help="Fertilizer rate")
    parser.add_argument("-w", "--weed-fraction", dest="weed_fraction", default=0.0, help="Weed fraction")
    parser.add_argument("-f", "--forcing", dest="forcing", default=False, help="Whether it uses forcing data from PIHM")
    parser.add_argument("-l", "--weather-file", dest="weather_file", default=None, help="Weather file")
    parser.add_argument("-r", "--reinit-file", dest="reinit_file", default=None, help="Cycles reinitialization file")
    parser.add_argument("-m", "--params-file", dest="params_file", default=None, help="Cycles parameters file")
    parser.add_argument("crop_file", help="crops file")
    parser.add_argument("soil_file", help="Soil file")
    parser.add_argument("template_weed", help="Template weed file")
    parser.add_argument("template_ctrl", help="Template control file")
    parser.add_argument("template_op", help="Template operation file")
    args = parser.parse_args()
    if args.baseline == "True" and args.fertilizer_increase == "True":
        log.error("Error: Cannot run baseline with increased fertilizer")
        exit(1)
    # set end planting date if fixed
    # BUG FIX: the flag arrives as the *string* "True"/"False" from the CLI
    # (the argparse default is the bool True).  The previous truthiness
    # test treated the non-empty string "False" as true, so `-p False`
    # was silently ignored; compare explicitly instead.
    if str(args.planting_date_fixed) == "True":
        args.end_planting_date = -999
    # setting prefix
    prefix = "baseline_" if args.baseline == "True" else "fertilizer_increase_" if args.fertilizer_increase == "True" else ""
    _generate_inputs(prefix, **vars(args))
    _launch(prefix, **vars(args))
    _prepare_outputs(prefix, **vars(args))
if __name__ == "__main__":
_main()
|
pegasus-isi/pegasus-cycles | src/pegasus_cycles/_adag.py | <reponame>pegasus-isi/pegasus-cycles
# -*- coding: utf-8 -*-
from pegasus_cycles._pegasus import *
import os
import html
# Top-level abstract DAG for the whole Cycles workflow; auto=True lets the
# DAX API assign job IDs automatically.
a = ADAG("pegasus-cycles", auto=True)
# input files
# Static inputs (crop/soil definitions and the ctrl/operation templates)
# are registered with file:// PFNs on the "local" site, under ./data/.
crops_file = File("crops.crop")
crops_file.addPFN(PFN("file://" + os.getcwd() + "/data/crops.crop", "local"))
soil_file = File("pongo.soil")
soil_file.addPFN(PFN("file://" + os.getcwd() + "/data/pongo.soil", "local"))
template_weed = File("template-weed.operation")
template_weed.addPFN(PFN("file://" + os.getcwd() + "/data/template-weed.operation", "local"))
template_ctrl = File("template.ctrl")
template_ctrl.addPFN(PFN("file://" + os.getcwd() + "/data/template.ctrl", "local"))
template_op = File("template.operation")
template_op.addPFN(PFN("file://" + os.getcwd() + "/data/template.operation", "local"))
a.addFile(crops_file)
a.addFile(soil_file)
a.addFile(template_weed)
a.addFile(template_ctrl)
a.addFile(template_op)
# Cycles' output files list
# Registries mapping weather -> crop -> [File]; filled in by the job
# builders below and consumed by the parser/summary jobs.
season_files = {}
params_files = {}
fi_files = {}
# @a.job()
def gldas_to_cycles(
    latitude,
    longitude,
    output_file,
    start_date="2000-01-01",
    end_date="2017-12-31",
    gldas_path="/raw-data/GLDAS",
):
    """Transform GLDAS to Cycles.

    Builds and returns the DAX job that converts GLDAS data at
    (latitude, longitude) into the Cycles weather file *output_file*,
    which is registered as a transferable output.
    """
    j = Job("gldas_to_cycles")
    # The +SingularityImage classad value must carry literal surrounding
    # double quotes.  The source had an html.unescape on an &quot;-escaped
    # string that was garbled in transit; a plain quoted literal is the
    # same value.
    j.addProfile(Profile(Namespace.CONDOR, key="+SingularityImage", value='"/cvmfs/singularity.opensciencegrid.org/mintproject/cycles:0.9.4-alpha"'))
    j.addArguments("--start-date", start_date)
    j.addArguments("--end-date", end_date)
    j.addArguments("--latitude", latitude)
    j.addArguments("--longitude", longitude)
    j.addArguments("--gldas-path", gldas_path)
    j.addArguments("--output", output_file)
    j.uses(File(output_file), link=Link.OUTPUT, transfer=True)
    return j
# @a.transformation()
# @a.resource_info(cpu=0.25)
# def baseline_transformation():
# """Cycles Baseline Transformation."""
# e1 = Executable("cycles-baseline")
# return [e1]
#
#
# @a.transformation()
# @a.resource_info(cpu=0.25)
# def cycles_transformation():
# """Cycles Transformation."""
# e1 = Executable("cycles")
# e1.addPFN(PFN("file://path/run", "a"))
# e2 = Executable("io.sh")
# e2.addPFN(PFN("file://path/io.sh", "a"))
# return [e1, e2]
# @a.job()
def cycles(
    unique_id,
    crop,
    start_planting_date,
    end_planting_date,
    planting_date_fixed,
    fertilizer_rate,
    weed_fraction,
    forcing,
    weather_file,
    reinit_file=None,
    baseline=False,
    fertilizer_increase=False,
    weather=None
):
    """Cycles.

    Build the DAX job for one Cycles simulation run and return it.
    Non-baseline runs consume the baseline's reinit file and (unless
    this is a fertilizer-increase run) register their season/params
    files in the module-level registries for the downstream parser
    jobs; baseline runs keep all outputs local (transfer=False) and
    additionally produce a reinit file.
    """
    prefix = "baseline_" if baseline else "fertilizer_increase_" if fertilizer_increase else ""
    params_file = File(prefix + "cycles_params-" + unique_id + ".csv")
    season_output_file = File(prefix + "cycles_season-" + unique_id + ".dat")
    j = Job(prefix + "cycles")
    # literal surrounding quotes are part of the classad value (the original
    # html.unescape'd an &quot;-escaped string; this literal is equivalent)
    j.addProfile(Profile(Namespace.CONDOR, key="+SingularityImage", value='"/cvmfs/singularity.opensciencegrid.org/mintproject/cycles:0.9.4-alpha"'))
    j.addArguments("--baseline", str(baseline))
    j.addArguments("--fertilizer-increase", str(fertilizer_increase))
    j.addArguments("--id", unique_id)
    j.addArguments("--crop", crop)
    j.addArguments("--start-planting-date", start_planting_date)
    j.addArguments("--end-planting-date", end_planting_date)
    j.addArguments("--planting-date-fixed", planting_date_fixed)
    j.addArguments("--fertilizer-rate", fertilizer_rate)
    j.addArguments("--weed-fraction", weed_fraction)
    j.addArguments("--forcing", forcing)
    j.addArguments("--weather-file", weather_file)
    j.addArguments("--params-file", params_file)
    j.addArguments(crops_file)
    j.addArguments(soil_file)
    j.addArguments(template_weed)
    j.addArguments(template_ctrl)
    j.addArguments(template_op)
    j.uses(File(weather_file), link=Link.INPUT)
    j.uses(crops_file, link=Link.INPUT)
    j.uses(soil_file, link=Link.INPUT)
    j.uses(template_weed, link=Link.INPUT)
    j.uses(template_ctrl, link=Link.INPUT)
    j.uses(template_op, link=Link.INPUT)
    j.uses(File(prefix + "cycles_crop-" + unique_id + ".dat"), link=Link.OUTPUT, transfer=False)
    j.uses(File(prefix + "cycles_nitrogen-" + unique_id + ".dat"), link=Link.OUTPUT, transfer=False)
    j.uses(File(prefix + "cycles_soilProfile-" + unique_id + ".dat"), link=Link.OUTPUT, transfer=False)
    j.uses(File(prefix + "cycles_som-" + unique_id + ".dat"), link=Link.OUTPUT, transfer=False)
    j.uses(File(prefix + "cycles_summary-" + unique_id + ".dat"), link=Link.OUTPUT, transfer=False)
    j.uses(File(prefix + "cycles_water-" + unique_id + ".dat"), link=Link.OUTPUT, transfer=False)
    j.uses(File(prefix + "cycles_weatherOutput-" + unique_id + ".dat"), link=Link.OUTPUT, transfer=False)
    if not baseline:
        j.uses(season_output_file, link=Link.OUTPUT)
        j.uses(File(prefix + "cycles_outputs-" + unique_id + ".zip"), link=Link.OUTPUT)
        j.uses(params_file, link=Link.OUTPUT)
        # non-baseline runs restart from the baseline's reinit state
        j.addArguments("--reinit-file", reinit_file)
        j.uses(File(reinit_file), Link.INPUT)
        if not fertilizer_increase:
            # group season/params files by weather then crop for the
            # downstream output-parser jobs
            if weather not in season_files:
                season_files[weather] = {}
                params_files[weather] = {}
            if crop not in season_files[weather]:
                season_files[weather][crop] = []
                params_files[weather][crop] = []
            season_files[weather][crop].append(season_output_file)
            params_files[weather][crop].append(params_file)
    else:
        j.uses(season_output_file, link=Link.OUTPUT, transfer=False)
        j.uses(File(prefix + "cycles_outputs-" + unique_id + ".zip"), link=Link.OUTPUT, transfer=False)
        j.uses(params_file, link=Link.OUTPUT, transfer=False)
        # reinit.dat is produced only by baseline (spin-up) runs
        j.uses(File(prefix + "cycles_reinit-" + unique_id + ".dat"), link=Link.OUTPUT, transfer=False)
    return j
def cycles_fertilizer_increase_output_parser(
    unique_id,
    crop,
    weather=None
):
    """Cycles Fertilizer Increase Output Parser.

    Builds the DAX job that merges the normal and increased-fertilizer
    params/season files for one run into a single parsed CSV, registers
    that CSV in the fi_files registry, and returns the job.
    """
    j = Job("cycles_fertilizer_increase_output_parser")
    # literal surrounding quotes are part of the classad value
    j.addProfile(Profile(Namespace.CONDOR, key="+SingularityImage", value='"/cvmfs/singularity.opensciencegrid.org/mintproject/cycles:0.9.4-alpha"'))
    # input files
    params_file = File("cycles_params-" + unique_id + ".csv")
    params_file_fi = File("fertilizer_increase_cycles_params-" + unique_id + ".csv")
    season_file = File("cycles_season-" + unique_id + ".dat")
    season_file_fi = File("fertilizer_increase_cycles_season-" + unique_id + ".dat")
    j.addArguments("--params-file", params_file)
    j.addArguments("--params-file-fi", params_file_fi)
    j.addArguments("--season-file", season_file)
    j.addArguments("--season-file-fi", season_file_fi)
    j.uses(params_file, link=Link.INPUT)
    j.uses(params_file_fi, link=Link.INPUT)
    j.uses(season_file, link=Link.INPUT)
    j.uses(season_file_fi, link=Link.INPUT)
    # output file
    output_file = File("cycles_fertilizer_increase_output_parsed-" + unique_id + ".csv")
    j.addArguments("--output-file", output_file)
    j.uses(output_file, link=Link.OUTPUT, transfer=False)
    # register the parsed CSV by weather/crop for the summary job
    if weather not in fi_files:
        fi_files[weather] = {}
    if crop not in fi_files[weather]:
        fi_files[weather][crop] = []
    fi_files[weather][crop].append(output_file)
    return j
def cycles_fertilizer_increase_output_summary(weather, crop):
    """Cycles Output Summary.

    Builds the DAX job that concatenates all parsed fertilizer-increase
    CSVs for (weather, crop) into one summary CSV.  Returns None when no
    parsed files were registered for that combination.
    """
    if weather not in fi_files or crop not in fi_files[weather]:  # temp
        return
    j = Job("cycles_fertilizer_increase_output_summary")
    # literal surrounding quotes are part of the classad value
    j.addProfile(Profile(Namespace.CONDOR, key="+SingularityImage", value='"/cvmfs/singularity.opensciencegrid.org/mintproject/cycles:0.9.4-alpha"'))
    # inputs
    for f in fi_files[weather][crop]:
        j.addArguments("-p", f)
        j.uses(f, Link.INPUT)
    # output
    # weather[2] is presumably the "met...weather" file name element of the
    # weather tuple — TODO confirm against the caller.
    output_file = File("cycles_fi_output_summary_" + crop.lower() + "_" + weather[2].replace("met", "").replace(".weather", "") + ".csv")
    j.uses(output_file, link=Link.OUTPUT, transfer=True)
    j.addArguments("--output-file", output_file)
    return j
# @a.job()
def cycles_output_parser(weather, crop):
    """Cycles Output Parser.

    Builds the DAX job that summarizes all season/params files recorded
    for (weather, crop).  Returns None when nothing was registered for
    that combination.
    """
    if weather not in season_files or crop not in season_files[weather]:  # temp
        return
    j = Job("cycles_output_parser")
    # literal surrounding quotes are part of the classad value
    j.addProfile(Profile(Namespace.CONDOR, key="+SingularityImage", value='"/cvmfs/singularity.opensciencegrid.org/mintproject/cycles:0.9.4-alpha"'))
    # weather[2] is presumably the "met...weather" file name element of the
    # weather tuple — TODO confirm against the caller.
    output_file = File("cycles_output_summary_" + crop.lower() + "_" + weather[2].replace("met", "").replace(".weather", "") + ".csv")
    for f in season_files[weather][crop]:
        j.uses(f, Link.INPUT)
    for f in params_files[weather][crop]:
        j.addArguments("-p", f)
        j.uses(f, Link.INPUT)
    j.uses(output_file, link=Link.OUTPUT, transfer=True)
    j.addArguments("--output-file", output_file)
    return j
# @a.transformation()
# @a.resource_info(cpu=0.25)
# def merge_transformation():
# """Cycles Baseline Transformation."""
# e1 = Executable("merge")
# e1.addPFN(PFN("file://path/run", "a"))
# return e1
#
#
# @a.job()
# def merge():
# """Merge."""
# return Job("merge")
#
#
# @a.transformation()
# @a.resource_info(cpu=0.25)
# def visualize_transformation():
# """Cycles Baseline Transformation."""
# e1 = Executable("visualize")
# e1.addPFN(PFN("file://path/run", "a"))
# return e1
#
#
# @a.job()
# def visualize():
# """Cycles Visualize."""
# return Job("visualize")
|
pegasus-isi/pegasus-cycles | bin/GLDAS-Cycles-transformation.py | <gh_stars>0
#!/usr/bin/env python3
import argparse
import math
import os
import shutil
from datetime import datetime, timedelta
import numpy as np
from netCDF4 import Dataset
def Closest(lat, lon, path):
    """Locate the GLDAS 0.25-degree grid cell nearest to (lat, lon).

    path -- directory containing GLDASp4_elevation_025d.nc4

    Returns (y_index, x_index, grid_lat, grid_lon, elevation).
    """
    elevation_fp = path + "/GLDASp4_elevation_025d.nc4"
    # context manager closes the netCDF handle (the original leaked it)
    with Dataset(elevation_fp, "r") as nc:
        best_y = (np.abs(nc.variables["lat"][:] - lat)).argmin()
        best_x = (np.abs(nc.variables["lon"][:] - lon)).argmin()
        return (
            best_y,
            best_x,
            nc["lat"][best_y],
            nc["lon"][best_x],
            nc["GLDAS_elevation"][0, best_y, best_x],
        )
def ReadVar(y, x, nc_name):
    """Read one GLDAS timestep at grid cell (y, x) from *nc_name*.

    Returns (prcp, temp, wind, solar, rh) in the units stored in the
    GLDAS files (no conversion is done here — presumably kg/m2/s, K,
    m/s, W/m2; verify against the GLDAS product docs), with rh as a
    0-1 fraction.
    """
    with Dataset(nc_name, "r") as nc:
        _prcp = nc["Rainf_f_tavg"][0, y, x]
        _temp = nc["Tair_f_inst"][0, y, x]
        _wind = nc["Wind_f_inst"][0, y, x]
        _solar = nc["SWdown_f_tavg"][0, y, x]
        _pres = nc["Psurf_f_inst"][0, y, x]
        _spfh = nc["Qair_f_inst"][0, y, x]
        # saturation vapor pressure at _temp (Magnus-type formula, Pa)
        es = 611.2 * math.exp(17.67 * (_temp - 273.15) / (_temp - 273.15 + 243.5))
        # saturation mixing ratio from es and surface pressure
        ws = 0.622 * es / (_pres - es)
        # actual mixing ratio from specific humidity
        w = _spfh / (1.0 - _spfh)
        _rh = w / ws
        # clamp numerical overshoot above saturation
        if _rh > 1.0:
            _rh = 1.0
        return (_prcp, _temp, _wind, _solar, _rh)
def satvp(temp):
    """Saturation vapor pressure at air temperature *temp* (deg C),
    using the Tetens approximation (result in kPa)."""
    exponent = 17.27 * temp / (temp + 237.3)
    return 0.6108 * math.exp(exponent)
def ea(patm, q):
    """Actual vapor pressure from atmospheric pressure *patm* and
    specific humidity *q* (same pressure units as *patm*)."""
    denominator = 0.622 * (1 - q) + q
    return patm * q / denominator
def process_day(t, y, x, path):
    """
    process one day of GLDAS data and convert it to Cycles input

    t    -- datetime of the day to process
    y, x -- grid-cell indices (from Closest)
    path -- GLDAS archive root, laid out as <path>/<year>/<doy>/*.nc4

    Aggregates the day's sub-daily files: precipitation/solar averaged and
    scaled to daily totals, temperature/RH tracked as min and max, wind
    averaged.  Returns one formatted weather-file line.

    Raises RuntimeError when the day's directory contains no .nc4 files
    (previously this crashed with ZeroDivisionError).
    """
    prcp = 0.0
    tx = -999.0
    tn = 999.0
    wind = 0.0
    solar = 0.0
    rhx = -999.0
    rhn = 999.0
    counter = 0
    print(datetime.strftime(t, "%Y-%m-%d"))
    nc_path = "%s/%4.4d/%3.3d/" % (path, t.timetuple().tm_year, t.timetuple().tm_yday)
    for nc_name in os.listdir(nc_path):
        if nc_name.endswith(".nc4"):
            (_prcp, _temp, _wind, _solar, _rh) = ReadVar(
                y, x, os.path.join(nc_path, nc_name)
            )
            prcp += _prcp
            if _temp > tx:
                tx = _temp
            if _temp < tn:
                tn = _temp
            wind += _wind
            solar += _solar
            if _rh > rhx:
                rhx = _rh
            if _rh < rhn:
                rhn = _rh
            counter += 1
    if counter == 0:
        # guard the divisions below
        raise RuntimeError("no GLDAS .nc4 files found in " + nc_path)
    prcp /= float(counter)
    prcp *= 86400.0  # mean flux -> daily total (assumes kg/m2/s; confirm)
    wind /= float(counter)
    solar /= float(counter)
    solar *= 86400.0 / 1.0e6  # mean W/m2 -> MJ/m2/day
    rhx *= 100.0  # fraction -> percent
    rhn *= 100.0
    tx -= 273.15  # K -> deg C
    tn -= 273.15
    data = "%-16s%-8.4f%-8.2f%-8.2f%-8.4f%-8.2f%-8.2f%-8.2f\n" % (
        t.strftime("%Y %j"),
        prcp,
        tx,
        tn,
        solar,
        rhx,
        rhn,
        wind,
    )
    return data
def main(
    start_date,
    end_date,
    gldas_path,
    output_prefix,
    latitude=None,
    longitude=None,
    coord_file=None,
):
    """Drive the GLDAS-to-Cycles conversion.

    Either a single (latitude, longitude) pair or a whitespace-separated
    coord_file may be given; dates are "YYYY-MM-DD" strings.  One weather
    file is written per coordinate; coordinates that resolve to the same
    GLDAS grid cell reuse (copy) the first file written for that cell.

    Raises ValueError when neither a point nor a coord file is supplied.
    """
    start_date = datetime.strptime(start_date, "%Y-%m-%d")
    end_date = datetime.strptime(end_date, "%Y-%m-%d")
    data_path = gldas_path
    if latitude and longitude:
        coords = [(latitude, longitude, output_prefix)]
    elif coord_file:
        coords = []
        with open(coord_file) as fp:
            for cnt, line in enumerate(fp):
                li = line.strip()
                # skip comments and header lines (e.g. "Latitude Longitude")
                if not (li.startswith("#") or li.startswith("L")):
                    nums = line.split()
                    lat = float(nums[0])
                    lon = float(nums[1])
                    coords.append((lat, lon, "%s-%d.weather" % (output_prefix, cnt)))
    else:
        raise ValueError("Invalid coordinates")
    memoize = {}  # (lat_str, lon_str) -> first file written for that cell
    for lat, lon, fname in coords:
        print("Processing data for {0}, {1}".format(lat, lon))
        (y, x, grid_lat, grid_lon, elevation) = Closest(lat, lon, data_path)
        if grid_lat < 0.0:
            lat_str = "%.2fS" % (abs(grid_lat))
        else:
            lat_str = "%.2fN" % (abs(grid_lat))
        if grid_lon < 0.0:
            lon_str = "%.2fW" % (abs(grid_lon))
        else:
            lon_str = "%.2fE" % (abs(grid_lon))
        if (lat_str, lon_str) in memoize:
            shutil.copyfile(memoize[(lat_str, lon_str)], fname)
            continue
        memoize[(lat_str, lon_str)] = fname
        # with-block guarantees the output file is closed even if a day's
        # processing raises (the original leaked the handle in that case)
        with open(fname, "w") as outfp:
            outfp.write("LATITUDE %.2f\n" % (grid_lat))
            outfp.write("ALTITUDE %.2f\n" % (elevation))
            outfp.write("SCREENING_HEIGHT 2\n")
            outfp.write(
                "YEAR DOY PP TX TN SOLAR RHX RHN WIND\n"
            )
            cday = start_date
            while cday <= end_date:
                outfp.write(process_day(cday, y, x, data_path))
                cday += timedelta(days=1)
def _main():
    """Command-line entry point for the GLDAS-to-Cycles transform."""
    parser = argparse.ArgumentParser(description="GLDAS to Cycles Transform")
    parser.add_argument("-s", "--start-date", dest="start_date",
                        required=True, default="2000-01-01",
                        help="Start Date")
    parser.add_argument("-e", "--end-date", dest="end_date",
                        required=True, default="2018-01-31",
                        help="End Date")
    parser.add_argument("-g", "--gldas-path", dest="gldas_path",
                        required=True,
                        help="Path to GLDAS files")
    parser.add_argument("-lat", "--latitude", type=float, dest="latitude",
                        help="Latitude")
    parser.add_argument("-lon", "--longitude", type=float, dest="longitude",
                        help="Longitude")
    parser.add_argument("-c", "--coord-file", dest="coord_file",
                        help="File with list of Latitude, Longitude pairs.")
    parser.add_argument("-o", "--output", dest="output_prefix",
                        required=True,
                        help="Output prefix")
    main(**vars(parser.parse_args()))
if __name__ == "__main__":
_main()
|
pegasus-isi/pegasus-cycles | src/pegasus_cycles/_combinations.py | <gh_stars>0
# -*- coding: utf-8 -*-
from itertools import product
# Parameter axes for the full-factorial sweep built in itercombinations().
country = ["South Sudan"]
# "Cassava"
# crops = ["Maize", "Sorghum", "Peanut", "Sesame", "Cassava"]
crops = ["Maize", "Sorghum", "Sesame"]
soil = ["pongo.soil"]
# candidate planting start days (day-of-year), one week apart
start_planting_date = ["100", "107", "114", "121", "128", "135", "142"]
end_planting_date = ["149"]
planting_date_fixed = ["True", "False"]
fertilizer = ["urea"]
# nitrogen_rate[i] pairs with fertilizer_rate[i] (zipped, not crossed)
nitrogen_rate = ["0", "25", "50", "100", "200", "400"]
fertilizer_rate = ["0.00", "78.13", "156.25", "312.50", "625.00", "1250.00"]
# forcing = ["True", "False"]
forcing = ["False"]
weed_fraction = ["0.0", "0.05", "0.1", "0.2", "0.4", "1.5", "2.0"]
def itercombinations(distinct_locations):
    """Yield every simulation parameter combination.

    The (nitrogen_rate, fertilizer_rate) axes are paired element-wise
    (dot product), not crossed.  Peanut is handled separately with a
    single zero-fertilizer pair appended after the main crops.
    """
    paired_fertilizers = list(zip(nitrogen_rate, fertilizer_rate))

    def _combos(crop_list, fertilizer_pairs):
        # cross all axes for the given crops/fertilizer pairings
        return product(
            country,
            crop_list,
            distinct_locations,
            soil,
            start_planting_date,
            end_planting_date,
            fertilizer,
            fertilizer_pairs,
            forcing,
            planting_date_fixed,
            weed_fraction,
        )

    yield from _combos(crops, paired_fertilizers)
    yield from _combos(["Peanut"], [("0.0", "0.00")])
|
pegasus-isi/pegasus-cycles | setup.py | <reponame>pegasus-isi/pegasus-cycles
# -*- coding: utf-8 -*-
import os
from setuptools import find_packages, setup
install_requires = ["Click", "netCDF4"]
# Utility function to read the README file.
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's
    directory.  The original left the file handle open; the with-block
    closes it."""
    with open(os.path.join(os.path.dirname(__file__), fname)) as fp:
        return fp.read()
def find_package_data(dirname):
    """Collect all non-Python data files under *dirname*, returned as
    paths relative to *dirname* (for setuptools package_data)."""
    def _collect(base):
        found = []
        for entry in os.listdir(base):
            full = os.path.join(base, entry)
            if os.path.isdir(full):
                found.extend(_collect(full))
            elif not (full.endswith(".py") or full.endswith(".pyc")):
                found.append(full)
        return found

    return [os.path.relpath(full, dirname) for full in _collect(dirname)]
# Package metadata for pegasus_cycles (src/ layout).
setup(
    name="pegasus_cycles",
    version="0.0.0",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Pegasus Cycles",
    long_description=read("README.md"),
    license="Apache",
    url="https://pegasus.isi.edu",
    classifiers=[
        "Topic :: Internet :: WWW/HTTP :: Application",
        "License :: OSI Approved :: Apache Software License",
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.7",
        "Intended Audience :: Science/Research",
        "Operating System :: Unix",
    ],
    # console entry point: `pegasus-cycles` -> pegasus_cycles.__main__:cli
    entry_points={"console_scripts": ["pegasus-cycles = pegasus_cycles.__main__:cli"]},
    # packages live under src/pegasus_cycles
    package_dir={"": "src"},
    packages=find_packages(where="src", exclude=["pegasus_cycles.tests*"]),
    # ship every non-.py file found under the package as package data
    package_data={"pegasus_cycles": find_package_data("src/pegasus_cycles")},
    exclude_package_data={"pegasus_cycles": ["tests/*"]},
    zip_safe=False,
    install_requires=install_requires,
    test_suite="pegasus_cycles.tests",
)
|
pegasus-isi/pegasus-cycles | bin/cycles-fertilizer-increase-output-summary.py | <gh_stars>0
#!/usr/bin/env python3
"""Cycles Fertilizer Increase Output Summary."""
import argparse
import csv
import os
def parse_outputs(output_file, parsed_files, **kwargs):
    """Merge per-run parsed fertilizer-increase CSVs into one summary CSV.

    output_file  -- path of the summary CSV to write
    parsed_files -- list of per-run CSVs; each file's first (header) row
                    is skipped and its data rows are copied through

    Removes the dead `params` accumulator from the original and copies
    each file's rows with a single writerows call.
    """
    with open(output_file, 'w', newline='') as csvfile:
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow([
            'unique_id',
            'crop',
            'location',
            'planting_date',
            'end_planting_date',
            'planting_date_fixed',
            'nitrogen_rate',
            'increased_nitrogen_rate',
            'weed_fraction',
            'forcing',
            'year',
            'total_biomass',
            'root_biomass',
            'grain_yield',
            'forage_yield',
            'ag_residue',
            'harvest_index',
            'potential_tr',
            'actual_tr',
            'soil_evap',
            'total_n',
            'root_n',
            'grain_n',
            'forage_n',
            'cum_n_stress',
            'n_in_harvest',
            'n_in_residue',
            'n_concn_forage',
            'increased_total_biomass',
            'increased_root_biomass',
            'increased_grain_yield',
            'increased_forage_yield',
            'increased_ag_residue',
            'increased_harvest_index',
            'increased_potential_tr',
            'increased_actual_tr',
            'increased_soil_evap',
            'increased_total_n',
            'increased_root_n',
            'increased_grain_n',
            'increased_forage_n',
            'increased_cum_n_stress',
            'increased_n_in_harvest',
            'increased_n_in_residue',
            'increased_n_concn_forage'
        ])
        for f in parsed_files:
            with open(f) as parsed_file:
                reader = csv.reader(parsed_file, skipinitialspace=True, quotechar="'")
                next(reader)  # skip the per-file header row
                csvwriter.writerows(reader)
def _main():
    """CLI entry point: merge parsed fertilizer-increase CSVs into one
    summary file."""
    parser = argparse.ArgumentParser(
        description="Generate CSV file from Cycles Fertilizer Increase Outputs."
    )
    parser.add_argument("-o", "--output-file", dest="output_file",
                        default="output-fi-summary.csv",
                        help="Summary CSV file")
    parser.add_argument("-p", "--parsed-file", action="append",
                        dest="parsed_files",
                        help="List of parsed season files")
    parse_outputs(**vars(parser.parse_args()))
if __name__ == "__main__":
_main()
|
pegasus-isi/pegasus-cycles | bin/cycles-fertilizer-increase-output-parser.py | <reponame>pegasus-isi/pegasus-cycles<filename>bin/cycles-fertilizer-increase-output-parser.py
#!/usr/bin/env python3
"""Cycles Output Parser."""
import argparse
import csv
import os
def parse_outputs(params_file, params_file_fi, season_file, season_file_fi, output_file, **kwargs):
    """Join one run's params and season data with its increased-fertilizer
    counterpart into a single per-year CSV.

    params_file / params_file_fi -- run-parameter CSVs (the last row of
        each is used); index 6 is the nitrogen rate
    season_file / season_file_fi -- tab-separated Cycles season outputs
        with two header lines; columns 2..18 of each data row are the
        17 per-year metrics, column 0 starts with the 4-digit year
    output_file -- CSV to write: params, both nitrogen rates, year, then
        the 17 normal + 17 increased metrics (45 columns)

    Replaces 68 copy-pasted per-column append lines of the original with
    slice comprehensions; output is unchanged.
    """

    def _last_row(path):
        # the params CSV holds a header plus one data row; keep the last row
        last = []
        with open(path) as fh:
            for row in csv.reader(fh, skipinitialspace=True, quotechar="'"):
                last = row
        return last

    def _season_values(path):
        # map 4-digit year -> the 17 metric columns (indices 2..18), stripped
        vals = {}
        with open(path) as fh:
            reader = csv.reader(fh, delimiter='\t')
            next(reader)  # column names
            next(reader)  # units line
            for row in reader:
                vals[row[0][:4].strip()] = [col.strip() for col in row[2:19]]
        return vals

    params = _last_row(params_file)
    params_fi = _last_row(params_file_fi)
    values = _season_values(season_file)
    # append the increased-fertilizer metrics after the normal ones;
    # a year present only in the fi file is an error (KeyError), as before
    for year, metrics in _season_values(season_file_fi).items():
        values[year].extend(metrics)
    with open(output_file, 'w', newline='') as csvfile:
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow([
            'unique_id',
            'crop',
            'location',
            'planting_date',
            'end_planting_date',
            'planting_date_fixed',
            'nitrogen_rate',
            'increased_nitrogen_rate',
            'weed_fraction',
            'forcing',
            'year',
            'total_biomass',
            'root_biomass',
            'grain_yield',
            'forage_yield',
            'ag_residue',
            'harvest_index',
            'potential_tr',
            'actual_tr',
            'soil_evap',
            'total_n',
            'root_n',
            'grain_n',
            'forage_n',
            'cum_n_stress',
            'n_in_harvest',
            'n_in_residue',
            'n_concn_forage',
            'increased_total_biomass',
            'increased_root_biomass',
            'increased_grain_yield',
            'increased_forage_yield',
            'increased_ag_residue',
            'increased_harvest_index',
            'increased_potential_tr',
            'increased_actual_tr',
            'increased_soil_evap',
            'increased_total_n',
            'increased_root_n',
            'increased_grain_n',
            'increased_forage_n',
            'increased_cum_n_stress',
            'increased_n_in_harvest',
            'increased_n_in_residue',
            'increased_n_concn_forage'
        ])
        for y in values:
            # params[0..6], fi nitrogen rate, params[7..8], year, 34 metrics
            csvwriter.writerow(
                params[:7] + [params_fi[6]] + params[7:9] + [y] + values[y]
            )
def _main():
    """CLI entry point: parse command-line options and build the summary CSV.

    Defaults mirror the file names produced by the paired Cycles runs
    (baseline/normal vs. increased-fertilizer).
    """
    parser = argparse.ArgumentParser(
        description="Generate CSV file from Cycles Outputs."
    )
    # (short flag, long flag, dest, default, help) — declared as data so the
    # registration loop below stays uniform.
    option_specs = [
        ("-p", "--params-file", "params_file", "cycles_params.csv",
         "Params file for Cycles run"),
        ("-i", "--params-file-fi", "params_file_fi", "fertilizer_increase_cycles_params.csv",
         "Params file for Cycles run with increased fertilizer rate"),
        ("-s", "--season-file", "season_file", "cycles_season.dat",
         "Season file for Cycles run"),
        ("-f", "--season-file-fi", "season_file_fi", "fertilizer_increase_cycles_season.csv",
         "Season file for Cycles run with increased fertilizer rate"),
        ("-o", "--output-file", "output_file", "output-summary.csv",
         "Summary CSV file"),
    ]
    for short_flag, long_flag, dest, default, help_text in option_specs:
        parser.add_argument(short_flag, long_flag, dest=dest, default=default, help=help_text)
    parse_outputs(**vars(parser.parse_args()))


if __name__ == "__main__":
    _main()
|
pegasus-isi/pegasus-cycles | src/pegasus_cycles/__main__.py | <filename>src/pegasus_cycles/__main__.py
# -*- coding: utf-8 -*-
"""
Pegasus Cycles.
:license: Apache 2.0
"""
import click
import hashlib
import logging
import pegasus_cycles
import os
import sys
from pathlib import Path
from pegasus_cycles._adag import *
from pegasus_cycles._combinations import itercombinations
from pegasus_cycles._combinations import crops
from pegasus_cycles._gldas import closest, iterlocations
@click.group()
@click.option("--verbose", "-v", default=0, count=True)
def cli(verbose):
    """Pegasus Cycles command group.

    :param verbose: number of ``-v`` flags given; 0 -> WARNING, 1 -> INFO,
        2 or more -> DEBUG.
    """
    # Bug fix: `verbose` was collected by click but never used, so the
    # logging.info(...) calls in subcommands were silently dropped at the
    # default WARNING level. Map the flag count to an explicit log level.
    level = {0: logging.WARNING, 1: logging.INFO}.get(verbose, logging.DEBUG)
    logging.basicConfig(level=level)
@cli.command()
def version():
    """Print the invoked program name and the package version."""
    program = Path(sys.argv[0]).name
    click.echo(f"{program} v{pegasus_cycles.__version__}")
@cli.command()
@click.option(
    "--locations",
    "-l",
    type=click.Path(file_okay=True, dir_okay=False, readable=True),
    required=True,
)
@click.option(
    "--elevation",
    "-e",
    type=click.Path(file_okay=True, dir_okay=False, readable=True),
    required=True,
)
@click.argument("out", type=click.File("w"), default=sys.stdout)
def dax(locations, elevation, out=sys.stdout):
    """Generate the top-level Pegasus DAX plus one sub-workflow per weather grid.

    :param locations: CSV file of LATITUDE/LONGITUDE rows (see _gldas.iterlocations)
    :param elevation: GLDAS elevation NetCDF used to snap points to grid cells
    :param out: stream the top-level DAX XML is written to (default: stdout)

    NOTE(review): `a` is not defined in this module; presumably it is a
    module-level ADAG created in pegasus_cycles._adag and pulled in by the
    star import — confirm.
    """
    logging.info("Generate weather grids")
    weather = set()
    subwf_dir = "subwfs"
    # NOTE(review): os.mkdir raises FileExistsError on a second run in the
    # same directory — confirm whether that is intended.
    os.mkdir(subwf_dir)
    prev_subwf_job = None
    for _lat, _lon in iterlocations(locations):
        # Snap the point to its GLDAS grid cell; xy is the weather file name.
        xy = closest(_lat, _lon, elevation)
        if xy not in weather:
            # First location seen for this grid cell: build its sub-workflow.
            _w = (_lat, _lon, xy)
            weather.add(_w)
            # GLDAS to Cycles job (produces the weather file for this cell)
            gldas_job = gldas_to_cycles(_lat, _lon, xy)
            a.addJob(gldas_job)
            # generate subworkflow per location
            logging.info("Generating subworkflows")
            # e.g. "met12.34Nx56.78E.weather" -> "subwf_12_34Nx56_78E"
            subwf_id = "subwf_" + _w[2].replace("met", "").replace(".weather", "").replace(".", "_")
            subwf = ADAG(subwf_id)
            # Cycles jobs: one baseline + one regular run per parameter
            # combination, plus a fertilizer-increase run for non-Peanut crops.
            for _row in itercombinations([_w]):
                fertilizers = _row[7]
                coordinates = _row[2]
                # Deterministic run id from the full parameter combination.
                id = "_".join([_row[1], _row[4], _row[5], _row[9], fertilizers[1], _row[10], _row[8], coordinates[2]])
                unique_id = hashlib.md5(id.encode('utf-8')).hexdigest()
                # Reinit state written by the baseline run, consumed by the others.
                reinit_file = "baseline_cycles_reinit-" + unique_id + ".dat"
                # baseline job
                subwf.addJob(cycles(
                    unique_id=unique_id,
                    crop=_row[1],
                    start_planting_date=_row[4],
                    end_planting_date=_row[5],
                    planting_date_fixed=_row[9],
                    fertilizer_rate=fertilizers[1],
                    weed_fraction=_row[10],
                    forcing=_row[8],
                    weather_file=coordinates[2],
                    reinit_file=None,
                    baseline=True,
                    fertilizer_increase=False,
                    weather=_row[2]
                ))
                # cycles job
                subwf.addJob(cycles(
                    unique_id=unique_id,
                    crop=_row[1],
                    start_planting_date=_row[4],
                    end_planting_date=_row[5],
                    planting_date_fixed=_row[9],
                    fertilizer_rate=fertilizers[1],
                    weed_fraction=_row[10],
                    forcing=_row[8],
                    weather_file=coordinates[2],
                    reinit_file=reinit_file,
                    baseline=False,
                    fertilizer_increase=False,
                    weather=_row[2]
                ))
                # fertilizer increase job (Peanut is excluded from the
                # fertilizer-increase scenario)
                if _row[1] != "Peanut":
                    subwf.addJob(cycles(
                        unique_id=unique_id,
                        crop=_row[1],
                        start_planting_date=_row[4],
                        end_planting_date=_row[5],
                        planting_date_fixed=_row[9],
                        fertilizer_rate=fertilizers[1],
                        weed_fraction=_row[10],
                        forcing=_row[8],
                        weather_file=coordinates[2],
                        reinit_file=reinit_file,
                        baseline=False,
                        fertilizer_increase=True,
                        weather=_row[2]
                    ))
                    subwf.addJob(cycles_fertilizer_increase_output_parser(
                        unique_id=unique_id,
                        crop=_row[1],
                        weather=_row[2]
                    ))
            # Cycles output parser job (per crop, plus a Peanut-only parser
            # since Peanut has no fertilizer-increase summary)
            for crop in crops:
                subwf.addJob(cycles_output_parser(_w, crop))
                subwf.addJob(cycles_fertilizer_increase_output_summary(_w, crop))
            subwf.addJob(cycles_output_parser(_w, "Peanut"))
            # write subworkflow DAX file
            with open(subwf_dir + "/" + subwf_id + ".xml", "w") as subwf_out:
                subwf.writeXML(subwf_out)
            # Register the sub-workflow DAX as a file of the top-level DAG.
            subwf_dax = File(subwf_id + ".xml")
            subwf_dax.addPFN(PFN("file://" + os.getcwd() + "/" + subwf_dir + "/" + subwf_id + ".xml", "local"))
            a.addFile(subwf_dax)
            subwf_job = DAX(subwf_id + ".xml", id=subwf_id)
            # Throttle concurrent sub-workflows via the dagman "subwf" category.
            subwf_job.addProfile(Profile("dagman", "CATEGORY", "subwf"))
            subwf_job.uses(File(xy), Link.INPUT)
            subwf_job.uses(subwf_dax)
            subwf_job.addArguments("-Dpegasus.catalog.site.file=" + os.getcwd() + "/sites.xml",
                                   "--sites", "condor_pool",
                                   "--output-site", "local",
                                   "--cluster", "horizontal",
                                   "--cleanup", "inplace")
            a.addDAX(subwf_job)
            # The sub-workflow needs the weather file produced by its GLDAS job.
            a.depends(parent=gldas_job, child=subwf_job)
            # add dependency for previous subworkflow (disabled: would serialize
            # all sub-workflows)
            # if prev_subwf_job:
            #     a.depends(parent=prev_subwf_job, child=subwf_job)
            prev_subwf_job = subwf_job
    # write top level DAX
    a.writeXML(out)
    click.secho(f"Success", fg="green")
|
pegasus-isi/pegasus-cycles | src/pegasus_cycles/_pegasus.py | <gh_stars>0
# -*- coding: utf-8 -*-
from functools import wraps
from Pegasus.DAX3 import ADAG as Base
from Pegasus.DAX3 import *
class ADAG(Base):
    """Pegasus DAX3 ADAG extended with decorator factories.

    The decorators wrap functions that build Pegasus entities and register
    their (truthy) return values on this DAG automatically.
    """

    def job(self):
        """Decorator factory: add the decorated function's return value as a job."""
        def wrap(f):
            @wraps(f)
            def wrapped_f(*args, **kwargs):
                result = f(*args, **kwargs)
                if result:
                    self.addJob(result)
                return result
            return wrapped_f
        return wrap

    def transformation(self):
        """Decorator factory: build a Transformation from the returned entries.

        The decorated function may return a single entry or a list/tuple; a
        Transformation named after the first entry is created, every entry is
        attached to it and registered on the DAG (executables vs. files).
        """
        def wrap(f):
            @wraps(f)
            def wrapped_f(*args, **kwargs):
                result = f(*args, **kwargs)
                if not result:
                    return result
                entries = result if isinstance(result, (list, tuple)) else [result]
                transformation = Transformation(entries[0].name)
                for entry in entries:
                    transformation.uses(entry)
                    try:
                        if isinstance(entry, Executable):
                            self.addExecutable(entry)
                        else:
                            self.addFile(entry)
                    except DuplicateError:
                        # Entry already registered on this DAG; safe to skip.
                        pass
                self.addTransformation(transformation)
                return entries
            return wrapped_f
        return wrap

    def resource_info(self, cpu=1, core=1, memory=1):
        """Decorator factory: attach non-default Pegasus resource profiles.

        Only values different from the default of 1 are recorded on the
        returned entity (the first element if a list/tuple is returned).
        """
        def wrap(f):
            @wraps(f)
            def wrapped_f(*args, **kwargs):
                result = f(*args, **kwargs)
                if result:
                    target = result[0] if isinstance(result, (list, tuple)) else result
                    for key, value in (("cpu", cpu), ("core", core), ("memory", memory)):
                        if value != 1:
                            target.profile(Namespace.PEGASUS, key, value)
                return result
            return wrapped_f
        return wrap
|
pegasus-isi/pegasus-cycles | bin/cycles-output-parser.py | <gh_stars>0
#!/usr/bin/env python3
"""Cycles Output Parser."""
import argparse
import csv
import os
def parse_outputs(output_file, params_files, **kwargs):
    """Merge Cycles run parameters and season outputs into one summary CSV.

    Each params file is a CSV whose last row holds the 10 run parameters;
    field 9 is the path of the run's tab-delimited season file. Each season
    row (after two header lines) becomes one summary row: the 9 parameters,
    the year (first 4 chars of the date in column 0), and season columns 2-18.

    :param output_file: path of the summary CSV to write
    :param params_files: iterable of params-file paths, one per Cycles run
    :param kwargs: ignored (allows argparse namespaces with extra keys)
    """
    with open(output_file, 'w', newline='') as csvfile:
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow([
            'unique_id',
            'crop',
            'location',
            'planting_date',
            'end_planting_date',
            'planting_date_fixed',
            'nitrogen_rate',
            'weed_fraction',
            'forcing',
            'year',
            'total_biomass',
            'root_biomass',
            'grain_yield',
            'forage_yield',
            'ag_residue',
            'harvest_index',
            'potential_tr',
            'actual_tr',
            'soil_evap',
            'total_n',
            'root_n',
            'grain_n',
            'forage_n',
            'cum_n_stress',
            'n_in_harvest',
            'n_in_residue',
            'n_concn_forage'
        ])
        for params_path in params_files:
            # Bug fix: `params` was shadowed three times (list, then the open
            # file handle, then each CSV row) — use distinct names and keep
            # only the last row, as before.
            params = []
            with open(params_path) as params_file:
                reader = csv.reader(params_file, skipinitialspace=True, quotechar="'")
                for row in reader:
                    params = row
            if not params:
                # Robustness: an empty params file previously raised
                # IndexError on params[9]; skip it instead.
                continue
            # params[9] is the path of the season file for this run.
            with open(params[9]) as season_file:
                csvreader = csv.reader(season_file, delimiter='\t')
                # Skip the two header lines of the season file.
                next(csvreader)
                next(csvreader)
                for row in csvreader:
                    # Year from the date column, then season columns 2..18.
                    csvwriter.writerow(
                        params[0:9]
                        + [row[0][:4].strip()]
                        + [row[i].strip() for i in range(2, 19)]
                    )
def _main():
    """CLI entry point: parse arguments and write the summary CSV."""
    parser = argparse.ArgumentParser(
        description="Generate CSV file from Cycles Outputs."
    )
    parser.add_argument("-o", "--output-file", dest="output_file", default="output-summary.csv",
                        help="Summary CSV file")
    # Bug fix: help text previously said "Summary CSV file" (copy-paste from
    # the -o option); -p is a Cycles params file and may be given repeatedly.
    parser.add_argument("-p", "--params-file", action="append", dest="params_files",
                        help="Params file for a Cycles run (may be repeated)")
    args = parser.parse_args()
    parse_outputs(**vars(args))


if __name__ == "__main__":
    _main()
|
jfsantos/stable-baselines | stable_baselines/common/input.py | <filename>stable_baselines/common/input.py
import tensorflow as tf
from gym.spaces import Discrete, Box
def observation_input(ob_space, batch_size=None, name='Ob'):
    """
    Build observation input with encoding depending on the observation space type

    :param ob_space: (Gym Space) The observation space
    :param batch_size: (int) batch size for input
        (default is None, so that resulting input placeholder can take tensors with any batch size)
    :param name: (str) tensorflow variable name for input placeholder
    :return: (TensorFlow Tensor, TensorFlow Tensor) input_placeholder, processed_input_tensor
    """
    if isinstance(ob_space, Discrete):
        # Discrete observations enter as integer indices and are one-hot
        # encoded, then cast to float for the network.
        placeholder = tf.placeholder(shape=(batch_size,), dtype=tf.int32, name=name)
        encoded = tf.to_float(tf.one_hot(placeholder, ob_space.n))
        return placeholder, encoded
    if isinstance(ob_space, Box):
        # Box observations keep their native shape/dtype and are cast to float.
        placeholder = tf.placeholder(shape=(batch_size,) + ob_space.shape, dtype=ob_space.dtype, name=name)
        return placeholder, tf.to_float(placeholder)
    raise NotImplementedError("Error: the model does not support input space of type {}".format(
        type(ob_space).__name__))
|
jfsantos/stable-baselines | stable_baselines/common/policies.py | import numpy as np
import tensorflow as tf
from gym.spaces import Discrete
from stable_baselines.a2c.utils import conv, linear, conv_to_fc, batch_to_seq, seq_to_batch, lstm
from stable_baselines.common.distributions import make_proba_dist_type
from stable_baselines.common.input import observation_input
def nature_cnn(unscaled_images, **kwargs):
    """
    CNN from Nature paper.

    :param unscaled_images: (TensorFlow Tensor) Image input placeholder
    :param kwargs: (dict) Extra keywords parameters for the convolutional layers of the CNN
    :return: (TensorFlow Tensor) The CNN output layer
    """
    relu = tf.nn.relu
    # Pixel values are rescaled from [0, 255] to [0, 1].
    images = tf.cast(unscaled_images, tf.float32) / 255.
    conv1 = relu(conv(images, 'c1', n_filters=32, filter_size=8, stride=4, init_scale=np.sqrt(2), **kwargs))
    conv2 = relu(conv(conv1, 'c2', n_filters=64, filter_size=4, stride=2, init_scale=np.sqrt(2), **kwargs))
    conv3 = relu(conv(conv2, 'c3', n_filters=64, filter_size=3, stride=1, init_scale=np.sqrt(2), **kwargs))
    flattened = conv_to_fc(conv3)
    return relu(linear(flattened, 'fc1', n_hidden=512, init_scale=np.sqrt(2)))
class ActorCriticPolicy(object):
    """Base policy object for actor-critic algorithms.

    Subclasses must set ``self.policy``, ``self.proba_distribution`` and
    ``self.value_fn`` and then call ``_setup_init()``.
    """

    def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm=256, reuse=False):
        """
        :param sess: (TensorFlow session) The current TensorFlow session
        :param ob_space: (Gym Space) The observation space of the environment
        :param ac_space: (Gym Space) The action space of the environment
        :param n_env: (int) The number of environments to run
        :param n_steps: (int) The number of steps to run for each environment
        :param n_batch: (int) The number of batch to run (n_envs * n_steps)
        :param n_lstm: (int) The number of LSTM cells (for recurrent policies)
        :param reuse: (bool) If the policy is reusable or not
        """
        self.n_env = n_env
        self.n_steps = n_steps
        # Observation placeholder and its float-processed counterpart.
        self.obs_ph, self.processed_x = observation_input(ob_space, n_batch)
        # Done-mask from the previous step (used by recurrent policies).
        self.masks_ph = tf.placeholder(tf.float32, [n_batch])
        # Recurrent hidden state: cell + hidden concatenated, hence n_lstm * 2.
        self.states_ph = tf.placeholder(tf.float32, [self.n_env, n_lstm * 2])
        self.pdtype = make_proba_dist_type(ac_space)
        self.sess = sess
        self.reuse = reuse
        self.is_discrete = isinstance(ac_space, Discrete)
        # Filled in by subclasses before _setup_init() runs.
        self.policy = None
        self.proba_distribution = None
        self.value_fn = None

    def _setup_init(self):
        """Set up the sampled action, its log-probability, and value tensors."""
        assert self.policy is not None and self.proba_distribution is not None and self.value_fn is not None
        self.action = self.proba_distribution.sample()
        self.neglogp = self.proba_distribution.neglogp(self.action)
        # For discrete actions expose softmax probabilities; otherwise the
        # raw policy output.
        self.policy_proba = tf.nn.softmax(self.policy) if self.is_discrete else self.policy
        self._value = self.value_fn[:, 0]

    def step(self, obs, state=None, mask=None):
        """
        Returns the policy for a single step

        :param obs: ([float] or [int]) The current observation of the environment
        :param state: ([float]) The last states (used in recurrent policies)
        :param mask: ([float]) The last masks (used in recurrent policies)
        :return: ([float], [float], [float], [float]) actions, values, states, neglogp
        """
        raise NotImplementedError

    def proba_step(self, obs, state=None, mask=None):
        """
        Returns the action probability for a single step

        :param obs: ([float] or [int]) The current observation of the environment
        :param state: ([float]) The last states (used in recurrent policies)
        :param mask: ([float]) The last masks (used in recurrent policies)
        :return: ([float]) the action probability
        """
        raise NotImplementedError

    def value(self, obs, state=None, mask=None):
        """
        Returns the value for a single step

        :param obs: ([float] or [int]) The current observation of the environment
        :param state: ([float]) The last states (used in recurrent policies)
        :param mask: ([float]) The last masks (used in recurrent policies)
        :return: ([float]) The associated value of the action
        """
        raise NotImplementedError
class LstmPolicy(ActorCriticPolicy):
    # Actor-critic policy with an LSTM core over extracted features
    # (CNN or MLP), sharing the recurrent output between policy and value.
    def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm=256, reuse=False, layers=None,
                 cnn_extractor=nature_cnn, layer_norm=False, feature_extraction="cnn", **kwargs):
        """
        Policy object that implements actor critic, using LSTMs

        :param sess: (TensorFlow session) The current TensorFlow session
        :param ob_space: (Gym Space) The observation space of the environment
        :param ac_space: (Gym Space) The action space of the environment
        :param n_env: (int) The number of environments to run
        :param n_steps: (int) The number of steps to run for each environment
        :param n_batch: (int) The number of batch to run (n_envs * n_steps)
        :param n_lstm: (int) The number of LSTM cells (for reccurent policies)
        :param reuse: (bool) If the policy is reusable or not
        :param layers: ([int]) The size of the Neural network before the LSTM layer (if None, default to [64, 64])
        :param cnn_extractor: (function (TensorFlow Tensor, **kwargs): (TensorFlow Tensor)) the CNN feature extraction
        :param layer_norm: (bool) Whether or not to use layer normalizing LSTMs
        :param feature_extraction: (str) The feature extraction type ("cnn" or "mlp")
        :param kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
        """
        super(LstmPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm, reuse)
        if layers is None:
            layers = [64, 64]
        with tf.variable_scope("model", reuse=reuse):
            if feature_extraction == "cnn":
                extracted_features = cnn_extractor(self.processed_x, **kwargs)
            else:
                # MLP path: flatten, then a tanh dense stack sized by `layers`.
                activ = tf.tanh
                extracted_features = tf.layers.flatten(self.processed_x)
                for i, layer_size in enumerate(layers):
                    extracted_features = activ(linear(extracted_features, 'pi_fc' + str(i), n_hidden=layer_size,
                                                      init_scale=np.sqrt(2)))
            # Reshape the flat batch into per-env sequences for the LSTM, and
            # split the done-masks the same way so state resets line up.
            input_sequence = batch_to_seq(extracted_features, self.n_env, n_steps)
            masks = batch_to_seq(self.masks_ph, self.n_env, n_steps)
            rnn_output, self.snew = lstm(input_sequence, masks, self.states_ph, 'lstm1', n_hidden=n_lstm,
                                         layer_norm=layer_norm)
            rnn_output = seq_to_batch(rnn_output)
            value_fn = linear(rnn_output, 'vf', 1)
            # Policy and value heads both read the shared recurrent output.
            self.proba_distribution, self.policy, self.q_value = \
                self.pdtype.proba_distribution_from_latent(rnn_output, rnn_output)
        self.value_fn = value_fn
        # Zeroed initial recurrent state (cell + hidden per environment).
        self.initial_state = np.zeros((self.n_env, n_lstm * 2), dtype=np.float32)
        self._setup_init()

    def step(self, obs, state=None, mask=None):
        # Returns (actions, values, new recurrent state, neglogp) for one step.
        return self.sess.run([self.action, self._value, self.snew, self.neglogp],
                             {self.obs_ph: obs, self.states_ph: state, self.masks_ph: mask})

    def proba_step(self, obs, state=None, mask=None):
        # Action probability distribution for one step.
        return self.sess.run(self.policy_proba, {self.obs_ph: obs, self.states_ph: state, self.masks_ph: mask})

    def value(self, obs, state=None, mask=None):
        # Value estimate for one step.
        return self.sess.run(self._value, {self.obs_ph: obs, self.states_ph: state, self.masks_ph: mask})
class FeedForwardPolicy(ActorCriticPolicy):
    # Stateless actor-critic policy: CNN features shared by both heads, or
    # two separate MLP towers (pi_fc*/vf_fc*) for policy and value.
    def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse=False, layers=None,
                 cnn_extractor=nature_cnn, feature_extraction="cnn", **kwargs):
        """
        Policy object that implements actor critic, using a feed forward neural network

        :param sess: (TensorFlow session) The current TensorFlow session
        :param ob_space: (Gym Space) The observation space of the environment
        :param ac_space: (Gym Space) The action space of the environment
        :param n_env: (int) The number of environments to run
        :param n_steps: (int) The number of steps to run for each environment
        :param n_batch: (int) The number of batch to run (n_envs * n_steps)
        :param reuse: (bool) If the policy is reusable or not
        :param layers: ([int]) The size of the Neural network for the policy (if None, default to [64, 64])
        :param cnn_extractor: (function (TensorFlow Tensor, **kwargs): (TensorFlow Tensor)) the CNN feature extraction
        :param feature_extraction: (str) The feature extraction type ("cnn" or "mlp")
        :param kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
        """
        super(FeedForwardPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm=256,
                                                reuse=reuse)
        if layers is None:
            layers = [64, 64]
        with tf.variable_scope("model", reuse=reuse):
            if feature_extraction == "cnn":
                # CNN path: a single feature extractor shared by both heads.
                extracted_features = cnn_extractor(self.processed_x, **kwargs)
                value_fn = linear(extracted_features, 'vf', 1)
                pi_latent = extracted_features
                vf_latent = extracted_features
            else:
                # MLP path: independent tanh towers for policy and value.
                activ = tf.tanh
                processed_x = tf.layers.flatten(self.processed_x)
                pi_h = processed_x
                vf_h = processed_x
                for i, layer_size in enumerate(layers):
                    pi_h = activ(linear(pi_h, 'pi_fc' + str(i), n_hidden=layer_size, init_scale=np.sqrt(2)))
                    vf_h = activ(linear(vf_h, 'vf_fc' + str(i), n_hidden=layer_size, init_scale=np.sqrt(2)))
                value_fn = linear(vf_h, 'vf', 1)
                pi_latent = pi_h
                vf_latent = vf_h
            self.proba_distribution, self.policy, self.q_value = \
                self.pdtype.proba_distribution_from_latent(pi_latent, vf_latent, init_scale=0.01)
        self.value_fn = value_fn
        # Feed-forward policy carries no recurrent state.
        self.initial_state = None
        self._setup_init()

    def step(self, obs, state=None, mask=None):
        # state/mask are accepted for interface compatibility but unused.
        action, value, neglogp = self.sess.run([self.action, self._value, self.neglogp], {self.obs_ph: obs})
        return action, value, self.initial_state, neglogp

    def proba_step(self, obs, state=None, mask=None):
        # Action probability distribution for one step.
        return self.sess.run(self.policy_proba, {self.obs_ph: obs})

    def value(self, obs, state=None, mask=None):
        # Value estimate for one step.
        return self.sess.run(self._value, {self.obs_ph: obs})
class CnnPolicy(FeedForwardPolicy):
    """Actor-critic policy backed by the Nature CNN feature extractor."""

    def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse=False, **_kwargs):
        """
        :param sess: (TensorFlow session) The current TensorFlow session
        :param ob_space: (Gym Space) The observation space of the environment
        :param ac_space: (Gym Space) The action space of the environment
        :param n_env: (int) The number of environments to run
        :param n_steps: (int) The number of steps to run for each environment
        :param n_batch: (int) The number of batch to run (n_envs * n_steps)
        :param reuse: (bool) If the policy is reusable or not
        :param _kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
        """
        super(CnnPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse=reuse,
                                        feature_extraction="cnn", **_kwargs)
class CnnLstmPolicy(LstmPolicy):
    """Actor-critic policy: LSTM over Nature-CNN features (no layer norm)."""

    def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm=256, reuse=False, **_kwargs):
        """
        :param sess: (TensorFlow session) The current TensorFlow session
        :param ob_space: (Gym Space) The observation space of the environment
        :param ac_space: (Gym Space) The action space of the environment
        :param n_env: (int) The number of environments to run
        :param n_steps: (int) The number of steps to run for each environment
        :param n_batch: (int) The number of batch to run (n_envs * n_steps)
        :param n_lstm: (int) The number of LSTM cells (for recurrent policies)
        :param reuse: (bool) If the policy is reusable or not
        :param _kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
        """
        super(CnnLstmPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm, reuse,
                                            layer_norm=False, feature_extraction="cnn", **_kwargs)
class CnnLnLstmPolicy(LstmPolicy):
    """Actor-critic policy: layer-normalized LSTM over Nature-CNN features."""

    def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm=256, reuse=False, **_kwargs):
        """
        :param sess: (TensorFlow session) The current TensorFlow session
        :param ob_space: (Gym Space) The observation space of the environment
        :param ac_space: (Gym Space) The action space of the environment
        :param n_env: (int) The number of environments to run
        :param n_steps: (int) The number of steps to run for each environment
        :param n_batch: (int) The number of batch to run (n_envs * n_steps)
        :param n_lstm: (int) The number of LSTM cells (for recurrent policies)
        :param reuse: (bool) If the policy is reusable or not
        :param _kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
        """
        super(CnnLnLstmPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm, reuse,
                                              layer_norm=True, feature_extraction="cnn", **_kwargs)
class MlpPolicy(FeedForwardPolicy):
    """Actor-critic policy backed by an MLP (two layers of 64 by default)."""

    def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse=False, **_kwargs):
        """
        :param sess: (TensorFlow session) The current TensorFlow session
        :param ob_space: (Gym Space) The observation space of the environment
        :param ac_space: (Gym Space) The action space of the environment
        :param n_env: (int) The number of environments to run
        :param n_steps: (int) The number of steps to run for each environment
        :param n_batch: (int) The number of batch to run (n_envs * n_steps)
        :param reuse: (bool) If the policy is reusable or not
        :param _kwargs: (dict) Extra keyword arguments for the feature extraction
        """
        super(MlpPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse=reuse,
                                        feature_extraction="mlp", **_kwargs)
class MlpLstmPolicy(LstmPolicy):
    """Actor-critic policy: LSTM over MLP features (no layer norm)."""

    def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm=256, reuse=False, **_kwargs):
        """
        :param sess: (TensorFlow session) The current TensorFlow session
        :param ob_space: (Gym Space) The observation space of the environment
        :param ac_space: (Gym Space) The action space of the environment
        :param n_env: (int) The number of environments to run
        :param n_steps: (int) The number of steps to run for each environment
        :param n_batch: (int) The number of batch to run (n_envs * n_steps)
        :param n_lstm: (int) The number of LSTM cells (for recurrent policies)
        :param reuse: (bool) If the policy is reusable or not
        :param _kwargs: (dict) Extra keyword arguments for the feature extraction
        """
        super(MlpLstmPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm, reuse,
                                            layer_norm=False, feature_extraction="mlp", **_kwargs)
class MlpLnLstmPolicy(LstmPolicy):
    """Actor-critic policy: layer-normalized LSTM over MLP features."""

    def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm=256, reuse=False, **_kwargs):
        """
        :param sess: (TensorFlow session) The current TensorFlow session
        :param ob_space: (Gym Space) The observation space of the environment
        :param ac_space: (Gym Space) The action space of the environment
        :param n_env: (int) The number of environments to run
        :param n_steps: (int) The number of steps to run for each environment
        :param n_batch: (int) The number of batch to run (n_envs * n_steps)
        :param n_lstm: (int) The number of LSTM cells (for recurrent policies)
        :param reuse: (bool) If the policy is reusable or not
        :param _kwargs: (dict) Extra keyword arguments for the feature extraction
        """
        super(MlpLnLstmPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm, reuse,
                                              layer_norm=True, feature_extraction="mlp", **_kwargs)
|
jfsantos/stable-baselines | stable_baselines/ddpg/__init__.py | from stable_baselines.ddpg.ddpg import DDPG
|
jfsantos/stable-baselines | tests/test_identity.py | <filename>tests/test_identity.py
import pytest
from stable_baselines.a2c import A2C
from stable_baselines.acer import ACER
from stable_baselines.acktr import ACKTR
from stable_baselines.deepq import DeepQ
from stable_baselines.ppo1 import PPO1
from stable_baselines.ppo2 import PPO2
from stable_baselines.trpo_mpi import TRPO
from stable_baselines.common.identity_env import IdentityEnv
from stable_baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.deepq import models as deepq_models
# One learner per algorithm under test. Each lambda trains the given model on
# environment `e` with seed 0 so the parametrized runs are reproducible;
# per-algorithm hyperparameters are tuned for the tiny identity task.
learn_func_list = [
    lambda e: A2C(policy=MlpPolicy, learning_rate=1e-3, n_steps=1,
                  gamma=0.7, env=e).learn(total_timesteps=10000, seed=0),
    lambda e: ACER(policy=MlpPolicy, env=e,
                   n_steps=1, replay_ratio=1).learn(total_timesteps=10000, seed=0),
    lambda e: ACKTR(policy=MlpPolicy, env=e, learning_rate=5e-4, n_steps=1).learn(total_timesteps=20000, seed=0),
    lambda e: DeepQ(policy=deepq_models.mlp([16]), batch_size=16, gamma=0.1,
                    exploration_fraction=0.001, env=e).learn(total_timesteps=30000, seed=0),
    lambda e: PPO1(policy=MlpPolicy, env=e, lam=0.7,
                   optim_batchsize=16, optim_stepsize=1e-3).learn(total_timesteps=10000, seed=0),
    lambda e: PPO2(policy=MlpPolicy, env=e, learning_rate=1.5e-3,
                   lam=0.8).learn(total_timesteps=20000, seed=0),
    lambda e: TRPO(policy=MlpPolicy, env=e, max_kl=0.05, lam=0.7).learn(total_timesteps=10000, seed=0),
]
@pytest.mark.slow
@pytest.mark.parametrize("learn_func", learn_func_list)
def test_identity(learn_func):
    """
    Test if the algorithm (with a given policy)
    can learn an identity transformation (i.e. return observation as an action)

    :param learn_func: (lambda (Gym Environment): A2CPolicy) the policy generator
    """
    env = DummyVecEnv([lambda: IdentityEnv(10)])
    model = learn_func(env)

    n_trials = 1000
    obs = env.reset()
    total_reward = 0
    for _ in range(n_trials):
        action, _ = model.predict(obs)
        obs, reward, _, _ = env.step(action)
        total_reward += reward

    # The trained model must reproduce the observation at least 90% of the time.
    assert total_reward > 0.9 * n_trials

    # Free memory
    del model, env
|
jfsantos/stable-baselines | tests/test_action_space.py | <reponame>jfsantos/stable-baselines
import pytest
from stable_baselines.a2c import A2C
from stable_baselines.ppo1 import PPO1
from stable_baselines.ppo2 import PPO2
from stable_baselines.trpo_mpi import TRPO
from stable_baselines.common.identity_env import IdentityEnvMultiBinary, IdentityEnvMultiDiscrete
from stable_baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from stable_baselines.common.policies import MlpPolicy
# One model factory per on-policy algorithm under test; each builds an
# untrained model for environment `e` (training happens inside the tests).
MODEL_FUNC_LIST = [
    lambda e: A2C(policy=MlpPolicy, env=e),
    lambda e: PPO1(policy=MlpPolicy, env=e),
    lambda e: PPO2(policy=MlpPolicy, env=e),
    lambda e: TRPO(policy=MlpPolicy, env=e),
]
@pytest.mark.slow
@pytest.mark.parametrize("model_func", MODEL_FUNC_LIST)
def test_identity_multidiscrete(model_func):
    """
    Test if the algorithm (with a given policy)
    can learn an identity transformation (i.e. return observation as an action)
    with a multidiscrete action space

    :param model_func: (lambda (Gym Environment): BaseRLModel) the model generator
    """
    env = DummyVecEnv([lambda: IdentityEnvMultiDiscrete(10)])
    model = model_func(env)
    model.learn(total_timesteps=1000, seed=0)

    # NOTE(review): unlike test_identity, nothing is asserted on the reward —
    # this only checks that learning and prediction run without errors.
    n_trials = 1000
    obs = env.reset()
    total_reward = 0
    for _ in range(n_trials):
        action, _ = model.predict(obs)
        obs, reward, _, _ = env.step(action)
        total_reward += reward
@pytest.mark.slow
@pytest.mark.parametrize("model_func", MODEL_FUNC_LIST)
def test_identity_multibinary(model_func):
    """
    Test if the algorithm (with a given policy)
    can learn an identity transformation (i.e. return observation as an action)
    with a multibinary action space

    :param model_func: (lambda (Gym Environment): BaseRLModel) the model generator
    """
    env = DummyVecEnv([lambda: IdentityEnvMultiBinary(10)])
    model = model_func(env)
    model.learn(total_timesteps=1000, seed=0)

    # NOTE(review): unlike test_identity, nothing is asserted on the reward —
    # this only checks that learning and prediction run without errors.
    n_trials = 1000
    obs = env.reset()
    total_reward = 0
    for _ in range(n_trials):
        action, _ = model.predict(obs)
        obs, reward, _, _ = env.step(action)
        total_reward += reward
|
jfsantos/stable-baselines | stable_baselines/common/base_class.py | <filename>stable_baselines/common/base_class.py
from abc import ABC, abstractmethod
import os
import cloudpickle
import numpy as np
import gym
from stable_baselines.common import set_global_seeds
from stable_baselines.common.policies import LstmPolicy
from stable_baselines.common.vec_env import VecEnvWrapper, VecEnv, DummyVecEnv
from stable_baselines import logger
class BaseRLModel(ABC):
    def __init__(self, policy, env, requires_vec_env, verbose=0):
        """
        The base RL model

        :param policy: (Object) Policy object
        :param env: (Gym environment) The environment to learn from
            (if registered in Gym, can be str. Can be None for loading trained models)
        :param requires_vec_env: (bool) whether the algorithm needs a VecEnv
        :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
        """
        super(BaseRLModel, self).__init__()

        self.policy = policy
        self.env = env
        self.verbose = verbose
        self._requires_vec_env = requires_vec_env
        self.observation_space = None
        self.action_space = None
        self.n_envs = None
        self._vectorize_action = False

        if env is not None:
            if isinstance(env, str):
                if self.verbose >= 1:
                    print("Creating environment from the given name, wrapped in a DummyVecEnv.")
                # NOTE(review): the lambda closes over `env` late; this relies
                # on DummyVecEnv calling the factory while `env` is still the
                # string (i.e. during construction, before rebinding) — confirm.
                self.env = env = DummyVecEnv([lambda: gym.make(env)])

            self.observation_space = env.observation_space
            self.action_space = env.action_space
            if requires_vec_env:
                if isinstance(env, VecEnv):
                    self.n_envs = env.num_envs
                else:
                    raise ValueError("Error: the model requires a vectorized environment, please use a VecEnv wrapper.")
            else:
                # Algorithms that want a plain env accept a single-env VecEnv
                # by unwrapping it; multi-env VecEnvs cannot be adapted.
                if isinstance(env, VecEnv):
                    if env.num_envs == 1:
                        self.env = _UnvecWrapper(env)
                        self.n_envs = 1
                        # Remember to re-wrap predicted actions for the caller.
                        self._vectorize_action = True
                    else:
                        raise ValueError("Error: the model requires a non vectorized environment or a single vectorized"
                                         " environment.")
    def get_env(self):
        """
        Returns the current environment (can be None if not defined).

        :return: (Gym Environment) The current environment
        """
        return self.env
    def set_env(self, env):
        """
        Checks the validity of the environment, and if it is coherent, set it as the current environment.

        :param env: (Gym Environment) The environment for learning a policy
        """
        if env is None and self.env is None:
            # Nothing to replace and nothing set: warn (if verbose) and keep None.
            if self.verbose >= 1:
                print("Loading a model without an environment, "
                      "this model cannot be trained until it has a valid environment.")
            return
        elif env is None:
            raise ValueError("Error: trying to replace the current environment with None")

        # sanity checking the environment
        assert self.observation_space == env.observation_space, \
            "Error: the environment passed must have at least the same observation space as the model was trained on."
        assert self.action_space == env.action_space, \
            "Error: the environment passed must have at least the same action space as the model was trained on."
        if self._requires_vec_env:
            assert isinstance(env, VecEnv), \
                "Error: the environment passed is not a vectorized environment, however {} requires it".format(
                    self.__class__.__name__)
            # Recurrent policies bake the env count into their state shapes.
            assert not issubclass(self.policy, LstmPolicy) or self.n_envs == env.num_envs, \
                "Error: the environment passed must have the same number of environments as the model was trained on." \
                "This is due to the Lstm policy not being capable of changing the number of environments."
            self.n_envs = env.num_envs

        # for models that dont want vectorized environment, check if they make sense and adapt them.
        # Otherwise tell the user about this issue
        if not self._requires_vec_env and isinstance(env, VecEnv):
            if env.num_envs == 1:
                env = _UnvecWrapper(env)
                self.n_envs = 1
                self._vectorize_action = True
            else:
                raise ValueError("Error: the model requires a non vectorized environment or a single vectorized "
                                 "environment.")
        else:
            self._vectorize_action = False

        self.env = env
    @abstractmethod
    def setup_model(self):
        """
        Create all the functions and tensorflow graphs necessary to train the model

        Concrete subclasses build their computation graph and session here.
        """
        pass
def _setup_learn(self, seed):
"""
check the environment, set the seed, and set the logger
:param seed: (int) the seed value
"""
if self.env is None:
raise ValueError("Error: cannot train the model without a valid environment, please set an environment with"
"set_env(self, env) method.")
if seed is not None:
set_global_seeds(seed)
    @abstractmethod
    def learn(self, total_timesteps, callback=None, seed=None, log_interval=100):
        """
        Return a trained model.
        :param total_timesteps: (int) The total number of samples to train on
        :param seed: (int) The initial seed for training, if None: keep current seed
        :param callback: (function (dict, dict)) function called at every steps with state of the algorithm.
            It takes the local and global variables.
        :param log_interval: (int) The number of timesteps before logging.
        :return: (BaseRLModel) the trained model
        """
        pass
    @abstractmethod
    def predict(self, observation, state=None, mask=None):
        """
        Get the model's action from an observation
        :param observation: (numpy Number) the input observation
        :param state: (numpy Number) The last states (can be None, used in recurrent policies)
        :param mask: (numpy Number) The last masks (can be None, used in recurrent policies)
        :return: (numpy Number, numpy Number) the model's action and the next state (used in recurrent policies)
        """
        pass
    @abstractmethod
    def action_probability(self, observation, state=None, mask=None):
        """
        Get the model's action probability distribution from an observation
        :param observation: (numpy Number) the input observation
        :param state: (numpy Number) The last states (can be None, used in recurrent policies)
        :param mask: (numpy Number) The last masks (can be None, used in recurrent policies)
        :return: (numpy Number) the model's action probability distribution
        """
        pass
    @abstractmethod
    def save(self, save_path):
        """
        Save the current parameters to file
        :param save_path: (str) the save location
        :raises NotImplementedError: always; subclasses must override
        """
        # Subclasses typically delegate to the shared helper, e.g.:
        # self._save_to_file(save_path, data={}, params=None)
        raise NotImplementedError()
    @classmethod
    @abstractmethod
    def load(cls, load_path, env=None, **kwargs):
        """
        Load the model from file
        :param load_path: (str) the saved parameter location
        :param env: (Gym Envrionment) the new environment to run the loaded model on
            (can be None if you only need prediction from a trained model)
        :param kwargs: extra arguments to change the model when loading
        :raises NotImplementedError: always; subclasses must override
        """
        # Subclasses typically delegate to the shared helper, e.g.:
        # data, param = cls._load_from_file(load_path)
        raise NotImplementedError()
@staticmethod
def _save_to_file(save_path, data=None, params=None):
_, ext = os.path.splitext(save_path)
if ext == "":
save_path += ".pkl"
with open(save_path, "wb") as file:
cloudpickle.dump((data, params), file)
@staticmethod
def _load_from_file(load_path):
if not os.path.exists(load_path):
if os.path.exists(load_path + ".pkl"):
load_path += ".pkl"
else:
raise ValueError("Error: the file {} could not be found".format(load_path))
with open(load_path, "rb") as file:
data, params = cloudpickle.load(file)
return data, params
@staticmethod
def _softmax(x_input):
"""
An implementation of softmax.
:param x_input: (numpy float) input vector
:return: (numpy float) output vector
"""
x_exp = np.exp(x_input.T - np.max(x_input.T, axis=0))
return (x_exp / x_exp.sum(axis=0)).T
class _UnvecWrapper(VecEnvWrapper):
    def __init__(self, venv):
        """
        Unvectorize a vectorized environment, for vectorized environment that only have one environment
        :param venv: (VecEnv) the vectorized environment to wrap
        """
        super().__init__(venv)
        assert venv.num_envs == 1, "Error: cannot unwrap a environment wrapper that has more than one environment."
    def reset(self):
        # Unwrap the single observation from the batch-of-one returned by the vec env.
        return self.venv.reset()[0]
    def step_async(self, actions):
        # Re-wrap the single action into a batch of one for the vec env.
        self.venv.step_async([actions])
    def step_wait(self):
        # NOTE(review): these names look misleading -- a VecEnv step presumably
        # returns (observations, rewards, dones, infos); each is unwrapped to
        # its single element here. Confirm against VecEnv.step_wait.
        actions, values, states, information = self.venv.step_wait()
        return actions[0], values[0], states[0], information[0]
    def render(self, mode='human'):
        return self.venv.render(mode)[0]
class SetVerbosity:
    def __init__(self, verbose=0):
        """
        define a region of code for certain level of verbosity

        Context manager: lowers TensorFlow/gym/logger output levels on entry
        and restores them on exit.
        :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
        """
        self.verbose = verbose
    def __enter__(self):
        # Save the current logging configuration so it can be restored on exit.
        self.tf_level = os.environ.get('TF_CPP_MIN_LOG_LEVEL', '0')
        self.log_level = logger.get_level()
        self.gym_level = gym.logger.MIN_LEVEL
        if self.verbose <= 1:
            # '3' silences TensorFlow's C++ logging entirely.
            os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        if self.verbose <= 0:
            logger.set_level(logger.DISABLED)
            gym.logger.set_level(gym.logger.DISABLED)
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore whatever levels were in effect before entering the block.
        if self.verbose <= 1:
            os.environ['TF_CPP_MIN_LOG_LEVEL'] = self.tf_level
        if self.verbose <= 0:
            logger.set_level(self.log_level)
            gym.logger.set_level(self.gym_level)
|
jfsantos/stable-baselines | stable_baselines/common/__init__.py | # flake8: noqa F403
from stable_baselines.common.console_util import fmt_row, fmt_item, colorize
from stable_baselines.common.dataset import Dataset
from stable_baselines.common.math_util import discount, discount_with_boundaries, explained_variance, explained_variance_2d,\
flatten_arrays, unflatten_vector
from stable_baselines.common.misc_util import zipsame, unpack, EzPickle, set_global_seeds, pretty_eta, RunningAvg,\
boolean_flag, get_wrapper_by_name, relatively_safe_pickle_dump, pickle_load
from stable_baselines.common.base_class import BaseRLModel, SetVerbosity
|
jfsantos/stable-baselines | stable_baselines/ppo1/pposgd_simple.py | <gh_stars>0
from collections import deque
import time
import tensorflow as tf
import numpy as np
from mpi4py import MPI
from stable_baselines.common import Dataset, explained_variance, fmt_row, zipsame, BaseRLModel, SetVerbosity
from stable_baselines import logger
import stable_baselines.common.tf_util as tf_util
from stable_baselines.common.policies import LstmPolicy
from stable_baselines.common.mpi_adam import MpiAdam
from stable_baselines.common.mpi_moments import mpi_moments
from stable_baselines.trpo_mpi.utils import traj_segment_generator, add_vtarg_and_adv, flatten_lists
class PPO1(BaseRLModel):
    """Proximal Policy Optimization (single-machine, MPI-synchronized variant) trained with SGD."""
    def __init__(self, policy, env, gamma=0.99, timesteps_per_actorbatch=256, clip_param=0.2, entcoeff=0.01,
                 optim_epochs=4, optim_stepsize=1e-3, optim_batchsize=64, lam=0.95, adam_epsilon=1e-5,
                 schedule='linear', verbose=0, _init_setup_model=True):
        """
        Learning PPO with Stochastic Gradient Descent
        :param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
        :param policy: (function (str, Gym Spaces, Gym Spaces): TensorFlow Tensor) creates the policy
        :param timesteps_per_actorbatch: (int) timesteps per actor per update
        :param clip_param: (float) clipping parameter epsilon
        :param entcoeff: (float) the entropy loss weight
        :param optim_epochs: (float) the optimizer's number of epochs
        :param optim_stepsize: (float) the optimizer's stepsize
        :param optim_batchsize: (int) the optimizer's the batch size
        :param gamma: (float) discount factor
        :param lam: (float) advantage estimation
        :param adam_epsilon: (float) the epsilon value for the adam optimizer
        :param schedule: (str) The type of scheduler for the learning rate update ('linear', 'constant',
            'double_linear_con', 'middle_drop' or 'double_middle_drop')
        :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
        :param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
        """
        super().__init__(policy=policy, env=env, requires_vec_env=False, verbose=verbose)
        self.gamma = gamma
        self.timesteps_per_actorbatch = timesteps_per_actorbatch
        self.clip_param = clip_param
        self.entcoeff = entcoeff
        self.optim_epochs = optim_epochs
        self.optim_stepsize = optim_stepsize
        self.optim_batchsize = optim_batchsize
        self.lam = lam
        self.adam_epsilon = adam_epsilon
        self.schedule = schedule
        # The following attributes are populated by setup_model().
        self.graph = None
        self.sess = None
        self.policy_pi = None
        self.loss_names = None
        self.lossandgrad = None
        self.adam = None
        self.assign_old_eq_new = None
        self.compute_losses = None
        self.params = None
        self.step = None
        self.proba_step = None
        self.initial_state = None
        if _init_setup_model:
            self.setup_model()
    def setup_model(self):
        """Build the TensorFlow graph: new/old policy networks, the clipped PPO loss, and MPI Adam."""
        with SetVerbosity(self.verbose):
            self.graph = tf.Graph()
            with self.graph.as_default():
                self.sess = tf_util.single_threaded_session(graph=self.graph)
                # Construct network for new policy
                with tf.variable_scope("pi", reuse=False):
                    self.policy_pi = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
                                                 None, reuse=False)
                # Network for old policy
                with tf.variable_scope("oldpi", reuse=False):
                    old_pi = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
                                         None, reuse=False)
                # Target advantage function (if applicable)
                atarg = tf.placeholder(dtype=tf.float32, shape=[None])
                # Empirical return
                ret = tf.placeholder(dtype=tf.float32, shape=[None])
                # learning rate multiplier, updated with schedule
                lrmult = tf.placeholder(name='lrmult', dtype=tf.float32, shape=[])
                # Annealed cliping parameter epislon
                clip_param = self.clip_param * lrmult
                obs_ph = self.policy_pi.obs_ph
                action_ph = self.policy_pi.pdtype.sample_placeholder([None])
                # KL divergence and entropy of the (old, new) policy pair, for logging and the entropy bonus.
                kloldnew = old_pi.proba_distribution.kl(self.policy_pi.proba_distribution)
                ent = self.policy_pi.proba_distribution.entropy()
                meankl = tf.reduce_mean(kloldnew)
                meanent = tf.reduce_mean(ent)
                pol_entpen = (-self.entcoeff) * meanent
                # pnew / pold
                ratio = tf.exp(self.policy_pi.proba_distribution.logp(action_ph) -
                               old_pi.proba_distribution.logp(action_ph))
                # surrogate from conservative policy iteration
                surr1 = ratio * atarg
                surr2 = tf.clip_by_value(ratio, 1.0 - clip_param, 1.0 + clip_param) * atarg
                # PPO's pessimistic surrogate (L^CLIP)
                pol_surr = - tf.reduce_mean(tf.minimum(surr1, surr2))
                vf_loss = tf.reduce_mean(tf.square(self.policy_pi.value_fn[:, 0] - ret))
                total_loss = pol_surr + pol_entpen + vf_loss
                losses = [pol_surr, pol_entpen, vf_loss, meankl, meanent]
                self.loss_names = ["pol_surr", "pol_entpen", "vf_loss", "kl", "ent"]
                self.params = tf_util.get_trainable_vars("pi")
                self.lossandgrad = tf_util.function([obs_ph, old_pi.obs_ph, action_ph, atarg, ret, lrmult],
                                                    losses + [tf_util.flatgrad(total_loss, self.params)])
                self.adam = MpiAdam(self.params, epsilon=self.adam_epsilon, sess=self.sess)
                # Copies the "pi" variables into "oldpi" before each optimization phase.
                self.assign_old_eq_new = tf_util.function(
                    [], [], updates=[tf.assign(oldv, newv) for (oldv, newv) in
                                     zipsame(tf_util.get_globals_vars("oldpi"), tf_util.get_globals_vars("pi"))])
                self.compute_losses = tf_util.function([obs_ph, old_pi.obs_ph, action_ph, atarg, ret, lrmult], losses)
                self.step = self.policy_pi.step
                self.proba_step = self.policy_pi.proba_step
                self.initial_state = self.policy_pi.initial_state
                tf_util.initialize(sess=self.sess)
    def learn(self, total_timesteps, callback=None, seed=None, log_interval=100):
        """Collect rollouts and run clipped-PPO optimization until ``total_timesteps`` is reached; returns self."""
        with SetVerbosity(self.verbose):
            self._setup_learn(seed)
            with self.sess.as_default():
                self.adam.sync()
                # Prepare for rollouts
                seg_gen = traj_segment_generator(self.policy_pi, self.env, self.timesteps_per_actorbatch)
                episodes_so_far = 0
                timesteps_so_far = 0
                iters_so_far = 0
                t_start = time.time()
                # rolling buffer for episode lengths
                lenbuffer = deque(maxlen=100)
                # rolling buffer for episode rewards
                rewbuffer = deque(maxlen=100)
                while True:
                    if callback:
                        callback(locals(), globals())
                    if total_timesteps and timesteps_so_far >= total_timesteps:
                        break
                    if self.schedule == 'constant':
                        cur_lrmult = 1.0
                    elif self.schedule == 'linear':
                        cur_lrmult = max(1.0 - float(timesteps_so_far) / total_timesteps, 0)
                    else:
                        raise NotImplementedError
                    logger.log("********** Iteration %i ************" % iters_so_far)
                    seg = seg_gen.__next__()
                    add_vtarg_and_adv(seg, self.gamma, self.lam)
                    # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
                    obs_ph, action_ph, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"]
                    # predicted value function before udpate
                    vpredbefore = seg["vpred"]
                    # standardized advantage function estimate
                    atarg = (atarg - atarg.mean()) / atarg.std()
                    dataset = Dataset(dict(ob=obs_ph, ac=action_ph, atarg=atarg, vtarg=tdlamret),
                                      shuffle=not issubclass(self.policy, LstmPolicy))
                    optim_batchsize = self.optim_batchsize or obs_ph.shape[0]
                    # set old parameter values to new parameter values
                    self.assign_old_eq_new(sess=self.sess)
                    logger.log("Optimizing...")
                    logger.log(fmt_row(13, self.loss_names))
                    # Here we do a bunch of optimization epochs over the data
                    for _ in range(self.optim_epochs):
                        # list of tuples, each of which gives the loss for a minibatch
                        losses = []
                        for batch in dataset.iterate_once(optim_batchsize):
                            *newlosses, grad = self.lossandgrad(batch["ob"], batch["ob"], batch["ac"], batch["atarg"],
                                                                batch["vtarg"], cur_lrmult, sess=self.sess)
                            self.adam.update(grad, self.optim_stepsize * cur_lrmult)
                            losses.append(newlosses)
                        logger.log(fmt_row(13, np.mean(losses, axis=0)))
                    logger.log("Evaluating losses...")
                    losses = []
                    for batch in dataset.iterate_once(optim_batchsize):
                        newlosses = self.compute_losses(batch["ob"], batch["ob"], batch["ac"], batch["atarg"],
                                                        batch["vtarg"], cur_lrmult, sess=self.sess)
                        losses.append(newlosses)
                    mean_losses, _, _ = mpi_moments(losses, axis=0)
                    logger.log(fmt_row(13, mean_losses))
                    for (loss_val, name) in zipsame(mean_losses, self.loss_names):
                        logger.record_tabular("loss_" + name, loss_val)
                    logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret))
                    # local values
                    lrlocal = (seg["ep_lens"], seg["ep_rets"])
                    # list of tuples
                    listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal)
                    lens, rews = map(flatten_lists, zip(*listoflrpairs))
                    lenbuffer.extend(lens)
                    rewbuffer.extend(rews)
                    logger.record_tabular("EpLenMean", np.mean(lenbuffer))
                    logger.record_tabular("EpRewMean", np.mean(rewbuffer))
                    logger.record_tabular("EpThisIter", len(lens))
                    episodes_so_far += len(lens)
                    timesteps_so_far += seg["total_timestep"]
                    iters_so_far += 1
                    logger.record_tabular("EpisodesSoFar", episodes_so_far)
                    logger.record_tabular("TimestepsSoFar", timesteps_so_far)
                    logger.record_tabular("TimeElapsed", time.time() - t_start)
                    # Only MPI rank 0 writes the tabular log to avoid duplicated output.
                    if self.verbose >= 1 and MPI.COMM_WORLD.Get_rank() == 0:
                        logger.dump_tabular()
        return self
    def predict(self, observation, state=None, mask=None):
        """Return (action, next_state) for ``observation``; state/mask default to the policy's initial values."""
        if state is None:
            state = self.initial_state
        if mask is None:
            mask = [False for _ in range(self.n_envs)]
        observation = np.array(observation).reshape((-1,) + self.observation_space.shape)
        actions, _, states, _ = self.step(observation, state, mask)
        return actions, states
    def action_probability(self, observation, state=None, mask=None):
        """Return the policy's action probability distribution for ``observation``."""
        if state is None:
            state = self.initial_state
        if mask is None:
            mask = [False for _ in range(self.n_envs)]
        observation = np.array(observation).reshape((-1,) + self.observation_space.shape)
        return self.proba_step(observation, state, mask)
    def save(self, save_path):
        """Persist hyperparameters and session parameters to ``save_path`` (``.pkl`` appended if needed)."""
        data = {
            "gamma": self.gamma,
            "timesteps_per_actorbatch": self.timesteps_per_actorbatch,
            "clip_param": self.clip_param,
            "entcoeff": self.entcoeff,
            "optim_epochs": self.optim_epochs,
            "optim_stepsize": self.optim_stepsize,
            "optim_batchsize": self.optim_batchsize,
            "lam": self.lam,
            "adam_epsilon": self.adam_epsilon,
            "schedule": self.schedule,
            "verbose": self.verbose,
            "policy": self.policy,
            "observation_space": self.observation_space,
            "action_space": self.action_space,
            "n_envs": self.n_envs,
            "_vectorize_action": self._vectorize_action
        }
        params = self.sess.run(self.params)
        self._save_to_file(save_path, data=data, params=params)
    @classmethod
    def load(cls, load_path, env=None, **kwargs):
        """Rebuild a PPO1 model from ``load_path``, optionally attaching a new ``env``."""
        data, params = cls._load_from_file(load_path)
        # Instantiate without building the graph, restore attributes, then build and load weights.
        model = cls(None, env=None, _init_setup_model=False)
        model.__dict__.update(data)
        model.__dict__.update(kwargs)
        model.set_env(env)
        model.setup_model()
        restores = []
        for param, loaded_p in zip(model.params, params):
            restores.append(param.assign(loaded_p))
        model.sess.run(restores)
        return model
|
jfsantos/stable-baselines | stable_baselines/common/identity_env.py | <filename>stable_baselines/common/identity_env.py
import numpy as np
from gym import Env
from gym.spaces import Discrete, MultiDiscrete, MultiBinary, Box
class IdentityEnv(Env):
    def __init__(self, dim, ep_length=100):
        """
        Identity environment for testing purposes
        :param dim: (int) the size of the dimensions you want to learn
        :param ep_length: (int) the length of each episodes in timesteps
        """
        self.action_space = Discrete(dim)
        # Observation and action spaces coincide: the agent must echo the
        # observed value. Set here (not in reset) so the attribute exists as
        # soon as the env is constructed.
        self.observation_space = self.action_space
        self.ep_length = ep_length
        self.current_step = 0
        self.reset()
    def reset(self):
        """Reset the step counter, draw a fresh state and return it."""
        self.current_step = 0
        self._choose_next_state()
        return self.state
    def step(self, action):
        """Advance one timestep; reward is 1 iff ``action`` matches the current state."""
        reward = self._get_reward(action)
        self._choose_next_state()
        self.current_step += 1
        done = self.current_step >= self.ep_length
        return self.state, reward, done, {}
    def _choose_next_state(self):
        # Sample the next observation uniformly from the action space.
        self.state = self.action_space.sample()
    def _get_reward(self, action):
        return 1 if self.state == action else 0
    def render(self, mode='human'):
        pass
class IdentityEnvMultiDiscrete(Env):
    def __init__(self, dim, ep_length=100):
        """
        Identity environment for testing purposes
        :param dim: (int) the size of the dimensions you want to learn
        :param ep_length: (int) the length of each episodes in timesteps
        """
        self.action_space = MultiDiscrete([dim, dim])
        self.dim = dim
        # Observations are a two-hot encoding of the sampled (a, b) action pair.
        self.observation_space = Box(low=0, high=1, shape=(dim * 2,), dtype=int)
        self.ep_length = ep_length
        self.reset()
    def reset(self):
        # NOTE(review): unlike IdentityEnv, no step counter is tracked here and
        # step() never reports done -- episodes never terminate. Confirm intended.
        self._choose_next_state()
        return self.state
    def step(self, action):
        reward = self._get_reward(action)
        self._choose_next_state()
        return self.state, reward, False, {}
    def _choose_next_state(self):
        # Encode a sampled MultiDiscrete action as two one-hot halves of length dim.
        state = np.zeros(self.dim*2, dtype=int)
        mask = self.action_space.sample()
        state[mask[0]] = 1
        state[mask[1] + self.dim] = 1
        self.state = state
    def _get_reward(self, action):
        # Reward 1 only when the action reproduces the full two-hot state.
        return 1 if np.all(self.state == action) else 0
    def render(self, mode='human'):
        pass
class IdentityEnvMultiBinary(Env):
    def __init__(self, dim, ep_length=100):
        """
        Identity environment for testing purposes
        :param dim: (int) the size of the dimensions you want to learn
        :param ep_length: (int) the length of each episodes in timesteps
        """
        self.action_space = MultiBinary(dim)
        self.observation_space = Box(low=0, high=1, shape=(dim,), dtype=int)
        self.ep_length = ep_length
        self.reset()
    def reset(self):
        # NOTE(review): no step counter -- step() always returns done=False,
        # so episodes never terminate. Confirm intended.
        self._choose_next_state()
        return self.state
    def step(self, action):
        reward = self._get_reward(action)
        self._choose_next_state()
        return self.state, reward, False, {}
    def _choose_next_state(self):
        self.state = self.action_space.sample()
    def _get_reward(self, action):
        # Reward 1 only when every bit of the action matches the state.
        return 1 if np.all(self.state == action) else 0
    def render(self, mode='human'):
        pass
|
jfsantos/stable-baselines | tests/test_save.py | import os
import pytest
from stable_baselines.a2c import A2C
from stable_baselines.acer import ACER
from stable_baselines.acktr import ACKTR
from stable_baselines.deepq import DeepQ
from stable_baselines.ppo1 import PPO1
from stable_baselines.ppo2 import PPO2
from stable_baselines.trpo_mpi import TRPO
from stable_baselines.common import set_global_seeds
from stable_baselines.common.identity_env import IdentityEnv
from stable_baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.deepq import models as deepq_models
# Number of prediction steps used for the determinism check and for
# measuring the average reward of a trained model.
N_TRIALS = 2000
# (model class, policy) pairs exercised by the save/load round-trip test.
MODEL_POLICY_LIST = [
    (A2C, MlpPolicy),
    (ACER, MlpPolicy),
    (ACKTR, MlpPolicy),
    (DeepQ, deepq_models.mlp([32])),
    (PPO1, MlpPolicy),
    (PPO2, MlpPolicy),
    (TRPO, MlpPolicy)
]
def _average_reward(model, env, n_trials):
    """Average reward over *n_trials* greedy steps from a fresh episode, seeded with 0."""
    total_reward = 0
    obs = env.reset()
    set_global_seeds(0)
    for _ in range(n_trials):
        action, _ = model.predict(obs)
        obs, reward, _, _ = env.step(action)
        total_reward += reward
    return sum(total_reward) / n_trials
@pytest.mark.slow
@pytest.mark.parametrize("model_policy", MODEL_POLICY_LIST)
def test_model_manipulation(model_policy):
    """
    Test if the algorithm (with a given policy) can be loaded and saved without any issues, the environment switching
    works and that the action prediction works
    :param model_policy: (BaseRLModel, Object) A model, policy pair
    """
    model_class, policy = model_policy
    try:
        env = DummyVecEnv([lambda: IdentityEnv(10)])
        # check the env is deterministic
        action = [env.action_space.sample()]
        set_global_seeds(0)
        obs = env.step(action)[0]
        for _ in range(N_TRIALS):
            set_global_seeds(0)
            assert obs == env.step(action)[0], "Error: environment tested not deterministic with the same seed"
        # create and train
        model = model_class(policy=policy, env=env)
        model.learn(total_timesteps=50000)
        # predict and measure the acc reward
        acc_reward = _average_reward(model, env, N_TRIALS)
        # saving
        model.save("./test_model")
        del model, env
        # loading
        model = model_class.load("./test_model")
        # changing environment (note: this can be done at loading)
        env = DummyVecEnv([lambda: IdentityEnv(10)])
        model.set_env(env)
        # predict the same output before saving
        loaded_acc_reward = _average_reward(model, env, N_TRIALS)
        assert abs(acc_reward - loaded_acc_reward) < 0.1, "Error: the prediction seems to have changed between " \
                                                          "loading and saving"
        # learn post loading
        model.learn(total_timesteps=1000)
        # validate no reset post learning
        loaded_acc_reward = _average_reward(model, env, N_TRIALS)
        assert abs(acc_reward - loaded_acc_reward) < 0.1, "Error: the prediction seems to have changed between " \
                                                          "pre learning and post learning"
        # predict new values
        obs = env.reset()
        for _ in range(N_TRIALS):
            action, _ = model.predict(obs)
            obs, _, _, _ = env.step(action)
        del model, env
    finally:
        # BaseRLModel._save_to_file appends ".pkl" when the path has no
        # extension, so the artifact on disk is "./test_model.pkl"; the
        # previous cleanup only checked "./test_model" and left it behind.
        for path in ("./test_model", "./test_model.pkl"):
            if os.path.exists(path):
                os.remove(path)
|
jfsantos/stable-baselines | stable_baselines/common/running_stat.py | import numpy as np
class RunningStat(object):
    """
    Streaming mean and standard deviation of a data stream, computed with
    Welford's online algorithm.
    http://www.johndcook.com/blog/standard_deviation/
    """
    def __init__(self, shape):
        """
        :param shape: (tuple) the shape of the data stream's output
        """
        self._step = 0
        self._mean = np.zeros(shape)
        self._std = np.zeros(shape)
    def push(self, value):
        """
        Fold one observation into the running statistics.
        :param value: (numpy Number) the data
        """
        value = np.asarray(value)
        assert value.shape == self._mean.shape
        self._step += 1
        if self._step == 1:
            # First sample: the mean is the sample itself, M2 stays zero.
            self._mean[...] = value
            return
        previous_mean = self._mean.copy()
        self._mean[...] = previous_mean + (value - previous_mean) / self._step
        # _std accumulates the sum of squared deviations (Welford's M2).
        self._std[...] += (value - previous_mean) * (value - self._mean)
    @property
    def n(self):
        """
        the number of data points
        :return: (int)
        """
        return self._step
    @property
    def mean(self):
        """
        the average value
        :return: (float)
        """
        return self._mean
    @property
    def var(self):
        """
        the variation of the data points
        :return: (float)
        """
        if self._step > 1:
            return self._std / (self._step - 1)
        # With fewer than two samples, fall back to mean**2 (original convention).
        return np.square(self._mean)
    @property
    def std(self):
        """
        the standard deviation of the data points
        :return: (float)
        """
        return np.sqrt(self.var)
    @property
    def shape(self):
        """
        the shape of the data points
        :return: (tuple)
        """
        return self._mean.shape
|
LanceSuen/WundergroundHistoryDataParser | crawl.py | <gh_stars>1-10
# Author : LanceSuen
import time
import csv
import sys
from datetime import date, timedelta
from selenium import webdriver
from selenium.webdriver import Chrome
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.proxy import Proxy, ProxyType
# Please change the configuration
PATH_TO_UBLOCK = r'C:\\Users\\Admin\\Desktop\\Crawler\\uBlock'  # uBlock extension dir, loaded into Chrome
PATH_TO_WEBDRIVER = './chromedriver.exe'  # chromedriver binary matching the installed Chrome
LOCATION_CODE = 'ZSPD'  # Wunderground location code used in the history URL (presumably an ICAO airport code)
def requestAndSave(date,driver):
    """
    Fetch the Wunderground daily-history page for *date* and append the
    observation table rows to a per-year CSV file.
    :param date: (str) ISO date string 'YYYY-MM-DD' (also appended to each row)
    :param driver: (selenium WebDriver) browser used to load the page
    """
    url = 'https://www.wunderground.com/history/daily/' + LOCATION_CODE + '/date/' + date
    driver.get(url)
    print(url)
    time.sleep(7)  # give the JavaScript-rendered page time to build the table
    tablelist = driver.find_elements_by_xpath('//*[@id="inner-content"]/div[2]/div[1]/div[5]/div[1]/div/lib-city-history-observation/div/div[2]/table')
    table = tablelist[0]
    # date[:-6] strips '-MM-DD', so rows are grouped into '<YYYY>.csv'.
    with open(date[:-6]+'.csv', 'a', newline='') as csvfile:
        wr = csv.writer(csvfile)
        # Skip the header row, then write each cell's text plus the date.
        for row in table.find_elements_by_css_selector('tr')[1:]:
            temp = [d.text for d in row.find_elements_by_css_selector('td')]
            temp.append(date)
            #print(temp)
            wr.writerow(temp)
def main():
    """Crawl daily weather history for every date in the year range given on the command line.
    Usage: crawl.py <start_year> <end_year>
    """
    # Options
    chrome_options = webdriver.ChromeOptions()
    #chrome_options.add_argument("--proxy-server=socks5://127.0.0.1:10808")
    chrome_options.add_argument('load-extension=' + PATH_TO_UBLOCK)
    #chrome_options.add_argument('--headless')
    chrome_options.add_argument('--disable-gpu')
    chrome_options.add_argument("--disable-dev-shm-usage")
    chrome_options.add_argument("--no-sandbox")
    driver = Chrome(PATH_TO_WEBDRIVER,chrome_options=chrome_options)
    #driver = Chrome()
    y1 = int(sys.argv[1])
    y2 = int(sys.argv[2])
    d1 = date(y1, 1, 1)
    d2 = date(y2, 12, 31)
    delta = d2 - d1
    for i in range(delta.days + 1):
        print('Crawling ' + str(d1 + timedelta(days=i)))
        try:
            requestAndSave(str(d1 + timedelta(days=i)), driver)
        except Exception:
            # Best-effort: one failed day should not abort the whole crawl.
            # The previous bare `except:` also swallowed KeyboardInterrupt
            # and SystemExit, making Ctrl-C unable to stop the crawler.
            print('No DATA on ' + str(d1 + timedelta(days=i)))
if __name__ == "__main__":
    main()
|
davidpelaez/oso | languages/python/oso/tests/test_polar_roles.py | import pytest
import timeit
import os
from sqlalchemy import create_engine
from sqlalchemy.types import String
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from oso import Oso, OsoError
from polar.exceptions import RolesValidationError
from .polar_roles_sqlalchemy_helpers import (
resource_role_class,
assign_role,
remove_role,
)
Base = declarative_base(name="RoleBase")
class Org(Base): # type: ignore
    """SQLAlchemy model: an organization, identified by name."""
    __tablename__ = "orgs"
    name = Column(String(), primary_key=True)
    def __repr__(self):
        return f"Org({self.name})"
class User(Base): # type: ignore
    """SQLAlchemy model: a user, identified by name; role collections are added by the role mixins."""
    __tablename__ = "users"
    name = Column(String(), primary_key=True)
    def __repr__(self):
        return f"User({self.name})"
class Repo(Base): # type: ignore
    """SQLAlchemy model: a repository belonging to exactly one Org."""
    __tablename__ = "repos"
    name = Column(String(256), primary_key=True)
    org_name = Column(String, ForeignKey("orgs.name"))
    org = relationship("Org", backref="repos", lazy=True) # type: ignore
    def __repr__(self):
        return f"Repo({self.name}) <- {self.org}"
class Issue(Base): # type: ignore
    """SQLAlchemy model: an issue belonging to exactly one Repo."""
    __tablename__ = "issues"
    name = Column(String(256), primary_key=True)
    repo_name = Column(String(256), ForeignKey("repos.name"))
    repo = relationship("Repo", backref="issues", lazy=True) # type: ignore
    def __repr__(self):
        return f"Issue({self.name}) <- {self.repo}"
# Join model linking Users to Repos with a role name of "reader" or "writer".
RepoRoleMixin = resource_role_class(User, Repo, ["reader", "writer"])
class RepoRole(Base, RepoRoleMixin): # type: ignore
    pass
# Join model linking Users to Orgs with a role name of "owner" or "member".
OrgRoleMixin = resource_role_class(User, Org, ["owner", "member"])
class OrgRole(Base, OrgRoleMixin): # type: ignore
    pass
@pytest.fixture
def init_oso():
    """Fresh in-memory SQLite DB plus an Oso instance with all mapped classes registered."""
    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()
    oso = Oso()
    # Register every mapped model so policies can use them as specializers.
    for m in Base.registry.mappers:
        oso.register_class(m.class_)
    return (oso, session)
@pytest.fixture
def sample_data(init_oso):
    """Seed the DB with two orgs, three repos, two issues and three users; return them by name."""
    _, session = init_oso
    apple = Org(name="apple")
    osohq = Org(name="osohq")
    ios = Repo(name="ios", org=apple)
    oso_repo = Repo(name="oso", org=osohq)
    demo_repo = Repo(name="demo", org=osohq)
    ios_laggy = Issue(name="laggy", repo=ios)
    oso_bug = Issue(name="bug", repo=oso_repo)
    leina = User(name="leina")
    steve = User(name="steve")
    gabe = User(name="gabe")
    objs = {
        "leina": leina,
        "steve": steve,
        "gabe": gabe,
        "apple": apple,
        "osohq": osohq,
        "ios": ios,
        "oso_repo": oso_repo,
        "demo_repo": demo_repo,
        "ios_laggy": ios_laggy,
        "oso_bug": oso_bug,
    }
    for obj in objs.values():
        session.add(obj)
    session.commit()
    return objs
def test_empty_role(init_oso):
    """A role with neither permissions nor implications is rejected at enable_roles()."""
    # defining role with no permissions/implications throws an error
    oso, _ = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = [
            "invite"
        ] and
        roles = {
            member: {}
        };
    actor_has_role_for_resource(_, _, _);
    """
    oso.load_str(policy)
    with pytest.raises(OsoError) as e:
        oso.enable_roles()
    assert e.match("Must define actions or implications for a role.")
@pytest.mark.skip(reason="TODO: More validation")
def test_bad_namespace_perm(init_oso):
    """A permission referencing another resource's namespace ("repo:pull" inside "org") should be rejected."""
    # - assigning permission with bad namespace throws an error
    oso, _ = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = [
            "invite"
        ] and
        roles = {
            member: {
                permissions: ["repo:pull"]
            }
        };
    actor_has_role_for_resource(_, _, _);
    """
    oso.load_str(policy)
    with pytest.raises(OsoError):
        oso.enable_roles()
def test_resource_with_roles_no_actions(init_oso, sample_data):
    """A resource may define roles without actions when the role's power comes from another resource."""
    # - only define roles, no actions (role has actions/implications from different resource)
    oso, session = init_oso
    policy = """
    resource(_type: Org, "org", _, roles) if
        roles = {
            member: {
                implies: ["repo:reader"]
            }
        };
    resource(_type: Repo, "repo", actions, roles) if
        actions = [
            "push",
            "pull"
        ] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };
    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;
    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};
    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.enable_roles()
    leina = sample_data["leina"]
    steve = sample_data["steve"]
    osohq = sample_data["osohq"]
    oso_repo = sample_data["oso_repo"]
    # leina gets "pull" via org:member -> repo:reader implication; steve directly.
    assign_role(leina, osohq, "member", session)
    assign_role(steve, oso_repo, "reader", session)
    session.commit()
    assert oso.is_allowed(leina, "pull", oso_repo)
    assert oso.is_allowed(steve, "pull", oso_repo)
def test_duplicate_resource_name(init_oso):
    """Two resource/4 definitions sharing the same name string are rejected at enable_roles()."""
    # - duplicate resource name throws an error
    oso, _ = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };
    # DUPLICATE RESOURCE NAME "org"
    resource(_type: Repo, "org", actions, roles) if
        actions = [
            "push",
            "pull"
        ] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };
    actor_has_role_for_resource(_, _, _);
    """
    oso.load_str(policy)
    with pytest.raises(OsoError) as e:
        oso.enable_roles()
    assert e.match("Duplicate resource name org.")
# TODO(gj): Test that this is fine in Oso Roles.
@pytest.mark.skip("TODO: relationship validation")
def test_nested_dot_relationship(init_oso):
    """A parent_child rule with a chained dot lookup (issue.repo.org) should currently be rejected."""
    # - multiple dot lookups throws an error for now
    oso, _ = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };
    resource(_type: Issue, "issue", actions, roles) if
        actions = [
            "edit"
        ];
    parent_child(parent_org, issue: Issue) if
        issue.repo.org = parent_org;
    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    actor_has_role_for_resource(_, _, _);
    """
    oso.load_str(policy)
    with pytest.raises(OsoError):
        oso.enable_roles()
@pytest.mark.skip("TODO: relationship validation")
def test_bad_relationship_lookup(init_oso):
    """A parent_child rule looking up a nonexistent attribute should currently be rejected."""
    # - nonexistent attribute lookup throws an error for now
    oso, _ = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };
    resource(_type: Repo, "repo", actions, {}) if
        actions = [
            "pull"
        ];
    parent_child(parent_org: Org, repo: Repo) if
        # INCORRECT FIELD NAME
        repo.organization = parent_org;
    actor_has_role_for_resource(_, _, _);
    """
    oso.load_str(policy)
    with pytest.raises(OsoError):
        oso.enable_roles()
@pytest.mark.skip("TODO: validation")
def test_relationship_without_specializer(init_oso):
    """A parent_child rule whose parent argument lacks a type specializer should be rejected."""
    oso, _ = init_oso
    policy = """
    resource(_type: Repo, "repo", actions, {}) if
        actions = [
            "pull"
        ];
    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;
    actor_has_role_for_resource(_, _, _);
    """
    oso.load_str(policy)
    with pytest.raises(OsoError):
        oso.enable_roles()
def test_relationship_without_resources(init_oso):
    """Defining parent_child without any resource/4 predicate fails at enable_roles()."""
    oso, _ = init_oso
    policy = """
    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;
    actor_has_role_for_resource(_, _, _);
    """
    oso.load_str(policy)
    with pytest.raises(OsoError) as e:
        oso.enable_roles()
    assert e.match(
        r"Need to define at least one `resource\(type, name, actions, roles\)` predicate to use Oso Roles"
    )
def test_role_namespaces(init_oso, sample_data):
    """Cross-resource role references ("repo:reader") are namespaced and imply access on the child resource."""
    oso, session = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = [
            "invite", "create_repo"
        ] and
        roles = {
            owner: {
                permissions: ["invite"],
                implies: ["member", "repo:reader"]
            },
            member: {
                permissions: ["create_repo"]
            }
        };
    resource(_type: Repo, "repo", actions, roles) if
        actions = [
            "push",
            "pull"
        ] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };
    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;
    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};
    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.enable_roles()
    osohq = sample_data["osohq"]
    oso_repo = sample_data["oso_repo"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]
    gabe = sample_data["gabe"]
    assign_role(leina, osohq, "owner", session)
    assign_role(steve, oso_repo, "reader", session)
    assign_role(gabe, osohq, "member", session)
    session.commit()
    # owner implies member and repo:reader, so leina gets org and repo access.
    assert oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(leina, "pull", oso_repo)
    # direct repo:reader grants pull only.
    assert not oso.is_allowed(steve, "invite", osohq)
    assert oso.is_allowed(steve, "pull", oso_repo)
    # member grants create_repo only; it does not imply repo:reader.
    assert not oso.is_allowed(gabe, "invite", osohq)
    assert oso.is_allowed(gabe, "create_repo", osohq)
    assert not oso.is_allowed(gabe, "pull", oso_repo)
def test_resource_actions(init_oso):
    """A resource predicate that declares actions but no roles is valid."""
    # only define actions, not roles
    oso_instance, _session = init_oso
    actions_only_policy = """
    resource(_type: Org, "org", actions, {}) if
        actions = [
            "invite"
        ];
    actor_has_role_for_resource(_, _, _);
    """
    # Neither call should raise.
    oso_instance.load_str(actions_only_policy)
    oso_instance.enable_roles()
def test_duplicate_action(init_oso):
    """Declaring the same action twice for one resource errors on enable_roles()."""
    # - duplicate action
    oso_instance, _session = init_oso
    duplicate_action_policy = """
    resource(_type: Org, "org", actions, _roles) if
        actions = [
            "invite",
            "invite"
        ];
    actor_has_role_for_resource(_, _, _);
    """
    oso_instance.load_str(duplicate_action_policy)
    with pytest.raises(OsoError) as excinfo:
        oso_instance.enable_roles()
    assert excinfo.match("Duplicate action invite for Org.")
@pytest.mark.skip(reason="TODO: More validation")
def test_undeclared_permission(init_oso):
    """Granting a permission that was never listed in the resource's actions should error."""
    # - assign permission that wasn't declared
    oso, _ = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = [
            "invite"
        ] and
        roles = {
            member: {
                permissions: ["create_repo"]
            }
        };
    actor_has_role_for_resource(_, _, _);
    """
    oso.load_str(policy)
    with pytest.raises(OsoError):
        oso.enable_roles()
@pytest.mark.skip(reason="TODO: More validation")
def test_undeclared_role(init_oso):
    """Implying a role that was never declared should error on enable_roles()."""
    # - imply role that wasn't declared
    oso, _ = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = [
            "invite"
        ] and
        roles = {
            member: {
                implies: ["fake_role"]
            }
        };
    actor_has_role_for_resource(_, _, _);
    """
    oso.load_str(policy)
    with pytest.raises(OsoError):
        oso.enable_roles()
@pytest.mark.skip("TODO: relationship validation")
def test_role_implication_without_relationship(init_oso):
    """Implying a role on another resource without a parent_child relationship should error."""
    # - imply role without valid relationship
    oso, _ = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = [
            "invite"
        ] and
        roles = {
            member: {
                implies: ["repo:reader"]
            }
        };
    resource(_type: Repo, "repo", actions, roles) if
        actions = [
            "push",
            "pull"
        ] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };
    actor_has_role_for_resource(_, _, _);
    """
    oso.load_str(policy)
    with pytest.raises(OsoError):
        oso.enable_roles()
@pytest.mark.skip("TODO: relationship validation")
def test_role_permission_without_relationship(init_oso):
    """Granting a permission on another resource without a parent_child relationship should error."""
    # - assign permission without valid relationship
    oso, _ = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = [
            "invite"
        ] and
        roles = {
            member: {
                permissions: ["repo:push"]
            }
        };
    resource(_type: Repo, "repo", actions, roles) if
        actions = [
            "push",
            "pull"
        ];
    actor_has_role_for_resource(_, _, _);
    """
    oso.load_str(policy)
    with pytest.raises(OsoError):
        oso.enable_roles()
@pytest.mark.skip(reason="TODO: More validation")
def test_invalid_role_permission(init_oso):
    """Granting a child-resource permission on a parent role should error when the child has its own roles."""
    # assigning permission on related role type errors if role exists for permission resource
    oso, _ = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = [
            "invite"
        ] and
        roles = {
            member: {
                # THIS IS NOT ALLOWED
                permissions: ["repo:push"]
            }
        };
    resource(_type: Repo, "repo", actions, roles) if
        actions = [
            "push",
            "pull"
        ] and
        roles = {
            reader: {
                permissions: ["push"]
            }
        };
    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;
    actor_has_role_for_resource(_, _, _);
    """
    oso.load_str(policy)
    with pytest.raises(OsoError):
        oso.enable_roles()
@pytest.mark.skip(reason="TODO: More validation")
def test_permission_assignment_to_implied_role(init_oso):
    """Assigning the same permission to a role and to a role that implies it should error."""
    # assigning the same permission to two roles where one implies the other throws an error
    oso, _ = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = [
            "invite"
        ] and
        roles = {
            member: {
                permissions: ["invite"]
            },
            owner: {
                permissions: ["invite"],
                implies: ["org:member"]
            }
        };
    actor_has_role_for_resource(_, _, _);
    """
    oso.load_str(policy)
    with pytest.raises(OsoError):
        oso.enable_roles()
def test_incorrect_arity_resource(init_oso):
    """A 3-arity resource predicate is not recognized, so no valid resource is defined."""
    # - use resource predicate with incorrect arity
    oso_instance, _session = init_oso
    three_arg_resource_policy = """
    resource(_type: Org, "org", actions) if
        actions = [
            "invite"
        ];
    actor_has_role_for_resource(_, _, _);
    """
    oso_instance.load_str(three_arg_resource_policy)
    with pytest.raises(OsoError) as excinfo:
        oso_instance.enable_roles()
    # The 3-arity rule is ignored, leaving zero valid resource definitions.
    assert excinfo.match(
        r"Need to define at least one `resource\(type, name, actions, roles\)` predicate to use Oso Roles."
    )
# TODO(gj): should we try catching this?
@pytest.mark.skip(reason="TODO: More validation")
def test_incorrect_arity_resource_multiple(init_oso):
    """A wrong-arity resource predicate alongside a valid one should still error."""
    oso_instance, _session = init_oso
    mixed_arity_policy = """
    resource(_type: Org, "org", actions) if actions = ["invite"];
    resource(_type: Repo, "repo", actions, {}) if actions = ["invite"];
    actor_has_role_for_resource(_, _, _);
    """
    oso_instance.load_str(mixed_arity_policy)
    with pytest.raises(OsoError):
        oso_instance.enable_roles()
def test_undefined_resource_arguments(init_oso):
    """A resource predicate that binds neither actions nor roles should error on enable_roles()."""
    # - use resource predicate without defining actions/roles
    oso, _ = init_oso
    policy = """
    resource(_type: Org, "org", _actions, _roles);
    actor_has_role_for_resource(_, _, _);
    """
    oso.load_str(policy)
    with pytest.raises(OsoError) as e:
        oso.enable_roles()
    # `assert` added for consistency with the sibling tests in this file;
    # `ExceptionInfo.match` raises on mismatch either way, but the bare call
    # read like a forgotten assertion.
    assert e.match("Must define actions or roles.")
def test_wrong_type_resource_arguments(init_oso):
    """An invalid key inside a role definition ("actions" instead of "permissions") should error."""
    # - use resource predicate with field types
    oso, _ = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                # incorrect key name
                actions: ["invite"]
            }
        };
    actor_has_role_for_resource(_, _, _);
    """
    oso.load_str(policy)
    with pytest.raises(OsoError) as e:
        oso.enable_roles()
    assert e.match("Role definition contains invalid key: actions")
# Overlapping role assignments:
def test_overlapping_permissions(init_oso, sample_data):
    """Holding both a more and a less permissive role grants the union of their permissions."""
    # - Assigning a more permissive and less permissive role to the same user grants most permissive access
    oso, session = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"],
                implies: ["repo:reader"]
            }
        };
    resource(_type: Repo, "repo", actions, roles) if
        actions = [
            "push",
            "pull"
        ] and
        roles = {
            reader: {
                permissions: ["pull"]
            },
            writer: {
                # writer is more permissive than reader
                permissions: ["push"],
                implies: ["reader"]
            }
        };
    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;
    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};
    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.enable_roles()
    osohq = sample_data["osohq"]
    oso_repo = sample_data["oso_repo"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]
    # writer is more permissive than member
    assign_role(leina, osohq, "member", session=session)
    assign_role(steve, osohq, "member", session=session)
    assign_role(leina, oso_repo, "writer", session=session)
    # Commit before asserting, for consistency with the other tests in this
    # file (previously this test relied on session autoflush alone).
    session.commit()
    # leina holds member (implies repo:reader) AND writer: union of permissions.
    assert oso.is_allowed(leina, "pull", oso_repo)
    assert oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(leina, "push", oso_repo)
    # steve holds only member: reader-level repo access, no push.
    assert oso.is_allowed(steve, "pull", oso_repo)
    assert oso.is_allowed(steve, "invite", osohq)
    assert not oso.is_allowed(steve, "push", oso_repo)
# Homogeneous role-permission assignment:
def test_homogeneous_role_perm(init_oso, sample_data):
    """Adding/removing a same-resource permission on a role grants/revokes assignee access."""
    # - Adding a permission of same resource type to a role grants assignee access
    oso, session = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };
    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};
    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.enable_roles()
    osohq = sample_data["osohq"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]
    assign_role(leina, osohq, "member", session=session)
    session.commit()
    assert oso.is_allowed(leina, "invite", osohq)
    assert not oso.is_allowed(steve, "invite", osohq)
    # - Removing a permission of same resource type from a role revokes assignee access
    new_policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite", "list_repos"] and
        roles = {
            member: {
                # REMOVE INVITE AND ADD LIST_REPOS
                permissions: ["list_repos"]
            }
        };
    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};
    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    # Reload the changed policy; role assignments in the DB are unchanged.
    oso.clear_rules()
    oso.load_str(new_policy)
    oso.enable_roles()
    assert not oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(leina, "list_repos", osohq)
    assert not oso.is_allowed(steve, "list_repos", osohq)
# Parent->child role-permission assignment:
def test_parent_child_role_perm(init_oso, sample_data):
    """Adding/removing a child-resource permission on a parent role grants/revokes child access."""
    # - Adding a permission of child resource type to a role grants assignee access
    oso, session = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite", "repo:pull"]
            }
        };
    resource(_type: Repo, "repo", actions, {}) if
        actions = [
            "push",
            "pull"
        ];
    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;
    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};
    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.enable_roles()
    osohq = sample_data["osohq"]
    oso_repo = sample_data["oso_repo"]
    ios = sample_data["ios"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]
    assign_role(leina, osohq, "member", session=session)
    session.commit()
    assert oso.is_allowed(leina, "invite", osohq)
    # "repo:pull" on the org member role reaches repos of that org only.
    assert oso.is_allowed(leina, "pull", oso_repo)
    assert not oso.is_allowed(leina, "pull", ios)
    assert not oso.is_allowed(steve, "pull", oso_repo)
    # - Removing a permission of child resource type from a role revokes assignee access
    new_policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };
    resource(_type: Repo, "repo", actions, {}) if
        actions = [
            "push",
            "pull"
        ];
    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;
    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};
    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.clear_rules()
    oso.load_str(new_policy)
    oso.enable_roles()
    assert not oso.is_allowed(leina, "pull", oso_repo)
    assert oso.is_allowed(leina, "invite", osohq)
# Grandparent->child role-permission assignment:
def test_grandparent_child_role_perm(init_oso, sample_data):
    """Grandchild-resource permissions on a grandparent role grant/revoke access through two parent_child hops."""
    # - Adding a permission of grandchild resource type to a role grants assignee access (without intermediate resource)
    oso, session = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["list_repos", "invite"] and
        roles = {
            member: {
                permissions: ["list_repos", "issue:edit"]
            },
            owner: {
                permissions: ["invite"],
                implies: ["member"]
            }
        };
    resource(_type: Issue, "issue", actions, {}) if
        actions = [
            "edit"
        ];
    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;
    parent_child(parent_repo: Repo, issue: Issue) if
        issue.repo = parent_repo;
    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};
    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.enable_roles()
    osohq = sample_data["osohq"]
    oso_bug = sample_data["oso_bug"]
    ios_laggy = sample_data["ios_laggy"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]
    assign_role(leina, osohq, "member", session=session)
    session.commit()
    assert oso.is_allowed(leina, "list_repos", osohq)
    # "issue:edit" reaches issues of the org's repos only.
    assert oso.is_allowed(leina, "edit", oso_bug)
    assert not oso.is_allowed(leina, "edit", ios_laggy)
    assert not oso.is_allowed(leina, "invite", osohq)
    assert not oso.is_allowed(steve, "edit", oso_bug)
    # owner implies member, so steve inherits member's permissions too.
    assign_role(steve, osohq, "owner", session=session)
    session.commit()
    assert oso.is_allowed(steve, "edit", oso_bug)
    assert oso.is_allowed(steve, "list_repos", osohq)
    assert oso.is_allowed(steve, "invite", osohq)
    # - Removing a permission of grandchild resource type from a role revokes assignee access
    new_policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };
    resource(_type: Issue, "issue", actions, {}) if
        actions = [
            "edit"
        ];
    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;
    parent_child(parent_repo: Repo, issue: Issue) if
        issue.repo = parent_repo;
    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};
    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.clear_rules()
    oso.load_str(new_policy)
    oso.enable_roles()
    assert not oso.is_allowed(leina, "edit", oso_bug)
    assert oso.is_allowed(leina, "invite", osohq)
# Homogeneous role implications:
def test_homogeneous_role_implication(init_oso, sample_data):
    """Adding/removing a same-resource role implication grants/revokes the implied role's permissions."""
    # - Adding a role implication of same resource type to a role grants assignee access
    oso, session = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            },
            owner: {
                implies: ["member"]
            }
        };
    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};
    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.enable_roles()
    osohq = sample_data["osohq"]
    apple = sample_data["apple"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]
    # No roles assigned yet: access is denied.
    assert not oso.is_allowed(leina, "invite", osohq)
    assign_role(leina, osohq, "member", session=session)
    assign_role(steve, osohq, "owner", session=session)
    session.commit()
    # Roles are scoped to the resource they were assigned on (osohq, not apple).
    assert oso.is_allowed(leina, "invite", osohq)
    assert not oso.is_allowed(leina, "invite", apple)
    assert oso.is_allowed(steve, "invite", osohq)
    assert not oso.is_allowed(steve, "invite", apple)
    # - Removing a role implication of same resource type from a role revokes assignee access
    new_policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite", "list_repos"] and
        roles = {
            member: {
                permissions: ["invite"]
            },
            owner: {
                # REMOVE "implies"
                permissions: ["list_repos"]
            }
        };
    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};
    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.clear_rules()
    oso.load_str(new_policy)
    oso.enable_roles()
    # leina can still "invite"
    assert oso.is_allowed(leina, "invite", osohq)
    # steve can't "invite"
    assert not oso.is_allowed(steve, "invite", osohq)
    assert oso.is_allowed(steve, "list_repos", osohq)
# Parent->child role implications:
def test_parent_child_role_implication(init_oso, sample_data):
    """Adding/removing a child-resource role implication grants/revokes access on the child."""
    # - Adding a role implication of child resource type to a role grants assignee access to child
    oso, session = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"],
                implies: ["repo:reader"]
            }
        };
    resource(_type: Repo, "repo", actions, roles) if
        actions = [
            "push",
            "pull"
        ] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };
    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;
    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};
    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.enable_roles()
    osohq = sample_data["osohq"]
    oso_repo = sample_data["oso_repo"]
    ios = sample_data["ios"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]
    # member implies reader which has the "pull" permission
    assign_role(leina, osohq, "member", session=session)
    session.commit()
    assert oso.is_allowed(leina, "invite", osohq)
    # Implied reader role reaches only repos of the org where member was assigned.
    assert oso.is_allowed(leina, "pull", oso_repo)
    assert not oso.is_allowed(leina, "pull", ios)
    assert not oso.is_allowed(steve, "pull", oso_repo)
    # - Removing a role implication of child resource type from a role revokes assignee access to child
    new_policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };
    resource(_type: Repo, "repo", actions, {}) if
        actions = [
            "push",
            "pull"
        ];
    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;
    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};
    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.clear_rules()
    oso.load_str(new_policy)
    oso.enable_roles()
    assert not oso.is_allowed(leina, "pull", oso_repo)
    assert oso.is_allowed(leina, "invite", osohq)
# Grandparent->child role implications:
def test_grandparent_child_role_implication(init_oso, sample_data):
    """A grandparent role can imply a grandchild-resource role directly, skipping the intermediate resource."""
    # - Adding a role implication of grandchild resource type to a role grants assignee access to grandchild
    # without intermediate parent resource
    oso, session = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"],
                implies: ["issue:editor"]
            }
        };
    resource(_type: Issue, "issue", actions, roles) if
        actions = [
            "edit"
        ] and
        roles = {
            editor: {
                permissions: ["edit"]
            }
        };
    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;
    parent_child(parent_repo: Repo, issue: Issue) if
        issue.repo = parent_repo;
    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};
    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.enable_roles()
    osohq = sample_data["osohq"]
    oso_bug = sample_data["oso_bug"]
    ios_laggy = sample_data["ios_laggy"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]
    assign_role(leina, osohq, "member", session=session)
    session.commit()
    assert oso.is_allowed(leina, "invite", osohq)
    # Implied editor role reaches only issues under the org's repos.
    assert oso.is_allowed(leina, "edit", oso_bug)
    assert not oso.is_allowed(leina, "edit", ios_laggy)
    assert not oso.is_allowed(steve, "edit", oso_bug)
    # - Removing a permission of grandchild resource type from a role revokes assignee access
    new_policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };
    resource(_type: Issue, "issue", actions, roles) if
        actions = [
            "edit"
        ] and
        roles = {
            editor: {
                permissions: ["edit"]
            }
        };
    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;
    parent_child(parent_repo: Repo, issue: Issue) if
        issue.repo = parent_repo;
    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};
    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.clear_rules()
    oso.load_str(new_policy)
    oso.enable_roles()
    assert not oso.is_allowed(leina, "edit", oso_bug)
    assert oso.is_allowed(leina, "invite", osohq)
def test_chained_role_implication(init_oso, sample_data):
    """Role implications chain grandparent->parent->child; breaking the first link revokes inherited access."""
    # - Adding a role implication from grandparent->parent->child resource role types grants assignee of grandparent
    # role access to grandchild resource
    oso, session = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"],
                implies: ["repo:reader"]
            }
        };
    resource(_type: Repo, "repo", actions, roles) if
        actions = [
            "push",
            "pull"
        ] and
        roles = {
            reader: {
                permissions: ["pull"],
                implies: ["issue:editor"]
            }
        };
    resource(_type: Issue, "issue", actions, roles) if
        actions = [
            "edit"
        ] and
        roles = {
            editor: {
                permissions: ["edit"]
            }
        };
    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;
    parent_child(parent_repo: Repo, issue: Issue) if
        issue.repo = parent_repo;
    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};
    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.enable_roles()
    osohq = sample_data["osohq"]
    oso_repo = sample_data["oso_repo"]
    oso_bug = sample_data["oso_bug"]
    ios_laggy = sample_data["ios_laggy"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]
    assign_role(leina, osohq, "member", session=session)
    assign_role(steve, oso_repo, "reader", session=session)
    session.commit()
    # leina can invite to the org, pull from the repo, and edit the issue
    assert oso.is_allowed(leina, "invite", osohq)
    # Fixed: this line previously asserted `steve` (duplicating the check
    # below) instead of checking leina's implied repo access per the comment.
    assert oso.is_allowed(leina, "pull", oso_repo)
    assert oso.is_allowed(leina, "edit", oso_bug)
    assert not oso.is_allowed(leina, "edit", ios_laggy)
    # steve can pull from the repo and edit the issue, but can NOT invite to the org
    assert oso.is_allowed(steve, "pull", oso_repo)
    assert oso.is_allowed(steve, "edit", oso_bug)
    assert not oso.is_allowed(steve, "edit", ios_laggy)
    assert not oso.is_allowed(steve, "invite", osohq)
    # - Removing a role implication from grandparent->parent->child resource role types revokes assignee of grandparent
    # role access to grandchild resource
    new_policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };
    resource(_type: Repo, "repo", actions, roles) if
        actions = [
            "push",
            "pull"
        ] and
        roles = {
            reader: {
                permissions: ["pull"],
                implies: ["issue:editor"]
            }
        };
    resource(_type: Issue, "issue", actions, roles) if
        actions = [
            "edit"
        ] and
        roles = {
            editor: {
                permissions: ["edit"]
            }
        };
    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;
    parent_child(parent_repo: Repo, issue: Issue) if
        issue.repo = parent_repo;
    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};
    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.clear_rules()
    oso.load_str(new_policy)
    oso.enable_roles()
    # leina can't edit the issue anymore
    assert not oso.is_allowed(leina, "edit", oso_bug)
    assert oso.is_allowed(leina, "invite", osohq)
    # steve can still edit the issue
    assert oso.is_allowed(steve, "edit", oso_bug)
# TODO: this is just testing our own code / we don't handle role management
# anymore
#
# NOTE(gj): leaving this for now since it actually exercises the policy, but
# I assume the policy bits we're exercising here are already covered by other
# tests. Once that's verified, we can remove this test.
def test_assign_remove_user_role(init_oso, sample_data):
    """assign_role/remove_role persist OrgRole rows and grant/revoke the corresponding access."""
    # - Adding user-role assignment grants access
    oso, session = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite", "list_repos"] and
        roles = {
            member: {
                permissions: ["invite"]
            },
            owner: {
                permissions: ["list_repos"]
            }
        };
    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};
    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.enable_roles()
    osohq = sample_data["osohq"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]
    assign_role(leina, osohq, "member", session=session)
    session.commit()
    # Assign leina member role
    leina_roles = session.query(OrgRole).filter_by(user_id=leina.name).all()
    assert len(leina_roles) == 1
    assert leina_roles[0].name == "member"
    # Assign steve owner role
    assign_role(steve, osohq, "owner", session=session)
    session.commit()
    steve_roles = session.query(OrgRole).filter_by(user_id=steve.name).all()
    assert len(steve_roles) == 1
    assert steve_roles[0].name == "owner"
    assert oso.is_allowed(leina, "invite", osohq)
    assert not oso.is_allowed(steve, "invite", osohq)
    assert oso.is_allowed(steve, "list_repos", osohq)
    # - Removing user-role assignment revokes access
    removed = remove_role(leina, osohq, "member", session=session)
    session.commit()
    assert removed
    leina_roles = session.query(OrgRole).filter_by(user_id=leina.name).all()
    assert len(leina_roles) == 0
    # make sure steve still has his role
    steve_roles = session.query(OrgRole).filter_by(user_id=steve.name).all()
    assert len(steve_roles) == 1
    assert steve_roles[0].name == "owner"
    assert not oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(steve, "list_repos", osohq)
@pytest.mark.skip("not worrying about data filtering yet")
def test_authorizing_related_fields(
    init_oso, sample_data, auth_sessionmaker, Org, Repo
):
    """Related fields on filtered query results are nulled unless their type is in checked_permissions."""
    oso, session = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite", "read"] and
        roles = {
            member: {
                permissions: ["invite", "read"],
                implies: ["repo:reader"]
            }
        };
    resource(_type: Repo, "repo", actions, roles) if
        actions = ["pull"] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };
    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;
    """
    oso.load_str(policy)
    oso.enable_roles()
    osohq = sample_data["osohq"]
    steve = sample_data["steve"]
    assign_role(steve, osohq, "member", session)
    session.commit()
    oso.actor = steve
    # Only Repo is authorized: the related Org comes back as None.
    oso.checked_permissions = {Repo: "pull"}
    results = auth_sessionmaker().query(Repo).all()
    assert len(results) == 2
    assert results[0].org is None
    # Authorizing Org as well makes the related field visible.
    oso.checked_permissions = {Org: "read", Repo: "pull"}
    results = auth_sessionmaker().query(Repo).all()
    assert len(results) == 2
    assert results[0].org.id == osohq.id
# TODO(gj): data filtering
def test_data_filtering_role_allows_not(init_oso, sample_data):
    """`not role_allows(...)` inverts access: role holders are denied, everyone else allowed."""
    oso, session = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };
    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};
    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    allow(actor, action, resource) if
        not role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.enable_roles()
    osohq = sample_data["osohq"]
    apple = sample_data["apple"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]
    assign_role(leina, osohq, "member", session=session)
    assign_role(steve, osohq, "member", session=session)
    session.commit()
    # This is just to ensure we don't modify the policy above.
    assert not oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(leina, "invite", apple)
    assert not oso.is_allowed(steve, "invite", osohq)
    assert oso.is_allowed(steve, "invite", apple)
    # oso.actor = leina
    # oso.checked_permissions = {Org: "invite"}
    # auth_session = auth_sessionmaker()
    #
    # with pytest.raises(OsoError):
    #     auth_session.query(Org).all()
# TODO(gj): data filtering
def test_data_filtering_role_allows_and(init_oso, sample_data):
    """role_allows AND-ed with an extra resource condition only permits resources matching both."""
    oso, session = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"],
                implies: ["repo:reader"]
            }
        };
    resource(_type: Repo, "repo", actions, roles) if
        actions = ["pull"] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };
    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;
    allow(actor, action, resource) if
        role_allows(actor, action, resource) and
        resource.name = "osohq";
    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};
    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    """
    oso.load_str(policy)
    oso.enable_roles()
    osohq = sample_data["osohq"]
    apple = sample_data["apple"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]
    assign_role(leina, osohq, "member", session=session)
    assign_role(leina, apple, "member", session=session)
    assign_role(steve, osohq, "member", session=session)
    session.commit()
    # This is just to ensure we don't modify the policy above.
    assert oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(steve, "invite", osohq)
    # leina has the apple role, but apple fails the `resource.name = "osohq"` test.
    assert not oso.is_allowed(leina, "invite", apple)
    # oso.actor = leina
    # oso.checked_permissions = {Org: "invite"}
    # auth_session = auth_sessionmaker()
    #
    # results = auth_session.query(Org).all()
    # assert len(results) == 1
    #
    # oso.actor = steve
    # oso.checked_permissions = {Org: "invite", User: "invite"}
    # auth_session = auth_sessionmaker()
    #
    # results = auth_session.query(User).all()
    # assert len(results) == 0
# TODO(gj): data filtering
def test_data_filtering_role_allows_explicit_or(init_oso, sample_data):
    """`role_allows()` OR-ed with a resource condition grants when either side holds."""
    oso, session = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"],
                implies: ["repo:reader"]
            }
        };

    resource(_type: Repo, "repo", actions, roles) if
        actions = ["pull"] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };

    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;

    allow(actor, action, resource) if
        role_allows(actor, action, resource) or
        resource.name = "osohq";

    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};

    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    """
    oso.load_str(policy)
    oso.enable_roles()

    osohq = sample_data["osohq"]
    apple = sample_data["apple"]
    # leina = sample_data["leina"]
    steve = sample_data["steve"]

    assign_role(steve, apple, "member", session=session)
    session.commit()

    # This is just to ensure we don't modify the policy above.
    # osohq is allowed via the name clause; apple via steve's member role.
    assert oso.is_allowed(steve, "invite", osohq)
    assert oso.is_allowed(steve, "invite", apple)

    # oso.actor = steve
    # oso.checked_permissions = {Org: "invite"}
    # auth_session = auth_sessionmaker()
    #
    # results = auth_session.query(Org).all()
    # assert len(results) == 2
    #
    # oso.actor = steve
    # oso.checked_permissions = {Repo: "pull"}
    # auth_session = auth_sessionmaker()
    # results = auth_session.query(Repo).all()
    # assert len(results) == 1
    # assert results[0].org_id == "apple"
    #
    # oso.actor = leina
    # oso.checked_permissions = {Org: "invite", User: "invite"}
    # auth_session = auth_sessionmaker()
    # results = auth_session.query(Org).all()
    # assert len(results) == 1
# TODO(gj): data filtering
def test_data_filtering_role_allows_implicit_or(init_oso, sample_data):
    """Multiple `allow()` rules must combine as OR, not AND, with `role_allows()`."""
    # Ensure that the filter produced by `Roles.role_allows()` is not AND-ed
    # with a false filter produced by a separate `allow()` rule.
    oso, session = init_oso
    policy = """
    # Users can read their own data.
    allow(user: User, "read", user);

    resource(_type: Org, "org", actions, roles) if
        actions = ["read"] and
        roles = {
            member: {
                permissions: ["read"]
            }
        };

    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};

    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};

    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.enable_roles()

    osohq = sample_data["osohq"]
    leina = sample_data["leina"]

    assign_role(leina, osohq, "member", session=session)
    session.commit()

    # This is just to ensure we don't modify the policy above.
    assert oso.is_allowed(leina, "read", leina)

    # oso.actor = leina
    # oso.checked_permissions = {Org: "read", User: "read"}
    # auth_session = auth_sessionmaker()
    #
    # results = auth_session.query(Org).all()
    # assert len(results) == 1
    #
    # results = auth_session.query(User).all()
    # assert len(results) == 1
# TODO(gj): data filtering
def test_data_filtering_actor_can_assume_role_not(init_oso, sample_data):
    """Negated `actor_can_assume_role` allows exactly the actors WITHOUT the role."""
    oso, session = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };

    allow(actor, _action, resource) if
        not actor_can_assume_role(actor, "member", resource);

    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};

    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    """
    oso.load_str(policy)
    oso.enable_roles()

    osohq = sample_data["osohq"]
    apple = sample_data["apple"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]

    # Both users are members of osohq only, so they are denied there and
    # allowed on apple (where they hold no role).
    assign_role(leina, osohq, "member", session=session)
    assign_role(steve, osohq, "member", session=session)
    session.commit()

    # This is just to ensure we don't modify the policy above.
    assert not oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(leina, "invite", apple)
    assert not oso.is_allowed(steve, "invite", osohq)
    assert oso.is_allowed(steve, "invite", apple)

    # oso.actor = leina
    # oso.checked_permissions = {Org: "invite"}
    # auth_session = auth_sessionmaker()
    #
    # with pytest.raises(OsoError):
    #     auth_session.query(Org).all()
# TODO(gj): data filtering
def test_data_filtering_actor_can_assume_role_and(init_oso, sample_data):
    """`actor_can_assume_role` AND-ed with a resource condition restricts to that resource."""
    oso, session = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"],
                implies: ["repo:reader"]
            }
        };

    resource(_type: Repo, "repo", actions, roles) if
        actions = ["pull"] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };

    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;

    allow(actor, _action, resource) if
        actor_can_assume_role(actor, "member", resource) and
        resource.name = "osohq";

    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};

    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    """
    oso.load_str(policy)
    oso.enable_roles()

    osohq = sample_data["osohq"]
    apple = sample_data["apple"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]

    # leina is a member of both orgs, but the name clause blocks apple.
    assign_role(leina, osohq, "member", session=session)
    assign_role(leina, apple, "member", session=session)
    assign_role(steve, osohq, "member", session=session)
    session.commit()

    # This is just to ensure we don't modify the policy above.
    assert oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(steve, "invite", osohq)
    assert not oso.is_allowed(leina, "invite", apple)

    # oso.actor = leina
    # oso.checked_permissions = {Org: "invite"}
    # auth_session = auth_sessionmaker()
    #
    # results = auth_session.query(Org).all()
    # assert len(results) == 1
    #
    # oso.actor = steve
    # oso.checked_permissions = {User: "invite"}
    # auth_session = auth_sessionmaker()
    #
    # results = auth_session.query(User).all()
    # assert len(results) == 0
# TODO(gj): data filtering
def test_data_filtering_actor_can_assume_role_explicit_or(init_oso, sample_data):
    """`actor_can_assume_role` OR-ed with a resource condition grants when either holds."""
    oso, session = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"],
                implies: ["repo:reader"]
            }
        };

    resource(_type: Repo, "repo", actions, roles) if
        actions = ["pull"] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };

    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;

    allow(actor, action, resource) if
        role_allows(actor, action, resource);

    allow(actor, _, resource) if
        actor_can_assume_role(actor, "member", resource) or
        resource.name = "osohq";

    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};

    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    """
    oso.load_str(policy)
    oso.enable_roles()

    osohq = sample_data["osohq"]
    apple = sample_data["apple"]
    # leina = sample_data["leina"]
    steve = sample_data["steve"]

    assign_role(steve, apple, "member", session=session)
    session.commit()

    # This is just to ensure we don't modify the policy above.
    # osohq is allowed via the name clause; apple via steve's member role.
    assert oso.is_allowed(steve, "invite", osohq)
    assert oso.is_allowed(steve, "invite", apple)

    # oso.actor = steve
    # oso.checked_permissions = {Org: "invite"}
    # auth_session = auth_sessionmaker()
    #
    # results = auth_session.query(Org).all()
    # assert len(results) == 2
    #
    # oso.actor = steve
    # oso.checked_permissions = {Repo: "pull"}
    # auth_session = auth_sessionmaker()
    # results = auth_session.query(Repo).all()
    # assert len(results) == 1
    # assert results[0].org_id == "apple"
    #
    # oso.actor = leina
    # oso.checked_permissions = {Org: "invite"}
    # auth_session = auth_sessionmaker()
    # results = auth_session.query(Org).all()
    # assert len(results) == 1
# TODO(gj): data filtering
def test_data_filtering_actor_can_assume_role_implicit_or(init_oso, sample_data):
    """Multiple `allow()` rules must combine as OR with `actor_can_assume_role`."""
    # Ensure that the filter produced by `actor_can_assume_role/3` is not AND-ed
    # with a false filter produced by a separate `allow()` rule.
    oso, session = init_oso
    policy = """
    # Users can read their own data.
    allow(user: User, "read", user);

    resource(_type: Org, "org", actions, roles) if
        actions = ["read"] and
        roles = {
            member: {
                permissions: ["read"]
            }
        };

    allow(actor, _, resource) if
        actor_can_assume_role(actor, "member", resource);

    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};

    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    """
    oso.load_str(policy)
    oso.enable_roles()

    osohq = sample_data["osohq"]
    leina = sample_data["leina"]

    assign_role(leina, osohq, "member", session=session)
    session.commit()

    # This is just to ensure we don't modify the policy above.
    assert oso.is_allowed(leina, "read", leina)

    # oso.actor = leina
    # oso.checked_permissions = {Org: "read", User: "read"}
    # auth_session = auth_sessionmaker()
    #
    # results = auth_session.query(Org).all()
    # assert len(results) == 1
    #
    # results = auth_session.query(User).all()
    # assert len(results) == 1
# TODO(gj): data filtering
def test_data_filtering_combo(init_oso, sample_data):
    """`role_allows` AND `actor_can_assume_role` in one rule body still authorizes."""
    oso, session = init_oso
    policy = """
    # Users can read their own data.
    allow(user: User, "read", user);

    resource(_type: Org, "org", actions, roles) if
        actions = ["read"] and
        roles = {
            member: {
                permissions: ["read"]
            }
        };

    allow(actor, action, resource) if
        role_allows(actor, action, resource) and
        actor_can_assume_role(actor, "member", resource);

    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};

    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    """
    oso.load_str(policy)
    oso.enable_roles()

    osohq = sample_data["osohq"]
    leina = sample_data["leina"]

    assign_role(leina, osohq, "member", session=session)
    session.commit()

    # This is just to ensure we don't modify the policy above.
    assert oso.is_allowed(leina, "read", leina)

    # oso.actor = leina
    # oso.checked_permissions = {Org: "read"}
    # auth_session = auth_sessionmaker()
    #
    # # TODO: for now this will error
    # with pytest.raises(OsoError):
    #     auth_session.query(Org).all()
# TODO(gj): data filtering
def test_actor_can_assume_role(init_oso, sample_data):
    """`actor_can_assume_role` matches direct roles and roles implied across resources."""
    oso, session = init_oso
    policy = """
    resource(_type: Org, "org", [], roles) if
        roles = {
            member: {
                implies: ["repo:reader"]
            },
            owner: {
                implies: ["member"]
            }
        };

    resource(_type: Repo, "repo", actions, roles) if
        actions = ["pull"] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };

    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;

    allow(actor, "read", repo: Repo) if
        actor_can_assume_role(actor, "reader", repo);

    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};

    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};
    """
    oso.load_str(policy)
    oso.enable_roles()

    osohq = sample_data["osohq"]
    oso_repo = sample_data["oso_repo"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]
    gabe = sample_data["gabe"]

    # leina gets "reader" on the repo indirectly (org member implies repo:reader);
    # steve gets it directly; gabe gets nothing.
    assign_role(leina, osohq, "member", session)
    assign_role(steve, oso_repo, "reader", session)

    # Without data filtering
    assert oso.is_allowed(leina, "read", oso_repo)
    assert oso.is_allowed(steve, "read", oso_repo)
    assert not oso.is_allowed(gabe, "read", oso_repo)

    # # With data filtering
    # oso.actor = leina
    # oso.checked_permissions = {Repo: "read"}
    # auth_session = auth_sessionmaker()
    #
    # results = auth_session.query(Repo).all()
    # assert len(results) == 2
    # for repo in results:
    #     assert repo.org_id == "osohq"
def test_role_allows_with_other_rules(init_oso, sample_data):
    """`role_allows` coexists with unrelated `allow()` rules over non-resource values."""
    oso, session = init_oso
    policy = """
    # Users can read their own data.
    allow(user: User, "read", user);

    resource(_type: Org, "org", actions, roles) if
        actions = ["read"] and
        roles = {
            member: {
                permissions: ["read"]
            }
        };

    allow(_, _, resource) if resource = 1;
    allow(_, _, resource: Boolean) if resource;

    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};

    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};

    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.enable_roles()

    osohq = sample_data["osohq"]
    leina = sample_data["leina"]
    assign_role(leina, osohq, "member", session=session)
    session.commit()

    # This is just to ensure we don't modify the policy above.
    assert oso.is_allowed(leina, "read", osohq)
    # Non-resource values hit the literal / Boolean rules, not role_allows.
    assert oso.is_allowed(leina, "read", 1)
    assert not oso.is_allowed(leina, "read", 2)
    assert oso.is_allowed(leina, "read", True)
    assert not oso.is_allowed(leina, "read", False)
# LEGACY TESTS
def test_roles_integration(init_oso, sample_data):
    """End-to-end: org/repo/issue hierarchy with implied roles plus role reassignment."""
    oso, session = init_oso
    policy = """
    resource(_type: Org, "org", actions, roles) if
        actions = [
            "invite",
            "create_repo"
        ] and
        roles = {
            member: {
                permissions: ["create_repo"],
                implies: ["repo:reader"]
            },
            owner: {
                permissions: ["invite"],
                implies: ["member", "repo:writer"]
            }
        };

    resource(_type: Repo, "repo", actions, roles) if
        actions = [
            "push",
            "pull"
        ] and
        roles = {
            writer: {
                permissions: ["push", "issue:edit"],
                implies: ["reader"]
            },
            reader: {
                permissions: ["pull"]
            }
        };

    resource(_type: Issue, "issue", actions, {}) if
        actions = [
            "edit"
        ];

    parent_child(parent_org: Org, repo: Repo) if
        repo.org = parent_org;

    parent_child(parent_repo: Repo, issue: Issue) if
        issue.repo = parent_repo;

    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};

    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};

    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.enable_roles()

    # Get sample data
    # -------------------
    leina = sample_data["leina"]
    steve = sample_data["steve"]
    gabe = sample_data["gabe"]

    osohq = sample_data["osohq"]
    # apple = sample_data["apple"]

    oso_repo = sample_data["oso_repo"]
    # ios = sample_data["ios"]
    # demo_repo = sample_data["demo_repo"]

    ios_laggy = sample_data["ios_laggy"]
    oso_bug = sample_data["oso_bug"]

    # @NOTE: Need the users and resources in the db before assigning roles
    # so you have to call session.commit() first.
    assign_role(leina, osohq, "owner", session=session)
    assign_role(steve, osohq, "member", session=session)
    session.commit()

    # owner implies member + repo:writer, which implies reader + issue:edit.
    assert oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(leina, "create_repo", osohq)
    assert oso.is_allowed(leina, "push", oso_repo)
    assert oso.is_allowed(leina, "pull", oso_repo)
    assert oso.is_allowed(leina, "edit", oso_bug)

    # member only implies repo:reader.
    assert not oso.is_allowed(steve, "invite", osohq)
    assert oso.is_allowed(steve, "create_repo", osohq)
    assert not oso.is_allowed(steve, "push", oso_repo)
    assert oso.is_allowed(steve, "pull", oso_repo)
    assert not oso.is_allowed(steve, "edit", oso_bug)

    # Roles on osohq grant nothing in the unrelated ios repo's issues.
    assert not oso.is_allowed(leina, "edit", ios_laggy)
    assert not oso.is_allowed(steve, "edit", ios_laggy)

    # oso.actor = leina
    # oso.checked_permissions = {Repo: "pull"}
    # auth_session = auth_sessionmaker()
    # results = auth_session.query(Repo).all()
    # assert len(results) == 2
    # result_ids = [repo.id for repo in results]
    # assert oso_repo.id in result_ids
    # assert demo_repo.id in result_ids
    # assert ios.id not in result_ids

    # oso.actor = leina
    # oso.checked_permissions = {Issue: "edit"}
    # auth_session = auth_sessionmaker()
    # results = auth_session.query(Issue).all()
    # assert len(results) == 1
    # result_ids = [issue.id for issue in results]
    # assert oso_bug.id in result_ids

    # Reassigning between member/owner flips gabe's access each time.
    assert not oso.is_allowed(gabe, "edit", oso_bug)
    assign_role(gabe, osohq, "member", session=session)
    session.commit()
    assert not oso.is_allowed(gabe, "edit", oso_bug)
    assign_role(gabe, osohq, "owner", session=session)
    session.commit()
    assert oso.is_allowed(gabe, "edit", oso_bug)
    assign_role(gabe, osohq, "member", session=session)
    session.commit()
    assert not oso.is_allowed(gabe, "edit", oso_bug)
    assign_role(gabe, osohq, "owner", session=session)
    session.commit()
    assert oso.is_allowed(gabe, "edit", oso_bug)
    remove_role(gabe, osohq, "owner", session=session)
    session.commit()
    assert not oso.is_allowed(gabe, "edit", oso_bug)
# Legacy test from sam/polar-roles
def test_legacy_sam_polar_roles(init_oso, sample_data):
    """Legacy scenario from sam/polar-roles: org owners/members with implied repo roles."""
    oso, session = init_oso
    policy = """
    resource(_: Org, "org", actions, roles) if
        actions = ["create_repo", "invite"] and
        roles = {
            member: {
                permissions: ["create_repo"],
                implies: ["repo:reader"]
            },
            owner: {
                permissions: ["invite"],
                implies: ["member", "repo:writer"]
            }
        };

    resource(_: Repo, "repo", actions, roles) if
        actions = ["pull", "push"] and
        roles = {
            writer: {
                permissions: ["push"],
                implies: ["reader"]
            },
            reader: {
                permissions: ["pull"]
            }
        };

    parent_child(parent_org: Org, repo: Repo) if
        parent_org = repo.org;

    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};

    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};

    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.enable_roles()

    leina = sample_data["leina"]
    steve = sample_data["steve"]
    gabe = sample_data["gabe"]
    osohq = sample_data["osohq"]
    apple = sample_data["apple"]
    oso_repo = sample_data["oso_repo"]
    ios = sample_data["ios"]

    # Things that happen in the app via the management api.
    assign_role(leina, osohq, "owner", session)
    assign_role(steve, osohq, "member", session)
    assign_role(gabe, oso_repo, "writer", session)

    # Test
    # Test Org roles
    # Leina can invite people to osohq because she is an OWNER
    assert oso.is_allowed(leina, "invite", osohq)
    assert not oso.is_allowed(leina, "invite", apple)

    # Steve can create repos in osohq because he is a MEMBER
    assert oso.is_allowed(steve, "create_repo", osohq)
    # Steve can't invite people to osohq because only OWNERs can invite, and he's not an OWNER
    assert not oso.is_allowed(steve, "invite", osohq)
    # Leina can create a repo because she's the OWNER and OWNER implies MEMBER
    assert oso.is_allowed(leina, "create_repo", osohq)

    assert oso.is_allowed(steve, "pull", oso_repo)
    assert not oso.is_allowed(steve, "pull", ios)
    # Leina can pull from oso_repo because she's an OWNER of osohq
    # which implies WRITE on oso_repo
    # which implies READ on oso_repo
    assert oso.is_allowed(leina, "pull", oso_repo)
    # Gabe can pull from oso_repo because he has WRITE on oso_repo
    # which implies READ on oso_repo
    assert oso.is_allowed(gabe, "pull", oso_repo)

    # Steve can NOT push to oso_repo because he is a MEMBER of osohq
    # which implies READ on oso_repo but not WRITE
    assert not oso.is_allowed(steve, "push", oso_repo)
    # Leina can push to oso_repo because she's an OWNER of osohq
    # which implies WRITE on oso_repo
    assert oso.is_allowed(leina, "push", oso_repo)
    # Gabe can push to oso_repo because he has WRITE on oso_repo
    assert oso.is_allowed(gabe, "push", oso_repo)

    # TODO(gj): look at wowhack in sqlalchemy_oso/partial.py
    # # Data filtering test:
    # auth_filter = authorize_model(oso, leina, "push", session, Repo)
    # assert str(auth_filter) == ":param_1 = repositories.organization_id"
    # authorized_repos = session.query(Repo).filter(auth_filter).all()
    # assert len(authorized_repos) == 1
    # assert authorized_repos[0] == oso_repo
@pytest.mark.skipif(not os.environ.get("PERF"), reason="this b slow")
def test_perf_polar(init_oso, sample_data):
    """Micro-benchmark: is_allowed() with 100 direct repo roles (opt-in via PERF env var)."""
    oso, session = init_oso

    # Test many direct roles
    p = """
    resource(_: Repo, "repo", actions, roles) if
        actions = ["read", "write"] and
        roles = {
            reader: {
                permissions: ["read"]
            },
            writer: {
                permissions: ["write"]
            }
        };

    actor_has_role_for_resource(actor, role_name: String, role_resource: Repo) if
        role in actor.repo_roles and
        role matches {name: role_name, resource: role_resource};

    actor_has_role_for_resource(actor, role_name: String, role_resource: Org) if
        role in actor.org_roles and
        role matches {name: role_name, resource: role_resource};

    allow(actor, action, resource) if
        role_allows(actor, action, resource);
    """
    # p = """resource(_: Repo, "repo", actions, roles) if
    #     actions = ["pull", "push"] and
    #     roles = {
    #         writer: {
    #             permissions: ["push"],
    #             implies: ["reader"]
    #         },
    #         reader: {
    #             permissions: ["pull"]
    #         }
    #     };
    #     parent_child(parent_org: Org, repo: Repo) if
    #         parent_org = repo.org;
    # """
    oso.load_str(p)
    oso.enable_roles()

    leina = sample_data["leina"]
    # steve = sample_data["steve"]
    osohq = sample_data["osohq"]
    # oso_repo = sample_data["oso_repo"]

    # Create 100 repositories
    oso_repos = []
    for i in range(100):
        name = f"oso_repo_{i}"
        repo = Repo(name=name, org=osohq)
        oso_repos.append(repo)
        session.add(repo)
    session.commit()

    n_roles = 100
    for i in range(n_roles):
        assign_role(leina, oso_repos[i], "writer", session)
    session.commit()

    assert len(leina.repo_roles) == n_roles

    def test_query():
        # The timed operation: one authorization check against the last repo.
        return oso.is_allowed(leina, "write", oso_repos[99])

    # Ensure valid policy is loaded.
    assert test_query()

    number = 10
    time = timeit.timeit(test_query, number=number)
    print(f"Executed in : {time/number*1000} ms\n Averaged over {number} repetitions.")
def test_enable_roles_before_loading_policy(init_oso):
    """enable_roles() before any load_str() must raise a helpful OsoError."""
    oso, _ = init_oso
    with pytest.raises(OsoError) as e:
        oso.enable_roles()
    assert e.match("Make sure to load policy before calling Oso.enable_roles().")
def test_missing_actor_has_role_for_resource(init_oso):
    """A policy with resource() but no actor_has_role_for_resource/3 fails validation."""
    oso, _ = init_oso
    p = """
    resource(_: Repo, "repo", actions, roles) if
        actions = ["read", "write"] and
        roles = {
            reader: {
                permissions: ["read"]
            },
            writer: {
                permissions: ["write"]
            }
        };"""
    oso.load_str(p)
    with pytest.raises(OsoError) as e:
        oso.enable_roles()
    assert e.match(
        r"Need to define `actor_has_role_for_resource\(actor, role_name, resource\)`"
    )
def test_role_config_revalidated_when_loading_rules_after_enabling_roles(init_oso):
    """Loading more rules after enable_roles() re-runs role validation."""
    oso, _ = init_oso

    valid_policy = """resource(_: Repo, "repo", ["read"], {});
    actor_has_role_for_resource(_, _, _);"""
    # Invalid because the Org resource declares neither actions nor roles.
    invalid_policy = """resource(_: Org, "org", [], {});
    actor_has_role_for_resource(_, _, _);"""

    oso.load_str(valid_policy)
    oso.enable_roles()

    with pytest.raises(RolesValidationError):
        oso.load_str(invalid_policy)
def test_validation_with_method_calls(init_oso):
    """actor_has_role_for_resource bodies that call application methods pass validation."""
    oso, _ = init_oso
    p = """resource(_: Repo, "repo", ["read"], {});
    actor_has_role_for_resource(actor, role_name, resource) if
        actor.has_role(role_name, resource);"""
    oso.load_str(p)
    oso.enable_roles()
|
davidpelaez/oso | languages/python/oso/polar/data_filtering.py | from typing import Any, Optional
from dataclasses import dataclass
VALID_KINDS = ["parent", "children"]
# Used so we know what fetchers to call and how to match up constraints.
@dataclass
class Relationship:
    """Declares how one registered class relates to another for data filtering."""

    kind: str  # "parent" or "children" (see VALID_KINDS above).
    other_type: str  # Polar class tag of the related type.
    my_field: str  # Field on this class used to join.
    other_field: str  # Field on the other class used to join.
# @NOTE(Steve): Some of this stuff is very inconsistent right now. Names for fields
# and stuff need cleaning up. Sort of left a mess from when I was figuring this all
# out.
def serialize_types(types, class_names):
    """
    Convert types stored in python to what the core expects.

    Relationship fields serialize to a ``"Relationship"`` node carrying the
    join metadata; plain class fields serialize to a ``"Base"`` node tagged
    with the class's registered Polar name from ``class_names``.
    """

    def _serialize_field(value):
        # Each field is either a Relationship declaration or a plain class.
        if isinstance(value, Relationship):
            return {
                "Relationship": {
                    "kind": value.kind,
                    "other_class_tag": value.other_type,
                    "my_field": value.my_field,
                    "other_field": value.other_field,
                }
            }
        return {"Base": {"class_tag": class_names[value]}}

    return {
        tag: {name: _serialize_field(value) for name, value in fields.items()}
        for tag, fields in types.items()
    }
@dataclass
class Field:
    """Constraint value that references another field on the same object."""

    field: str  # Attribute name to read off the constrained object.
@dataclass
class Ref:
    """Constraint value that references the results of a previous fetch request."""

    field: Optional[str]  # If set, project this attribute off each referenced result.
    result_id: str  # Id of the request whose results are substituted in.
@dataclass
class Constraint:
    """A single filter condition applied when fetching objects of one class."""

    kind: str  # ["Eq", "In", "Contains"]
    field: str  # Attribute name the constraint applies to.
    value: Any  # Concrete value, or a Field referencing another attribute.

    def to_predicate(self):
        """Return a callable ``obj -> bool`` testing this constraint against an object.

        :raises ValueError: if ``self.kind`` is not one of "Eq", "In", "Contains".
        """

        def known_value(x):
            return self.value

        def field_value(x):
            # Field constraints compare against another attribute of the same object.
            return getattr(x, self.value.field)

        get_value = field_value if isinstance(self.value, Field) else known_value
        if self.kind == "Eq":
            return lambda x: getattr(x, self.field) == get_value(x)
        if self.kind == "In":
            return lambda x: getattr(x, self.field) in get_value(x)
        if self.kind == "Contains":
            return lambda x: get_value(x) in getattr(x, self.field)
        # Was `assert False`, which is stripped under `python -O` and would have
        # silently returned None; raise an explicit error instead.
        raise ValueError(f"unknown constraint kind: {self.kind!r}")
def parse_constraint(polar, constraint):
    """Parse one constraint from its core JSON representation into a Constraint."""
    kind = constraint["kind"]
    assert kind in ["Eq", "In", "Contains"]
    field = constraint["field"]
    value = constraint["value"]

    # The value dict has exactly one key tagging its variant.
    value_kind = next(iter(value))
    value = value[value_kind]

    if value_kind == "Term":
        # Concrete Polar term: convert to the corresponding Python value.
        value = polar.host.to_python(value)
    elif value_kind == "Ref":
        child_field = value["field"]
        result_id = value["result_id"]
        value = Ref(field=child_field, result_id=result_id)
    elif value_kind == "Field":
        # NOTE(review): `value` here is the raw "Field" payload from the core;
        # presumably it is the field-name string (cf. the dict payload handled
        # in the "Ref" branch) — confirm against the core's serialization.
        value = Field(field=value)
    else:
        assert False, "Unknown value kind"

    return Constraint(kind=kind, field=field, value=value)
def ground_constraints(polar, results, filter_plan, constraints):
    """Replace Ref placeholder values in ``constraints`` with previously fetched results.

    Mutates each constraint in place; non-Ref values are left untouched.
    """
    for grounded in constraints:
        value = grounded.value
        if not isinstance(value, Ref):
            continue
        resolved = results[value.result_id]
        if value.field is not None:
            # Project the referenced attribute off each fetched object.
            resolved = [getattr(item, value.field) for item in resolved]
        grounded.value = resolved
# @NOTE(Steve): This is just operating on the json. Could still have a step to parse this into a python data structure
# first. Probably more important later when make implementing a resolver nice.
def builtin_filter_plan_resolver(polar, filter_plan):
    """Execute a filter plan by calling user-registered fetchers; return matching objects.

    Each result set's requests are resolved in the plan's dependency order so
    that Ref constraints can be grounded with the results of earlier requests.
    """
    result_sets = filter_plan["result_sets"]
    results = []
    for rs in result_sets:
        # Per-result-set cache of fetched objects, keyed by request index.
        set_results = {}

        requests = rs["requests"]
        resolve_order = rs["resolve_order"]
        result_id = rs["result_id"]

        for i in resolve_order:
            req = requests[i]
            class_name = req["class_tag"]
            constraints = req["constraints"]

            constraints = [parse_constraint(polar, c) for c in constraints]

            # Substitute in results from previous requests.
            ground_constraints(polar, set_results, filter_plan, constraints)
            # Fetchers are user-registered callables keyed by Polar class tag.
            fetcher = polar.host.fetchers[class_name]
            set_results[i] = fetcher(constraints)

        # Only the designated result request contributes to the final answer.
        results.extend(set_results[result_id])

    # NOTE(steve): Not the best way to remove duplicates.
    # (Quadratic, but order-preserving and works for unhashable objects.)
    return [i for n, i in enumerate(results) if i not in results[:n]]
def filter_data(polar, filter_plan, filter_plan_resolver=None):
    """Resolve ``filter_plan``, using the builtin resolver unless a custom one is given."""
    resolver = (
        builtin_filter_plan_resolver
        if filter_plan_resolver is None
        else filter_plan_resolver
    )
    return resolver(polar, filter_plan)
|
davidpelaez/oso | languages/python/django-oso/django_oso/__init__.py | <reponame>davidpelaez/oso
import oso
from .oso import Oso
__version__ = "0.11.0"
default_app_config = "django_oso.apps.DjangoOsoConfig"
|
davidpelaez/oso | languages/python/oso/oso/exceptions.py | <filename>languages/python/oso/oso/exceptions.py
class AuthorizationError(Exception):
    """Base class for all oso authorization failures."""
    # A docstring is a sufficient class body; the redundant `pass` was removed.
class NotFoundError(AuthorizationError):
    # NOTE(review): presumably raised when the resource should appear not to
    # exist to the actor (404-style rather than 403) — confirm against the
    # enforcer that raises it.
    pass
class ForbiddenError(AuthorizationError):
    # NOTE(review): presumably raised when the actor may know the resource
    # exists but the action is denied (403-style) — confirm against the
    # enforcer that raises it.
    pass
|
davidpelaez/oso | languages/python/sqlalchemy-oso/sqlalchemy_oso/auth.py | from oso import Oso
from polar import Variable
from polar.exceptions import PolarRuntimeError
from polar.partial import TypeConstraint
from sqlalchemy.orm.query import Query
from sqlalchemy.orm.session import Session
from sqlalchemy import inspect
from sqlalchemy.sql import expression as sql
from sqlalchemy_oso.partial import partial_to_filter
from sqlalchemy_oso import roles
from sqlalchemy_oso.compat import iterate_model_classes
from functools import reduce
def polar_model_name(model) -> str:
    """Return the name under which a SQLAlchemy model class is known to Polar."""
    model_name: str = model.__name__
    return model_name
def null_query(session: Session, model) -> Query:
    """Return an intentionally empty query."""
    # TODO (dhatch): Make this not hit the database.
    # `sql.false()` guarantees zero rows regardless of table contents.
    return session.query(model).filter(sql.false())
def register_models(oso: Oso, base_or_registry):
    """Register all models in registry (SQLAlchemy 1.4) or declarative base
    class (1.3 and 1.4) ``base_or_registry`` with Oso as classes."""
    # Each mapped class becomes referenceable by name in Polar policies.
    for model in iterate_model_classes(base_or_registry):
        oso.register_class(model)
def authorize_model(oso: Oso, actor, action, session: Session, model):
    """Return SQLAlchemy expression that applies the policy to ``model``.

    Executing this query will return only authorized objects. If the request is
    not authorized, a query that always contains no result will be returned.

    :param oso: The oso class to use for evaluating the policy.
    :param actor: The actor to authorize.
    :param action: The action to authorize.

    :param session: The SQLAlchemy session.
    :param model: The model to authorize, must be a SQLAlchemy model or alias.
    """

    def get_field_type(model, field):
        # Resolve the SQLAlchemy model class behind a relationship attribute so
        # Polar can type-check dot lookups on partially-bound variables.
        try:
            field = getattr(model, field)
        except AttributeError:
            raise PolarRuntimeError(f"Cannot get property {field} on {model}.")

        try:
            return field.entity.class_
        except AttributeError as e:
            raise PolarRuntimeError(
                f"Cannot determine type of {field} on {model}."
            ) from e

    # NOTE(review): this mutates shared state on `oso.host` on every call —
    # presumably intended to install the lookup for this query; confirm it is
    # safe across concurrent authorize_model calls.
    oso.host.get_field = get_field_type

    try:
        mapped_class = inspect(model, raiseerr=True).class_
    except AttributeError:
        raise TypeError(f"Expected a model; received: {model}")

    # Query the policy with `resource` as an unbound variable constrained to
    # instances of the model's Polar class.
    resource = Variable("resource")
    constraint = TypeConstraint(resource, polar_model_name(mapped_class))
    results = oso.query_rule(
        "allow",
        actor,
        action,
        resource,
        bindings={resource: constraint},
        accept_expression=True,
    )

    combined_filter = None
    has_result = False
    for result in results:
        has_result = True
        resource_partial = result["bindings"]["resource"]

        if isinstance(resource_partial, model):
            # Fully-bound result: match that concrete row by its primary key(s).
            def f(pk):
                return getattr(model, pk) == getattr(resource_partial, pk)

            filters = [f(pk.name) for pk in inspect(model).primary_key]
            filter = reduce(lambda a, b: a & b, filters)
        else:
            # Partial result: translate the residual Polar expression into SQL.
            filter, role_method = partial_to_filter(
                resource_partial, session, model, get_model=oso.get_class
            )
            if role_method is not None:
                # AND in the filter derived from the policy's role rules.
                roles_filter = roles._generate_query_filter(oso, role_method, model)
                filter &= roles_filter

        # OR together the filters from each alternative query result.
        if combined_filter is None:
            combined_filter = filter
        else:
            combined_filter = combined_filter | filter

    if not has_result:
        # The policy denied everything: return a filter matching no rows.
        return sql.false()

    return combined_filter
|
davidpelaez/oso | languages/python/oso/oso/__init__.py | <gh_stars>0
from polar import polar_class, Variable, Predicate
from .oso import Oso, Policy
from .exceptions import AuthorizationError, ForbiddenError, NotFoundError
from polar.exceptions import OsoError
from .enforcer import Enforcer
|
davidpelaez/oso | languages/python/sqlalchemy-oso/sqlalchemy_oso/partial.py | """Translate Oso Expressions into SQLAlchemy Expressions.
This module translates between Oso Expressions that are returned from queries
with partial variables into SQLAlchemy constraints.
The entrypoint is :py:func:`partial_to_filter`. The translation is written as a
recursive mapping operation. We do a traversal of the expression tree, using the
``translate_*`` functions to map each node of the Oso expression tree to a
SQLAlchemy expression.
Translation functions
=====================
These functions accept as input:
- ``expression``: an :py:class:`polar.expression.Expression` instance returned
by the query. The expression must be translated by
:py:func:`sqlalchemy_oso.preprocess.preprocess`.
- ``session``: The :py:class:`sqlalchemy.orm.Session` session object to
translate for.
- ``model``: The model class that this expression is constraining.
- ``get_model``: A callable that returns a SQLAlchemy model type corresponding
with a Polar type tag.
Expression structure
--------------------
The translation functions operate over expressions that constrain a single
variable, named ``_this`` which corresponds to the ``model`` parameter.
Constraints on a to-many relationship (expressed in Polar like ``tag in
post.tags and tag.id = 1``) are represented as a subexpression. The Polar::
allow(_, _, post) if
post.id = 1 and tag in post.tags and
tag.id = 2 and
tag.is_public;
Would be represented as the expression::
_this.id = 1 and (_this.id = 2 and _this.is_public= true) in post.tags
- :py:func:`translate_expr`: Translate an expression.
- :py:func:`translate_and`: Translate an and operation
- :py:func:`translate_compare`: Translate a comparison operation (=, <, etc.)
- :py:func:`translate_in`: Translate an in operation.
- :py:func:`translate_isa`: Translate an isa.
- :py:func:`translate_dot`: Translate a dot operation.
Emit functions
==============
The functions :py:func:`emit_compare`, :py:func:`emit_contains`, and
:py:func:`emit_subexpression` are used by :py:func:`translate_dot` to aid in
producing SQLAlchemy expressions over dot operations. More information on this
in the :py:func:`translate_dot` documentation string.
Examples in module documentation
================================
Throughout the documentation of this module, we will refer to examples
corresponding to the models declared in ``tests/models.py``.
When recursive translation is applied to an operation, the notation ``t(?)`` is
used.
"""
import functools
from typing import Any, Callable, Tuple
from sqlalchemy.orm.session import Session
from sqlalchemy import inspect
from sqlalchemy.orm import RelationshipProperty
from sqlalchemy.sql import expression as sql
from sqlalchemy.sql.elements import True_
from polar.partial import dot_path
from polar.expression import Expression
from polar.variable import Variable
from polar.exceptions import UnsupportedError, OsoError
from polar.predicate import Predicate
from sqlalchemy_oso.preprocess import preprocess
# TODO (dhatch) Better types here, first any is model, second any is a sqlalchemy expr.
EmitFunction = Callable[[Session, Any], Any]

# Map from Polar comparison operator names to functions producing the
# equivalent SQLAlchemy binary expression over (property, value).
COMPARISONS = {
    "Unify": lambda p, v: p == v,
    "Eq": lambda p, v: p == v,
    "Neq": lambda p, v: p != v,
    "Geq": lambda p, v: p >= v,
    "Gt": lambda p, v: p > v,
    "Leq": lambda p, v: p <= v,
    "Lt": lambda p, v: p < v,
}
def flip_op(operator):
    """Return the mirror-image comparison operator.

    Used when rewriting ``value OP _this.field`` as ``_this.field OP' value``:
    equality-like operators are symmetric, ordering operators flip direction.

    :param operator: one of the keys of ``COMPARISONS``.
    :raises KeyError: if ``operator`` is not a known comparison.
    """
    flips = {
        "Eq": "Eq",
        "Unify": "Unify",
        "Neq": "Neq",
        "Geq": "Leq",
        "Gt": "Lt",
        # BUG FIX: was "Gtq" — a typo for "Geq". "Gtq" is not a valid operator
        # name (not in COMPARISONS), so flipping "Leq" produced an unusable op.
        "Leq": "Geq",
        "Lt": "Gt",
    }
    return flips[operator]
def and_filter(current, new):
    """AND ``new`` onto ``current``, collapsing a leading ``sql.true()``.

    Starting accumulators are ``sql.true()``; dropping it keeps the generated
    SQL free of redundant ``true AND ...`` clauses.
    """
    return new if isinstance(current, True_) else current & new
def partial_to_filter(expression: Expression, session: Session, model, get_model):
    """Convert constraints in ``partial`` to a filter over ``model`` that should be applied to query.

    :returns: a ``(sqlalchemy_filter, roles_method)`` tuple, where
        ``roles_method`` is the Predicate extracted by
        :py:func:`check_for_roles_method` (or ``None``).
    """
    # Normalize the raw expression tree before translating it.
    expression = preprocess(expression)
    # Role-method calls cannot be translated to SQL directly; strip them out
    # (mutates `expression`) and hand them back to the caller.
    roles_method = check_for_roles_method(expression)
    return (translate_expr(expression, session, model, get_model), roles_method)
def check_for_roles_method(expression: Expression):
    """Find, remove, and return a roles-method call from a top-level AND.

    Scans ``expression.args`` for a unification with a call to ``role_allows``
    or ``actor_can_assume_role`` (the call may appear on either side of the
    operator), removes every such constraint from ``expression`` *in place*,
    and returns the extracted :py:class:`Predicate`, or ``None`` if absent.

    :raises OsoError: if the call is negated, uses a non-unify operator, or
        appears more than once in the query.
    """

    def _is_roles_method(op, left, right):
        # A roles-method call surfaces as `true = _this.role_allows(...)`:
        # a Dot expression whose second argument is a Predicate.
        is_roles_method = (
            isinstance(right, Expression)
            and right.operator == "Dot"
            and type(right.args[1]) == Predicate
            and (
                right.args[1].name == "role_allows"
                or right.args[1].name == "actor_can_assume_role"
            )
        )
        method = None
        if is_roles_method:
            assert left is True
            if op == "Neq":
                raise OsoError("Roles don't currently work with the `not` operator.")
            elif op != "Unify":
                raise OsoError(f"Roles don't work with the `{op}` operator.")
            method = right.args[1]
        return is_roles_method, method

    assert expression.operator == "And"
    methods = []
    to_remove = []
    for expr in expression.args:
        # Try with method call on right
        is_roles, method = _is_roles_method(expr.operator, expr.args[0], expr.args[1])
        if is_roles:
            methods.append(method)
            to_remove.append(expr)
        # Try with method call on left
        is_roles, method = _is_roles_method(expr.operator, expr.args[1], expr.args[0])
        if is_roles:
            to_remove.append(expr)
            methods.append(method)
    # Strip the role constraints so the remaining tree is pure-SQL-translatable.
    for expr in to_remove:
        expression.args.remove(expr)
    if len(methods) > 1:
        raise OsoError("Cannot call multiple role methods within the same query.")
    try:
        return methods[0]
    except IndexError:
        return None
def translate_expr(expression: Expression, session: Session, model, get_model):
    """Translate an expression into a SQLAlchemy expression.

    Accepts any type of expression. Entrypoint to the translation functions."""
    assert isinstance(expression, Expression)

    op = expression.operator
    if op in COMPARISONS:
        handler = translate_compare
    elif op == "Isa":
        handler = translate_isa
    elif op == "In":
        handler = translate_in
    elif op == "And":
        handler = translate_and
    else:
        raise UnsupportedError(f"Unsupported {expression}")
    return handler(expression, session, model, get_model)
def translate_and(expression: Expression, session: Session, model, get_model):
    """Translate a Polar AND into a SQLAlchemy AND.

    Empty and is true: () => sql.true()
    Single argument: op1 => t(op1)
    > 1 argument: op1 and op2 and op3 => t(op1) & t(op2) & t(op3)
    """
    assert expression.operator == "And"
    combined = sql.true()
    for operand in expression.args:
        combined = and_filter(
            combined, translate_expr(operand, session, model, get_model)
        )
    return combined
def translate_isa(expression: Expression, session: Session, model, get_model):
    """Translate an Isa operation. (``matches`` keyword)

    Check that the field on the left hand side matches the type on the right.
    ``isa`` operations with fields are not supported and throw.

    If the type matches, ``sql.true()`` is returned. If the type doesn't match,
    ``sql.false()`` is returned.

    So for example::

        allow(_, _, x) if x matches Tag;

    would translate to sql.false() (no rows match) when ``x`` is of type Post,
    but would translate to ``sql.true()`` when ``x`` is of type Tag.

    _this matches Type => sql.true() if Type == model else sql.false()
    _this.bar matches Type => sql.true() if typeof(model, "bar") == Type

    Where typeof gives the type of the "bar" property of model.
    """
    assert expression.operator == "Isa"
    left, right = expression.args
    left_path = dot_path(left)
    # # WOWHACK(gj): this fixes the data filtering test at the bottom of
    # # tests/test_roles3.py
    # if not left_path:
    #     left_cls = inspect(left, raiseerr=True).class_
    #     assert not right.fields, "Unexpected fields in isa expression"
    #     constraint_type = get_model(right.tag)
    #     return sql.true() if issubclass(left_cls, constraint_type) else sql.false()
    assert left_path[0] == Variable("_this")
    left_path = left_path[1:]  # Drop _this.
    if left_path:
        # Walk relationship fields to find the model the last hop refers to.
        for field_name in left_path:
            _, model, __ = get_relationship(model, field_name)

    assert not right.fields, "Unexpected fields in isa expression"
    constraint_type = get_model(right.tag)
    model_type = inspect(model, raiseerr=True).class_
    # Subclasses also satisfy the type constraint.
    return sql.true() if issubclass(model_type, constraint_type) else sql.false()
def translate_compare(expression: Expression, session: Session, model, get_model):
    """Translate a binary comparison operation.

    Operators are listed in ``COMPARISONS``.

    Either the left or right argument may contain a path. Paths for both
    arguments (i.e. post.name = post.body) are not supported currently.

    Also handle unification of _this with an instance of the same type as _this. E.g., _this = ?
    where ? is an instance of the same type as _this.

    _this.path.(path1)+.tail OP val => Model.path.(path1)+.has(Target.tail OP val)
    val OP _this.path.(path1)+.tail => Model.path.(path1)+.has(Target.tail OP mirror(OP) val)
    _this = val => model.pk1 = val.pk1 and model.pk2 = val.pk2

    Where Target is the type that the dot path refers to and mirror flips an
    operation.
    """
    (left, right) = expression.args
    left_path = dot_path(left)
    right_path = dot_path(right)

    # Dot operation is on the left hand side
    if left_path[1:]:
        assert left_path[0] == Variable("_this")
        assert not right_path
        path, field_name = left_path[1:-1], left_path[-1]
        # Walk the relationship path; emit the comparison at the last hop.
        return translate_dot(
            path,
            session,
            model,
            functools.partial(emit_compare, field_name, right, expression.operator),
        )
    # Dot operation is on right
    elif right_path and right_path[0] == "_this":
        # Normalize by moving the dot path to the left, flipping the operator.
        return translate_compare(
            Expression(flip_op(expression.operator), [right, left]),
            session,
            model,
            get_model,
        )
    # this = other no dot operation, throws if it's not of the form _this = other other same type as
    # this
    else:
        assert left == Variable("_this")
        if not isinstance(right, model):
            # An instance of a different type can never match this model.
            return sql.false()

        if expression.operator not in ("Eq", "Unify"):
            raise UnsupportedError(
                f"Unsupported comparison: {expression}. Models can only be compared"
                " with `=` or `==`"
            )

        # Identify the specific instance by comparing every primary-key column.
        primary_keys = [pk.name for pk in inspect(model).primary_key]
        pk_filter = sql.true()
        for key in primary_keys:
            pk_filter = and_filter(
                pk_filter, getattr(model, key) == getattr(right, key)
            )
        return pk_filter
def translate_in(expression, session, model, get_model):
    """Translate the in operator.

    Relationship contains at least one value that matches expr.

    (expr) in _this.path.(path1)+ => Model.path.(path1)+.any(t(expr))

    relationship at least 1 value with no constraints:

    () in _this.path.(path1)+ => Model.path.(path1)+.any(sql.true())

    relationship contains val

    val in _this.path.(path1)+ => Model.path.(path1)+.contains(val)
    """
    assert expression.operator == "In"
    left = expression.args[0]
    right = expression.args[1]

    # IN means at least something must be contained in the property.

    # There are two possible types of in operations. In both, the right hand side
    # should be a dot op.

    path = dot_path(right)
    assert path[0] == "_this"
    path = path[1:]  # Drop _this.
    assert path

    # Partial In: LHS is an expression
    if isinstance(left, Expression):
        # Translate the subexpression over the related model inside ``.any()``.
        return translate_dot(
            path,
            session,
            model,
            functools.partial(emit_subexpression, left, get_model),
        )
    elif isinstance(left, Variable):
        # A variable with no additional constraints
        return translate_dot(
            path,
            session,
            model,
            functools.partial(emit_subexpression, Expression("And", []), get_model),
        )
    else:
        # Contains: LHS is not an expression.
        # TODO (dhatch) Missing check, left type must match type of the target?
        path, field_name = path[:-1], path[-1]
        return translate_dot(
            path, session, model, functools.partial(emit_contains, field_name, left)
        )
def translate_dot(path: Tuple[str, ...], session: Session, model, func: EmitFunction):
    """Translate an operation over a relationship path.

    Recursively walks relationship properties on ``model`` following ``path``;
    at the end of the path, calls ``func(session, model)`` to emit the leaf
    expression. Each hop wraps the inner expression in ``has(...)`` (to-one)
    or ``any(...)`` (to-many), producing the documented
    ``EXISTS (SELECT 1 FROM related_table WHERE ...)`` SQL shape.
    """
    if not path:
        return func(session, model)

    prop, target_model, is_multi_valued = get_relationship(model, path[0])
    inner = translate_dot(path[1:], session, target_model, func)
    return prop.any(inner) if is_multi_valued else prop.has(inner)
def get_relationship(model, field_name: str):
    """Get the property object for field on model. field must be a relationship field.

    :returns: (property, model, is_multi_valued)
    """
    property = getattr(model, field_name)
    assert isinstance(property.property, RelationshipProperty)
    relationship = property.property
    # The mapped class on the far side of the relationship.
    model = property.entity.class_
    # ``uselist`` is True for to-many relationships.
    return (property, model, relationship.uselist)
def emit_compare(field_name, value, operator, session, model):
    """Emit a comparison operation comparing the value of ``field_name`` on ``model`` to ``value``."""
    assert not isinstance(value, Variable), "value is a variable"
    property = getattr(model, field_name)
    # Look up the SQLAlchemy comparator matching the Polar operator name.
    return COMPARISONS[operator](property, value)
def emit_subexpression(sub_expression: Expression, get_model, session: Session, model):
    """Emit a sub-expression on ``model``."""
    # Recurse into the generic translator with the related model as context.
    return translate_expr(sub_expression, session, model, get_model)
def emit_contains(field_name, value, session, model):
    """Emit a contains operation, checking that multi-valued relationship field ``field_name`` contains ``value``."""
    # TODO (dhatch): Could this be valid for fields that are not relationship fields?
    property, model, is_multi_valued = get_relationship(model, field_name)
    assert is_multi_valued
    return property.contains(value)
|
davidpelaez/oso | languages/python/oso/tests/polar_roles_sqlalchemy_helpers.py | <reponame>davidpelaez/oso
# Roles 2
from typing import Any, List
from oso import OsoError
from sqlalchemy import inspect, UniqueConstraint
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import class_mapper, relationship, validates, synonym
from sqlalchemy.orm.exc import UnmappedClassError, UnmappedInstanceError
from sqlalchemy.orm.util import object_mapper
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.types import Integer, String
# Global list to keep track of role classes as they are created, used to
# generate RBAC base policy in Polar.
# Each entry is a dict with keys "user_model" and "resource_model" (see
# resource_role_class below).
ROLE_CLASSES: List[Any] = []
def assign_role(user, resource, role_name, session, reassign=True):
    """Assign ``user`` the role ``role_name`` on ``resource``.

    If the user already holds a role on this resource, it is renamed in place
    when ``reassign`` is True; otherwise an ``OsoError`` is raised. At most
    one role per (user, resource) pair exists (enforced by a unique
    constraint on the role table).

    NOTE(review): the reassignment branch mutates the existing row but does
    not flush/commit, while the new-assignment branch commits — presumably
    the caller is expected to commit after reassigning; confirm.
    """
    assert session is not None
    pk_name, _ = get_pk(type(resource))
    existing_roles = get_user_roles(
        session, user, type(resource), getattr(resource, pk_name)
    )
    # Unique constraint guarantees at most one role row per (user, resource).
    assert len(existing_roles) < 2
    if len(existing_roles) == 1:
        if reassign:
            existing_roles[0].name = role_name
        else:
            raise OsoError(
                f"""User {user} already has a role for this resource.
                    To reassign, call with `reassign=True`."""
            )
    else:
        return add_user_role(session, user, resource, role_name, commit=True)
def remove_role(user, resource, role_name, session):
    """Remove ``user``'s ``role_name`` role on ``resource``.

    :returns: True if a matching role was deleted, False if none existed.
    :raises AssertionError: if the user's existing role has a different name.
    """
    pk_name, _ = get_pk(type(resource))
    existing_roles = get_user_roles(
        session, user, type(resource), getattr(resource, pk_name)
    )
    # At most one role row per (user, resource) pair.
    assert len(existing_roles) < 2
    if len(existing_roles) == 1:
        assert existing_roles[0].name == role_name
        session.delete(existing_roles[0])
        # Flush so the deletion is visible within this session immediately.
        session.flush()
        return True
    else:
        return False
def resource_role_class(user_model, resource_model, role_choices):
    """Create a resource-specific role Mixin
    for SQLAlchemy models. The role mixin is an
    `Association Object <https://docs.sqlalchemy.org/en/13/orm/basic_relationships.html#association-object>`_
    between the ``user_model`` and the ``resource_model``.

    :param user_model: The SQLAlchemy model representing users that the \
    resource-specific roles can be assigned to. The generated Role mixin will \
    have a many-to-one (Foreign Key) relationship with this user model. \
    A many-to-many relationship to ``resource_model`` is added to ``user_model``; \
    the relationship is named following the convention: ``resource_model.__name__.lower() + "s"``.

    :param resource_model: The SQLAlchemy model representing resources that \
    the generated Role mixin will be scoped to. The Role mixin will \
    have a many-to-one (ForeignKey) relationship with this resource model. \
    A many-to-many relationship to ``user_model`` is added to ``resource_model``; \
    the relationship is named ``users``. \
    NOTE: only one role model can be created per resource model. Attempting to call \
    ``resource_role_class()`` more than once for the same resource model will result in \
    a ``ValueError``.

    :param role_choices: An order-independent list of the built-in roles for this resource-specific role type.
    :type role_choices: List[str]

    :return: the ResourceRole mixin, which must then be mixed into a SQLAlchemy model for the role. E.g.,

    .. code-block:: python

        OrganizationRoleMixin = oso_roles.resource_role_class(
            User, Organization, ["OWNER", "MEMBER", "BILLING"]
        )

        class OrganizationRole(Base, OrganizationRoleMixin):
            pass
    """
    global ROLE_CLASSES
    # Only one role class may be generated per resource model.
    if resource_model in [role.get("resource_model") for role in ROLE_CLASSES]:
        raise ValueError(
            f"Cannot create two Role classes for the same `resource_model`: {resource_model.__name__}"
        )

    ROLE_CLASSES.append(
        {
            "user_model": user_model,
            "resource_model": resource_model,
        }
    )

    resource_name = _get_resource_name_lower(resource_model)
    tablename = f"{resource_name}_roles"
    # Enforce at most one role row per (resource, user) pair.
    unique_constraint = UniqueConstraint(f"{resource_name}_id", "user_id")

    class ResourceRoleMixin:
        # Valid role names for this resource type.
        choices = role_choices

        __tablename__ = tablename
        id = Column(Integer, primary_key=True)
        name = Column(String())
        __table_args__ = (unique_constraint,)

        @validates("name")
        def validate_name(self, _, name):
            # Reject role names outside the declared choices.
            if name not in self.choices:
                raise ValueError(
                    f"{name} Is not a valid choice for {self.__class__.__name__}"
                )
            return name

        @declared_attr
        def user_id(cls):
            name, type = get_pk(user_model)
            table_name = user_model.__tablename__
            return Column(type, ForeignKey(f"{table_name}.{name}"))

        @declared_attr
        def user(cls):
            return relationship(user_model.__name__, backref=tablename)

        def __repr__(self):
            return ""

    # These attributes are attached via setattr below because the column and
    # relationship names are derived from the resource name at runtime
    # (e.g. ``organization_id``, ``organization``).
    @declared_attr
    def named_resource_id(cls):
        name, type = get_pk(resource_model)
        table_name = resource_model.__tablename__
        return Column(type, ForeignKey(f"{table_name}.{name}"))

    @declared_attr
    def named_resource(cls):
        return relationship(resource_model.__name__, backref="roles")

    @declared_attr
    def resource(cls):
        # Alias so role.resource works regardless of the resource's real name.
        return synonym(resource_name)

    setattr(ResourceRoleMixin, f"{resource_name}_id", named_resource_id)
    setattr(ResourceRoleMixin, resource_name, named_resource)
    setattr(ResourceRoleMixin, "resource", resource)

    # Add the relationship between the user_model and the resource_model
    resources = relationship(
        resource_model.__name__,
        secondary=tablename,
        viewonly=True,
        backref="users",
        sync_backref=False,
    )
    # @Q: Do we try to pluralize this name correctly?
    setattr(user_model, resource_name + "s", resources)

    return ResourceRoleMixin
# ROLE HELPERS
def _get_resource_name_lower(resource_model):
return resource_model.__name__.lower()
def _check_valid_instance(*args, raise_error=True):
    """Verify every argument is a mapped SQLAlchemy object instance.

    Raises ``TypeError`` on the first unmapped argument when ``raise_error``
    is True; otherwise silently ignores unmapped arguments.
    """
    for candidate in args:
        mapped = True
        try:
            object_mapper(candidate)
        except UnmappedInstanceError:
            mapped = False
        if raise_error and not mapped:
            raise TypeError(f"Expected a mapped object instance; received: {candidate}")
def _check_valid_model(*args, raise_error=True):
    """Verify every argument is a SQLAlchemy mapped class.

    Raises ``TypeError`` on the first unmapped class when ``raise_error`` is
    True; otherwise silently ignores unmapped arguments.
    """
    for candidate in args:
        mapped = True
        try:
            class_mapper(candidate)
        except UnmappedClassError:
            mapped = False
        if raise_error and not mapped:
            raise TypeError(f"Expected a model (mapped class); received: {candidate}")
def get_pk(model):
    """Return ``(name, type)`` of ``model``'s single primary-key column.

    :raises AssertionError: if the model has a composite primary key.
    """
    pks = inspect(model).primary_key
    assert (
        len(pks) == 1
    ), "sqlalchemy.roles2 only supports resources with 1 primary key field."
    # Idiom fix: avoid shadowing the builtin `type` with a local variable.
    pk_column = pks[0]
    return (pk_column.name, pk_column.type)
def get_role_model_for_resource_model(resource_model):
    """Return the role class generated for ``resource_model``.

    Relies on the ``roles`` backref installed by :py:func:`resource_role_class`.
    """
    _check_valid_model(resource_model)
    return (
        inspect(resource_model, raiseerr=True)
        .relationships.get("roles")
        .argument.class_
    )
def get_user_roles(session, user, resource_model, resource_id=None):
    """Get a user's roles for all resources of a single resource type.

    E.g., get all of a user's repositories and their role for each
    repository. Or optionally, all roles scoped to a specific resource_id.

    :param session: SQLAlchemy session
    :type session: sqlalchemy.orm.session.Session
    :param user: user record (python object) of the SQLAlchemy user model \
    associated with roles scoped to the supplied ``resource_model``
    :param resource_model: the resource model class whose roles to query
    :param resource_id: (optional) the resource id for which to get the user's roles.
    :return: list of the user's roles, ordered by resource pk then role name
    """
    _check_valid_instance(user)
    _check_valid_model(resource_model)
    role_model = get_role_model_for_resource_model(resource_model)
    resource_pk, _ = get_pk(resource_model)

    roles = (
        session.query(role_model)
        .join(resource_model)
        .filter(role_model.user == user)
        .order_by(getattr(resource_model, resource_pk))
        .order_by(role_model.name)
    )
    # BUG FIX: explicit None check so falsy-but-valid ids (e.g. 0 or "") still
    # filter; the previous truthiness test silently ignored them.
    if resource_id is not None:
        roles = roles.filter(getattr(resource_model, resource_pk) == resource_id)
    return roles.all()
# - Assign a user to an organization with a role
def add_user_role(session, user, resource, role_name, commit=False):
    """Add a user to a role for a specific resource.

    :param session: SQLAlchemy session
    :type session: sqlalchemy.orm.session.Session
    :param user: user record (python object) to assign the role to
    :param resource: the resource record the role is scoped to
    :param role_name: the name of the role to assign to the user
    :type role_name: str
    :param commit: flag to specify whether or not session should be committed after adding role; defaults to ``False``
    :type commit: boolean
    :raises Exception: if committing fails because the assignment already exists.
    """
    _check_valid_instance(user, resource)
    # get models
    resource_model = type(resource)
    role_model = get_role_model_for_resource_model(resource_model)
    # create and save role
    resource_name = _get_resource_name_lower(resource_model)
    kwargs = {"name": role_name, resource_name: resource, "user": user}
    new_role = role_model(**kwargs)
    session.add(new_role)
    if commit:
        try:
            session.commit()
        except IntegrityError as e:
            session.rollback()
            # BUG FIX: the old message read "... either because the assignment
            # already exists." — "either" with no alternative. Also chain the
            # original IntegrityError for debuggability.
            raise Exception(
                f"Cannot assign user {user} to role {role_name} for "
                f"{resource_name} because the assignment already exists."
            ) from e
|
davidpelaez/oso | languages/python/sqlalchemy-oso/tests/test_roles.py | <filename>languages/python/sqlalchemy-oso/tests/test_roles.py<gh_stars>0
# Roles 2 tests
from polar.exceptions import ParserError
import pytest
import random
import string
import os
import timeit
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool
from sqlalchemy.types import Integer, String
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.orm import relationship, sessionmaker, close_all_sessions
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_oso import authorized_sessionmaker, SQLAlchemyOso
from oso import OsoError
# Postgres connection settings come from the environment; the postgres
# backend is only exercised when POSTGRES_HOST is set.
pg_host = os.environ.get("POSTGRES_HOST")
pg_port = os.environ.get("POSTGRES_PORT")
pg_user = os.environ.get("POSTGRES_USER")
pg_pass = os.environ.get("POSTGRES_PASSWORD")

# Backends the `engine` fixture is parametrized over.
databases = ["sqlite"]
if pg_host is not None:
    databases.append("postgres")
@pytest.fixture(params=databases)
def engine(request):
    """Yield a SQLAlchemy engine for each configured backend.

    For postgres, a throwaway database with a random name is created before
    the test and dropped afterwards; sqlite runs in memory.
    """
    if request.param == "postgres":
        import psycopg2

        # Create a new database to run the tests.
        id = "".join(random.choice(string.ascii_lowercase) for i in range(10))
        name = f"roles_test_{id}"

        connect_string = "postgresql://"
        kwargs = {"host": pg_host}
        if pg_user is not None:
            kwargs["user"] = pg_user
            connect_string += pg_user
        if pg_pass is not None:
            kwargs["password"] = pg_pass
            # BUG FIX: the password (not the username again) belongs after the
            # colon in a postgresql:// URI; previously this appended pg_user.
            connect_string += ":" + pg_pass
        connect_string += "@" + pg_host
        if pg_port is not None:
            kwargs["port"] = pg_port
            connect_string += ":" + pg_port

        conn = psycopg2.connect(**kwargs)
        conn.autocommit = True
        cursor = conn.cursor()
        cursor.execute(f"create database {name}")
        conn.close()

        # Run tests.
        engine = create_engine(f"{connect_string}/{name}", poolclass=NullPool)
        yield engine
        engine.dispose()
        close_all_sessions()

        # Destroy database.
        conn = psycopg2.connect(**kwargs)
        conn.autocommit = True
        cursor = conn.cursor()
        cursor.execute(f"drop database if exists {name}")
        conn.close()
    elif request.param == "sqlite":
        engine = create_engine("sqlite:///:memory:")
        yield engine
@pytest.fixture
def Base():
    """Fresh declarative base per test so table metadata doesn't leak between tests."""
    return declarative_base(name="RoleBase")
@pytest.fixture
def User(Base):
    """User model: integer pk plus a name column."""

    class User(Base):
        __tablename__ = "users"

        id = Column(Integer, primary_key=True)
        name = Column(String())

    return User
@pytest.fixture
def Organization(Base):
    """Organization model: string pk only."""

    class Organization(Base):
        __tablename__ = "organizations"

        id = Column(String(), primary_key=True)

    return Organization
@pytest.fixture
def Repository(Base):
    """Repository model: string pk, many-to-one relationship to Organization."""

    class Repository(Base):
        __tablename__ = "repositories"

        id = Column(String(), primary_key=True)
        org_id = Column(String(), ForeignKey("organizations.id"), index=True)
        org = relationship("Organization")

    return Repository
@pytest.fixture
def Issue(Base):
    """Issue model: string pk, many-to-one relationship to Repository."""

    class Issue(Base):
        __tablename__ = "issues"

        id = Column(String(), primary_key=True)
        repo_id = Column(String(), ForeignKey("repositories.id"))
        repo = relationship("Repository")

    return Issue
@pytest.fixture
def init_oso(engine, Base, User, Organization, Repository, Issue):
    """Set up SQLAlchemyOso with roles enabled; returns ``(oso, session)``."""
    # Initialize Oso and OsoRoles
    # ---------------------------
    Session = sessionmaker(bind=engine)
    session = Session()

    oso = SQLAlchemyOso(Base)
    oso.enable_roles(User, Session)

    # @NOTE: Right now this has to happen after enabling Oso Roles to get the
    # tables.
    Base.metadata.create_all(engine)

    return (oso, session)
@pytest.fixture
def auth_sessionmaker(init_oso, engine):
    """Authorized sessionmaker wired to mutable ``oso.actor`` /
    ``oso.checked_permissions`` attributes, which individual tests set."""
    oso, _ = init_oso
    oso.actor = None
    oso.checked_permissions = None

    AuthSessionmaker = authorized_sessionmaker(
        bind=engine,
        get_oso=lambda: oso,
        get_user=lambda: oso.actor,
        get_checked_permissions=lambda: oso.checked_permissions,
    )

    return AuthSessionmaker
@pytest.fixture
def sample_data(init_oso, Organization, Repository, User, Issue):
    """Commit two orgs, three repos, two issues, and three users.

    :return: dict keyed by short name for convenient lookup in tests.
    """
    _, session = init_oso
    # Create sample data
    # -------------------
    apple = Organization(id="apple")
    osohq = Organization(id="osohq")

    ios = Repository(id="ios", org=apple)
    oso_repo = Repository(id="oso", org=osohq)
    demo_repo = Repository(id="demo", org=osohq)

    ios_laggy = Issue(id="laggy", repo=ios)
    oso_bug = Issue(id="bug", repo=oso_repo)

    leina = User(name="leina")
    steve = User(name="steve")
    gabe = User(name="gabe")

    objs = {
        "leina": leina,
        "steve": steve,
        "gabe": gabe,
        "apple": apple,
        "osohq": osohq,
        "ios": ios,
        "oso_repo": oso_repo,
        "demo_repo": demo_repo,
        "ios_laggy": ios_laggy,
        "oso_bug": oso_bug,
    }
    for obj in objs.values():
        session.add(obj)

    session.commit()

    return objs
# TEST OsoRoles Initialization
# - Passing an auth session to OsoRoles raises an exception
# - Passing a session instead of Session factory to OsoRoles raises an exception
# - Passing a non-SQLAlchemy user model to OsoRoles raises an exception
# - Passing a bad declarative_base to OsoRoles raises an exception
def test_oso_roles_init(engine, auth_sessionmaker, Base, User):
    """Invalid arguments to SQLAlchemyOso / enable_roles raise the right errors."""
    oso = SQLAlchemyOso(Base)

    # - Passing an auth session to OsoRoles raises an exception
    with pytest.raises(OsoError):
        oso.enable_roles(
            user_model=User,
            session_maker=auth_sessionmaker,
        )

    Session = sessionmaker(bind=engine)
    session = Session()

    # - Passing a session instead of Session factory to OsoRoles raises an exception
    with pytest.raises(AttributeError):
        oso.enable_roles(User, session)

    class FakeClass:
        pass

    # - Passing a non-SQLAlchemy user model to OsoRoles raises an exception
    with pytest.raises(TypeError):
        oso.enable_roles(FakeClass, Session)

    # - Passing a bad declarative_base to OsoRoles raises an exception
    with pytest.raises(AttributeError):
        SQLAlchemyOso(FakeClass)

    # - Calling a roles-specific method before calling `enable_roles` fails
    with pytest.raises(OsoError):
        oso.roles.synchronize_data()
# TEST RESOURCE CONFIGURATION
# Role declaration:
# - [x] duplicate role name throws an error
# - [x] defining role with no permissions/implications throws an error
# Role-permission assignment:
# - [x] duplicate permission throws an error
# - [x] assigning permission that wasn't declared throws an error
# - [x] assigning permission with bad namespace throws an error
# - [x] assigning permission without valid relationship throws an error
# - [x] assigning permission on related role type errors if role exists for permission resource
# - [x] assigning the same permission to two roles where one implies the other throws an error
# Role implications:
# - [x] implying role that wasn't declared throws an error
# - [x] implying role without valid relationship throws an error
# Resource predicate:
# - [x] only define roles, no actions (role has actions/implications from different resource)
# - [x] only define actions, not roles
# - [x] using resource predicate with incorrect arity throws an error
# - [x] using resource predicate without defining actions/roles throws an error
# - [x] using resource predicate with field types throws an error
# - [x] duplicate resource name throws an error
# Role allows:
# - [ ] calling `roles.configure()` without calling `Roles.role_allows()` from policy issues warning
# TODO write test
# Relationships:
# - [x] multiple dot lookups throws an error for now
# - [x] nonexistent attribute lookup throws an error for now
# - [x] relationships without resource definition throws an error
def test_empty_role(init_oso):
    """Defining a role with no permissions or implications raises OsoError."""
    # defining role with no permissions/implications throws an error
    oso, session = init_oso
    policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = [
            "invite"
        ] and
        roles = {
            member: {}
        };
    """
    oso.load_str(policy)
    with pytest.raises(OsoError):
        oso.roles.synchronize_data()
    # Removed the dead trailing `pass` statement (no-op after the assertion).
def test_bad_namespace_perm(init_oso):
    """A role permission referencing another resource's namespace fails
    validation when no such resource/relationship is configured."""
    # - assigning permission with bad namespace throws an error
    oso, session = init_oso
    policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = [
            "invite"
        ] and
        roles = {
            member: {
                permissions: ["repo:pull"]
            }
        };
    """
    oso.load_str(policy)
    with pytest.raises(OsoError):
        oso.roles.synchronize_data()
# TODO
def test_resource_with_roles_no_actions(init_oso, sample_data):
    """A resource may declare only roles; its role can gain permissions
    from another resource through ``implies``."""
    # - only define roles, no actions (role has actions/implications from different resource)
    oso, session = init_oso
    policy = """
    resource(_type: Organization, "org", _, roles) if
        roles = {
            member: {
                implies: ["repo:reader"]
            }
        };

    resource(_type: Repository, "repo", actions, roles) if
        actions = [
            "push",
            "pull"
        ] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };

    parent_child(parent_org: Organization, repo: Repository) if
        repo.org = parent_org;

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.roles.synchronize_data()

    leina = sample_data["leina"]
    steve = sample_data["steve"]
    osohq = sample_data["osohq"]
    oso_repo = sample_data["oso_repo"]

    oso.roles.assign_role(leina, osohq, "member", session)
    oso.roles.assign_role(steve, oso_repo, "reader", session)
    session.commit()

    # leina's org membership implies repo:reader; steve holds reader directly.
    assert oso.is_allowed(leina, "pull", oso_repo)
    assert oso.is_allowed(steve, "pull", oso_repo)
def test_duplicate_resource_name(init_oso):
    """Two resource definitions sharing the same name raise OsoError."""
    # - duplicate resource name throws an error
    oso, session = init_oso
    policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };

    # DUPLICATE RESOURCE NAME "org"
    resource(_type: Repository, "org", actions, roles) if
        actions = [
            "push",
            "pull"
        ] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };
    """
    oso.load_str(policy)
    with pytest.raises(OsoError):
        oso.roles.synchronize_data()
def test_nested_dot_relationship(init_oso):
    """A parent_child relationship with multiple dot lookups is unsupported
    and raises OsoError."""
    # - multiple dot lookups throws an error for now
    oso, session = init_oso
    policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };

    resource(_type: Issue, "issue", actions, _roles) if
        actions = [
            "edit"
        ];

    parent_child(parent_org: Organization, issue: Issue) if
        issue.repo.org = parent_org;
    """
    oso.load_str(policy)
    with pytest.raises(OsoError):
        oso.roles.synchronize_data()
def test_bad_relationship_lookup(init_oso):
    """A parent_child relationship referencing a nonexistent attribute
    raises OsoError."""
    # - nonexistent attribute lookup throws an error for now
    oso, session = init_oso
    policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };

    resource(_type: Repository, "repo", actions, _) if
        actions = [
            "pull"
        ];

    parent_child(parent_org: Organization, repo: Repository) if
        # INCORRECT FIELD NAME
        repo.organization = parent_org;
    """
    oso.load_str(policy)
    with pytest.raises(OsoError):
        oso.roles.synchronize_data()
def test_relationship_without_specializer(init_oso):
    """A parent_child rule whose child argument lacks a type specializer fails."""
    oso, _session = init_oso
    unspecialized_policy = """
    resource(_type: Repository, "repo", actions, _) if
        actions = [
            "pull"
        ];

    parent_child(parent_org: Organization, repo) if
        repo.org = parent_org;
    """
    oso.load_str(unspecialized_policy)
    # `repo` has no `: Repository` specializer, so the relationship is invalid.
    with pytest.raises(OsoError):
        oso.roles.synchronize_data()
def test_relationship_without_resources(init_oso):
    """A parent_child rule with no resource(...) definitions at all fails."""
    oso, _session = init_oso
    orphan_relationship_policy = """
    parent_child(parent_org: Organization, repo: Repository) if
        repo.org = parent_org;
    """
    oso.load_str(orphan_relationship_policy)
    # Neither Organization nor Repository was declared as a resource.
    with pytest.raises(OsoError):
        oso.roles.synchronize_data()
def test_duplicate_role_name_same_resource(init_oso):
    """A duplicated role key inside one resource is a parse error.

    Note this fails at `load_str` (ParserError), not at synchronization,
    because the Polar dictionary literal itself contains a duplicate key.
    """
    oso, _session = init_oso
    duplicate_key_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = [
            "invite", "create_repo"
        ] and
        roles = {
            owner: {
                permissions: ["invite"],
                implies: ["member", "repo:member"]
            },
            owner: {
                permissions: ["create_repo"]
            }
        };
    """
    with pytest.raises(ParserError):
        oso.load_str(duplicate_key_policy)
def test_duplicate_role_name_different_resources(init_oso, sample_data):
    """Role names may repeat across resources; each stays scoped to its type.

    Both Organization and Repository declare a role named "member"; the
    namespaced form ("org:member" vs "repo:member") keeps them distinct.
    """
    oso, session = init_oso
    shared_name_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = [
            "invite", "create_repo"
        ] and
        roles = {
            owner: {
                permissions: ["invite"],
                implies: ["member", "repo:member"]
            },
            member: {
                permissions: ["create_repo"]
            }
        };

    resource(_type: Repository, "repo", actions, roles) if
        actions = [
            "push",
            "pull"
        ] and
        roles = {
            member: {
                permissions: ["pull"]
            }
        };

    parent_child(parent_org: Organization, repo: Repository) if
        repo.org = parent_org;

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(shared_name_policy)
    oso.roles.synchronize_data()

    leina = sample_data["leina"]
    steve = sample_data["steve"]
    gabe = sample_data["gabe"]
    osohq = sample_data["osohq"]
    oso_repo = sample_data["oso_repo"]

    oso.roles.assign_role(leina, osohq, "owner", session)
    oso.roles.assign_role(steve, oso_repo, "member", session)
    oso.roles.assign_role(gabe, osohq, "member", session)
    session.commit()

    # org:owner implies org:member and repo:member.
    assert oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(leina, "pull", oso_repo)

    # repo:member grants pull only — no org permissions.
    assert not oso.is_allowed(steve, "invite", osohq)
    assert oso.is_allowed(steve, "pull", oso_repo)

    # org:member grants create_repo only — no repo permissions.
    assert not oso.is_allowed(gabe, "invite", osohq)
    assert oso.is_allowed(gabe, "create_repo", osohq)
    assert not oso.is_allowed(gabe, "pull", oso_repo)
def test_resource_actions(init_oso):
    """A resource may declare actions without any roles; this is valid."""
    oso, _session = init_oso
    actions_only_policy = """
    resource(_type: Organization, "org", actions, _roles) if
        actions = [
            "invite"
        ];
    """
    oso.load_str(actions_only_policy)
    # Should synchronize cleanly — no exception expected.
    oso.roles.synchronize_data()
def test_duplicate_action(init_oso):
    """Listing the same action twice for one resource must fail validation."""
    oso, _session = init_oso
    repeated_action_policy = """
    resource(_type: Organization, "org", actions, _roles) if
        actions = [
            "invite",
            "invite"
        ];
    """
    oso.load_str(repeated_action_policy)
    with pytest.raises(OsoError):
        oso.roles.synchronize_data()
def test_undeclared_permission(init_oso):
    """Assigning a permission that was never declared as an action fails."""
    oso, _session = init_oso
    undeclared_perm_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = [
            "invite"
        ] and
        roles = {
            member: {
                permissions: ["create_repo"]
            }
        };
    """
    oso.load_str(undeclared_perm_policy)
    # "create_repo" is not in the declared actions list.
    with pytest.raises(OsoError):
        oso.roles.synchronize_data()
def test_undeclared_role(init_oso):
    """Implying a role that was never declared must fail validation."""
    oso, _session = init_oso
    undeclared_role_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = [
            "invite"
        ] and
        roles = {
            member: {
                implies: ["fake_role"]
            }
        };
    """
    oso.load_str(undeclared_role_policy)
    # "fake_role" does not exist anywhere in the policy.
    with pytest.raises(OsoError):
        oso.roles.synchronize_data()
def test_role_implication_without_relationship(init_oso):
    """A cross-resource role implication needs a parent_child relationship."""
    oso, _session = init_oso
    unrelated_implication_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = [
            "invite"
        ] and
        roles = {
            member: {
                implies: ["repo:reader"]
            }
        };

    resource(_type: Repository, "repo", actions, roles) if
        actions = [
            "push",
            "pull"
        ] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };
    """
    oso.load_str(unrelated_implication_policy)
    # No parent_child rule links Organization to Repository.
    with pytest.raises(OsoError):
        oso.roles.synchronize_data()
def test_role_permission_without_relationship(init_oso):
    """A cross-resource permission grant needs a parent_child relationship."""
    oso, _session = init_oso
    unrelated_permission_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = [
            "invite"
        ] and
        roles = {
            member: {
                permissions: ["repo:push"]
            }
        };

    resource(_type: Repository, "repo", actions, _roles) if
        actions = [
            "push",
            "pull"
        ];
    """
    oso.load_str(unrelated_permission_policy)
    # "repo:push" cannot be granted: Organization and Repository are unrelated.
    with pytest.raises(OsoError):
        oso.roles.synchronize_data()
def test_invalid_role_permission(init_oso):
    """Granting a child-resource permission is invalid if the child has roles.

    When the permission's resource type defines its own roles, a parent role
    must imply one of those roles instead of taking the permission directly.
    """
    oso, _session = init_oso
    invalid_grant_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = [
            "invite"
        ] and
        roles = {
            member: {
                # THIS IS NOT ALLOWED
                permissions: ["repo:push"]
            }
        };

    resource(_type: Repository, "repo", actions, roles) if
        actions = [
            "push",
            "pull"
        ] and
        roles = {
            reader: {
                permissions: ["push"]
            }
        };

    parent_child(parent_org: Organization, repo: Repository) if
        repo.org = parent_org;
    """
    oso.load_str(invalid_grant_policy)
    with pytest.raises(OsoError):
        oso.roles.synchronize_data()
def test_permission_assignment_to_implied_role(init_oso):
    """Assigning one permission to both a role and a role it implies fails."""
    oso, _session = init_oso
    redundant_perm_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = [
            "invite"
        ] and
        roles = {
            member: {
                permissions: ["invite"]
            },
            owner: {
                permissions: ["invite"],
                implies: ["org:member"]
            }
        };
    """
    oso.load_str(redundant_perm_policy)
    # owner implies member, so giving both "invite" is redundant and rejected.
    with pytest.raises(OsoError):
        oso.roles.synchronize_data()
def test_incorrect_arity_resource(init_oso):
    """A resource(...) rule with the wrong number of arguments fails."""
    oso, _session = init_oso
    three_arg_policy = """
    resource(_type: Organization, "org", actions) if
        actions = [
            "invite"
        ];
    """
    oso.load_str(three_arg_policy)
    # resource/3 instead of the required resource/4.
    with pytest.raises(OsoError):
        oso.roles.synchronize_data()
def test_undefined_resource_arguments(init_oso):
    """A resource(...) rule that never binds actions/roles must fail."""
    oso, _session = init_oso
    unbound_args_policy = """
    resource(_type: Organization, "org", _actions, _roles);
    """
    oso.load_str(unbound_args_policy)
    with pytest.raises(OsoError):
        oso.roles.synchronize_data()
def test_wrong_type_resource_arguments(init_oso):
    """A role definition with an unrecognized field key must fail."""
    oso, _session = init_oso
    bad_key_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                # incorrect key name
                actions: ["invite"]
            }
        };
    """
    oso.load_str(bad_key_policy)
    # Role dicts accept "permissions"/"implies", not "actions".
    with pytest.raises(OsoError):
        oso.roles.synchronize_data()
# TEST CHECK API
# Homogeneous role-permission assignment:
# - [x] Adding a permission of same resource type to a role grants assignee access
# - [x] Modifying a permission of same resource type on a role modifies assignee access
# - [x] Removing a permission of same resource type from a role revokes assignee access
# Parent->child role-permission assignment:
# - [x] Adding a permission of child resource type to a role grants assignee access
# - [x] Removing a permission of child resource type from a role revokes assignee access
# Grandparent->child role-permission assignment:
# - [x] Adding a permission of grandchild resource type to a role grants assignee access
# - [x] Removing a permission of grandchild resource type from a role revokes assignee access
# Homogeneous role implications:
# - [x] Adding a role implication of same resource type to a role grants assignee access
# - [x] Removing a role implication of same resource type from a role revokes assignee access
# Parent->child role implications:
# - [x] Adding a role implication of child resource type to a role grants assignee access to child
# - [x] Removing a role implication of child resource type from a role revokes assignee access to child
# Grandparent->child role implications:
# - [x] Adding a role implication of grandchild resource type to a role grants assignee access to grandchild
# without intermediate parent resource
# Chained role implications:
# - [x] Adding a role implication from grandparent->parent->child resource role types grants assignee of grandparent role
# access to grandchild resource
# Overlapping role assignments:
# - [x] Assigning a more permissive and less permissive role to the same user grants most permissive access
def test_overlapping_permissions(init_oso, sample_data):
    """Overlapping role assignments grant the most permissive access.

    A user holding both a less permissive role (org:member -> repo:reader)
    and a more permissive one (repo:writer) gets the union of permissions.

    Fixes vs. original: `assign_role` now passes the test session explicitly
    and commits, matching the pattern used by every other test in this file.
    """
    oso, session = init_oso
    policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"],
                implies: ["repo:reader"]
            }
        };

    resource(_type: Repository, "repo", actions, roles) if
        actions = [
            "push",
            "pull"
        ] and
        roles = {
            reader: {
                permissions: ["pull"]
            },
            writer: {
                # writer is more permissive than reader
                permissions: ["push"],
                implies: ["reader"]
            }
        };

    parent_child(parent_org: Organization, repository: Repository) if
        repository.org = parent_org;

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.roles.synchronize_data()

    osohq = sample_data["osohq"]
    oso_repo = sample_data["oso_repo"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]

    # Both users are org members; leina additionally gets the more
    # permissive repo:writer role.
    oso.roles.assign_role(leina, osohq, "member", session=session)
    oso.roles.assign_role(steve, osohq, "member", session=session)
    oso.roles.assign_role(leina, oso_repo, "writer", session=session)
    session.commit()

    # leina: member perms plus writer perms (push) plus implied reader (pull).
    assert oso.is_allowed(leina, "pull", oso_repo)
    assert oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(leina, "push", oso_repo)

    # steve: member perms only — pull via implied reader, but no push.
    assert oso.is_allowed(steve, "pull", oso_repo)
    assert oso.is_allowed(steve, "invite", osohq)
    assert not oso.is_allowed(steve, "push", oso_repo)
# Homogeneous role-permission assignment:
def test_homogeneous_role_perm(init_oso, sample_data):
    """Adding/removing a same-resource-type permission grants/revokes access."""
    oso, session = init_oso
    grant_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(grant_policy)
    oso.roles.synchronize_data()

    leina = sample_data["leina"]
    steve = sample_data["steve"]
    osohq = sample_data["osohq"]

    oso.roles.assign_role(leina, osohq, "member", session=session)
    session.commit()

    # Only the assignee gains the permission.
    assert oso.is_allowed(leina, "invite", osohq)
    assert not oso.is_allowed(steve, "invite", osohq)

    # - Removing a permission of same resource type from a role revokes assignee access
    revoke_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite", "list_repos"] and
        roles = {
            member: {
                # REMOVE INVITE AND ADD LIST_REPOS
                permissions: ["list_repos"]
            }
        };

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.clear_rules()
    oso.roles.config = None
    oso.load_str(revoke_policy)
    oso.roles.synchronize_data()

    # Role assignment persists; only its permissions changed.
    assert not oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(leina, "list_repos", osohq)
    assert not oso.is_allowed(steve, "list_repos", osohq)
# Parent->child role-permission assignment:
def test_parent_child_role_perm(init_oso, sample_data):
    """A parent-resource role can grant/revoke a child-resource permission."""
    oso, session = init_oso
    grant_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite", "repo:pull"]
            }
        };

    resource(_type: Repository, "repo", actions, _roles) if
        actions = [
            "push",
            "pull"
        ];

    parent_child(parent_org: Organization, repository: Repository) if
        repository.org = parent_org;

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(grant_policy)
    oso.roles.synchronize_data()

    leina = sample_data["leina"]
    steve = sample_data["steve"]
    osohq = sample_data["osohq"]
    oso_repo = sample_data["oso_repo"]
    ios = sample_data["ios"]

    oso.roles.assign_role(leina, osohq, "member", session=session)
    session.commit()

    # The grant applies only to repos under leina's org.
    assert oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(leina, "pull", oso_repo)
    assert not oso.is_allowed(leina, "pull", ios)
    assert not oso.is_allowed(steve, "pull", oso_repo)

    # - Removing a permission of child resource type from a role revokes assignee access
    revoke_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };

    resource(_type: Repository, "repo", actions, _roles) if
        actions = [
            "push",
            "pull"
        ];

    parent_child(parent_org: Organization, repository: Repository) if
        repository.org = parent_org;

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.clear_rules()
    oso.roles.config = None
    oso.load_str(revoke_policy)
    oso.roles.synchronize_data()

    # Child permission gone, same-resource permission retained.
    assert not oso.is_allowed(leina, "pull", oso_repo)
    assert oso.is_allowed(leina, "invite", osohq)
# Grandparent->child role-permission assignment:
def test_grandparent_child_role_perm(init_oso, sample_data):
    # Adding a permission of grandchild resource type (Issue) to a
    # grandparent (Organization) role grants assignee access without the
    # intermediate Repository resource declaring anything.
    oso, session = init_oso
    policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["list_repos", "invite"] and
        roles = {
            member: {
                permissions: ["list_repos", "issue:edit"]
            },
            owner: {
                permissions: ["invite"],
                implies: ["member"]
            }
        };

    resource(_type: Issue, "issue", actions, _) if
        actions = [
            "edit"
        ];

    parent_child(parent_org: Organization, repository: Repository) if
        repository.org = parent_org;

    parent_child(parent_repo: Repository, issue: Issue) if
        issue.repo = parent_repo;

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    # NOTE(review): config is reset *after* load_str here, unlike other tests
    # which only reset it before re-loading a new policy — confirm intended.
    oso.roles.config = None
    oso.roles.synchronize_data()

    osohq = sample_data["osohq"]
    oso_bug = sample_data["oso_bug"]
    ios_laggy = sample_data["ios_laggy"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]

    oso.roles.assign_role(leina, osohq, "member", session=session)
    session.commit()

    # member: list_repos on the org, edit on issues under the org only.
    assert oso.is_allowed(leina, "list_repos", osohq)
    assert oso.is_allowed(leina, "edit", oso_bug)
    assert not oso.is_allowed(leina, "edit", ios_laggy)
    assert not oso.is_allowed(leina, "invite", osohq)
    assert not oso.is_allowed(steve, "edit", oso_bug)

    oso.roles.assign_role(steve, osohq, "owner", session=session)
    session.commit()

    # owner implies member, so steve gets member's grants plus "invite".
    assert oso.is_allowed(steve, "edit", oso_bug)
    assert oso.is_allowed(steve, "list_repos", osohq)
    assert oso.is_allowed(steve, "invite", osohq)

    # - Removing a permission of grandchild resource type from a role revokes assignee access
    new_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };

    resource(_type: Issue, "issue", actions, _) if
        actions = [
            "edit"
        ];

    parent_child(parent_org: Organization, repository: Repository) if
        repository.org = parent_org;

    parent_child(parent_repo: Repository, issue: Issue) if
        issue.repo = parent_repo;

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.clear_rules()
    oso.load_str(new_policy)
    # Reset cached role config so synchronize_data re-reads the new policy.
    oso.roles.config = None
    oso.roles.synchronize_data()

    assert not oso.is_allowed(leina, "edit", oso_bug)
    assert oso.is_allowed(leina, "invite", osohq)
# Homogeneous role implications:
def test_homogeneous_role_implication(init_oso, sample_data):
    # Adding a role implication of the same resource type to a role grants
    # assignee access; removing it revokes that access.
    oso, session = init_oso
    policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            },
            owner: {
                implies: ["member"]
            }
        };

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    # NOTE(review): config is reset *after* load_str here, unlike other tests
    # which only reset it before re-loading a new policy — confirm intended.
    oso.roles.config = None
    oso.roles.synchronize_data()

    osohq = sample_data["osohq"]
    apple = sample_data["apple"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]

    # Sanity check: no access before any role assignment.
    assert not oso.is_allowed(leina, "invite", osohq)

    oso.roles.assign_role(leina, osohq, "member", session=session)
    oso.roles.assign_role(steve, osohq, "owner", session=session)
    session.commit()

    # Both direct (member) and implied (owner -> member) roles grant "invite",
    # scoped to the org the role was assigned on.
    assert oso.is_allowed(leina, "invite", osohq)
    assert not oso.is_allowed(leina, "invite", apple)
    assert oso.is_allowed(steve, "invite", osohq)
    assert not oso.is_allowed(steve, "invite", apple)

    # - Removing a role implication of same resource type from a role revokes assignee access
    new_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite", "list_repos"] and
        roles = {
            member: {
                permissions: ["invite"]
            },
            owner: {
                # REMOVE "implies"
                permissions: ["list_repos"]
            }
        };

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.clear_rules()
    oso.load_str(new_policy)
    # Reset cached role config so synchronize_data re-reads the new policy.
    oso.roles.config = None
    oso.roles.synchronize_data()

    # leina can still "invite"
    assert oso.is_allowed(leina, "invite", osohq)
    # steve can't "invite"
    assert not oso.is_allowed(steve, "invite", osohq)
    assert oso.is_allowed(steve, "list_repos", osohq)
# Parent->child role implications:
def test_parent_child_role_implication(init_oso, sample_data):
    """A parent-resource role implying a child-resource role grants/revokes access."""
    oso, session = init_oso
    implication_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"],
                implies: ["repo:reader"]
            }
        };

    resource(_type: Repository, "repo", actions, roles) if
        actions = [
            "push",
            "pull"
        ] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };

    parent_child(parent_org: Organization, repository: Repository) if
        repository.org = parent_org;

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(implication_policy)
    oso.roles.synchronize_data()

    leina = sample_data["leina"]
    steve = sample_data["steve"]
    osohq = sample_data["osohq"]
    oso_repo = sample_data["oso_repo"]
    ios = sample_data["ios"]

    # member implies reader which has the "pull" permission
    oso.roles.assign_role(leina, osohq, "member", session=session)
    session.commit()

    assert oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(leina, "pull", oso_repo)
    # Implication is scoped to repos under leina's org.
    assert not oso.is_allowed(leina, "pull", ios)
    assert not oso.is_allowed(steve, "pull", oso_repo)

    # - Removing a role implication of child resource type from a role revokes assignee access to child
    no_implication_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };

    resource(_type: Repository, "repo", actions, _roles) if
        actions = [
            "push",
            "pull"
        ];

    parent_child(parent_org: Organization, repository: Repository) if
        repository.org = parent_org;

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.clear_rules()
    oso.load_str(no_implication_policy)
    oso.roles.config = None
    oso.roles.synchronize_data()

    assert not oso.is_allowed(leina, "pull", oso_repo)
    assert oso.is_allowed(leina, "invite", osohq)
# Grandparent->child role implications:
def test_grandparent_child_role_implication(init_oso, sample_data):
    """A grandparent role may imply a grandchild role directly.

    Organization's member implies issue:editor without Repository (the
    intermediate resource) declaring any roles of its own.
    """
    oso, session = init_oso
    implication_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"],
                implies: ["issue:editor"]
            }
        };

    resource(_type: Issue, "issue", actions, roles) if
        actions = [
            "edit"
        ] and
        roles = {
            editor: {
                permissions: ["edit"]
            }
        };

    parent_child(parent_org: Organization, repository: Repository) if
        repository.org = parent_org;

    parent_child(parent_repo: Repository, issue: Issue) if
        issue.repo = parent_repo;

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(implication_policy)
    oso.roles.synchronize_data()

    leina = sample_data["leina"]
    steve = sample_data["steve"]
    osohq = sample_data["osohq"]
    oso_bug = sample_data["oso_bug"]
    ios_laggy = sample_data["ios_laggy"]

    oso.roles.assign_role(leina, osohq, "member", session=session)
    session.commit()

    assert oso.is_allowed(leina, "invite", osohq)
    # Implied editor role reaches issues under leina's org only.
    assert oso.is_allowed(leina, "edit", oso_bug)
    assert not oso.is_allowed(leina, "edit", ios_laggy)
    assert not oso.is_allowed(steve, "edit", oso_bug)

    # - Removing a permission of grandchild resource type from a role revokes assignee access
    no_implication_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };

    resource(_type: Issue, "issue", actions, roles) if
        actions = [
            "edit"
        ] and
        roles = {
            editor: {
                permissions: ["edit"]
            }
        };

    parent_child(parent_org: Organization, repository: Repository) if
        repository.org = parent_org;

    parent_child(parent_repo: Repository, issue: Issue) if
        issue.repo = parent_repo;

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.clear_rules()
    oso.roles.config = None
    oso.load_str(no_implication_policy)
    oso.roles.synchronize_data()

    assert not oso.is_allowed(leina, "edit", oso_bug)
    assert oso.is_allowed(leina, "invite", osohq)
def test_chained_role_implication(init_oso, sample_data):
    """Chained implications (grandparent->parent->child) propagate access.

    org:member implies repo:reader which implies issue:editor, so an org
    member can act on grandchild issues; removing the first link severs the
    chain for the org member but not for direct repo:reader assignees.

    Fixes vs. original: the assertion block after the first policy load was
    asserting `steve` could pull (duplicated below) where the comment said
    `leina` — leina's implied repo:reader "pull" permission was never tested.
    """
    oso, session = init_oso
    policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"],
                implies: ["repo:reader"]
            }
        };

    resource(_type: Repository, "repo", actions, roles) if
        actions = [
            "push",
            "pull"
        ] and
        roles = {
            reader: {
                permissions: ["pull"],
                implies: ["issue:editor"]
            }
        };

    resource(_type: Issue, "issue", actions, roles) if
        actions = [
            "edit"
        ] and
        roles = {
            editor: {
                permissions: ["edit"]
            }
        };

    parent_child(parent_org: Organization, repository: Repository) if
        repository.org = parent_org;

    parent_child(parent_repo: Repository, issue: Issue) if
        issue.repo = parent_repo;

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.roles.synchronize_data()

    osohq = sample_data["osohq"]
    oso_repo = sample_data["oso_repo"]
    oso_bug = sample_data["oso_bug"]
    ios_laggy = sample_data["ios_laggy"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]

    oso.roles.assign_role(leina, osohq, "member", session=session)
    oso.roles.assign_role(steve, oso_repo, "reader", session=session)
    session.commit()

    # leina can invite to the org, pull from the repo, and edit the issue
    assert oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(leina, "pull", oso_repo)  # was `steve` — see docstring
    assert oso.is_allowed(leina, "edit", oso_bug)
    assert not oso.is_allowed(leina, "edit", ios_laggy)

    # steve can pull from the repo and edit the issue, but can NOT invite to the org
    assert oso.is_allowed(steve, "pull", oso_repo)
    assert oso.is_allowed(steve, "edit", oso_bug)
    assert not oso.is_allowed(steve, "edit", ios_laggy)
    assert not oso.is_allowed(steve, "invite", osohq)

    # - Removing a role implication from grandparent->parent->child resource role types revokes assignee of grandparent
    #   role access to grandchild resource
    new_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };

    resource(_type: Repository, "repo", actions, roles) if
        actions = [
            "push",
            "pull"
        ] and
        roles = {
            reader: {
                permissions: ["pull"],
                implies: ["issue:editor"]
            }
        };

    resource(_type: Issue, "issue", actions, roles) if
        actions = [
            "edit"
        ] and
        roles = {
            editor: {
                permissions: ["edit"]
            }
        };

    parent_child(parent_org: Organization, repository: Repository) if
        repository.org = parent_org;

    parent_child(parent_repo: Repository, issue: Issue) if
        issue.repo = parent_repo;

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.clear_rules()
    oso.roles.config = None
    oso.load_str(new_policy)
    oso.roles.synchronize_data()

    # leina can't edit the issue anymore
    assert not oso.is_allowed(leina, "edit", oso_bug)
    assert oso.is_allowed(leina, "invite", osohq)
    # steve can still edit the issue
    assert oso.is_allowed(steve, "edit", oso_bug)
# TEST WRITE API
# User-role assignment:
# - [x] Adding user-role assignment grants access
# - [x] Removing user-role assignment revokes access
# - [x] Assigning/removing non-existent role throws an error
# - [x] Removing user from a role they aren't assigned throws an error
# - [x] Assigning to role with wrong resource type throws an error
# - [x] Reassigning user role throws error if `reassign=False`
def test_assign_role_wrong_resource_type(init_oso, sample_data):
    """Assigning a role on a resource of the wrong type raises an error."""
    oso, session = init_oso
    org_only_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(org_only_policy)
    oso.roles.synchronize_data()

    leina = sample_data["leina"]
    oso_repo = sample_data["oso_repo"]

    # "member" belongs to Organization, not Repository.
    with pytest.raises(OsoError):
        oso.roles.assign_role(leina, oso_repo, "member", session=session)
def test_assign_remove_nonexistent_role(init_oso, sample_data):
    """Assigning or removing a role that was never declared raises an error."""
    oso, session = init_oso
    member_only_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(member_only_policy)
    oso.roles.synchronize_data()

    leina = sample_data["leina"]
    osohq = sample_data["osohq"]

    # "owner" does not exist in this policy — both operations must fail.
    with pytest.raises(OsoError):
        oso.roles.assign_role(leina, osohq, "owner", session=session)
    with pytest.raises(OsoError):
        oso.roles.remove_role(leina, osohq, "owner", session=session)
def test_remove_unassigned_role(init_oso, sample_data):
    """Removing a valid role the user never had returns False (no error)."""
    oso, session = init_oso
    member_only_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(member_only_policy)
    oso.roles.synchronize_data()

    leina = sample_data["leina"]
    osohq = sample_data["osohq"]

    was_removed = oso.roles.remove_role(leina, osohq, "member", session=session)
    assert not was_removed
def test_assign_remove_user_role(init_oso, sample_data):
    """User-role assignment grants access; removal revokes it.

    Also verifies the UserRole rows persisted for each user.
    """
    oso, session = init_oso
    two_role_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite", "list_repos"] and
        roles = {
            member: {
                permissions: ["invite"]
            },
            owner: {
                permissions: ["list_repos"]
            }
        };

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(two_role_policy)
    oso.roles.synchronize_data()

    leina = sample_data["leina"]
    steve = sample_data["steve"]
    osohq = sample_data["osohq"]

    def roles_for(user):
        # All persisted UserRole rows for the given user.
        return (
            session.query(oso.roles.UserRole)
            .filter(oso.roles.UserRole.user_id == user.id)
            .all()
        )

    # Assign leina member role
    oso.roles.assign_role(leina, osohq, "member", session=session)
    session.commit()
    leina_roles = roles_for(leina)
    assert len(leina_roles) == 1
    assert leina_roles[0].role == "org:member"

    # Assign steve owner role
    oso.roles.assign_role(steve, osohq, "owner", session=session)
    session.commit()
    steve_roles = roles_for(steve)
    assert len(steve_roles) == 1
    assert steve_roles[0].role == "org:owner"

    # Each role grants only its own permission.
    assert oso.is_allowed(leina, "invite", osohq)
    assert not oso.is_allowed(steve, "invite", osohq)
    assert oso.is_allowed(steve, "list_repos", osohq)

    # - Removing user-role assignment revokes access
    was_removed = oso.roles.remove_role(leina, osohq, "member", session=session)
    session.commit()
    assert was_removed
    assert len(roles_for(leina)) == 0

    # make sure steve still has his role
    steve_roles = roles_for(steve)
    assert len(steve_roles) == 1
    assert steve_roles[0].role == "org:owner"

    assert not oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(steve, "list_repos", osohq)
def test_reassign_user_role(init_oso, sample_data):
    """Roles of one resource type are mutually exclusive per user.

    Reassigning fails with ``reassign=False`` and replaces the old role
    with ``reassign=True``.

    Fixes vs. original: the two reassignment calls now pass ``reassign``
    and the session explicitly, matching the comments and the session-passing
    convention used everywhere else in this file.
    """
    oso, session = init_oso
    policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite", "list_repos"] and
        roles = {
            member: {
                permissions: ["invite"]
            },
            owner: {
                permissions: ["list_repos"],
                implies: ["member", "repo:reader"]
            }
        };

    resource(_type: Repository, "repo", actions, roles) if
        actions = ["pull"] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };

    parent_child(parent_org: Organization, repo: Repository) if
        repo.org = parent_org;

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.roles.synchronize_data()

    osohq = sample_data["osohq"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]

    def roles_for(user):
        # All persisted UserRole rows for the given user.
        return (
            session.query(oso.roles.UserRole)
            .filter(oso.roles.UserRole.user_id == user.id)
            .all()
        )

    oso.roles.assign_role(leina, osohq, "member", session)
    session.commit()
    leina_roles = roles_for(leina)
    assert len(leina_roles) == 1
    assert leina_roles[0].role == "org:member"

    oso.roles.assign_role(steve, osohq, "owner", session)
    session.commit()
    steve_roles = roles_for(steve)
    assert len(steve_roles) == 1
    assert steve_roles[0].role == "org:owner"

    # reassigning with reassign=False throws an error
    with pytest.raises(OsoError):
        oso.roles.assign_role(leina, osohq, "owner", session, reassign=False)

    # reassign with reassign=True: the new role replaces the old one.
    oso.roles.assign_role(leina, osohq, "owner", session, reassign=True)
    session.commit()
    leina_roles = roles_for(leina)
    assert len(leina_roles) == 1
    assert leina_roles[0].role == "org:owner"
# TEST DATA FILTERING
# - [x] `role_allows` with another rule that produces false filter (implicit OR)
# - [x] `role_allows` inside of an `OR` with another expression
# - [x] `role_allows` inside of an `AND` with another expression
# - [x] `role_allows` inside of a `not` (this probably won't work, so need error handling)
def test_authorizing_related_fields(
    init_oso, sample_data, auth_sessionmaker, Organization, Repository
):
    """Related fields are only populated when their type is also authorized."""
    oso, session = init_oso
    policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite", "read"] and
        roles = {
            member: {
                permissions: ["invite", "read"],
                implies: ["repo:reader"]
            }
        };

    resource(_type: Repository, "repo", actions, roles) if
        actions = ["pull"] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };

    parent_child(parent_org: Organization, repo: Repository) if
        repo.org = parent_org;

    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.roles.synchronize_data()

    steve = sample_data["steve"]
    osohq = sample_data["osohq"]

    oso.roles.assign_role(steve, osohq, "member", session)
    session.commit()

    oso.actor = steve

    # Repository authorized alone: rows come back but .org is filtered out.
    oso.checked_permissions = {Repository: "pull"}
    repos = auth_sessionmaker().query(Repository).all()
    assert len(repos) == 2
    assert repos[0].org is None

    # Authorizing Organization too makes the relationship visible.
    oso.checked_permissions = {Organization: "read", Repository: "pull"}
    repos = auth_sessionmaker().query(Repository).all()
    assert len(repos) == 2
    assert repos[0].org.id == osohq.id
def test_data_filtering_role_allows_not(
    init_oso, sample_data, auth_sessionmaker, Organization
):
    """`role_allows` under negation is unsupported for data filtering."""
    oso, session = init_oso
    negated_policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };

    allow(actor, action, resource) if
        not Roles.role_allows(actor, action, resource);
    """
    oso.load_str(negated_policy)
    oso.roles.synchronize_data()

    leina = sample_data["leina"]
    steve = sample_data["steve"]
    osohq = sample_data["osohq"]
    apple = sample_data["apple"]

    oso.roles.assign_role(leina, osohq, "member", session=session)
    oso.roles.assign_role(steve, osohq, "member", session=session)
    session.commit()

    # This is just to ensure we don't modify the policy above.
    # The negated rule inverts access: members are denied on their own org.
    assert not oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(leina, "invite", apple)
    assert not oso.is_allowed(steve, "invite", osohq)
    assert oso.is_allowed(steve, "invite", apple)

    # Data filtering, however, cannot express the negation and must raise.
    oso.actor = leina
    oso.checked_permissions = {Organization: "invite"}
    auth_session = auth_sessionmaker()
    with pytest.raises(OsoError):
        auth_session.query(Organization).all()
def test_data_filtering_role_allows_and(
    init_oso, sample_data, auth_sessionmaker, User, Organization
):
    """``Roles.role_allows()`` AND-ed with another condition narrows the filter."""
    oso, session = init_oso
    policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"],
                implies: ["repo:reader"]
            }
        };
    resource(_type: Repository, "repo", actions, roles) if
        actions = ["pull"] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };
    parent_child(parent_org: Organization, repo: Repository) if
        repo.org = parent_org;
    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource) and
        resource.id = "osohq";
    """
    oso.load_str(policy)
    oso.roles.synchronize_data()

    osohq = sample_data["osohq"]
    apple = sample_data["apple"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]

    oso.roles.assign_role(leina, osohq, "member", session=session)
    oso.roles.assign_role(leina, apple, "member", session=session)
    oso.roles.assign_role(steve, osohq, "member", session=session)
    session.commit()

    # This is just to ensure we don't modify the policy above.
    assert oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(steve, "invite", osohq)
    assert not oso.is_allowed(leina, "invite", apple)

    # Only the "osohq" org satisfies the AND-ed id condition.
    oso.actor = leina
    oso.checked_permissions = {Organization: "invite"}
    auth_session = auth_sessionmaker()
    results = auth_session.query(Organization).all()
    assert len(results) == 1

    # No User permissions are granted, so the User query is empty.
    oso.actor = steve
    oso.checked_permissions = {Organization: "invite", User: "invite"}
    auth_session = auth_sessionmaker()
    results = auth_session.query(User).all()
    assert len(results) == 0
def test_data_filtering_role_allows_explicit_or(
    init_oso, sample_data, auth_sessionmaker, User, Organization, Repository
):
    """``Roles.role_allows()`` OR-ed with another condition widens the filter."""
    oso, session = init_oso
    policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"],
                implies: ["repo:reader"]
            }
        };
    resource(_type: Repository, "repo", actions, roles) if
        actions = ["pull"] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };
    parent_child(parent_org: Organization, repo: Repository) if
        repo.org = parent_org;
    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource) or
        resource.id = "osohq";
    """
    oso.load_str(policy)
    oso.roles.synchronize_data()

    osohq = sample_data["osohq"]
    apple = sample_data["apple"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]

    oso.roles.assign_role(steve, apple, "member", session=session)
    session.commit()

    # This is just to ensure we don't modify the policy above.
    assert oso.is_allowed(steve, "invite", osohq)
    assert oso.is_allowed(steve, "invite", apple)

    # steve gets apple via his role and osohq via the OR-ed id condition.
    oso.actor = steve
    oso.checked_permissions = {Organization: "invite"}
    auth_session = auth_sessionmaker()
    results = auth_session.query(Organization).all()
    assert len(results) == 2

    # Repositories only come through the role implication, not the id branch.
    oso.actor = steve
    oso.checked_permissions = {Repository: "pull"}
    auth_session = auth_sessionmaker()
    results = auth_session.query(Repository).all()
    assert len(results) == 1
    assert results[0].org_id == "apple"

    # leina has no roles; only the OR-ed id condition matches.
    oso.actor = leina
    oso.checked_permissions = {Organization: "invite", User: "invite"}
    auth_session = auth_sessionmaker()
    results = auth_session.query(Organization).all()
    assert len(results) == 1
def test_data_filtering_role_allows_implicit_or(
    init_oso, sample_data, auth_sessionmaker, User, Organization
):
    """Separate allow() rules combine as an implicit OR in data filtering."""
    # Ensure that the filter produced by `Roles.role_allows()` is not AND-ed
    # with a false filter produced by a separate `allow()` rule.
    oso, session = init_oso
    policy = """
    # Users can read their own data.
    allow(user: User, "read", user);

    resource(_type: Organization, "org", actions, roles) if
        actions = ["read"] and
        roles = {
            member: {
                permissions: ["read"]
            }
        };
    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.roles.synchronize_data()

    osohq = sample_data["osohq"]
    leina = sample_data["leina"]

    oso.roles.assign_role(leina, osohq, "member", session=session)
    session.commit()

    # This is just to ensure we don't modify the policy above.
    assert oso.is_allowed(leina, "read", leina)

    # Each rule contributes its own matches: one org and leina herself.
    oso.actor = leina
    oso.checked_permissions = {Organization: "read", User: "read"}
    auth_session = auth_sessionmaker()
    results = auth_session.query(Organization).all()
    assert len(results) == 1
    results = auth_session.query(User).all()
    assert len(results) == 1
def test_data_filtering_actor_can_assume_role_not(
    init_oso, sample_data, auth_sessionmaker, Organization
):
    """Negated ``Roles.actor_can_assume_role()`` must be rejected by data filtering."""
    oso, session = init_oso
    policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"]
            }
        };
    allow(actor, _action, resource) if
        not Roles.actor_can_assume_role(actor, "member", resource);
    """
    oso.load_str(policy)
    oso.roles.synchronize_data()

    osohq = sample_data["osohq"]
    apple = sample_data["apple"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]

    oso.roles.assign_role(leina, osohq, "member", session=session)
    oso.roles.assign_role(steve, osohq, "member", session=session)
    session.commit()

    # This is just to ensure we don't modify the policy above.
    assert not oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(leina, "invite", apple)
    assert not oso.is_allowed(steve, "invite", osohq)
    assert oso.is_allowed(steve, "invite", apple)

    # Negated role check cannot be turned into a filter: expect an error.
    oso.actor = leina
    oso.checked_permissions = {Organization: "invite"}
    auth_session = auth_sessionmaker()
    with pytest.raises(OsoError):
        auth_session.query(Organization).all()
def test_data_filtering_actor_can_assume_role_and(
    init_oso, sample_data, auth_sessionmaker, User, Organization
):
    """``actor_can_assume_role()`` AND-ed with another condition narrows the filter."""
    oso, session = init_oso
    policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"],
                implies: ["repo:reader"]
            }
        };
    resource(_type: Repository, "repo", actions, roles) if
        actions = ["pull"] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };
    parent_child(parent_org: Organization, repo: Repository) if
        repo.org = parent_org;
    allow(actor, _action, resource) if
        Roles.actor_can_assume_role(actor, "member", resource) and
        resource.id = "osohq";
    """
    oso.load_str(policy)
    oso.roles.synchronize_data()

    osohq = sample_data["osohq"]
    apple = sample_data["apple"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]

    oso.roles.assign_role(leina, osohq, "member", session=session)
    oso.roles.assign_role(leina, apple, "member", session=session)
    oso.roles.assign_role(steve, osohq, "member", session=session)
    session.commit()

    # This is just to ensure we don't modify the policy above.
    assert oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(steve, "invite", osohq)
    assert not oso.is_allowed(leina, "invite", apple)

    # Only "osohq" passes the AND-ed id condition even though leina has
    # member roles on two orgs.
    oso.actor = leina
    oso.checked_permissions = {Organization: "invite"}
    auth_session = auth_sessionmaker()
    results = auth_session.query(Organization).all()
    assert len(results) == 1

    # No rule grants access to User rows.
    oso.actor = steve
    oso.checked_permissions = {User: "invite"}
    auth_session = auth_sessionmaker()
    results = auth_session.query(User).all()
    assert len(results) == 0
def test_data_filtering_actor_can_assume_role_explicit_or(
    init_oso, sample_data, auth_sessionmaker, User, Organization, Repository
):
    """``actor_can_assume_role()`` OR-ed with another condition widens the filter."""
    oso, session = init_oso
    policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite"] and
        roles = {
            member: {
                permissions: ["invite"],
                implies: ["repo:reader"]
            }
        };
    resource(_type: Repository, "repo", actions, roles) if
        actions = ["pull"] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };
    parent_child(parent_org: Organization, repo: Repository) if
        repo.org = parent_org;
    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    allow(actor, _, resource) if
        Roles.actor_can_assume_role(actor, "member", resource) or
        resource.id = "osohq";
    """
    oso.load_str(policy)
    oso.roles.synchronize_data()

    osohq = sample_data["osohq"]
    apple = sample_data["apple"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]

    oso.roles.assign_role(steve, apple, "member", session=session)
    session.commit()

    # This is just to ensure we don't modify the policy above.
    assert oso.is_allowed(steve, "invite", osohq)
    assert oso.is_allowed(steve, "invite", apple)

    # steve: apple via his member role, osohq via the OR-ed id condition.
    oso.actor = steve
    oso.checked_permissions = {Organization: "invite"}
    auth_session = auth_sessionmaker()
    results = auth_session.query(Organization).all()
    assert len(results) == 2

    # Repositories only come through the role implication.
    oso.actor = steve
    oso.checked_permissions = {Repository: "pull"}
    auth_session = auth_sessionmaker()
    results = auth_session.query(Repository).all()
    assert len(results) == 1
    assert results[0].org_id == "apple"

    # leina has no roles; only the id condition matches.
    oso.actor = leina
    oso.checked_permissions = {Organization: "invite"}
    auth_session = auth_sessionmaker()
    results = auth_session.query(Organization).all()
    assert len(results) == 1
def test_data_filtering_actor_can_assume_role_implicit_or(
    init_oso, sample_data, auth_sessionmaker, User, Organization
):
    """Separate allow() rules OR with an ``actor_can_assume_role()`` filter."""
    # Ensure that the filter produced by `Roles.role_allows()` is not AND-ed
    # with a false filter produced by a separate `allow()` rule.
    oso, session = init_oso
    policy = """
    # Users can read their own data.
    allow(user: User, "read", user);

    resource(_type: Organization, "org", actions, roles) if
        actions = ["read"] and
        roles = {
            member: {
                permissions: ["read"]
            }
        };
    allow(actor, _action, resource) if
        Roles.actor_can_assume_role(actor, "member", resource);
    """
    oso.load_str(policy)
    oso.roles.synchronize_data()

    osohq = sample_data["osohq"]
    leina = sample_data["leina"]

    oso.roles.assign_role(leina, osohq, "member", session=session)
    session.commit()

    # This is just to ensure we don't modify the policy above.
    assert oso.is_allowed(leina, "read", leina)

    # Each rule contributes its own matches: one org and leina herself.
    oso.actor = leina
    oso.checked_permissions = {Organization: "read", User: "read"}
    auth_session = auth_sessionmaker()
    results = auth_session.query(Organization).all()
    assert len(results) == 1
    results = auth_session.query(User).all()
    assert len(results) == 1
def test_data_filtering_combo(
    init_oso, sample_data, auth_sessionmaker, User, Organization
):
    """Combining both Roles calls in one rule is not yet supported for filtering."""
    oso, session = init_oso
    policy = """
    # Users can read their own data.
    allow(user: User, "read", user);

    resource(_type: Organization, "org", actions, roles) if
        actions = ["read"] and
        roles = {
            member: {
                permissions: ["read"]
            }
        };
    allow(actor, action, resource) if
        role_allows = Roles.role_allows(actor, action, resource) and
        actor_can_assume_role = Roles.actor_can_assume_role(actor, "member", resource) and
        role_allows and actor_can_assume_role;
    """
    # You can't directly `and` the two Roles calls right now but it does work if you do it like ^
    oso.load_str(policy)
    oso.roles.synchronize_data()

    osohq = sample_data["osohq"]
    leina = sample_data["leina"]

    oso.roles.assign_role(leina, osohq, "member", session=session)
    session.commit()

    # This is just to ensure we don't modify the policy above.
    assert oso.is_allowed(leina, "read", leina)

    oso.actor = leina
    oso.checked_permissions = {Organization: "read"}
    auth_session = auth_sessionmaker()

    # TODO: for now this will error
    with pytest.raises(OsoError):
        auth_session.query(Organization).all()
# TEST READ API
# - [ ] Test getting all roles for a resource
# - [ ] Test getting all role assignments for a resource
def test_read_api(init_oso, sample_data, Repository, Organization):
    """Exercise the read API: roles per resource class and role assignments."""
    oso, session = init_oso
    policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = ["invite", "list_repos"] and
        roles = {
            member: {
                permissions: ["list_repos"]
            },
            owner: {
                permissions: ["invite"]
            }
        };
    resource(_type: Repository, "repo", actions, roles) if
        actions = ["pull"] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };
    parent_child(parent_org: Organization, repo: Repository) if
        repo.org = parent_org;
    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.roles.synchronize_data()

    osohq = sample_data["osohq"]
    oso_repo = sample_data["oso_repo"]
    ios = sample_data["ios"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]

    # - [ ] Test getting all roles for a resource
    repo_roles = oso.roles.for_resource(Repository, session)
    assert len(repo_roles) == 1
    assert repo_roles[0] == "reader"

    org_roles = oso.roles.for_resource(Organization, session)
    assert len(org_roles) == 2
    assert "member" in org_roles
    assert "owner" in org_roles

    # - [ ] Test getting all role assignments for a resource
    oso.roles.assign_role(leina, osohq, "member", session=session)
    oso.roles.assign_role(leina, oso_repo, "reader", session=session)
    oso.roles.assign_role(steve, osohq, "owner", session=session)
    oso.roles.assign_role(steve, ios, "reader", session=session)
    session.commit()

    osohq_assignments = oso.roles.assignments_for_resource(osohq, session)
    assert len(osohq_assignments) == 2
    oso_repo_assignments = oso.roles.assignments_for_resource(oso_repo, session)
    assert len(oso_repo_assignments) == 1
    ios_assignments = oso.roles.assignments_for_resource(ios, session)
    assert len(ios_assignments) == 1

    # Per-user view of the same assignments.
    leina_assignments = oso.roles.assignments_for_user(leina, session)
    assert len(leina_assignments) == 2
    steve_assignments = oso.roles.assignments_for_user(steve, session)
    assert len(steve_assignments) == 2
def test_actor_can_assume_role(
    init_oso, sample_data, Repository, Organization, auth_sessionmaker
):
    """``actor_can_assume_role`` honors role implications (owner->member->reader)."""
    oso, session = init_oso
    policy = """
    resource(_type: Organization, "org", _actions, roles) if
        roles = {
            member: {
                implies: ["repo:reader"]
            },
            owner: {
                implies: ["member"]
            }
        };
    resource(_type: Repository, "repo", actions, roles) if
        actions = ["pull"] and
        roles = {
            reader: {
                permissions: ["pull"]
            }
        };
    parent_child(parent_org: Organization, repo: Repository) if
        repo.org = parent_org;
    allow(actor, "read", repo: Repository) if
        Roles.actor_can_assume_role(actor, "reader", repo);
    """
    oso.load_str(policy)
    oso.roles.synchronize_data()

    osohq = sample_data["osohq"]
    oso_repo = sample_data["oso_repo"]
    leina = sample_data["leina"]
    steve = sample_data["steve"]
    gabe = sample_data["gabe"]

    # leina gets "reader" via org membership; steve is assigned it directly.
    oso.roles.assign_role(leina, osohq, "member")
    oso.roles.assign_role(steve, oso_repo, "reader")

    # Without data filtering
    assert oso.is_allowed(leina, "read", oso_repo)
    assert oso.is_allowed(steve, "read", oso_repo)
    assert not oso.is_allowed(gabe, "read", oso_repo)

    # With data filtering
    oso.actor = leina
    oso.checked_permissions = {Repository: "read"}
    auth_session = auth_sessionmaker()
    results = auth_session.query(Repository).all()
    assert len(results) == 2
    for repo in results:
        assert repo.org_id == "osohq"
def test_mismatched_id_types_throws_error(engine, Base, User):
    """enable_roles() must reject models whose primary-key types disagree.

    One model uses a String primary key and the other an Integer; role
    bookkeeping cannot target both, so ``enable_roles`` should raise.
    """

    class One(Base):
        __tablename__ = "ones"

        id = Column(String(), primary_key=True)

    class Two(Base):
        __tablename__ = "twos"

        id = Column(Integer(), primary_key=True)

    session_factory = sessionmaker(bind=engine)
    sqlalchemy_oso = SQLAlchemyOso(Base)

    with pytest.raises(OsoError):
        sqlalchemy_oso.enable_roles(User, session_factory)
def test_enable_roles_twice(engine, Base, User):
    """Calling enable_roles() a second time on the same Oso must raise."""

    class One(Base):
        __tablename__ = "ones"

        id = Column(Integer(), primary_key=True)

    session_factory = sessionmaker(bind=engine)
    sqlalchemy_oso = SQLAlchemyOso(Base)

    # The first call succeeds; the repeat is rejected.
    sqlalchemy_oso.enable_roles(User, session_factory)
    with pytest.raises(OsoError):
        sqlalchemy_oso.enable_roles(User, session_factory)
def test_global_declarative_base(engine, Base, User):
    """Test two different Osos & two different OsoRoles but a shared
    declarative_base(). This shouldn't error."""

    class One(Base):
        __tablename__ = "ones"

        id = Column(Integer(), primary_key=True)

    session_factory = sessionmaker(bind=engine)

    # Two independent Oso instances may share one declarative base.
    first_oso = SQLAlchemyOso(Base)
    first_oso.enable_roles(User, session_factory)

    second_oso = SQLAlchemyOso(Base)
    second_oso.enable_roles(User, session_factory)
@pytest.mark.parametrize("sa_type,one_id", [(String, "1"), (Integer, 1)])
def test_id_types(engine, Base, User, sa_type, one_id):
    """Roles work with both String and Integer primary keys (parametrized)."""

    class One(Base):
        __tablename__ = "ones"
        id = Column(sa_type(), primary_key=True)

    class Two(Base):
        __tablename__ = "twos"
        id = Column(sa_type(), primary_key=True)

    Session = sessionmaker(bind=engine)
    session = Session()
    oso = SQLAlchemyOso(Base)
    oso.enable_roles(User, Session)
    Base.metadata.create_all(engine)

    policy = """
    resource(_type: One, "one", ["read"], {boss: {permissions: ["read"]}});
    resource(_type: Two, "two", ["read"], _roles);
    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.roles.synchronize_data()

    # Users and resources must exist in the DB before roles are assigned.
    steve = User(name="steve")
    one = One(id=one_id)
    session.add(steve)
    session.add(one)
    session.commit()

    oso.roles.assign_role(steve, one, "boss")
    session.commit()
    assert oso.is_allowed(steve, "read", one)
def test_role_allows_with_other_rules(
    init_oso, sample_data, auth_sessionmaker, User, Organization
):
    """``Roles.role_allows()`` coexists with unrelated allow() rules."""
    oso, session = init_oso
    policy = """
    # Users can read their own data.
    allow(user: User, "read", user);

    resource(_type: Organization, "org", actions, roles) if
        actions = ["read"] and
        roles = {
            member: {
                permissions: ["read"]
            }
        };
    allow(_, _, resource) if resource = 1;
    allow(_, _, resource: Boolean) if resource;
    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    oso.roles.synchronize_data()

    osohq = sample_data["osohq"]
    leina = sample_data["leina"]

    oso.roles.assign_role(leina, osohq, "member", session=session)
    session.commit()

    # This is just to ensure we don't modify the policy above.
    assert oso.is_allowed(leina, "read", osohq)
    # The literal-value and Boolean-specializer rules still apply.
    assert oso.is_allowed(leina, "read", 1)
    assert not oso.is_allowed(leina, "read", 2)
    assert oso.is_allowed(leina, "read", True)
    assert not oso.is_allowed(leina, "read", False)
# LEGACY TEST
def test_roles_integration(
    init_oso, auth_sessionmaker, User, Organization, Repository, Issue
):
    """End-to-end roles test: hierarchy, implications, data filtering, and
    assign/remove lifecycle across orgs, repos, and issues."""
    oso, session = init_oso
    policy = """
    resource(_type: Organization, "org", actions, roles) if
        actions = [
            "invite",
            "create_repo"
        ] and
        roles = {
            member: {
                permissions: ["create_repo"],
                implies: ["repo:reader"]
            },
            owner: {
                permissions: ["invite"],
                implies: ["member", "repo:writer"]
            }
        };
    resource(_type: Repository, "repo", actions, roles) if
        actions = [
            "push",
            "pull"
        ] and
        roles = {
            writer: {
                permissions: ["push", "issue:edit"],
                implies: ["reader"]
            },
            reader: {
                permissions: ["pull"]
            }
        };
    resource(_type: Issue, "issue", actions, _) if
        actions = [
            "edit"
        ];
    parent_child(parent_org: Organization, repository: Repository) if
        repository.org = parent_org;
    parent_child(parent_repo: Repository, issue: Issue) if
        issue.repo = parent_repo;
    allow(actor, action, resource) if
        Roles.role_allows(actor, action, resource);
    """
    oso.load_str(policy)
    # tbd on the name for this, but this is what used to happy lazily.
    # it reads the config from the policy and sets everything up.
    oso.roles.synchronize_data()

    # Create sample data
    # -------------------
    apple = Organization(id="apple")
    osohq = Organization(id="osohq")

    ios = Repository(id="ios", org=apple)
    oso_repo = Repository(id="oso", org=osohq)
    demo_repo = Repository(id="demo", org=osohq)

    laggy = Issue(id="laggy", repo=ios)
    bug = Issue(id="bug", repo=oso_repo)

    leina = User(name="leina")
    steve = User(name="steve")
    gabe = User(name="gabe")

    objs = [leina, steve, gabe, apple, osohq, ios, oso_repo, demo_repo, laggy, bug]
    for obj in objs:
        session.add(obj)
    session.commit()

    # @NOTE: Need the users and resources in the db before assigning roles
    # so you have to call session.commit() first.
    oso.roles.assign_role(leina, osohq, "owner", session=session)
    oso.roles.assign_role(steve, osohq, "member", session=session)
    session.commit()

    # Owner implies member + repo:writer; member implies repo:reader.
    assert oso.is_allowed(leina, "invite", osohq)
    assert oso.is_allowed(leina, "create_repo", osohq)
    assert oso.is_allowed(leina, "push", oso_repo)
    assert oso.is_allowed(leina, "pull", oso_repo)
    assert oso.is_allowed(leina, "edit", bug)

    assert not oso.is_allowed(steve, "invite", osohq)
    assert oso.is_allowed(steve, "create_repo", osohq)
    assert not oso.is_allowed(steve, "push", oso_repo)
    assert oso.is_allowed(steve, "pull", oso_repo)
    assert not oso.is_allowed(steve, "edit", bug)

    # Neither user has roles on apple's resources.
    assert not oso.is_allowed(leina, "edit", laggy)
    assert not oso.is_allowed(steve, "edit", laggy)

    # Data filtering: leina can pull both osohq repos but not apple's.
    oso.actor = leina
    oso.checked_permissions = {Repository: "pull"}
    auth_session = auth_sessionmaker()
    results = auth_session.query(Repository).all()
    assert len(results) == 2
    result_ids = [repo.id for repo in results]
    assert oso_repo.id in result_ids
    assert demo_repo.id in result_ids
    assert ios.id not in result_ids

    oso.actor = leina
    oso.checked_permissions = {Issue: "edit"}
    auth_session = auth_sessionmaker()
    results = auth_session.query(Issue).all()
    assert len(results) == 1
    result_ids = [issue.id for issue in results]
    assert bug.id in result_ids

    # Role reassignment replaces the previous role rather than stacking.
    assert not oso.is_allowed(gabe, "edit", bug)
    oso.roles.assign_role(gabe, osohq, "member", session=session)
    session.commit()
    assert not oso.is_allowed(gabe, "edit", bug)
    oso.roles.assign_role(gabe, osohq, "owner", session=session)
    session.commit()
    assert oso.is_allowed(gabe, "edit", bug)
    oso.roles.assign_role(gabe, osohq, "member", session=session)
    session.commit()
    assert not oso.is_allowed(gabe, "edit", bug)
    oso.roles.assign_role(gabe, osohq, "owner", session=session)
    session.commit()
    assert oso.is_allowed(gabe, "edit", bug)
    oso.roles.remove_role(gabe, osohq, "owner", session=session)
    session.commit()
    assert not oso.is_allowed(gabe, "edit", bug)

    # Read API sanity checks.
    org_roles = oso.roles.for_resource(Repository)
    assert set(org_roles) == {"reader", "writer"}
    oso_assignments = oso.roles.assignments_for_resource(osohq)
    assert oso_assignments == [
        {"user_id": leina.id, "role": "owner"},
        {"user_id": steve.id, "role": "member"},
    ]
def test_perf_sqlalchemy(init_oso, sample_data, Repository):
    """Smoke-test authorization latency with many directly-assigned roles.

    Creates 100 repositories, assigns ``leina`` the ``writer`` role on each,
    and times ``oso.is_allowed`` on the last one. Prints the average runtime;
    there is no pass/fail threshold, so this only guards against crashes and
    gross regressions observed manually.
    """
    oso, session = init_oso

    # Test many direct roles: 18 reader variants plus a writer role, so the
    # role table has plenty of rows to search through.
    p = """
    resource(_: Repository, "repo", actions, roles) if
        actions = ["read", "write"] and
        roles = {
            reader: { permissions: ["read"] },
            reader1: { permissions: ["read"] },
            reader2: { permissions: ["read"] },
            reader3: { permissions: ["read"] },
            reader4: { permissions: ["read"] },
            reader5: { permissions: ["read"] },
            reader6: { permissions: ["read"] },
            reader7: { permissions: ["read"] },
            reader8: { permissions: ["read"] },
            reader9: { permissions: ["read"] },
            reader10: { permissions: ["read"] },
            reader11: { permissions: ["read"] },
            reader12: { permissions: ["read"] },
            reader13: { permissions: ["read"] },
            reader14: { permissions: ["read"] },
            reader15: { permissions: ["read"] },
            reader16: { permissions: ["read"] },
            reader17: { permissions: ["read"] },
            writer: { permissions: ["write"] }
        };
    allow(actor, action, resource) if Roles.role_allows(actor, action, resource);"""
    oso.load_str(p)
    oso.roles.synchronize_data()

    leina = sample_data["leina"]
    osohq = sample_data["osohq"]

    # Create 100 repositories
    oso_repos = []
    for i in range(100):
        name = f"oso_repo_{i}"
        repo = Repository(id=name, org=osohq)
        oso_repos.append(repo)
        session.add(repo)
    session.commit()

    for i in range(100):
        oso.roles.assign_role(leina, oso_repos[i], "writer", session)
    session.commit()

    leina_roles = oso.roles.assignments_for_user(leina, session)
    assert len(leina_roles) == 100

    # Time the last repo so the lookup has to scan past all other assignments.
    number = 10
    time = timeit.timeit(
        lambda: oso.is_allowed(leina, "write", oso_repos[99]), number=number
    )
    print(f"Executed in : {time/number*1000} ms\n Averaged over {number} repetitions.")
|
davidpelaez/oso | languages/python/oso/oso/enforcer.py | from typing import List, Any
from polar import Variable, exceptions
from .exceptions import NotFoundError, ForbiddenError
from .oso import Policy
class Enforcer:
    """
    NOTE: This is a preview feature.

    Exposes high-level enforcement APIs which can be used by apps to perform
    resource-, request-, and query-level authorization.
    """

    def __init__(self, policy: Policy, *, get_error=None, read_action="read"):
        """Create an Enforcer, which is used to enforce an Oso policy in an app.

        :param get_error: Optionally override the method used to build errors
                          raised by the ``authorize`` and ``authorize_request``
                          methods. Should be a callable that takes one argument
                          ``is_not_found`` and returns an exception.
        :param read_action: The action used by the ``authorize`` method to
                            determine whether an authorization failure should
                            raise a ``NotFoundError`` or a ``ForbiddenError``
        """
        self.policy = policy
        # Fall back to the built-in NotFound/Forbidden factory when the
        # caller does not supply a custom one.
        if get_error is None:
            self._get_error = self._default_get_error
        else:
            self._get_error = get_error
        self.read_action = read_action
        super().__init__()

    def _default_get_error(self, is_not_found: bool):
        """Default error factory: NotFound hides unreadable resources."""
        return NotFoundError() if is_not_found else ForbiddenError()

    def authorize(self, actor, action, resource, *, check_read: bool = True):
        """Ensure that ``actor`` is allowed to perform ``action`` on
        ``resource``.

        If the action is permitted with an ``allow`` rule in the policy, then
        this method returns ``None``. If the action is not permitted by the
        policy, this method will raise an error.

        The error raised by this method depends on whether the actor can perform
        the ``"read"`` action on the resource. If they cannot read the resource,
        then a ``NotFound`` error is raised. Otherwise, a ``ForbiddenError`` is
        raised.

        :param actor: The actor performing the request.
        :param action: The action the actor is attempting to perform.
        :param resource: The resource being accessed.
        :param check_read: If set to ``False``, a ``ForbiddenError`` is always
            thrown on authorization failures, regardless of whether the actor can
            read the resource. Default is ``True``.
        :type check_read: bool
        """
        if not self.policy.query_rule_once("allow", actor, action, resource):
            is_not_found = False
            if action == self.read_action:
                # A failed read is always a "not found".
                is_not_found = True
            elif check_read and not self.policy.query_rule_once(
                "allow", actor, self.read_action, resource
            ):
                # Actor can't even read the resource: hide its existence.
                is_not_found = True
            raise self._get_error(is_not_found)

    def authorize_request(self, actor, request):
        """Ensure that ``actor`` is allowed to send ``request`` to the server.

        Checks the ``allow_request`` rule of a policy.

        If the request is permitted with an ``allow_request`` rule in the
        policy, then this method returns ``None``. Otherwise, this method raises
        a ``ForbiddenError``.

        :param actor: The actor performing the request.
        :param request: An object representing the request that was sent by the
            actor.
        """
        if not self.policy.query_rule_once("allow_request", actor, request):
            raise self._get_error(False)

    def authorized_actions(self, actor, resource, allow_wildcard=False) -> List[Any]:
        """Determine the actions ``actor`` is allowed to take on ``resource``.

        Collects all actions allowed by allow rules in the Polar policy for the
        given combination of actor and resource.

        Identical to ``Oso.get_allowed_actions``.

        :param actor: The actor for whom to collect allowed actions
        :param resource: The resource being accessed
        :param allow_wildcard: Flag to determine behavior if the policy \
            includes a wildcard action. E.g., a rule allowing any action: \
            ``allow(_actor, _action, _resource)``. If ``True``, the method will \
            return ``["*"]``, if ``False``, the method will raise an exception.
        :type allow_wildcard: bool
        :return: A list of the unique allowed actions.
        """
        # Query with an unbound variable so Polar enumerates every action.
        results = self.policy.query_rule("allow", actor, Variable("action"), resource)
        actions = set()
        for result in results:
            action = result.get("bindings").get("action")
            if isinstance(action, Variable):
                # An unbound binding means a wildcard rule matched.
                if not allow_wildcard:
                    raise exceptions.OsoError(
                        """The result of authorized_actions() contained an
                        "unconstrained" action that could represent any
                        action, but allow_wildcard was set to False. To fix,
                        set allow_wildcard to True and compare with the "*"
                        string."""
                    )
                else:
                    return ["*"]
            actions.add(action)
        return list(actions)

    def authorize_field(self, actor, action, resource, field):
        """Ensure that ``actor`` is allowed to perform ``action`` on a given
        ``resource``'s ``field``.

        If the action is permitted by an ``allow_field`` rule in the policy,
        then this method returns ``None``. If the action is not permitted by the
        policy, this method will raise a ``ForbiddenError``.

        :param actor: The actor performing the request.
        :param action: The action the actor is attempting to perform on the
            field.
        :param resource: The resource being accessed.
        :param field: The name of the field being accessed.
        """
        if not self.policy.query_rule_once(
            "allow_field", actor, action, resource, field
        ):
            raise self._get_error(False)

    def authorized_fields(
        self, actor, action, resource, allow_wildcard=False
    ) -> List[Any]:
        """Determine the fields of ``resource`` on which ``actor`` is allowed to
        perform ``action``.

        Uses ``allow_field`` rules in the policy to find all allowed fields.

        :param actor: The actor for whom to collect allowed fields.
        :param action: The action being taken on the field.
        :param resource: The resource being accessed.
        :param allow_wildcard: Flag to determine behavior if the policy \
            includes a wildcard field. E.g., a rule allowing any field: \
            ``allow_field(_actor, _action, _resource, _field)``. If ``True``, the \
            method will return ``["*"]``, if ``False``, the method will raise an \
            exception.
        :type allow_wildcard: bool
        :return: A list of the unique allowed fields.
        """
        # Query with an unbound variable so Polar enumerates every field.
        results = self.policy.query_rule(
            "allow_field", actor, action, resource, Variable("field")
        )
        fields = set()
        for result in results:
            field = result.get("bindings").get("field")
            if isinstance(field, Variable):
                # An unbound binding means a wildcard rule matched.
                if not allow_wildcard:
                    raise exceptions.OsoError(
                        """The result of authorized_fields() contained an
                        "unconstrained" field that could represent any
                        field, but allow_wildcard was set to False. To fix,
                        set allow_wildcard to True and compare with the "*"
                        string."""
                    )
                else:
                    return ["*"]
            fields.add(field)
        return list(fields)
|
davidpelaez/oso | languages/python/oso/tests/test_enforcer.py | """Tests the Enforcement API"""
from oso.exceptions import AuthorizationError, ForbiddenError, NotFoundError
from oso import Enforcer
from pathlib import Path
import pytest
from oso import Policy
from polar import exceptions
from .test_oso import Actor, Widget, Company
@pytest.fixture
def test_enforcer():
    """Build an Enforcer over the shared test_oso policy and classes."""
    policy = Policy()
    policy.register_class(Actor, name="test_oso::Actor")
    policy.register_class(Widget, name="test_oso::Widget")
    policy.register_class(Company, name="test_oso::Company")
    policy.load_file(Path(__file__).parent / "test_oso.polar")
    return Enforcer(policy)
def test_authorize(test_enforcer):
    """authorize() returns None (no exception) for permitted actor/action pairs,
    whether the actor is a class instance, a dict, or a plain string."""
    actor = Actor(name="guest")
    resource = Widget(id="1")
    action = "read"

    test_enforcer.authorize(actor, action, resource)
    test_enforcer.authorize({"username": "guest"}, action, resource)
    test_enforcer.authorize("guest", action, resource)

    actor = Actor(name="president")
    action = "create"
    resource = Company(id="1")
    test_enforcer.authorize(actor, action, resource)
    test_enforcer.authorize({"username": "president"}, action, resource)
def test_fail_authorize(test_enforcer):
    """authorize() raises ForbiddenError when the actor can read the resource,
    NotFoundError when they cannot even read it."""
    actor = Actor(name="guest")
    resource = Widget(id="1")
    action = "not_allowed"

    # ForbiddenError is expected because actor can "read" resource
    with pytest.raises(ForbiddenError):
        test_enforcer.authorize(actor, action, resource)
    with pytest.raises(ForbiddenError):
        test_enforcer.authorize({"username": "guest"}, action, resource)

    # NotFoundError is expected because actor can NOT "read" resource
    resource = Company(id="1")
    with pytest.raises(NotFoundError):
        test_enforcer.authorize({"username": "guest"}, action, resource)
def test_authorized_actions(test_enforcer):
    """authorized_actions() collects allowed actions; wildcard rules raise
    unless allow_wildcard=True, which returns ["*"]."""
    rule = """allow(_actor: test_oso::Actor{name: "Sally"}, action, _resource: test_oso::Widget{id: "1"}) if
        action in ["CREATE", "UPDATE"];"""
    test_enforcer.policy.load_str(rule)

    user = Actor(name="Sally")
    resource = Widget(id="1")
    assert set(test_enforcer.authorized_actions(user, resource)) == set(
        ["read", "CREATE", "UPDATE"]
    )

    # A rule with an unbound action is a wildcard.
    rule = """allow(_actor: test_oso::Actor{name: "John"}, _action, _resource: test_oso::Widget{id: "1"});"""
    test_enforcer.policy.load_str(rule)

    user = Actor(name="John")
    with pytest.raises(exceptions.OsoError):
        test_enforcer.authorized_actions(user, resource)
    assert set(
        test_enforcer.authorized_actions(user, resource, allow_wildcard=True)
    ) == set(["*"])
def test_authorize_request(test_enforcer):
    """authorize_request() enforces allow_request rules against a Request object,
    raising ForbiddenError on mismatch."""

    class Request:
        # Minimal stand-in for an HTTP request: method + path.
        def __init__(self, method, path) -> None:
            self.method = method
            self.path = path

    policy = """
    allow_request("graham", request: Request) if
        request.path.startswith("/repos");

    allow_request(user: test_oso::Actor, request: Request) if
        request.path.startswith("/account")
        and user.verified;
    """
    verified = Actor("verified")
    verified.verified = True

    test_enforcer.policy.register_class(Request)
    test_enforcer.policy.load_str(policy)

    test_enforcer.authorize_request("graham", Request("GET", "/repos/1"))
    with pytest.raises(ForbiddenError):
        test_enforcer.authorize_request("sam", Request("GET", "/repos/1"))

    test_enforcer.authorize_request(verified, Request("GET", "/account"))
    with pytest.raises(ForbiddenError):
        test_enforcer.authorize_request("graham", Request("GET", "/account"))
def test_authorize_field(test_enforcer):
    """Field-level authorization on Widget for admin vs. guest actors."""
    admin = Actor(name="president")
    guest = Actor(name="guest")
    company = Company(id="1")
    resource = Widget(id=company.id)
    # Admin can update name
    test_enforcer.authorize_field(admin, "update", resource, "name")
    # Admin cannot update another field
    # NOTE(review): this assertion passes `guest`, not `admin`, so it does not
    # actually exercise the admin case described above -- confirm which actor
    # was intended.
    with pytest.raises(ForbiddenError):
        test_enforcer.authorize_field(guest, "update", resource, "foo")
    # Guests can read non-private fields
    test_enforcer.authorize_field(guest, "read", resource, "name")
    with pytest.raises(ForbiddenError):
        test_enforcer.authorize_field(guest, "read", resource, "private_field")
def test_authorized_fields(test_enforcer):
    """Compare readable/updatable field sets for admin vs. guest."""
    admin = Actor(name="president")
    guest = Actor(name="guest")
    company = Company(id="1")
    widget = Widget(id=company.id)
    every_field = {"name", "purpose", "private_field"}

    # Admin should be able to update and read every field.
    assert set(test_enforcer.authorized_fields(admin, "update", widget)) == every_field
    assert set(test_enforcer.authorized_fields(admin, "read", widget)) == every_field
    # Guests cannot update anything, and may only read the public fields.
    assert set(test_enforcer.authorized_fields(guest, "update", widget)) == set()
    assert set(test_enforcer.authorized_fields(guest, "read", widget)) == {"name", "purpose"}
def test_custom_errors():
    """A custom get_error factory replaces the built-in authorization errors."""
    class TestException(Exception):
        def __init__(self, is_not_found):
            self.is_not_found = is_not_found

    policy = Policy()
    enforcer = Enforcer(policy, get_error=lambda *args: TestException(*args))
    with pytest.raises(TestException) as excinfo:
        enforcer.authorize("graham", "frob", "bar")
    # With an empty policy the actor cannot even read, hence "not found".
    assert excinfo.value.is_not_found
def test_custom_read_action():
    """read_action controls the NotFound-vs-Forbidden distinction."""
    policy = Policy()
    enforcer = Enforcer(policy, read_action="fetch")
    with pytest.raises(AuthorizationError) as excinfo:
        enforcer.authorize("graham", "frob", "bar")
    # The actor cannot even "fetch" the resource -> pretend it does not exist.
    assert excinfo.type == NotFoundError
    # Allow user to "fetch" bar
    policy.load_str("""allow("graham", "fetch", "bar");""")
    with pytest.raises(AuthorizationError) as excinfo:
        enforcer.authorize("graham", "frob", "bar")
    # The resource is now visible, but the action is still denied.
    assert excinfo.type == ForbiddenError
# Allow running this test module directly (outside a pytest invocation).
if __name__ == "__main__":
    pytest.main([__file__])
|
ryuichi1208/koredare | koredare.py | <reponame>ryuichi1208/koredare<filename>koredare.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Everything is stuck here.
The images are downloaded from wikipedia.
"""
import aiohttp
import datetime
import itertools
import numpy
import os
import pprint
import requests
import shutil
import sys
import urllib.parse
from bs4 import BeautifulSoup
from collections import OrderedDict
from flask import Flask, request, redirect, jsonify, abort
from linebot import LineBotApi, WebhookHandler
from linebot.exceptions import InvalidSignatureError
from linebot.models import MessageEvent, TextMessage, TextSendMessage, ImageSendMessage
app = Flask(__name__)

"""
* Serving Flask app "hello.py" (lazy loading)
* Environment: development
* Debug mode: on
* Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
* Restarting with stat
* Debugger is active!
* Debugger PIN: 150-239-588
"""

# Credentials and deployment settings come from the environment.
LINE_BOT_ACCESS_TOKEN = os.getenv("LINE_BOT_ACCESS_TOKEN", None)
LINE_BOT_CHANNEL_SECRET = os.getenv("LINE_BOT_CHANNEL_SECRET", None)
HEROKU_APP_NAME = os.getenv("HEROKU_APP_NAME", None)
HEROKU_IMAGE_DOWNDLOADS_DIR = os.getenv("HEROKU_IMAGE_DOWNDLOADS_DIR", "static")

# class IllegalParameter(HTTPException):
#     code = 400
#     description = 'ILLIGAL PARAMETER'

# NOTE(review): the SDK clients are constructed *before* the None-check
# below; if the SDK rejects None credentials, move the check first.
linebot_api = LineBotApi(LINE_BOT_ACCESS_TOKEN)
handler = WebhookHandler(LINE_BOT_CHANNEL_SECRET)

# Refuse to start without credentials.
if LINE_BOT_ACCESS_TOKEN is None or LINE_BOT_CHANNEL_SECRET is None:
    app.logger.warn("Error : not set token...")
    sys.exit(1)
class self_logger(object):
    """
    Logging class for normal processing of flasks.
    Check heroku log as standard output is picked up.
    """

    def __init__(self, log_level, message):
        self.log_level = log_level
        self.message = message

    def log_print(self):
        """Print the stored message, prefixed with its level."""
        # BUG FIX: `message` was read as a bare (undefined) name, raising
        # NameError on every call; it must be the instance attribute.
        log = f"{[self.log_level]}: {self.message}"
        print(log)
def call_func_time(func):
    """
    Decorator for measuring function call time.

    BUG FIX: the wrapper previously discarded the wrapped function's return
    value, so every decorated callable returned None (e.g. the
    `exec_http_requests(url) == 1` error check could never be true).
    """
    def _wrapLog(*args, **kwargs):
        print(
            datetime.datetime.today().strftime("%Y/%m/%d %H:%M:%S"),
            "call : ",
            func.__name__,
        )
        # Propagate the wrapped function's result to the caller.
        return func(*args, **kwargs)
    return _wrapLog
@call_func_time
def exec_http_requests(url: str, headers: dict = None):
    """
    A function for issuing http requests.
    Responsibility is sent to the caller since only 404 is handled after
    execution: returns 1 on failure (HTTP 404 or unparseable URL).

    BUG FIXES:
    - HTTP header values must be strings; the previous "Keep-Alive" entry
      was a dict, which `requests` rejects.
    - The caller-supplied *headers* argument was silently overwritten; it
      is now honored (the mutable `{}` default is gone as well).
    """
    app.logger.info("request url " + url)
    if headers is None:
        headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5)",
            "Keep-Alive": "timeout=15, max=100",
        }
    try:
        res = requests.get(url, headers=headers)
        if res.status_code == 404:
            return 1
        parse_html_file(res.text)
    except requests.exceptions.MissingSchema:
        return 1
@call_func_time
def parse_html_file(res, name="阿部 寛"):
    """Find the Wikipedia image link titled *name* in HTML *res* and download it.

    BUG FIX: `find_all` returns a list, never None, so the old
    `if image_list is not None:` branch always ran and an empty result
    crashed with IndexError on `image_list[0]`. Test truthiness instead.
    """
    soup = BeautifulSoup(res, "lxml")
    image_list_org = soup.find_all("a", attrs={"class": "image"})
    image_list = [
        image_path for image_path in image_list_org if image_path.get("title") == name
    ]
    print("img", image_list)
    if image_list:
        # srcset holds "url scale" pairs; take the URL of the second entry.
        image_url = image_list[0].find("img").get("srcset").split(",")[1].split()
        url = "https:" + image_url[0]
        print(url)
        down_load_image(url)
    else:
        print("No such image file...")
@call_func_time
def down_load_image(url: str):
    """Stream the image at *url* into static/test.jpg; abort(400) on failure."""
    # NOTE(review): this constructs a self_logger but never calls
    # .log_print(), so nothing is actually logged here -- confirm intent.
    self_logger("INFO", url)
    image_bin = requests.get(url, stream=True)
    # response = requests.get(url, allow_redirects=False, timeout=timeout)
    # if response.status_code != 200:
    #     e = Exception("HTTP status: " + response.status_code)
    #     raise e
    if image_bin.status_code == 200:
        # Download in raw.
        with open("static/test.jpg", "wb") as f:
            image_bin.raw.decode_content = True
            shutil.copyfileobj(image_bin.raw, f)
    else:
        """
        Traceback (most recent call last):
        File "requests/models.py", line 832, in raise_for_status
        raise http_error
        requests.exceptions.HTTPError: 404 Client Error
        If the request exceeds the configured maximum number of redirects,
        a TooManyRedirects exception is raised.
        """
        abort(400)
def make_filename(base_dir, number, url):
    """
    Create a file name to save the file: <base_dir>/<number><ext-of-url>.

    Generalized: *number* may be an int or a str (it is coerced to str;
    previously an int raised TypeError on `number + ext`).
    Todo: generate a file name that matches the extension
    """
    ext = os.path.splitext(url)[1]
    filename = str(number) + ext
    fullpath = os.path.join(base_dir, filename)
    return fullpath
def decorate_args(func):
    """
    Decorator that removes the space between the last name and the name,
    then URL-encodes the result before calling the wrapped function.
    Any failure is converted into an HTTP 404 via Flask's abort().
    """
    def or_dec_sepalate(*args, **kwargs):
        # NOTE(review): the wrapper drops the wrapped function's return
        # value and only forwards the first positional argument.
        try:
            names = str(args[0]).replace(" ", "")
            names_enc = urllib.parse.quote(names)
            func(names_enc)
        except Exception:
            abort(404)
    return or_dec_sepalate
@decorate_args
def url_generator(name: str):
    """Build the Japanese-Wikipedia URL for *name*, fetch it, abort(404) on failure.

    NOTE(review): exec_http_requests is wrapped by call_func_time, which
    (as originally written) discards its return value, so the `== 1` check
    below can never be true until that decorator is fixed -- verify.
    """
    base_url = "https://ja.wikipedia.org/wiki"
    url = f"{base_url}/{name}"
    print(url)
    if exec_http_requests(url) == 1:
        abort(404)
@app.route("/_check/status")
@call_func_time
def status_check():
    """
    Endpoint for life and death monitoring.
    Returns the current timestamp plus a static "ok" status as JSON.
    """
    now = datetime.datetime.today().strftime("%Y/%m/%d %H:%M:%S")
    status = OrderedDict(date=now, status="ok")
    app.logger.info(status["status"])
    return jsonify(status)
@app.route("/callback", methods=["POST"])
def callback():
    """
    Callback function for sending a message.
    LINE webhook endpoint: verify the request signature and dispatch events.
    """
    signature = request.headers["X-Line-Signature"]
    body = request.get_data(as_text=True)
    app.logger.info("Request body: " + body)
    try:
        handler.handle(body, signature)
    except InvalidSignatureError:
        # Reject requests whose signature does not match the channel secret.
        abort(400)
    return "ok"
@call_func_time
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    """
    Linebot's main processing function.
    Searching and saving images are processed in different places.

    NOTE(review): decorators apply bottom-up, so handler.add() registers the
    *undecorated* function; the call_func_time wrapper is never invoked by
    the bot. Swap the decorator order if the timing log is wanted.
    """
    rev_message = url_generator(event.message.text)
    app.logger.info("Recv message " + event.message.text)
    # linebot_api.reply_message(
    #     event.reply_token, TextSendMessage(text="aaa")
    # )
    # NOTE(review): the incoming text is processed above, yet this
    # hard-coded name is fetched again here -- confirm intent.
    url_generator("阿部 寛")
    image = {
        "image_url": f"{HEROKU_APP_NAME}/static/test.jpg",
        "preview_image_url": f"{HEROKU_APP_NAME}/static/test.jpg",
    }
    image_message = ImageSendMessage(
        original_content_url=image["image_url"],
        preview_image_url=image["preview_image_url"],
    )
    linebot_api.reply_message(event.reply_token, image_message)
@app.errorhandler(403)
@app.errorhandler(404)
@app.errorhandler(500)
def no_such_human_pages(error):
    """
    Error handling process when 403/404/500 occurs; returns a plain-text body.

    BUG FIX: corrected the "direcotory" typo in the user-facing message.
    """
    return "No such file or directory"
def __init(obj, org, bk):
    # NOTE(review): unused placeholder; it does nothing and is never called
    # in this file -- candidate for removal.
    pass
# with open(urls_txt, "r") as fin:
# for line in fin:
# url = line.strip()
# filename = make_filename(images_dir, idx, url)
# print "%s" % (url)
# try:
# image = download_image(url)
# save_image(filename, image)
# idx += 1
# except KeyboardInterrupt:
# break
# except Exception as err:
# print "%s" % (err)
if __name__ == "__main__":
    # Bind address/port are overridable via the environment.
    FLASK_HOST = str(os.getenv("FLASK_HOST", "0.0.0.0"))
    FLASK_PORT = int(os.getenv("FLASK_PORT", 5000))
    # NOTE(review): debug mode is hard-coded on; disable for production.
    FLASK_DEBUG_MODE = True
    # app.logger.disabled = False
    app.run(host=FLASK_HOST, port=FLASK_PORT, debug=FLASK_DEBUG_MODE)
|
wdv4758h/rsglob | setup.py | <gh_stars>0
import os
import sys
from setuptools import find_packages, setup, Extension
# pip >= 10 moved parse_requirements into the private _internal package.
try: # for pip >= 10
    from pip._internal.req import parse_requirements
except ImportError:
    from pip.req import parse_requirements

# Self-bootstrap: install setuptools-rust on the fly if it is missing,
# then retry the import; abort with pip's exit code on failure.
try:
    from setuptools_rust import RustExtension
except ImportError:
    import subprocess
    errno = subprocess.call(
        [sys.executable, '-m', 'pip', 'install', 'setuptools-rust'])
    if errno:
        print("Please install setuptools-rust package")
        raise SystemExit(errno)
    else:
        from setuptools_rust import RustExtension
def get_requirements(filename):
    """Return the list of requirement strings parsed from *filename*.

    Relies on the module-level ROOT_DIR (assigned below; available by the
    time setup() calls this).
    """
    # parse_requirements() returns generator of pip.req.InstallRequirement instance
    install_requires = parse_requirements(
        os.path.join(ROOT_DIR, filename),
        session=False,
    )
    # requirements is a list of requirement
    # NOTE(review): on pip >= 20 this yields ParsedRequirement objects whose
    # str() is not the requirement line (use `.requirement` there) -- verify
    # against the pip version this build actually uses.
    requirements = list(map(lambda x: str(x).split()[0], install_requires))
    return requirements
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
# The single source of truth for the version is rsglob/__init__.py (VERSION).
version = __import__('rsglob').VERSION
setup_requires = ['setuptools-rust>=0.6.0']
install_requires = get_requirements('requirements.txt')
test_requires = get_requirements('requirements-test.txt')
# Compile the Rust crate described by Cargo.toml into rsglob._rsglob.
rust_extensions = [RustExtension('rsglob._rsglob', 'Cargo.toml')]

setup(
    name='rsglob',
    version=version,
    url='https://github.com/wdv4758h/rsglob',
    author='<NAME>',
    author_email='<EMAIL>',
    description=('Python glob in Rust'),
    long_description=open("README.rst").read(),
    download_url="https://github.com/wdv4758h/rsglob/archive/v{}.zip".format(
        version
    ),
    license='BSD',
    tests_require=test_requires,
    install_requires=install_requires,
    packages=find_packages(),
    rust_extensions=rust_extensions,
    # Binary extension modules cannot be imported from a zipped egg.
    zip_safe=False,
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        # 'Programming Language :: Python :: 2',
        # 'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        # 'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
|
wdv4758h/rsglob | rsglob/__init__.py | <reponame>wdv4758h/rsglob
"""Filename globbing utility.

Backed by the Rust extension ``rsglob._rsglob`` when it is built; falls
back to the standard-library :mod:`glob` implementation otherwise.
"""
__all__ = ["glob", "iglob", "escape"]

VERSION = '0.0.1'

try:
    import rsglob._rsglob as _glob

    def glob(pathname, *, recursive=False):
        """Return a list of paths matching *pathname* (str or bytes)."""
        if isinstance(pathname, str):
            return _glob.glob_str(pathname)
        elif isinstance(pathname, bytes):
            # FIXME: better way ?
            return list(map(str.encode, _glob.glob_str(pathname.decode())))

    def iglob(pathname, *, recursive=False):
        """Return an iterator over paths matching *pathname*."""
        # FIXME: better way ?
        return iter(glob(pathname, recursive=recursive))

    def escape(pathname):
        # TODO: not implemented for the Rust backend yet.
        pass

except ImportError:
    # BUG FIX: this was a bare `except:` that swallowed every error and left
    # the module without glob/iglob/escape entirely. Catch only the missing
    # extension and fall back to the pure-Python standard library.
    print("no _rsglob exist")
    from glob import glob, iglob, escape
|
wdv4758h/rsglob | tests/test_simple.py | <reponame>wdv4758h/rsglob<gh_stars>0
import glob as glob1
import rsglob as glob2
def test_nondot_files():
    # The Rust implementation must agree with the stdlib on visible files.
    assert set(glob2.glob("*")) == set(glob1.glob("*"))
def test_dot_files():
    # Hidden (dot) files must match as well.
    assert set(glob2.glob(".*")) == set(glob1.glob(".*"))
|
RyanNavillus/tcav | tcav/utils.py | """
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from scipy.stats import ttest_ind
import numpy as np
import tensorflow as tf
from tcav.tcav_results.results_pb2 import Result, Results
# Scalar result-dict keys copied verbatim onto the tcav.Result proto
# (see result_to_proto below).
_KEYS = [
    "cav_key", "cav_concept", "negative_concept", "target_class", "i_up",
    "val_directional_dirs_abs_mean", "val_directional_dirs_mean",
    "val_directional_dirs_std", "note", "alpha", "bottleneck"
]
def create_session(timeout=10000, interactive=True):
  """Create a tf session for the model.

  # This function is slight motification of code written by <NAME>

  Args:
    timeout: per-operation timeout in seconds (converted to ms below).
    interactive: if True, return an InteractiveSession; otherwise a
        plain Session.

  Returns:
    TF session.
  """
  graph = tf.Graph()
  config = tf.compat.v1.ConfigProto()
  # Grow GPU memory on demand instead of grabbing it all up front.
  config.gpu_options.allow_growth = True
  config.operation_timeout_in_ms = int(timeout*1000)
  if interactive:
    return tf.compat.v1.InteractiveSession(graph=graph, config=config)
  else:
    return tf.compat.v1.Session(graph=graph, config=config)
def flatten(nested_list):
  """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
  flat = []
  for inner in nested_list:
    flat.extend(inner)
  return flat
def process_what_to_run_expand(pairs_to_test,
                               random_counterpart=None,
                               num_random_exp=100,
                               random_concepts=None):
  """Expand (target, concepts) pairs with concept-vs-random pairs.

  A pair whose concept list has exactly one entry is expanded into one pair
  per random experiment, e.g. (t, [c]) becomes
  (t, [c, random_0]), (t, [c, random_1]), ...
  Pairs with two or more concepts pass through unchanged; empty concept
  lists are logged and dropped.

  Args:
    pairs_to_test: [(target1, [c1, c2, ...]), (target2, ...), ...]
    random_counterpart: random concept that must not appear in the
        generated comparisons.
    num_random_exp: number of random experiments per concept (capped at 100).
    random_concepts: optional list of random-concept names; when absent the
        names default to 'random500_<i>'.

  Returns:
    (all_concepts, new_pairs_to_test): the unique targets/concepts involved,
    and the expanded pair list.
  """

  def random_name(idx):
    # Explicit random-concept names win over the default naming scheme.
    return random_concepts[idx] if random_concepts else 'random500_{}'.format(idx)

  expanded = []
  for target, concept_set in pairs_to_test:
    if len(concept_set) > 1:
      # Already a comparison set; keep as-is.
      expanded.append((target, concept_set))
      continue
    if not concept_set:
      tf.compat.v1.logging.info('PAIR NOT PROCCESSED')
      continue
    concept = concept_set[0]
    wanted = min(100, num_random_exp)
    added = 0
    idx = 0
    while added < wanted:
      candidate = random_name(idx)
      idx += 1
      # Never compare a concept against itself or the dedicated counterpart.
      if candidate == concept or candidate == random_counterpart:
        continue
      expanded.append((target, [concept, candidate]))
      added += 1

  unique = set()
  for tgt, concepts in expanded:
    unique.update(concepts)
    unique.add(tgt)
  return list(unique), expanded
def process_what_to_run_concepts(pairs_to_test):
  """Flatten (target, [c1, c2, ...]) pairs into one [target, [c]] per concept.

  Args:
    pairs_to_test: a list of (target, concept-list) pairs, e.g.
        [("target1", ["concept1", "concept2"]), ...]

  Returns:
    [[target, [concept]], ...] -- one entry per (target, concept)
    combination, ready for concept-vs-random testing.
  """
  return [[target, [concept]]
          for target, concepts in pairs_to_test
          for concept in concepts]
def process_what_to_run_randoms(pairs_to_test, random_counterpart):
  """Pair every distinct target with the random counterpart concept.

  Args:
    pairs_to_test: a list of (target, concept-list) pairs.
    random_counterpart: the random concept each target is compared against.

  Returns:
    [[target, [random_counterpart]], ...] with one entry per unique target
    (order is unspecified, as in the original set-based implementation).
  """
  unique_targets = set(target for target, _ in pairs_to_test)
  return [[target, [random_counterpart]] for target in unique_targets]
# helper functions to write summary files
def print_results(results, random_counterpart=None, random_concepts=None, num_random_exp=100,
                  min_p_val=0.05):
  """Helper function to organize results.
  If you ran TCAV with a random_counterpart, supply it here, otherwise supply random_concepts.
  If you get unexpected output, make sure you are using the correct keywords.

  Args:
    results: dictionary of results from TCAV runs.
    random_counterpart: name of the random_counterpart used, if it was used.
    random_concepts: list of random experiments that were run.
    num_random_exp: number of random experiments that were run.
    min_p_val: minimum p value for statistical significance
  """

  # helper function, returns if this is a random concept
  def is_random_concept(concept):
    if random_counterpart:
      return random_counterpart == concept
    elif random_concepts:
      return concept in random_concepts
    else:
      # Default naming scheme used by process_what_to_run_expand.
      return 'random500_' in concept

  # print class, it will be the same for all
  print("Class =", results[0]['target_class'])

  # prepare data
  # dict with keys of concepts containing dict with bottlenecks
  result_summary = {}

  # random
  random_i_ups = {}

  for result in results:
    if result['cav_concept'] not in result_summary:
      result_summary[result['cav_concept']] = {}
    if result['bottleneck'] not in result_summary[result['cav_concept']]:
      result_summary[result['cav_concept']][result['bottleneck']] = []
    result_summary[result['cav_concept']][result['bottleneck']].append(result)

    # store random TCAV scores per bottleneck for the t-test below
    if is_random_concept(result['cav_concept']):
      if result['bottleneck'] not in random_i_ups:
        random_i_ups[result['bottleneck']] = []
      random_i_ups[result['bottleneck']].append(result['i_up'])

  # print concepts and classes with indentation
  for concept in result_summary:
    # if not random
    if not is_random_concept(concept):
      print(" ", "Concept =", concept)
      for bottleneck in result_summary[concept]:
        i_ups = [item['i_up'] for item in result_summary[concept][bottleneck]]
        # Calculate statistical significance vs. the random runs
        _, p_val = ttest_ind(random_i_ups[bottleneck], i_ups)
        print(3 * " ", "Bottleneck =", ("%s. TCAV Score = %.2f (+- %.2f), "
            "random was %.2f (+- %.2f). p-val = %.3f (%s)") % (
                bottleneck, np.mean(i_ups), np.std(i_ups),
                np.mean(random_i_ups[bottleneck]),
                np.std(random_i_ups[bottleneck]), p_val,
                "undefined" if np.isnan(p_val) else "not significant" if p_val > min_p_val else "significant"))
def make_dir_if_not_exists(directory):
  """Create *directory* (including parents) if it does not already exist."""
  if tf.io.gfile.exists(directory):
    return
  tf.io.gfile.makedirs(directory)
def result_to_proto(result):
  """Given a result dict, convert it to a tcav.Result proto.

  Args:
    result: a dictionary returned by tcav._run_single_set()

  Returns:
    TCAV.Result proto
  """
  result_proto = Result()
  # Scalar fields share names between the dict and the proto.
  for key in _KEYS:
    setattr(result_proto, key, result[key])
  positive_set_name = result["cav_concept"]
  negative_set_name = result["negative_concept"]
  for val in result["val_directional_dirs"]:
    result_proto.val_directional_dirs.append(val)
  # CAV accuracies are keyed by concept-set name in the dict.
  result_proto.cav_accuracies.positive_set_accuracy = result["cav_accuracies"][
      positive_set_name]
  result_proto.cav_accuracies.negative_set_accuracy = result["cav_accuracies"][
      negative_set_name]
  result_proto.cav_accuracies.overall_accuracy = result["cav_accuracies"][
      "overall"]
  return result_proto
def results_to_proto(results):
  """Given a list of result dicts, convert it to a tcav.Results proto.

  Args:
    results: a list of dictionaries returned by tcav.run()

  Returns:
    TCAV.Results proto
  """
  results_proto = Results()
  results_proto.results.extend(result_to_proto(entry) for entry in results)
  return results_proto
|
RyanNavillus/tcav | tcav/model_test.py | """
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import googletest
from tcav.model import ModelWrapper
# The tests below exercise graph-mode (TF1-style) APIs, so eager must be off.
tf.compat.v1.disable_eager_execution()
class ModelTest_model(ModelWrapper):
  """A mock model of model class for ModelTest class."""

  def __init__(self, model_path=None, node_dict=None):
    # Delegate straight to ModelWrapper so the tests exercise its loading
    # logic (checkpoint / SavedModel / frozen graph) unmodified.
    super(ModelTest_model, self).__init__(
        model_path=model_path, node_dict=node_dict)
class ModelTest(googletest.TestCase):
  """Exports a tiny graph (output = input * var1, var1 == 111) in three
  formats and checks ModelWrapper can load and differentiate each."""

  def setUp(self):
    # Create an execution graph
    x = tf.compat.v1.placeholder(dtype=tf.float64, shape=[], name='input')
    a = tf.Variable(111, name='var1', dtype=tf.float64)
    y = tf.math.multiply(x, a, name='output')

    self.ckpt_dir = '/tmp/ckpts/'
    self.saved_model_dir = '/tmp/saved_model/'
    self.frozen_graph_dir = '/tmp/frozen_graph/'
    self.tmp_dirs = [self.ckpt_dir, self.saved_model_dir, self.frozen_graph_dir]
    # Start every test from clean scratch directories.
    for d in self.tmp_dirs:
      if tf.io.gfile.exists(d):
        tf.io.gfile.rmtree(d)
      tf.io.gfile.makedirs(d)

    with tf.compat.v1.Session() as sess:
      tf.compat.v1.initialize_all_variables().run()
      # Save as checkpoint
      saver = tf.compat.v1.train.Saver()
      saver.save(sess, self.ckpt_dir + 'model.ckpt', write_meta_graph=True)
      # Save as SavedModel
      tf.compat.v1.saved_model.simple_save(
          sess,
          self.saved_model_dir,
          inputs={'input': x},
          outputs={'output': y})
      graph = sess.graph
      input_graph_def = graph.as_graph_def()
      output_node_names = ['output']
      # Freeze variables into constants so the graph is self-contained.
      output_graph_def = graph_util.convert_variables_to_constants(
          sess, input_graph_def, output_node_names)
      # Save as binary graph
      tf.io.write_graph(
          output_graph_def, self.frozen_graph_dir, 'graph.pb', as_text=False)
      # Save as text graph
      tf.io.write_graph(
          output_graph_def, self.frozen_graph_dir, 'graph.pbtxt', as_text=True)

  def tearDown(self):
    for d in self.tmp_dirs:
      tf.io.gfile.rmtree(d)

  def _check_output_and_gradient(self, model_path, import_prefix=False):
    """Load the model at *model_path*; verify the forward pass (3 * 111)
    and the gradient tensor for the 'var1' bottleneck."""
    model = ModelTest_model(model_path=model_path, node_dict={'v1': 'var1'})
    input_name = 'input:0'
    output_name = 'output:0'
    if import_prefix:
      # Frozen graphs are loaded under an 'import/' name scope.
      input_name = 'import/' + input_name
      output_name = 'import/' + output_name
    out = model.sess.run(output_name, feed_dict={input_name: 3})
    self.assertEqual(out, 333.0)

    model.loss = model.sess.graph.get_tensor_by_name(output_name)
    # Make sure that loaded graph can be modified
    model._make_gradient_tensors()
    grad = model.sess.run(
        model.bottlenecks_gradients['v1'], feed_dict={input_name: 555})
    self.assertEqual(grad, 555.0)

  def test_try_loading_model_from_ckpt(self):
    self._check_output_and_gradient(self.ckpt_dir)

  def test_try_loading_model_from_saved_model(self):
    self._check_output_and_gradient(self.saved_model_dir)

  def test_try_loading_model_from_frozen_pb(self):
    model_path = self.frozen_graph_dir + 'graph.pb'
    self._check_output_and_gradient(model_path, import_prefix=True)

  def test_try_loading_model_from_frozen_txt(self):
    model_path = self.frozen_graph_dir + 'graph.pbtxt'
    self._check_output_and_gradient(model_path, import_prefix=True)
# Run under TensorFlow's googletest runner when executed directly.
if __name__ == '__main__':
  googletest.main()
|
ebenpack/dotfiles | qtile/utilities.py | # symlinked at ~/.config/qtile/utilities.py
from libqtile.config import Screen
from libqtile import bar, widget
import subprocess, re
class Xrandr(object):
    """
    Object representing connected screens, as reported by xrandr.
    Captures `xrandr` output once at construction time; the properties
    below only parse that cached text.
    """

    def __init__(self):
        xrandr_process = subprocess.Popen("xrandr", stdout=subprocess.PIPE)
        out, err = xrandr_process.communicate()
        self.xrandr_output = out.decode(encoding='UTF-8')

    @property
    def screens(self):
        """Group the xrandr output into one list of lines per output.

        Lines that do not start with a space begin a new output; indented
        lines (mode listings) belong to the current one.

        BUG FIX: the first non-indented line used to flush an *empty*
        accumulator, so the result always began with a spurious []; a
        trailing blank line likewise produced a bogus [""] entry. Empty
        accumulators and blank lines are now skipped.
        """
        screens = []
        temp = []
        for line in self.xrandr_output.split('\n')[1:]:
            if not line:
                continue
            if not line.startswith(" "):
                if temp:
                    screens.append(temp)
                temp = []
            temp.append(line)
        if temp:
            screens.append(temp)
        return screens

    @property
    def connected_screens(self):
        """Names of outputs reported as ' connected' (excludes 'disconnected')."""
        return re.findall("(.*)\sconnected", self.xrandr_output)
def initialize_screens():
    """Configure xrandr for one or two monitors and return qtile Screen objects.

    With two connected outputs both are enabled (VGA1 to the left of LVDS1)
    and two bars are returned; otherwise LVDS1 runs alone with a single,
    fully-featured bar.
    """
    X = Xrandr()
    S1 = "LVDS1"
    S2 = "VGA1"
    # Primary (laptop) bar: full widget set including battery/wlan/systray.
    primary_screen = Screen(
        top = bar.Bar(
            [
                widget.GroupBox(
                    urgent_alert_method='text',
                    fontsize=14,
                    borderwidth=1),
                widget.CurrentLayout(),
                widget.WindowName(foreground = "a0a0a0"),
                widget.Prompt(foreground = "CF0C0C"),
                widget.Notify(),
                widget.Systray(),
                widget.Wlan(interface="wlp4s0b1"),
                # NOTE(review): power_now_file points at 'energy_now';
                # 'power_now' looks intended -- confirm against the hardware.
                widget.Battery(
                    energy_now_file='energy_now',
                    energy_full_file='energy_full',
                    power_now_file='energy_now',
                    update_delay = 5,
                    foreground = "7070ff"),
                widget.Volume(foreground = "70ff70"),
                widget.Clock(foreground = "a0a0a0",
                    fmt = '%Y-%m-%d %a %I:%M %p'),
            ], 22,
        ),
    )
    # Secondary (external) bar: reduced widget set.
    secondary_screen = Screen(
        top = bar.Bar(
            [
                widget.GroupBox(
                    urgent_alert_method='text',
                    fontsize=14,
                    borderwidth=1),
                widget.CurrentLayout(),
                widget.WindowName(foreground = "a0a0a0"),
                widget.Prompt(),
                widget.Clock(foreground = "a0a0a0",
                    fmt = '%Y-%m-%d %a %I:%M %p'),
            ], 22,
        ),
    )
    one_screen = [primary_screen]
    two_screens = [primary_screen, secondary_screen]
    if len(X.connected_screens) == 2:
        # Both panels present: extend the desktop, external display on the left.
        subprocess.call(["xrandr", "--output", S1, "--auto", "--output",
                         S2, "--auto", "--left-of", S1])
        screens = two_screens
    else:
        # Laptop panel only; make sure the external output is switched off.
        subprocess.call(["xrandr", "--output", S1, "--auto", "--output",
                         S2, "--off"])
        screens = one_screen
    return screens
ebenpack/dotfiles | qtile/config.py | <gh_stars>0
# symlinked at ~/.config/qtile/config.py
from libqtile.config import Key, Group, Click, Drag
from libqtile.command import lazy
from libqtile import layout, hook
import utilities
import subprocess, re
mod = "mod4"

# NOTE(review): despite the name, this dict is splatted into *layout*
# constructors below (border colours), not into widgets -- confirm naming.
widget_defaults = dict(
    font = 'Source Code Pro',
    border_focus="#de4377",
    border_normal="#ad8e99"
)

keys = [
    # Log out
    Key([mod, "shift"], "q",
        lazy.shutdown()),
    Key([mod, "shift"], "r",
        lazy.restart()),
    Key([mod, "shift"], "c",
        lazy.window.kill()),

    # Switch between windows in current stack pane
    Key([mod], "l",
        lazy.layout.down()),
    Key([mod], "h",
        lazy.layout.up()),

    # Move windows up or down in current stack
    Key([mod], "k",
        lazy.layout.next()),
    Key([mod], "j",
        lazy.layout.previous()),

    # Resize windows in current stack pane
    Key([mod, "shift"], "h",
        lazy.layout.decrease_ratio()),
    Key([mod, "shift"], "l",
        lazy.layout.increase_ratio()),

    # MonadTall resize
    ####
    # It would be nice if these key bindings could be same as the two above. Is this possible?
    ####
    Key([mod, "shift"], "j",
        lazy.layout.grow()),
    Key([mod, "shift"], "k",
        lazy.layout.shrink()),

    # Move windows up or down in current stack
    Key([mod, "control"], "k",
        lazy.layout.shuffle_down()),
    Key([mod, "control"], "j",
        lazy.layout.shuffle_up()),
    Key([mod, "control"], "h",
        lazy.layout.section_down()),
    Key([mod, "control"], "l",
        lazy.layout.section_up()),

    # Switch window focus to other pane(s) of stack
    Key([mod, "shift"], "space",
        lazy.layout.next()),

    # Swap panes of split stack
    Key([mod], "s",
        lazy.layout.rotate()),

    # Toggle between split and unsplit sides of stack.
    # Split = all windows displayed
    # Unsplit = 1 window displayed, like Max layout, but still with multiple stack panes
    Key([mod, "shift"], "Return",
        lazy.layout.toggle_split()),

    # Key([mod], "h",
    #     lazy.to_screen(1)),
    # Key([mod], "l",
    #     lazy.to_screen(0)),

    # Launch specific applications
    Key([mod], "Return",
        lazy.spawn("terminator")),
    Key([mod], "w",
        lazy.spawn("firefox")),
    Key([mod], "r",
        lazy.spawncmd()),

    # Toggle between different layouts as defined below
    Key([mod], "space",
        lazy.nextlayout()),
    Key([mod], "f",
        lazy.window.toggle_fullscreen()),
    Key([mod, "control"], "space",
        lazy.window.toggle_floating()),
    Key([mod], "m",
        lazy.window.toggle_maximize()),
    Key([mod], "n",
        lazy.window.toggle_minimize()),

    # Move between groups
    Key([mod], "Right",
        lazy.screen.nextgroup()),
    Key([mod], "Left",
        lazy.screen.prevgroup()),
]
# NOTE(review): `mouse` is rebound to an empty tuple near the bottom of this
# file, so these Drag/Click bindings are effectively disabled -- confirm intent.
mouse = [
    Drag([mod], "Button1", lazy.window.set_position_floating(),
         start=lazy.window.get_position()),
    Drag([mod], "Button3", lazy.window.set_size_floating(),
         start=lazy.window.get_size()),
    Click([mod], "Button2", lazy.window.bring_to_front())
]

# Eight numbered workspaces.
groups = [
    Group("1"),
    Group("2"),
    Group("3"),
    Group("4"),
    Group("5"),
    Group("6"),
    Group("7"),
    Group("8"),
]
for i in groups:
    # mod4 + letter of group = switch to group
    keys.append(
        Key([mod], i.name, lazy.group[i.name].toscreen()))
    # mod4 + shift + letter of group = switch to & move focused window to group
    keys.append(
        Key([mod, "shift"], i.name, lazy.window.togroup(i.name)))
dbgroups_key_binder = None
dbgroups_app_rules = []

# NOTE(review): widget_defaults (defined above) acts as *layout* defaults
# here (border colours) despite its name.
layouts = [
    layout.Tile(**widget_defaults),
    layout.RatioTile(**widget_defaults),
    layout.MonadTall(**widget_defaults),
    layout.Max(),
    layout.TreeTab(),
    layout.Zoomy(),
]

# Screen/bar construction lives in utilities.py (xrandr-based detection).
screens = utilities.initialize_screens()
def is_running(process):
    """Return True if *process* (a regex) matches the `ps axuw` listing.

    BUG FIX: under Python 3 the Popen stdout lines are *bytes* while
    *process* is a str, so re.search() raised
    `TypeError: cannot use a string pattern on a bytes-like object`.
    The output is now read in full and decoded before matching.
    """
    s = subprocess.Popen(["ps", "axuw"], stdout=subprocess.PIPE)
    out, _ = s.communicate()
    return re.search(process, out.decode(errors="replace")) is not None
def execute_once(process, options=""):
    """Spawn *process* (with optional *options*) unless it is already running.

    Returns the Popen handle when spawned, otherwise None.
    """
    if is_running(process):
        return None
    argv = process.split()
    if options:
        argv.extend(options.split())
    return subprocess.Popen(argv)
main = None
follow_mouse_focus = True
cursor_warp = False
floating_layout = layout.Floating()
# NOTE(review): this rebinds `mouse` to an empty tuple, discarding the
# Drag/Click bindings defined earlier in this file -- confirm intent.
mouse = ()
@hook.subscribe.startup
def startup():
    """Autostart helpers; runs on every qtile (re)start."""
    execute_once("conky")
    execute_once("dropboxd")
    # Redshift: location 44.6,-68.37; day/night colour temps; gamma; randr mode.
    execute_once("redshift", "-l 44.6:-68.37 -t 5700:3600 -g 0.8 -m randr")
    # feh just resets the wallpaper, so re-running it on restart is harmless.
    subprocess.Popen([
        "feh",
        "--bg-tile",
        "/home/ebenpack/Documents/background.png"
    ])
Ziems/Machine-Learning | CS231n/assignment2/cs231n/classifiers/convnet.py | import numpy as np
from cs231n.layers import *
from cs231n.fast_layers import *
from cs231n.layer_utils import *
class ConvNet(object):
    """
    A L-layer convolutional network with the following architecture:

    [conv-relu-pool2x2]xL - [affine - relu]xM - affine - softmax

    The network operates on minibatches of data that have shape (N, C, H, W)
    consisting of N images, each with height H and width W and with C input
    channels.
    """

    def __init__(self, input_dim=(3, 32, 32), num_filters=[16, 32], filter_size=3,
                 hidden_dims=[100, 100], num_classes=10, weight_scale=1e-3, reg=0.0,
                 dtype=np.float32, use_batchnorm=False):
        """
        Initialize a new network.

        Inputs:
        - input_dim: Tuple (C, H, W) giving size of input data
        - num_filters: List with the number of filters to use in each
          convolutional layer (one entry per conv block)
        - filter_size: Size of filters to use in the convolutional layers
        - hidden_dims: List with the number of units of each fully-connected
          hidden layer
        - num_classes: Number of scores to produce from the final affine layer.
        - weight_scale: Scalar giving standard deviation for random
          initialization of weights.
        - reg: Scalar giving L2 regularization strength
        - dtype: numpy datatype to use for computation.
        - use_batchnorm: Insert (spatial) batch normalization before each ReLU.
        """
        self.use_batchnorm = use_batchnorm
        self.params = {}
        self.reg = reg
        self.dtype = dtype
        self.bn_params = {}
        self.filter_size = filter_size
        self.L = len(num_filters)   # number of conv/relu/pool blocks
        self.M = len(hidden_dims)   # number of affine/relu blocks
        # Size of the input.
        Cinput, Hinput, Winput = input_dim
        stride_conv = 1  # stride
        # Initialize the weights for the conv layers.  F[i] is the channel
        # count entering conv block i+1.
        F = [Cinput] + num_filters
        for i in xrange(self.L):
            idx = i + 1
            W = weight_scale * \
                np.random.randn(
                    F[i + 1], F[i], self.filter_size, self.filter_size)
            b = np.zeros(F[i + 1])
            self.params.update({'W' + str(idx): W,
                                'b' + str(idx): b})
            if self.use_batchnorm:
                bn_param = {'mode': 'train',
                            'running_mean': np.zeros(F[i + 1]),
                            'running_var': np.zeros(F[i + 1])}
                gamma = np.ones(F[i + 1])
                beta = np.zeros(F[i + 1])
                self.bn_params.update({
                    'bn_param' + str(idx): bn_param})
                self.params.update({
                    'gamma' + str(idx): gamma,
                    'beta' + str(idx): beta})
        # Initialize the weights for the affine-relu layers.  The first one
        # takes the flattened conv activation volume as input.
        Hconv, Wconv = self.Size_Conv(
            stride_conv, self.filter_size, Hinput, Winput, self.L)
        dims = [Hconv * Wconv * F[-1]] + hidden_dims
        for i in xrange(self.M):
            idx = self.L + i + 1
            W = weight_scale * \
                np.random.randn(dims[i], dims[i + 1])
            b = np.zeros(dims[i + 1])
            self.params.update({'W' + str(idx): W,
                                'b' + str(idx): b})
            if self.use_batchnorm:
                bn_param = {'mode': 'train',
                            'running_mean': np.zeros(dims[i + 1]),
                            'running_var': np.zeros(dims[i + 1])}
                gamma = np.ones(dims[i + 1])
                beta = np.zeros(dims[i + 1])
                self.bn_params.update({
                    'bn_param' + str(idx): bn_param})
                self.params.update({
                    'gamma' + str(idx): gamma,
                    'beta' + str(idx): beta})
        # Final scoring (affine) layer.
        W = weight_scale * np.random.randn(dims[-1], num_classes)
        b = np.zeros(num_classes)
        self.params.update({'W' + str(self.L + self.M + 1): W,
                            'b' + str(self.L + self.M + 1): b})
        # Cast every parameter to the requested datatype.
        for k, v in self.params.iteritems():
            self.params[k] = v.astype(dtype)

    def Size_Conv(self, stride_conv, filter_size, H, W, Nbconv):
        """Return the (H, W) spatial size after Nbconv conv/pool blocks.

        Each block is a same-padded convolution followed by 2x2 max pooling
        with stride 2, so the spatial size is halved per block.  Integer
        division is intentional (Python 2).
        """
        P = (filter_size - 1) / 2  # padding
        Hc = (H + 2 * P - filter_size) / stride_conv + 1
        Wc = (W + 2 * P - filter_size) / stride_conv + 1
        width_pool = 2
        height_pool = 2
        stride_pool = 2
        Hp = (Hc - height_pool) / stride_pool + 1
        Wp = (Wc - width_pool) / stride_pool + 1
        if Nbconv == 1:
            return Hp, Wp
        else:
            return self.Size_Conv(stride_conv, filter_size, Hp, Wp,
                                  Nbconv - 1)

    def loss(self, X, y=None):
        """
        Evaluate loss and gradient for the network.

        Input / output: Same API as TwoLayerNet in fc_net.py.
        """
        X = X.astype(self.dtype)
        mode = 'test' if y is None else 'train'
        N = X.shape[0]
        # pass conv_param to the forward pass for the convolutional layer
        filter_size = self.filter_size
        conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}
        # pass pool_param to the forward pass for the max-pooling layer
        pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
        if self.use_batchnorm:
            for key, bn_param in self.bn_params.iteritems():
                # BUG FIX: store the phase under the 'mode' key.  The old
                # code did `bn_param[mode] = mode`, which created a useless
                # 'train'/'test' entry and left batchnorm stuck in its
                # initial mode forever.
                bn_param['mode'] = mode
        scores = None
        # Forward pass.  Intermediate activations and caches are kept in
        # `blocks`, keyed by layer index.
        blocks = {}
        blocks['h0'] = X
        # Conv / (bn) / relu / pool blocks.
        for i in xrange(self.L):
            idx = i + 1
            w = self.params['W' + str(idx)]
            b = self.params['b' + str(idx)]
            h = blocks['h' + str(idx - 1)]
            if self.use_batchnorm:
                beta = self.params['beta' + str(idx)]
                gamma = self.params['gamma' + str(idx)]
                bn_param = self.bn_params['bn_param' + str(idx)]
                h, cache_h = conv_norm_relu_pool_forward(
                    h, w, b, conv_param, pool_param, gamma, beta, bn_param)
            else:
                h, cache_h = conv_relu_pool_forward(
                    h, w, b, conv_param, pool_param)
            blocks['h' + str(idx)] = h
            blocks['cache_h' + str(idx)] = cache_h
        # Affine / (bn) / relu blocks; the first one flattens the conv
        # activation volume.
        for i in xrange(self.M):
            idx = self.L + i + 1
            h = blocks['h' + str(idx - 1)]
            if i == 0:
                h = h.reshape(N, np.product(h.shape[1:]))
            w = self.params['W' + str(idx)]
            b = self.params['b' + str(idx)]
            if self.use_batchnorm:
                beta = self.params['beta' + str(idx)]
                gamma = self.params['gamma' + str(idx)]
                bn_param = self.bn_params['bn_param' + str(idx)]
                h, cache_h = affine_norm_relu_forward(h, w, b, gamma,
                                                      beta, bn_param)
            else:
                h, cache_h = affine_relu_forward(h, w, b)
            blocks['h' + str(idx)] = h
            blocks['cache_h' + str(idx)] = cache_h
        # Final affine scoring layer.
        idx = self.L + self.M + 1
        w = self.params['W' + str(idx)]
        b = self.params['b' + str(idx)]
        h = blocks['h' + str(idx - 1)]
        h, cache_h = affine_forward(h, w, b)
        blocks['h' + str(idx)] = h
        blocks['cache_h' + str(idx)] = cache_h
        scores = blocks['h' + str(idx)]
        if y is None:
            return scores
        loss, grads = 0, {}
        # Softmax data loss plus L2 regularization over every weight matrix.
        data_loss, dscores = softmax_loss(scores, y)
        reg_loss = 0
        for w in [self.params[f] for f in self.params.keys() if f[0] == 'W']:
            reg_loss += 0.5 * self.reg * np.sum(w * w)
        loss = data_loss + reg_loss
        # Backward pass.
        # Backprop into the scoring layer.
        idx = self.L + self.M + 1
        dh = dscores
        h_cache = blocks['cache_h' + str(idx)]
        dh, dw, db = affine_backward(dh, h_cache)
        blocks['dh' + str(idx - 1)] = dh
        blocks['dW' + str(idx)] = dw
        blocks['db' + str(idx)] = db
        # Backprop into the affine blocks.
        for i in range(self.M)[::-1]:
            idx = self.L + i + 1
            dh = blocks['dh' + str(idx)]
            h_cache = blocks['cache_h' + str(idx)]
            if self.use_batchnorm:
                dh, dw, db, dgamma, dbeta = affine_norm_relu_backward(
                    dh, h_cache)
                blocks['dbeta' + str(idx)] = dbeta
                blocks['dgamma' + str(idx)] = dgamma
            else:
                dh, dw, db = affine_relu_backward(dh, h_cache)
            blocks['dh' + str(idx - 1)] = dh
            blocks['dW' + str(idx)] = dw
            blocks['db' + str(idx)] = db
        # Backprop into the conv blocks; un-flatten the gradient at the
        # topmost conv block (the one whose output was reshaped above).
        for i in range(self.L)[::-1]:
            idx = i + 1
            dh = blocks['dh' + str(idx)]
            h_cache = blocks['cache_h' + str(idx)]
            if i == max(range(self.L)[::-1]):
                dh = dh.reshape(*blocks['h' + str(idx)].shape)
            if self.use_batchnorm:
                dh, dw, db, dgamma, dbeta = conv_norm_relu_pool_backward(
                    dh, h_cache)
                blocks['dbeta' + str(idx)] = dbeta
                blocks['dgamma' + str(idx)] = dgamma
            else:
                dh, dw, db = conv_relu_pool_backward(dh, h_cache)
            blocks['dh' + str(idx - 1)] = dh
            blocks['dW' + str(idx)] = dw
            blocks['db' + str(idx)] = db
        # Gather gradients; the regularization term is added to the W grads.
        list_dw = {key[1:]: val + self.reg * self.params[key[1:]]
                   for key, val in blocks.iteritems() if key[:2] == 'dW'}
        list_db = {key[1:]: val for key, val in blocks.iteritems() if key[:2] ==
                   'db'}
        list_dgamma = {key[1:]: val for key, val in blocks.iteritems() if key[
            :6] == 'dgamma'}
        list_dbeta = {key[1:]: val for key, val in blocks.iteritems() if key[
            :5] == 'dbeta'}
        grads = {}
        grads.update(list_dw)
        grads.update(list_db)
        grads.update(list_dgamma)
        grads.update(list_dbeta)
        return loss, grads
class ThreeLayerConvNet(object):
"""
A three-layer convolutional network with the following architecture:
conv - relu - 2x2 max pool - affine - relu - affine - softmax
The network operates on minibatches of data that have shape (N, C, H, W)
consisting of N images, each with height H and width W and with C input
channels.
"""
def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=7,
hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,
dtype=np.float32, use_batchnorm=False):
"""
Initialize a new network.
Inputs:
- input_dim: Tuple (C, H, W) giving size of input data
- num_filters: Number of filters to use in the convolutional layer
- filter_size: Size of filters to use in the convolutional layer
- hidden_dim: Number of units to use in the fully-connected hidden layer
- num_classes: Number of scores to produce from the final affine layer.
- weight_scale: Scalar giving standard deviation for random initialization
of weights.
- reg: Scalar giving L2 regularization strength
- dtype: numpy datatype to use for computation.
"""
self.use_batchnorm = use_batchnorm
self.params = {}
self.reg = reg
self.dtype = dtype
self.bn_params = {}
#######################################################################
# TODO: Initialize weights and biases for the three-layer convolutional #
# network. Weights should be initialized from a Gaussian with standard #
# deviation equal to weight_scale; biases should be initialized to zero. #
# All weights and biases should be stored in the dictionary self.params. #
# Store weights and biases for the convolutional layer using the keys 'W1' #
# and 'b1'; use keys 'W2' and 'b2' for the weights and biases of the #
# hidden affine layer, and keys 'W3' and 'b3' for the weights and biases #
# of the output affine layer. #
#######################################################################
# Size of the input
C, H, W = input_dim
# Conv layer
# The parameters of the conv is of size (F,C,HH,WW) with
# F give the nb of filters, C,HH,WW characterize the size of
# each filter
# Input size : (N,C,H,W)
# Output size : (N,F,Hc,Wc)
F = num_filters
filter_height = filter_size
filter_width = filter_size
stride_conv = 1 # stride
P = (filter_size - 1) / 2 # padd
Hc = (H + 2 * P - filter_height) / stride_conv + 1
Wc = (W + 2 * P - filter_width) / stride_conv + 1
W1 = weight_scale * np.random.randn(F, C, filter_height, filter_width)
b1 = np.zeros(F)
# Pool layer : 2*2
# The pool layer has no parameters but is important in the
# count of dimension.
# Input : (N,F,Hc,Wc)
# Ouput : (N,F,Hp,Wp)
width_pool = 2
height_pool = 2
stride_pool = 2
Hp = (Hc - height_pool) / stride_pool + 1
Wp = (Wc - width_pool) / stride_pool + 1
# Hidden Affine layer
# Size of the parameter (F*Hp*Wp,H1)
# Input: (N,F*Hp*Wp)
# Output: (N,Hh)
Hh = hidden_dim
W2 = weight_scale * np.random.randn(F * Hp * Wp, Hh)
b2 = np.zeros(Hh)
# Output affine layer
# Size of the parameter (Hh,Hc)
# Input: (N,Hh)
# Output: (N,Hc)
Hc = num_classes
W3 = weight_scale * np.random.randn(Hh, Hc)
b3 = np.zeros(Hc)
self.params.update({'W1': W1,
'W2': W2,
'W3': W3,
'b1': b1,
'b2': b2,
'b3': b3})
# With batch normalization we need to keep track of running means and
# variances, so we need to pass a special bn_param object to each batch
# normalization layer. You should pass self.bn_params[0] to the forward pass
# of the first batch normalization layer, self.bn_params[1] to the forward
# pass of the second batch normalization layer, etc.
if self.use_batchnorm:
print 'We use batchnorm here'
bn_param1 = {'mode': 'train',
'running_mean': np.zeros(F),
'running_var': np.zeros(F)}
gamma1 = np.ones(F)
beta1 = np.zeros(F)
bn_param2 = {'mode': 'train',
'running_mean': np.zeros(Hh),
'running_var': np.zeros(Hh)}
gamma2 = np.ones(Hh)
beta2 = np.zeros(Hh)
self.bn_params.update({'bn_param1': bn_param1,
'bn_param2': bn_param2})
self.params.update({'beta1': beta1,
'beta2': beta2,
'gamma1': gamma1,
'gamma2': gamma2})
for k, v in self.params.iteritems():
self.params[k] = v.astype(dtype)
def loss(self, X, y=None):
"""
Evaluate loss and gradient for the three-layer convolutional network.
Input / output: Same API as TwoLayerNet in fc_net.py.
"""
X = X.astype(self.dtype)
mode = 'test' if y is None else 'train'
if self.use_batchnorm:
for key, bn_param in self.bn_params.iteritems():
bn_param[mode] = mode
N = X.shape[0]
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
W3, b3 = self.params['W3'], self.params['b3']
if self.use_batchnorm:
bn_param1, gamma1, beta1 = self.bn_params[
'bn_param1'], self.params['gamma1'], self.params['beta1']
bn_param2, gamma2, beta2 = self.bn_params[
'bn_param2'], self.params['gamma2'], self.params['beta2']
# pass conv_param to the forward pass for the convolutional layer
filter_size = W1.shape[2]
conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}
# pass pool_param to the forward pass for the max-pooling layer
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
scores = None
#######################################################################
# TODO: Implement the forward pass for the three-layer convolutional net, #
# computing the class scores for X and storing them in the scores #
# variable. #
#######################################################################
# Forward into the conv layer
x = X
w = W1
b = b1
if self.use_batchnorm:
beta = beta1
gamma = gamma1
bn_param = bn_param1
conv_layer, cache_conv_layer = conv_norm_relu_pool_forward(
x, w, b, conv_param, pool_param, gamma, beta, bn_param)
else:
conv_layer, cache_conv_layer = conv_relu_pool_forward(
x, w, b, conv_param, pool_param)
N, F, Hp, Wp = conv_layer.shape # output shape
# Forward into the hidden layer
x = conv_layer.reshape((N, F * Hp * Wp))
w = W2
b = b2
if self.use_batchnorm:
gamma = gamma2
beta = beta2
bn_param = bn_param2
hidden_layer, cache_hidden_layer = affine_norm_relu_forward(
x, w, b, gamma, beta, bn_param)
else:
hidden_layer, cache_hidden_layer = affine_relu_forward(x, w, b)
N, Hh = hidden_layer.shape
# Forward into the linear output layer
x = hidden_layer
w = W3
b = b3
scores, cache_scores = affine_forward(x, w, b)
if y is None:
return scores
loss, grads = 0, {}
#######################################################################
# TODO: Implement the backward pass for the three-layer convolutional net, #
# storing the loss and gradients in the loss and grads variables. Compute #
# data loss using softmax, and make sure that grads[k] holds the gradients #
# for self.params[k]. Don't forget to add L2 regularization! #
#######################################################################
data_loss, dscores = softmax_loss(scores, y)
reg_loss = 0.5 * self.reg * np.sum(W1**2)
reg_loss += 0.5 * self.reg * np.sum(W2**2)
reg_loss += 0.5 * self.reg * np.sum(W3**2)
loss = data_loss + reg_loss
# Backpropagation
grads = {}
# Backprop into output layer
dx3, dW3, db3 = affine_backward(dscores, cache_scores)
dW3 += self.reg * W3
# Backprop into first layer
if self.use_batchnorm:
dx2, dW2, db2, dgamma2, dbeta2 = affine_norm_relu_backward(
dx3, cache_hidden_layer)
else:
dx2, dW2, db2 = affine_relu_backward(dx3, cache_hidden_layer)
dW2 += self.reg * W2
# Backprop into the conv layer
dx2 = dx2.reshape(N, F, Hp, Wp)
if self.use_batchnorm:
dx, dW1, db1, dgamma1, dbeta1 = conv_norm_relu_pool_backward(
dx2, cache_conv_layer)
else:
dx, dW1, db1 = conv_relu_pool_backward(dx2, cache_conv_layer)
dW1 += self.reg * W1
grads.update({'W1': dW1,
'b1': db1,
'W2': dW2,
'b2': db2,
'W3': dW3,
'b3': db3})
if self.use_batchnorm:
grads.update({'beta1': dbeta1,
'beta2': dbeta2,
'gamma1': dgamma1,
'gamma2': dgamma2})
#######################################################################
# END OF YOUR CODE #
#######################################################################
return loss, grads
pass |
Ziems/Machine-Learning | CS231n/assignment2/cs231n/layer_utils.py | from cs231n.layers import *
from cs231n.fast_layers import *
def affine_relu_forward(x, w, b):
    """Affine transform followed by a ReLU.

    Inputs:
    - x: Input to the affine layer
    - w, b: Weights for the affine layer

    Returns a tuple of:
    - out: Output from the ReLU
    - cache: Object to give to the backward pass
    """
    pre_act, fc_cache = affine_forward(x, w, b)
    activated, relu_cache = relu_forward(pre_act)
    return activated, (fc_cache, relu_cache)
def affine_relu_backward(dout, cache):
    """Backward pass for the affine-relu convenience layer."""
    fc_cache, relu_cache = cache
    d_pre_act = relu_backward(dout, relu_cache)
    return affine_backward(d_pre_act, fc_cache)
def affine_norm_relu_forward(x, w, b, gamma, beta, bn_param):
    """Affine transform, then batch normalization, then a ReLU.

    Inputs:
    - x: Input to the affine layer
    - w, b: Weights for the affine layer
    - gamma, beta: Scale/shift parameters for batch normalization
    - bn_param: Batchnorm bookkeeping (mode, running mean and variance)

    Returns a tuple of:
    - out: Output from the ReLU
    - cache: Object to give to the backward pass
    """
    pre_norm, fc_cache = affine_forward(x, w, b)
    normed, norm_cache = batchnorm_forward(pre_norm, gamma, beta, bn_param)
    out, relu_cache = relu_forward(normed)
    return out, (fc_cache, norm_cache, relu_cache)
def affine_norm_relu_backward(dout, cache):
    """Backward pass for the affine-batchnorm-relu convenience layer."""
    fc_cache, norm_cache, relu_cache = cache
    d_normed = relu_backward(dout, relu_cache)
    d_pre_norm, dgamma, dbeta = batchnorm_backward_alt(d_normed, norm_cache)
    dx, dw, db = affine_backward(d_pre_norm, fc_cache)
    return dx, dw, db, dgamma, dbeta
def conv_relu_forward(x, w, b, conv_param):
    """Convolution followed by a ReLU.

    Inputs:
    - x: Input to the convolutional layer
    - w, b, conv_param: Weights and parameters for the convolutional layer

    Returns a tuple of:
    - out: Output from the ReLU
    - cache: Object to give to the backward pass
    """
    conv_out, conv_cache = conv_forward_fast(x, w, b, conv_param)
    out, relu_cache = relu_forward(conv_out)
    return out, (conv_cache, relu_cache)
def conv_relu_backward(dout, cache):
    """Backward pass for the conv-relu convenience layer."""
    conv_cache, relu_cache = cache
    d_conv = relu_backward(dout, relu_cache)
    return conv_backward_fast(d_conv, conv_cache)
def conv_relu_pool_forward(x, w, b, conv_param, pool_param):
    """Convolution, then a ReLU, then max pooling.

    Inputs:
    - x: Input to the convolutional layer
    - w, b, conv_param: Weights and parameters for the convolutional layer
    - pool_param: Parameters for the pooling layer

    Returns a tuple of:
    - out: Output from the pooling layer
    - cache: Object to give to the backward pass
    """
    conv_out, conv_cache = conv_forward_fast(x, w, b, conv_param)
    relu_out, relu_cache = relu_forward(conv_out)
    out, pool_cache = max_pool_forward_fast(relu_out, pool_param)
    return out, (conv_cache, relu_cache, pool_cache)
def conv_relu_pool_backward(dout, cache):
    """Backward pass for the conv-relu-pool convenience layer."""
    conv_cache, relu_cache, pool_cache = cache
    d_relu = max_pool_backward_fast(dout, pool_cache)
    d_conv = relu_backward(d_relu, relu_cache)
    return conv_backward_fast(d_conv, conv_cache)
def conv_norm_relu_forward(x, w, b, conv_param, gamma, beta, bn_param):
    """Convolution, then spatial batch normalization, then a ReLU.

    (No pooling is performed here, unlike conv_norm_relu_pool_forward.)

    Inputs:
    - x: Input to the convolutional layer
    - w, b, conv_param: Weights and parameters for the convolutional layer
    - gamma, beta, bn_param: Spatial batchnorm parameters and bookkeeping

    Returns a tuple of:
    - out: Output from the ReLU
    - cache: Object to give to the backward pass
    """
    conv_out, conv_cache = conv_forward_fast(x, w, b, conv_param)
    normed, norm_cache = spatial_batchnorm_forward(conv_out, gamma, beta,
                                                   bn_param)
    out, relu_cache = relu_forward(normed)
    return out, (conv_cache, norm_cache, relu_cache)
def conv_norm_relu_backward(dout, cache):
    """Backward pass for the conv-batchnorm-relu convenience layer."""
    conv_cache, norm_cache, relu_cache = cache
    d_normed = relu_backward(dout, relu_cache)
    d_conv, dgamma, dbeta = spatial_batchnorm_backward(d_normed, norm_cache)
    dx, dw, db = conv_backward_fast(d_conv, conv_cache)
    return dx, dw, db, dgamma, dbeta
def conv_norm_relu_pool_forward(x, w, b, conv_param, pool_param, gamma, beta, bn_param):
    """Convolution, spatial batch normalization, ReLU, then max pooling.

    Inputs:
    - x: Input to the convolutional layer
    - w, b, conv_param: Weights and parameters for the convolutional layer
    - pool_param: Parameters for the pooling layer
    - gamma, beta, bn_param: Spatial batchnorm parameters and bookkeeping

    Returns a tuple of:
    - out: Output from the pooling layer
    - cache: Object to give to the backward pass
    """
    conv_out, conv_cache = conv_forward_fast(x, w, b, conv_param)
    normed, norm_cache = spatial_batchnorm_forward(conv_out, gamma, beta,
                                                   bn_param)
    relu_out, relu_cache = relu_forward(normed)
    out, pool_cache = max_pool_forward_fast(relu_out, pool_param)
    return out, (conv_cache, norm_cache, relu_cache, pool_cache)
def conv_norm_relu_pool_backward(dout, cache):
    """Backward pass for the conv-batchnorm-relu-pool convenience layer."""
    conv_cache, norm_cache, relu_cache, pool_cache = cache
    d_relu = max_pool_backward_fast(dout, pool_cache)
    d_normed = relu_backward(d_relu, relu_cache)
    d_conv, dgamma, dbeta = spatial_batchnorm_backward(d_normed, norm_cache)
    dx, dw, db = conv_backward_fast(d_conv, conv_cache)
    return dx, dw, db, dgamma, dbeta
import os
import re
import urllib
import urllib.request

from selenium import webdriver
CHROME_DRIVER_PATH = os.path.join(os.path.dirname(__file__), 'chromedriver')
TOP_PAGE = 'http://example.com'
SAVE_DIR = 'download'


# BUG FIX: the crawl code used to run at module top level *before* the
# helper functions below were defined, raising NameError immediately.  All
# definitions now come first and the crawl lives in main().

def to_href(e):
    """Return the element's href attribute (None when absent)."""
    return e.get_attribute('href')


def to_src(e):
    """Return the element's src attribute (None when absent)."""
    return e.get_attribute('src')


def is_jpg(p):
    """True when the path/URL looks like a JPEG."""
    return p.find('.jpg') > -1 or p.find('.jpeg') > -1


def is_not_jpg(p):
    return not is_jpg(p)


def is_sumple_link(r):
    """True for links of the form sumpleN_M.htm (literal dot escaped)."""
    return re.search(r'sumple\d_\d\.htm', r) is not None


def list_href(es):
    """Collect the non-None href attributes of a list of elements."""
    refs = map(to_href, es)
    return list(filter(lambda r: r is not None, refs))


def mkdir_p(d):
    """Create directory d if needed; raise if it still does not exist."""
    if not os.path.isdir(d):
        os.mkdir(d)
    if not os.path.isdir(d):
        # BUG FIX: `Error` was undefined (NameError); raise a real exception.
        raise RuntimeError("failed to make directory")


def download_to(url, d):
    """Download url into directory d (created on demand)."""
    mkdir_p(d)
    to = d + "/" + os.path.basename(url)
    print("Downloading " + url + "...")
    urllib.request.urlretrieve(url, to)


def _download_page_images(browser):
    """Download every JPEG <img> on the current page into SAVE_DIR."""
    imgs = browser.find_elements_by_tag_name('img')
    # Guard against <img> tags with no src (to_src returns None).
    srcs = [s for s in map(to_src, imgs) if s and is_jpg(s)]
    for src in srcs:
        download_to(src, SAVE_DIR)


def download_pickup(browser, pickup):
    """Open a pickup page and download its JPEG images."""
    browser.get(pickup)
    _download_page_images(browser)


def download_sumple(browser, sumple):
    """Open a sumple page, download its images, then follow sub-pages."""
    browser.get(sumple)
    _download_page_images(browser)
    refs = list_href(browser.find_elements_by_tag_name('a'))
    for sumplei in filter(is_sumple_link, refs):
        download_pickup(browser, sumplei)


def main():
    browser = webdriver.Chrome(CHROME_DRIVER_PATH)
    browser.get(TOP_PAGE)
    _download_page_images(browser)
    refs = list_href(browser.find_elements_by_tag_name('a'))
    pickups = list(filter(lambda r: r.find('pickup') > -1, refs))
    sumples = list(filter(lambda r: r.find('sumple') > -1, refs))
    for pickup in pickups:
        download_pickup(browser, pickup)
    for sumple in sumples:
        download_sumple(browser, sumple)


if __name__ == '__main__':
    main()
|
GregorTpk/blmol | blmol.py | """Scripts for loading molecular geometries into Blender.
Main use is to define the "molecule" object, which can be used to
draw models of molecular coordinates.
Written by <NAME>, www.hartleygroup.org and
blog.hartleygroup.org.
Hosted here:
https://github.com/scotthartley/blmol
Some aspects (especially rotating the bond cylinders) based on this
wonderful blog post from <NAME>:
http://patrick-fuller.com/molecules-from-smiles-molfiles-in-blender/
His project is hosted here:
https://github.com/patrickfuller/blender-chemicals
"""
import numpy as np
import time
# Python outside of Blender doesn't play all that well with bpy, so need
# to handle ImportError.
try:
import bpy
import bmesh
bpy_avail = True
except ImportError:
bpy_avail = False
# Dictionary of color definitions (RGB + alpha tuples). Blender RGB
# colors can be conveniently determined by using a uniform png of the
# desired color as a background image, then using the eyedropper. Colors
# with the "_cb" tag are based on colorblind-safe colors as described in
# this Nature Methods editorial DOI:10.1038/nmeth.1618.
COLORS = {
    # RGBA tuples; alpha is always 1.0.
    'black': (0, 0, 0, 1.0),
    'dark_red': (0.55, 0, 0, 1.0),
    'gray': (0.2, 0.2, 0.2, 1.0),
    'dark_gray': (0.1, 0.1, 0.1, 1.0),
    'green': (0.133, 0.545, 0.133, 1.0),
    'dark_green': (0.1, 0.5, 0.1, 1.0),
    'indigo': (0.294, 0, 0.509, 1.0),
    'light_gray': (0.7, 0.7, 0.7, 1.0),
    'orange': (1.0, 0.647, 0, 1.0),
    'purple': (0.627, 0.125, 0.941, 1.0),
    'red': (0.8, 0, 0, 1.0),
    'royal_blue': (0.255, 0.412, 0.882, 1.0),
    'white': (1.0, 1.0, 1.0, 1.0),
    'yellow': (1.0, 1.0, 0, 1.0),
    'violet': (0.561, 0, 1.0, 1.0),
    'blue_cb': (0, 0.168, 0.445, 1.0),
    'bluish_green_cb': (0, 0.620, 0.451, 1.0),
    'orange_cb': (0.791, 0.347, 0, 1.0),
    'reddish_purple_cb': (0.800, 0.475, 0.655, 1.0),
    'sky_blue_cb': (0.337, 0.706, 0.914, 1.0),
    'vermillion_cb': (0.665, 0.112, 0, 1.0),
    'yellow_cb': (0.871, 0.776, 0.054, 1.0),
}
# Element symbol (uppercase) -> atomic number.
ATOMIC_NUMBERS = {
    'H': 1,
    'LI': 3,
    'B': 5,
    'C': 6,
    'N': 7,
    'O': 8,
    'F': 9,
    'NA': 11,
    'MG': 12,
    'P': 15,
    'S': 16,
    'CL': 17,
    'K': 19,
    'ZN': 30,
    'BR': 35,
    'RB': 37,
    'I': 53,
    'CS': 55
}
# Dictionary of Van der Waals radii, by atomic number, from Wolfram
# Alpha.  Units: angstroms (consistent with UNIT_CONV below).
RADII = {
    1: 1.20,
    3: 1.82,
    5: 1.92,  # From wikipedia
    6: 1.70,
    7: 1.55,
    8: 1.52,
    9: 1.47,
    11: 2.27,
    12: 1.73,  # From wikipedia
    15: 1.80,
    16: 1.80,
    17: 1.75,
    19: 2.75,
    30: 1.39,  # From wikipedia
    35: 1.85,
    37: 3.03,
    53: 1.98,
    55: 3.43
}
# Dictionaries of colors for drawing elements, by atomic number. Used by
# several functions when the `color = 'by_element'` option is passed.
# Values are keys into COLORS above.
ELEMENT_COLORS = {
    1: 'white',
    3: 'violet',
    5: 'orange',
    6: 'gray',
    7: 'royal_blue',
    8: 'red',
    9: 'green',
    11: 'violet',
    12: 'dark_green',
    15: 'orange',
    16: 'yellow',
    17: 'green',
    19: 'violet',
    30: 'dark_gray',
    35: 'dark_red',
    37: 'violet',
    53: 'indigo',
    55: 'violet'
}
# Conversion factors for 1 BU. Default is typically 1 nm. Assumes
# geometries are input with coords in angstroms.
UNIT_CONV = {
    'nm': 0.1,
    'A': 1.0
}
def _create_new_material(name, color):
    """Create and return a new Blender material.

    Args:
        name (str): Name for the new material (e.g., 'red')
        color (tuple): RGBA color for the new material (diffuse_color)
            (e.g., (1, 0, 0, 1))

    Returns:
        The new material.
    """
    material = bpy.data.materials.new(name)
    material.diffuse_color = color
    material.roughness = 0.5
    material.specular_color = (1, 1, 1)
    material.specular_intensity = 0.2
    return material
class Atom:
    """A single atom.
    Attributes:
        at_num (int): The atomic number of the atom.
        location (numpy array): The xyz location of the atom, in
            Angstroms.
        id_num (int): A unique identifier number (used in the Blender
            object name).
    """
    def __init__(self, atomic_number, location, id_num):
        self.at_num = atomic_number
        self.location = location  # np.array of xyz coords, in angstroms
        self.id_num = id_num
    def draw(self, color='by_element', radius=None, units='nm',
             scale=1.0, subsurf_level=2, segments=16):
        """Draw the atom in Blender as a smooth-shaded UV sphere.
        Args:
            color (string, ='by_element'): If 'by_element', coloring is
                done by element. Otherwise specifies a COLORS key.
            radius (float, =None): If None, draws at the van der Waals
                radius. Otherwise specifies the radius in angstroms.
            units (string, ='nm'): 1 BU = 1 nm by default. Can also be
                set to angstroms ('A').
            scale (float, =1.0): Scaling factor for the atom. Useful
                when generating ball-and-stick models.
            subsurf_level (int, =2): Subsurface subdivisions that will
                be applied (0 disables the modifier).
            segments (int, =16): Number of segments in each UV sphere
                primitive
        Returns:
            The blender object.
        """
        # The corrected location (i.e., scaled from angstroms to BU).
        loc_corr = tuple(c*UNIT_CONV[units] for c in self.location)
        # Work out the sphere radius in BU.
        if not radius:
            rad_adj = RADII[self.at_num]*UNIT_CONV[units]*scale
        else:
            rad_adj = radius*UNIT_CONV[units]*scale
        # Create sphere as bmesh (cheaper than bpy.ops primitives).
        bm = bmesh.new()
        bmesh.ops.create_uvsphere(bm,
                                  u_segments=segments,
                                  v_segments=segments,
                                  radius=rad_adj)
        # Smooth shading must be set per-face on the bmesh.
        for f in bm.faces:
            f.smooth = True
        # Convert to mesh.
        me = bpy.data.meshes.new("Mesh")
        bm.to_mesh(me)
        bm.free()
        # Assign mesh to object and place in space.
        atom_sphere = bpy.data.objects.new("atom({})_{}".format(
            self.at_num, self.id_num), me)
        bpy.context.collection.objects.link(atom_sphere)
        atom_sphere.location = loc_corr
        # Assign subsurface modifier, if requested.
        if subsurf_level != 0:
            atom_sphere.modifiers.new('Subsurf', 'SUBSURF')
            atom_sphere.modifiers['Subsurf'].levels = subsurf_level
        # Color atom and assign material (created on first use and then
        # shared by name).
        if color == 'by_element':
            atom_color = ELEMENT_COLORS[self.at_num]
        else:
            atom_color = color
        if atom_color not in bpy.data.materials:
            _create_new_material(atom_color, COLORS[atom_color])
        atom_sphere.data.materials.append(bpy.data.materials[atom_color])
        return atom_sphere
class Bond:
"""A bond between two atoms.
Attributes:
atom1 (atom): The first atom in the bond.
atom2 (atom): The second atom in the bond.
"""
def __init__(self, atom1, atom2):
self.atom1 = atom1
self.atom2 = atom2
@staticmethod
def _draw_half(location, length, rot_angle, rot_axis, element,
radius=0.2, color='by_element', units='nm',
vertices=64, edge_split=False):
"""Draw half of a bond (static method).
Draws half of a bond, given the location and length. Bonds are
drawn in halves to facilitate coloring by element.
Args:
location (np.array): The center point of the half bond.
length (float): The length of the half bond.
rot_angle (float): Angle by which bond will be rotated.
rot_axis (np.array): Axis of rotation.
element (int): atomic number of element of the bond (for
coloring).
radius (float, =0.2): radius of the bond.
color (string, ='by_element'): color of the bond. If
'by_element', uses element coloring.
units (string, ='nm'): 1 BU = 1 nm, by default. Can change
to angstroms ('A').
vertices (int, =64): Number of vertices in each bond
cylinder.
edge_split (bool, =False): Whether to apply the edge split
modifier to each bond.
Returns:
The new bond (Blender object).
"""
loc_corr = tuple(c*UNIT_CONV[units] for c in location)
len_corr = length * UNIT_CONV[units]
radius_corr = radius * UNIT_CONV[units]
bpy.ops.mesh.primitive_cylinder_add(vertices=vertices,
radius=radius_corr,
depth=len_corr, location=loc_corr,
end_fill_type='NOTHING')
# Generate an orientation matrix from rot_axis to handle changes in
# Blender's API introduced in v 2.8.
rot_matrix_z = rot_axis/np.linalg.norm(rot_axis)
rot_matrix_y = np.cross(rot_matrix_z, [0, 0, 1])
rot_matrix_x = np.cross(rot_matrix_y, rot_matrix_z)
rot_matrix_y = rot_matrix_y/np.linalg.norm(rot_matrix_y)
rot_matrix_x = rot_matrix_x/np.linalg.norm(rot_matrix_x)
rot_matrix = [rot_matrix_x, rot_matrix_y, rot_matrix_z]
bpy.ops.transform.rotate(value=rot_angle, orient_axis='Z',
orient_matrix=rot_matrix,
constraint_axis=(False, False, True))
bpy.ops.object.shade_smooth()
if edge_split:
bpy.ops.object.modifier_add(type='EDGE_SPLIT')
bpy.ops.object.modifier_apply(modifier='EdgeSplit')
if color == 'by_element':
bond_color = ELEMENT_COLORS[element]
else:
bond_color = color
if bond_color not in bpy.data.materials:
_create_new_material(bond_color, COLORS[bond_color])
bpy.context.object.data.materials.append(
bpy.data.materials[bond_color])
return bpy.context.object
    def draw(self, radius=0.2, color='by_element', units='nm',
             vertices=64, edge_split=False):
        """Draw the bond as two half bonds (to allow per-element coloring).
        Args:
            radius (float, =0.2): Radius of cylinder in angstroms.
            color (string, ='by_element'): Color of the bond. If
                'by_element', each half gets element coloring.
            units (string, ='nm'): 1 BU = 1 nm, by default. Can change
                to angstroms ('A').
            vertices (int, =64): Number of vertices in each bond
                cylinder.
            edge_split (bool, =False): Whether to apply the edge split
                modifier to each bond.
        Returns:
            The bond (Blender object), with both halves joined.
        """
        created_objects = []
        # Midpoint of the bond; each half spans one atom -> midpoint.
        center_loc = (self.atom1.location + self.atom2.location)/2
        bond_vector = self.atom1.location - self.atom2.location
        length = np.linalg.norm(bond_vector)
        bond_axis = bond_vector/length
        # Cylinders are created along +Z; rotate about the axis that is
        # perpendicular to both Z and the bond direction.
        cyl_axis = np.array((0, 0, 1))
        rot_axis = np.cross(bond_axis, cyl_axis)
        # Fix will not draw bond if perfectly aligned along z axis
        # because rot_axis becomes (0, 0, 0).
        if ((bond_axis == np.array((0, 0, 1))).all()
                or (bond_axis == np.array((0, 0, -1))).all()):
            rot_axis = np.array((1, 0, 0))
        angle = -np.arccos(np.dot(cyl_axis, bond_axis))
        # First half: centered halfway between atom1 and the midpoint.
        start_center = (self.atom1.location + center_loc)/2
        created_objects.append(Bond._draw_half(start_center, length/2, angle,
                               rot_axis, self.atom1.at_num, radius, color,
                               units, vertices, edge_split))
        # Second half: centered halfway between atom2 and the midpoint.
        end_center = (self.atom2.location + center_loc)/2
        created_objects.append(Bond._draw_half(end_center, length/2, angle,
                               rot_axis, self.atom2.at_num, radius, color,
                               units, vertices, edge_split))
        # Deselect all objects in scene.
        for obj in bpy.context.selected_objects:
            obj.select_set(state=False)
        # Select all newly created objects.
        for obj in created_objects:
            obj.select_set(state=True)
        # NOTE(review): no active object is set before join(); in Blender
        # 2.8+ the halves may merge into whatever object is active -- confirm.
        bpy.ops.object.join()
        bpy.context.object.name = "bond_{}({})_{}({})".format(
            self.atom1.id_num, self.atom1.at_num, self.atom2.id_num,
            self.atom2.at_num)
        return bpy.context.object
class Molecule:
    """A molecule: a collection of atoms and bonds drawable in Blender.
    Attributes:
        atoms (list, = []): List of atoms (atom objects) in molecule.
        bonds (list, = []): List of bonds (bond objects) in molecule.
    """
    def __init__(self, name='molecule', atoms=None, bonds=None):
        # None defaults avoid the shared-mutable-default-argument pitfall.
        self.name = name
        if atoms is None:
            self.atoms = []
        else:
            self.atoms = atoms
        if bonds is None:
            self.bonds = []
        else:
            self.bonds = bonds
    def add_atom(self, atom):
        """Adds an atom to the molecule."""
        self.atoms.append(atom)
    def add_bond(self, a1id, a2id):
        """Adds a bond to the molecule, using atom ids (no-op if the bond
        between the two ids already exists, in either direction)."""
        if not self.search_bondids(a1id, a2id):
            self.bonds.append(Bond(self.search_atomid(a1id),
                                   self.search_atomid(a2id)))
    def search_atomid(self, id_to_search):
        """Searches through atom list and returns atom object
        corresponding to (unique) id; None if not found."""
        for atom in self.atoms:
            if atom.id_num == id_to_search:
                return atom
        return None
    def search_bondids(self, id1, id2):
        """Searches through bond list and returns bond object
        corresponding to (unique) ids, in either order; None if absent."""
        for b in self.bonds:
            if ((id1, id2) == (b.atom1.id_num, b.atom2.id_num) or
                    (id2, id1) == (b.atom1.id_num, b.atom2.id_num)):
                return b
        return None
    def draw_bonds(self, caps=True, radius=0.2, color='by_element',
                   units='nm', join=True, with_H=True, subsurf_level=1,
                   vertices=64, edge_split=False):
        """Draws the molecule's bonds.
        Args:
            caps (bool, =True): If true, each bond capped with sphere of
                radius at atom position. Make false if drawing
                ball-and-stick model using separate atom drawings.
            radius (float, =0.2): Radius of bonds in angstroms.
            color (string, ='by_element'): Color of the bonds. If
                'by_element', each gets element coloring.
            units (string, ='nm'): 1 BU = 1 nm, by default. Can change
                to angstroms ('A').
            join (bool, =True): If true, all bonds are joined together
                into a single Bl object.
            with_H (bool, =True): Include H's.
            subsurf_level (int, =1): Subsurface subdivisions that will
                be applied to the atoms (end caps).
            vertices (int, =64): Number of vertices in each bond
                cylinder.
            edge_split (bool, =False): Whether to apply the edge split
                modifier to each bond.
        Returns:
            The bonds as a single Blender object, if join=True.
            Otherwise, None.
        """
        created_objects = []
        # Skip bonds touching hydrogen (atomic number 1) when with_H=False.
        for b in self.bonds:
            if with_H or (b.atom1.at_num != 1 and b.atom2.at_num != 1):
                created_objects.append(b.draw(radius=radius,
                                              color=color,
                                              units=units,
                                              vertices=vertices,
                                              edge_split=edge_split))
        if caps:
            # Cap each atom position with a sphere so the cylinders
            # appear continuous at the joints.
            for a in self.atoms:
                if with_H or a.at_num != 1:
                    created_objects.append(a.draw(color=color,
                                                  radius=radius,
                                                  units=units,
                                                  subsurf_level=subsurf_level))
        if join:
            # Deselect all objects in scene.
            for obj in bpy.context.selected_objects:
                obj.select_set(state=False)
            # Select drawn bonds.
            for obj in created_objects:
                obj.select_set(state=True)
            # NOTE(review): unlike draw_atoms, no active object is set before
            # join(); in Blender 2.8+ the result may merge into whatever
            # object happens to be active -- confirm.
            bpy.ops.object.join()
            bpy.context.object.name = self.name + '_bonds'
            return bpy.context.object
        else:
            return None
    def draw_atoms(self, color='by_element', radius=None, units='nm',
                   scale=1.0, join=True, with_H=True, subsurf_level=2,
                   segments=16):
        """Draw spheres for all atoms.
        Args:
            color (str, ='by_element'): If 'by_element', uses colors in
                ELEMENT_COLORS. Otherwise, can specify color for whole
                model. Must be defined in COLORS.
            radius (float, =None): If specified, gives radius of all
                atoms.
            units (str, ='nm'): Units for 1 BU. Can also be A.
            scale (float, =1.0): Scale factor applied to each atom.
            join (bool, =True): If true, all atoms are joined together
                into a single Bl object.
            with_H (bool, =True): Include the hydrogens.
            subsurf_level (int, =2): Subsurface subdivisions that will
                be applied to the atoms.
            segments (int, =16): Number of segments in each UV sphere
                primitive
        Returns:
            None.  NOTE(review): the original docstring promised the
            joined object, but the bare `return` below yields None;
            callers relying on a return value receive None.
        """
        # Store start time to time script.
        start_time = time.time()
        # Holds links to all created objects, so that they can be
        # joined.
        created_objects = []
        # Initiate progress monitor over mouse cursor.
        bpy.context.window_manager.progress_begin(0, len(self.atoms))
        n = 0
        for a in self.atoms:
            if with_H or a.at_num != 1:
                created_objects.append(a.draw(color=color, radius=radius,
                                              units=units, scale=scale,
                                              subsurf_level=subsurf_level,
                                              segments=segments))
            n += 1
            bpy.context.window_manager.progress_update(n)
        # End progress monitor.
        bpy.context.window_manager.progress_end()
        if join:
            # Deselect all objects in scene.
            for obj in bpy.context.selected_objects:
                obj.select_set(state=False)
            # Select all newly created objects.
            for obj in created_objects:
                obj.select_set(state=True)
            bpy.context.view_layer.objects.active = created_objects[0]
            bpy.ops.object.join()
            bpy.context.object.name = self.name + '_atoms'
        print("{} seconds".format(time.time()-start_time))
        return
    def read_pdb(self, filename):
        """Loads a pdb file into a molecule object. Only accepts atoms
        with Cartesian coords through the ATOM/HETATM label and bonds
        through the CONECT label.
        Args:
            filename (string): The target file.
        """
        # Column ranges follow the fixed-width PDB format: id 7-11,
        # element symbol 77-78, x/y/z in 31-54.
        # NOTE(review): the ATOM and HETATM branches are identical and
        # could be merged.
        with open(filename) as pdbfile:
            for line in pdbfile:
                if line[0:4] == "ATOM":
                    idnum = int(line[6:11])
                    atnum = ATOMIC_NUMBERS[line[76:78].strip().upper()]
                    coords = np.array((float(line[30:38]), float(line[38:46]),
                                       float(line[46:54])))
                    self.add_atom(Atom(atnum, coords, idnum))
                elif line[0:6] == "HETATM":
                    idnum = int(line[6:11])
                    atnum = ATOMIC_NUMBERS[line[76:78].strip().upper()]
                    coords = np.array((float(line[30:38]), float(line[38:46]),
                                       float(line[46:54])))
                    self.add_atom(Atom(atnum, coords, idnum))
                elif line[0:6] == "CONECT":
                    # Loads atoms as a list. First atom is bonded to the
                    # remaining atoms (up to four).
                    atoms = line[6:].split()
                    for bonded_atom in atoms[1:]:
                        self.add_bond(int(atoms[0]), int(bonded_atom))
|
rockofsky/vislice | model.py | import random
# Define the game constants.
ŠTEVILO_DOVOLJENIH_NAPAK = 10   # maximum number of wrong guesses before losing
PRAVILNA_CRKA = "+"             # guess outcome: correct letter
PONOVLJENA_CRKA = "o"           # guess outcome: letter already tried
NAPACNA_CRKA = "-"              # guess outcome: wrong letter
ZAČETEK = "S"                   # game state: just started
ZMAGA = "W"                     # game state: won
PORAZ = "X"                     # game state: lost


# Logical model of a single hangman game.
class Igra:
    def __init__(self, geslo, crke):
        """geslo: the secret word (str); crke: letters guessed so far (list)."""
        self.geslo = geslo.upper()  # String
        self.crke = [i.upper() for i in crke]  # List

    def napacne_crke(self):
        """Guessed letters that do not occur in the secret word."""
        return [i for i in self.crke if i not in self.geslo]

    def pravilne_crke(self):
        """Guessed letters that occur in the secret word."""
        return [i for i in self.crke if i in self.geslo]

    def stevilo_napak(self):
        """Number of wrong guesses made so far."""
        return len(self.napacne_crke())

    def zmaga(self):
        """True when every letter of the secret word has been guessed."""
        return all(i in self.crke for i in self.geslo)

    def poraz(self):
        """True when the wrong-guess limit has been reached."""
        return len(self.napacne_crke()) >= ŠTEVILO_DOVOLJENIH_NAPAK

    def pravilni_del_gesla(self):
        """The secret word with unguessed letters masked as underscores.

        Bug fix: str.replace returns a new string (strings are immutable);
        the original discarded the result and therefore always returned the
        full, unmasked secret word.
        """
        pravilni_del = self.geslo
        for i in self.geslo:
            if i not in self.crke:
                pravilni_del = pravilni_del.replace(i, "_")
        return pravilni_del

    def nepravilni_ugibi(self):
        """Wrong guesses joined with single spaces, e.g. 'A B C'."""
        return " ".join(self.napacne_crke())

    def ugibaj(self, crka):
        """Process one guessed letter and return an outcome/state code.

        Returns PRAVILNA_CRKA / NAPACNA_CRKA / PONOVLJENA_CRKA while the
        game is running, and ZMAGA / PORAZ once it has ended.
        """
        velika_crka = crka.upper()
        if not self.zmaga() and not self.poraz():
            if velika_crka in self.crke:
                return PONOVLJENA_CRKA
            self.crke.append(velika_crka)
            return PRAVILNA_CRKA if velika_crka in self.geslo else NAPACNA_CRKA
        # Game already over: report the final state.
        return ZMAGA if self.zmaga() else PORAZ
# Word pool, loaded once at import time.
# Bug fix: strip the trailing newline that readlines() keeps on every line;
# otherwise each secret word contains '\n', which can never be guessed, so
# no game could ever be won (and the masked word printed with line breaks).
bazen_besed = []
with open("besede.txt", "r", encoding="utf-8") as besede:
    for geslo in besede.readlines():
        bazen_besed.append(geslo.strip().upper())
class Vislice():
    """Container for concurrently running games.

    In the `igre` dict each game has its own integer ID mapping to a
    (state_code, Igra) pair.
    """

    def __init__(self, igre):
        # Bug fix: the constructor ignored its `igre` argument and always
        # reset the mapping to {}; honour the caller-supplied dict instead.
        self.igre = igre

    def prost_id_igre(self):
        """Return the smallest non-negative integer not used as a game id.

        Scanning range(len + 1) always finds a free slot, including the
        empty-dict case (returns 0).
        """
        for i in range(len(self.igre) + 1):
            if i not in self.igre:
                return i

    def nova_igra(self):
        """Create a new game with a random secret word from the pool."""
        self.igre[self.prost_id_igre()] = (ZAČETEK, Igra(random.choice(bazen_besed), []))

    def ugibaj(self, id_igre, crka):
        """Apply a guess to game `id_igre` and record the resulting state."""
        igrica = self.igre[id_igre][1]
        self.igre[id_igre] = (igrica.ugibaj(crka), igrica)
|
rockofsky/vislice | vislice.py | <reponame>rockofsky/vislice<filename>vislice.py
import bottle
import model
# Single shared game container for the whole web app.
vislice = model.Vislice({})
@bottle.get("/")
def uvod():
    # Landing page: render the static index template.
    return bottle.template("index.tpl")
@bottle.get("/igra/")
def nova_igra():
    """Start a new game and send the player back to the start page.

    Bug fix: the original line `def nova_igra()` was a SyntaxError
    (missing colon and body), so the module could not even be imported.
    """
    vislice.nova_igra()
    # TODO(review): render or redirect to a per-game view once one exists.
    return bottle.redirect("/")

bottle.run(reloader=True, debug=True)
|
rockofsky/vislice | tekstovni_vmesnik.py | from model import Igra, bazen_besed
import random
def izpis_igre(igra):
    """Return a 'game continues' message while the game is still running,
    otherwise None."""
    game_over = igra.zmaga() or igra.poraz()
    return None if game_over else "Nadaljujmo z igro."
def izpis_zmage(igra):
    """Return the victory message once the game is won, otherwise None."""
    return "Zmagali ste!" if igra.zmaga() else None
def izpis_poraza(igra):
    """Return the defeat message once the game is lost, otherwise None."""
    return "Izgubili ste. Poskusite novo igro." if igra.poraz() else None
def zahtevaj_vnos():
    """Prompt the player for one (bluff-)guessed letter on stdin.

    Fix: input() already returns a str in Python 3, so the redundant
    str() wrapper has been removed.
    """
    return input("Napiši (na blef ugibano) črko: ")
def pozeni_vmesnik():
    # Text-mode game loop: pick a random secret word and keep asking for
    # letters.  NOTE(review): the loop body looks unfinished -- `izid_kroga`
    # is computed but never used to report the round's outcome or to
    # terminate the loop on win/loss.
    print("Pozdravljen v igri VISLICE! Začnimo z igro.")
    igra = Igra(random.choice(bazen_besed), [])
    print("Geslo je določeno.")
    while True:
        print(igra.pravilni_del_gesla())
        crka = zahtevaj_vnos()
        izid_kroga = igra.ugibaj(crka)
|
deftech/cloudify-healer-plugin | cloudify_healer/healer.py | <reponame>deftech/cloudify-healer-plugin
########
# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import json
import logging
import os
import socket
import subprocess
import sys
import time

import requests

from cloudify_rest_client import CloudifyClient
sys.path.append("/opt/mgmtworker/env/lib/python2.7/site-packages/")
# Execution states in which a workflow will make no further progress.
DONE_STATES = ["failed", "completed", "cancelled", "terminated"]
# Module-level logger; configured in main() once the log file path is known.
logger = None
def main():
    """Healer watchdog entry point (run as a standalone process).

    argv layout (see launcher.launch):
        1: username  2: password  3: tenant  4: target_ip
        5: deployment_id  6: instance_id
        7: JSON-encoded node properties (test type / config / debug flag)
        8: optional path to a custom healer script

    Waits for the deployment's install workflow to finish, then probes the
    target with the configured test (ping/port/http/custom) every
    `frequency` seconds and triggers the `heal` workflow after `count`
    failures.
    """
    global logger
    # Collect parms
    username, password, tenant, target_ip, deployment_id, instance_id = \
        sys.argv[1:7]
    nodeconfig = json.loads(sys.argv[7])
    script = sys.argv[8] if len(sys.argv) == 9 else None
    testtype = nodeconfig['type']
    freq = nodeconfig['config']['frequency']
    count = nodeconfig['config']['count']
    debug = bool(nodeconfig['debug'])
    loglevel = logging.DEBUG if debug else logging.INFO

    # Open logger (one log file per deployment/process).
    logfile = "/tmp/healer_" + deployment_id + "_" + str(os.getpid()) + ".log"
    logging.basicConfig(
        filename=logfile, format='%(asctime)s %(levelname)8s %(message)s',
        level=loglevel)
    logger = logging.getLogger("healer")
    logger.info("\n---Starting {}---".format(time.asctime()))

    # Wait for install workflow to complete, if it doesn't, exit.
    # FIXME(review): "1172.16.17.32" is not a valid IPv4 address -- it looks
    # like a scrubbed placeholder for the manager host and must be corrected.
    client = CloudifyClient("1172.16.17.32", username=username,
                            password=password, tenant=tenant)
    status = None
    for i in range(120):
        installid, status = get_last_install(client, deployment_id)
        if not status:
            logger.error("Failure: no install found. Exiting")
            os._exit(1)
        elif status != "started":
            logger.debug("breaking on status {}".format(status))
            break
        logger.info("waiting for install {} to complete".format(installid))
        time.sleep(5)
    if status == "started":
        logger.error("Timed out waiting for install to complete. Exiting.")
        os._exit(1)
    elif status != "terminated":
        logger.error(
            "Install execution stopped. Reason={}. Exiting..".format(status))
        os._exit(1)
    logger.info("install complete. continuing...")

    # NOTE(review): failcnt is never reset on a successful probe, so the
    # failures that trigger a heal need not be consecutive -- confirm that
    # this is the intended semantics.
    failcnt = 0
    while True:
        logger.debug("{}: {}".format(nodeconfig['type'], target_ip))
        failed = False
        if testtype == 'ping':
            failed = doPing(target_ip)
        elif testtype == 'port':
            failed = doSocket(target_ip, nodeconfig)
        elif testtype == 'http':
            failed = doHttp(target_ip, freq, nodeconfig)
        elif testtype == 'custom':
            # Hand the whole process over to the user-supplied script.
            os.execlp("python", "python", script, sys.argv[7])
        else:
            logger.error("ERROR: unknown test type: {}".format(testtype))
            os._exit(1)
        if failed:
            failcnt += 1
            logger.error(
                "Target test failure. Fail count = {}".format(failcnt))
            if failcnt >= count:
                # HEAL
                failcnt = 0
                logger.info("STARTING HEAL of {}".format(instance_id))
                execution = None
                try:
                    execution = client.executions.start(
                        deployment_id, "heal",
                        {"node_instance_id": instance_id})
                except Exception as e:
                    logger.error("CAUGHT EXCEPTION {}".format(e))
                # Bug fix: if starting the heal failed, `execution` is None
                # and the original poll loop crashed with AttributeError.
                if execution is None:
                    os._exit(1)
                logger.info("STARTED HEAL of {}".format(instance_id))
                while True:
                    # Bug fix: re-fetch the execution on every poll -- the
                    # object returned by executions.start() is a snapshot
                    # whose .status never changes, so the original loop
                    # could spin forever.
                    execution = client.executions.get(execution.id)
                    status = execution.status
                    logger.debug(
                        "polling execution status = {}".format(status))
                    if status == "failed":
                        logger.error("execution failed")
                        os._exit(0)
                    if status in DONE_STATES:
                        os._exit(0)
                    time.sleep(4)
        time.sleep(freq)
def doPing(target_ip):
    """Ping the target once; return True when the probe failed.

    Rewritten with subprocess instead of a raw fork/exec pair: in the
    original, a failed execlp() left the child falling through into the
    parent's code path, and a ping killed by a signal was reported as
    success (WIFEXITED was False, so `failed` stayed False).
    """
    returncode = subprocess.call(
        ["ping", "-q", "-c", "1", "-w", "1", target_ip])
    # Any non-zero return (error exit or signal) counts as a failure.
    return returncode != 0
def doHttp(target_ip, freq, nodeconfig):
    """HTTP GET health check against the target.

    Builds <http|https>://<target_ip>:<port><path> from nodeconfig
    (defaults: port 80, path "/", plain http) and returns True when the
    check failed (non-2xx status, timeout, or any other error).
    """
    failed = False
    config = nodeconfig['config']
    port = config.get('port', "80")
    path = config.get('path', "/")
    prot = "https" if config.get('secure') else "http"
    url = prot + "://" + target_ip + ":" + str(port) + path
    try:
        ret = requests.get(url, timeout=int(freq))
        # Any non-2xx response counts as a failure.
        if ret.status_code < 200 or ret.status_code > 299:
            logger.error("unexpected response code from {}:{}".format(
                url, ret.status_code))
            failed = True
    except requests.exceptions.ConnectTimeout:
        logger.error("timeout GET {}:{}".format(url, freq))
        failed = True
    except Exception as e:
        # Bug fix: Python 3 exceptions have no `.message` attribute; the
        # original `e.message` raised AttributeError inside this handler.
        logger.error("caught exception in GET {}:{}".format(url, e))
        failed = True
    return failed
def doSocket(target_ip, nodeconfig):
    """TCP connect health check; return True when the connection fails.

    Bug fix: the socket is now always closed -- the original leaked one
    file descriptor per probe, eventually exhausting the process fd limit
    on a long-running healer.
    """
    failed = False
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.settimeout(1)
        s.connect((target_ip, nodeconfig['config']['port']))
    except Exception as e:
        logger.error("exception: {}".format(str(e)))
        failed = True
    finally:
        s.close()
    return failed
def get_last_install(client, deployment_id):
    """Gets the execution id and status of the last install workflow
    for the supplied deployment.

    Returns (None, None) when no install execution exists; a still
    pending execution is reported with the status "started".
    """
    all_execs = client.executions.list(deployment_id=deployment_id)
    installs = [e for e in all_execs if e.workflow_id == "install"]
    if not installs:
        return None, None  # no install found
    last = installs[-1]
    if last.status in ("started", "pending"):
        return last.id, "started"
    return last.id, last.status
if __name__ == "__main__":
main()
|
deftech/cloudify-healer-plugin | tests/test_stopper.py | # Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
from cloudify.mocks import MockCloudifyContext, MockRelationshipSubjectContext
from cloudify.mocks import MockNodeInstanceContext
from cloudify.state import current_ctx
from time import sleep
import subprocess
def test_stop():
    """stop() must kill the process whose pid is stored on the source
    node instance's runtime properties."""
    # Imported lazily so merely collecting this module does not require
    # the plugin package to be importable.
    from cloudify_healer.stopper import stop
    # start process then stop it
    # `exec` makes the shell replace itself, so p.pid is the sleep itself.
    p = subprocess.Popen(["exec sleep 120"], shell=True)
    ctx = MockCloudifyContext(source=MockRelationshipSubjectContext(None,
        MockNodeInstanceContext(
        runtime_properties={"pid": str(p.pid)})))
    current_ctx.set(ctx)
    stop()
    sleep(2)
    # If the child survived stop(), clean it up and fail the test.
    if p.poll() is None:
        p.terminate()
        p.wait()
        assert False
|
deftech/cloudify-healer-plugin | tests/test_healer.py | ############
# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
def test_doping():
    """A single ping to a well-known public resolver should not fail."""
    from cloudify_healer.healer import doPing
    assert not doPing("8.8.8.8")
def test_basic_dohttp():
    """doHttp builds the URL itself from host + port + path.

    Bug fix: the original passed a full URL ("http://cloudify.co") as the
    target_ip argument, so doHttp produced the malformed URL
    "http://http://cloudify.co:80/" and the check could never succeed.
    Pass the bare hostname instead.
    """
    from cloudify_healer.healer import doHttp
    nodeconfig = {}
    nodeconfig['config'] = {}
    res = doHttp("cloudify.co", 1, nodeconfig)
    assert not res
def test_dosocket():
    """A TCP connect to the local SSH port should succeed."""
    from cloudify_healer.healer import doSocket
    nodeconfig = {"config": {"port": 22}}
    assert not doSocket("127.0.0.1", nodeconfig)
|
deftech/cloudify-healer-plugin | cloudify_healer/launcher.py | ########
# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import os
import json
from cloudify import ctx
def launch(**kwargs):
    """Launches the healer watchdog as a detached child process.

    The parent records the child's pid in the source instance's runtime
    properties (so stopper.stop can kill it); the child closes every fd
    and exec's healer.py with the credentials and target details as argv.
    """
    depid = ctx.deployment.id
    # Fall back to loopback when the target exposes no 'ip' property.
    targetip = (ctx.target.instance.runtime_properties['ip']
                if 'ip' in ctx.target.instance.runtime_properties
                else '127.0.0.1')
    targetid = ctx.target.instance.id
    user, password, tenant = ctx.source.node.properties['cfy_creds'].split(',')
    # Start healer process, put PID in attributes
    pid = os.fork()
    if pid > 0:
        # Parent branch: remember the child's pid and return to the workflow.
        ctx.source.instance.runtime_properties["pid"] = str(pid)
        return
    # Child branch from here on.
    nodeprops = json.dumps(ctx.source.node.properties)
    # If user has configured custom healer, pass it along
    customscript = (ctx.source.node.properties['config']['script']
                    if ctx.source.node.properties['type'] == 'custom' else "")
    path = ctx.download_resource(customscript) if customscript != "" else ""
    # Detach from the mgmtworker: drop every inherited file descriptor.
    close_fds(leave_open=[])
    try:
        curdir = os.path.dirname(__file__)
        os.execlp("python", "python", curdir+"/healer.py", user, password,
                  tenant, targetip, depid, targetid, nodeprops, path)
    except Exception:
        # NOTE(review): if execlp fails the child silently continues with
        # all fds closed inside the plugin process -- an os._exit(1) here
        # would be safer; confirm before changing.
        pass
def close_fds(leave_open=(0, 1, 2)):
    """Close every open file descriptor except those listed in `leave_open`.

    Descriptors are discovered via /proc/self/fd (Linux-specific).

    Fix: the default value was a mutable list (the classic mutable-default
    pitfall); an immutable tuple with the same contents behaves identically
    for the membership test below and cannot be mutated across calls.
    """
    for fdn in os.listdir(b'/proc/self/fd'):
        fd = int(fdn)
        if fd not in leave_open:
            try:
                os.close(fd)
            except Exception:
                # The fd used to read /proc/self/fd is already gone by the
                # time we reach it; other fds may close concurrently. Ignore.
                pass
|
sudormroot/resnet-in-tensorflow-test | gpu_test.py | import tensorflow as tf
# Print the result so running this script actually reports GPU availability
# (the original discarded the return value, producing no output at all).
# NOTE(review): tf.test.is_gpu_available() is deprecated since TF 2.1;
# prefer tf.config.list_physical_devices('GPU') when upgrading.
print(tf.test.is_gpu_available())
|
firstnattapon/N2 | app.py | <reponame>firstnattapon/N2
#___________________________________________________________________________________________________
import pandas as pd
import streamlit as st
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import json
import hiplot as hip
from pokereval.card import Card as c
from pokereval.hand_evaluator import HandEvaluator as ev
import SessionState
# Bug fix: the bare "precision" option alias was deprecated in pandas 1.x
# and removed in 2.0 (set_option('precision', ...) now raises); the fully
# qualified option name works on both old and new pandas versions.
pd.set_option("display.precision", 3)
# sns.set_style("whitegrid")
if __name__ == '__main__':
    @st.cache(suppress_st_warning=True)
    def preflop():
        # Load the precomputed preflop strategy table (cached by Streamlit
        # across reruns) and the card-rank labels for the radio widgets.
        df = pd.read_pickle('./preflop.pickle')
        x = ["A", "K", "Q", "J","T", "9", "8" , "7" , "6" , "5" , "4" , "3" , "2"]
        return df , x
    # run_id doubles as the widget key; bumping it resets every widget.
    session = SessionState.get(run_id=0)
    df , x = preflop()
    # Hole-card selection: two ranks plus suitedness (P=pair, O=offsuit,
    # S=suited); equal ranks default the suit radio to "P".
    c_1 = st.radio("c_1",(x), key=session.run_id)
    c_2 = st.radio("c_2",(x), key=session.run_id)
    suit = st.radio("suit",("P" , "O" ,"S") , index= 0 if c_1==c_2 else 1 , key=session.run_id)
    action = st.radio("action",("LIMPERS" , "UN_OPENED" ,"ONE_RAISE"), key=session.run_id)
    position = st.radio("position",("U_HJ" , "C_B" , "BL" , "VS_3BET" , "VS_STEAL"), key=session.run_id)
    # Lay the radio buttons out horizontally via injected CSS.
    st.write('<style>div.Widget.row-widget.stRadio > div{flex-direction:row;}</style>', unsafe_allow_html=True)
    # Map rank/suit labels to the numeric codes used by pokereval.
    y = {'2':2 , '3':3, '4':4, '5':5, '6':6 ,'7':7, '8':8, '9':9 ,'T':10, 'J':11, 'Q':12 ,'K':13 , 'A':14 , 'O':-1 ,'P':0 , 'S':1}
    n_card1 = y[c_1] ; n_card2 = y[c_2] ; n_suited = y[suit]
    # Hand-strength lookup key: evaluate both cards with the same suit code
    # when suited, with different suits otherwise.
    ev_c= np.where(n_suited == 1 , ev.evaluate_hand([c(n_card1 , 1), c(n_card2, 1)] ) ,
                   ev.evaluate_hand([c(n_card1 , 1), c(n_card2, 2)]))
    # Narrow the strategy table to the selected situation.
    df = df[df['ev'] == ev_c]
    df = df[df['position'] == position ]
    df = df[df['action'] == action ]
    df_o = df.output_preflop.to_numpy()
    df_c = df.class_preflop.to_numpy()
    code = '''{} > {} > {} > {} > {}'''.format((c_1+c_2+suit) , position , action , df_c[-1] , df_o[-1] )
    st.code(code, language='python')
    # The recommendation doubles as a reset button (bumps the widget key).
    if st.button("{}".format(df_o[-1])):
        session.run_id += 1
    st.write("_"*20)
# if st.checkbox("plot", value = 0):
# st.markdown("")
# st.markdown("[")
# if st.checkbox("hiplot_preflop" , value = 0):
# if st.button("{}".format('Reset')):
# session.run_id += 1
# df = preflop()
# data = df[['index', 'n_card1' , 'n_card2' , 's_suited' , 'class_preflop', 'position' , 'action' , 'output_preflop' ,'ev']]
# data = data.to_dict('r')
# xp = hip.Experiment.from_iterable(data)
# ret_val = xp.display_st(key=session.run_id)
# st.markdown("hiplot returned " + json.dumps(ret_val))
#_______________________________________________________________________________________________________
    @st.cache(suppress_st_warning=True)
    def postflop():
        # Load the postflop strategy table (cached by Streamlit) together
        # with the distinct position/board/hit labels used for the widgets.
        df_2 = pd.read_pickle('./postflop.pickle')
        p = df_2.position.unique()
        b = df_2.board.unique()
        h = df_2.hit.unique()
        return df_2 , p , b , h
    # st.write('postflop')
    df_2 , p , b , h = postflop()
    op_p = st.radio('position',p, key=session.run_id)
    op_b = st.radio('board',b, key=session.run_id)
    op_t = st.radio('tier',(None , 'nut' , 'non-nut' ,'showdown' , 'air') , index= 0 , key=session.run_id)
    # A tier shortcut maps straight to one hit category; with no tier
    # selected the full hit list is offered instead.
    if op_t != None:
        if op_t == 'nut':op_h ='1)_two_pair+'
        elif op_t == 'non-nut':op_h = '2)_overpair'
        elif op_t == 'showdown':op_h = '3)_showdown'
        elif op_t == 'air':op_h ='4)_air'
    else:op_h= st.selectbox('hit', h, key=session.run_id)
    Street = st.radio('Street',('flop' , 'turn' , 'river'), key=session.run_id)
    # Narrow the table to the selected situation and show the advice for
    # the chosen street.
    df_2 = df_2[df_2['position'] == op_p]
    df_2 = df_2[df_2['board'] == op_b]
    df_2 = df_2[df_2['hit'] == op_h]
    df_2_St = df_2[[Street]]
    df_2_St = df_2_St.values.tolist()
    code = '''{} > {} > {} > {} > {}'''.format(op_p , op_b , op_h , Street , df_2_St[-1][0])
    st.code(code, language='python')
    # The recommendation doubles as a reset button (bumps the widget key).
    if st.button("{}".format( df_2_St[-1][0])):
        session.run_id += 1
# st.write("_"*20)
# if st.checkbox("hiplot_postflop" , value = 0):
# if st.button("{}".format('Reset')):
# session.run_id += 1
# df_2 , p , b , h = postflop()
# data_2 = df_2
# data_2 = data_2.to_dict('r')
# xp = hip.Experiment.from_iterable(data_2)
# ret_val = xp.display_st(key=session.run_id)
# st.markdown("hiplot returned " + json.dumps(ret_val))
# import pandas as pd
# import streamlit as st
# import seaborn as sns
# import numpy as np
# import matplotlib.pyplot as plt
# import matplotlib.image as mpimg
# import json
# import hiplot as hip
# pd.set_option('precision', 3)
# # sns.set_style("whitegrid")
# @st.cache(suppress_st_warning=True)
# def preflop():
# df = pd.read_pickle('./preflop.pickle')
# df = df[['Human', 'EV' , 'y']]
# df = df.set_index(['y'])
# df['top_range'] = abs(df['EV'] - 1.)
# return df
# x = ["A", "K", "Q", "J","T", "9", "8" , "7" , "6" , "5" , "4" , "3" , "2"]
# Suit = st.radio("Suit",("o" , "s"))
# c_1 = st.radio("c_1",(x))
# c_2 = st.radio("c_2",(x))
# st.write('<style>div.Widget.row-widget.stRadio > div{flex-direction:row;}</style>', unsafe_allow_html=True)
# st.write("_"*50)
# h = c_1 + c_2 + Suit
# df = preflop()
# df = df[df['Human'] == h]
# if df.index == 1:
# st.markdown("")
# if df.index == 2:
# st.markdown("")
# if df.index == 3:
# st.markdown("")
# if df.index == 4:
# st.markdown("")
# if df.index == 5:
# st.markdown("")
# if df.index == 6:
# st.markdown("")
# if df.index == 7:
# st.markdown("")
# # st.markdown("")
# pd.set_option('precision', 3)
# data = pd.read_pickle('./preflop.pickle')
# # data = data.reset_index()
# data = data[['Human' , 'Ang_Card' , 'Suited' , 'EV' , 'y']]
# data = data.to_dict('r')
# xp = hip.Experiment.from_iterable(data)
# # Display with `display_st` instead of `display`
# ret_val = xp.display_st(key="hip")
# st.markdown("hiplot returned " + json.dumps(ret_val))
# # Z = {"A":0, "K":1, "Q":2, "J":3,"T":4, "9":5, "8":6, "7":7, "6":8, "5":9, "4":10, "3":11, "2":12}
# # A = 0.7 ; B = 0.6 ; C = 0.5 ; D= 0.4 ; E = 0.3 ; F = 0.2 ; G = 0.1 ; H = 0.0
# # data = np.array([
# # [A, A, B, C, D, E, F, F, F, F, F, F, F],
# # [A, A, D, E, G, G, G, G, G, G, G, G, G],
# # [B, D, A, E, G, G, G, G, G, G, G, G, G],
# # [C, E, E, A, G, G, G, G, G, G, G, G, G],
# # [D, G, G, F, B, G, G, G, G, G, G, G, G],
# # [E, G, G, H, F, B, G, G, G, G, G, G, G],
# # [F, G, G, H, H, F, C, G, G, G, G, G, G],
# # [F, G, G, H, H, H, F, C, G, G, G, G, G],
# # [F, G, G, H, H, H, H, F, D, G, G, G, G],
# # [F, G, G, H, H, H, H, H, F, D, G, G, G],
# # [F, G, G, H, H, H, H, H, H, F, E, G, G],
# # [F, G, G,H, H, H, H, H, H, H, G, E, G],
# # [F, G, G, H, H, H, H, H, H, H, H, G, E],
# # ])
# # data[[c_1] ,[c_2]] = 1.
# # df = pd.DataFrame(data)
# # c_1 = Z[c_1]
# # c_2 = Z[c_2]
# # data[[c_1] ,[c_2]] = 1.
# # fig, ax = plt.subplots(figsize=(5 , 5))
# # im = ax.imshow(data ,)
# # ax.set_xticks(np.arange(len(x)))
# # ax.set_yticks(np.arange(len(y)))
# # ax.xaxis.tick_top()
# # ax.set_xticklabels(x)
# # ax.set_yticklabels(y)
# # fig.tight_layout()
# # st.pyplot()
|
larryhou/py-futu-api | futu/tools/generate_code.py | # -*- coding: utf-8 -*-
import json
import os
import load_template
#__PBPrefixName__ = "GetGlobalState_pb2."
# Template resource file names used by the code generator.
__TemplateCodeFileName__ = "template_code.txt"
__TemplateFileHeadName__ = "template_head.txt"
__TemplateRstName__ = "rst_template.txt"
# Shared template engine instance; loaded once at import time.
template = load_template.FutuTemplate("function.template")
def change_variable_name(listx):  # convert an identifier's naming style
    """Turn a camelCase identifier into UPPER_SNAKE_CASE.

    An underscore is inserted before an uppercase letter that follows a
    lowercase one, and before the last uppercase letter of an acronym run
    (e.g. "ABCDef" -> "ABC_DEF"). Characters adjacent to an existing
    underscore are left untouched.
    """
    pieces = [listx[0]]
    for idx in range(1, len(listx) - 1):
        prev_ch, cur_ch, next_ch = listx[idx - 1], listx[idx], listx[idx + 1]
        near_underscore = '_' in (prev_ch, cur_ch, next_ch)
        if not near_underscore and cur_ch.isupper():
            if not prev_ch.isupper():
                # lower -> Upper boundary: camelCase word break.
                pieces.append('_')
            elif next_ch.islower():
                # acronym run ending: split before its final capital.
                pieces.append('_')
        pieces.append(cur_ch)
    if len(listx) > 1:
        pieces.append(listx[-1])
    return ''.join(pieces).upper()
def code_add_space(code, space_count=1, space_str=" "):  # indent line by line
    """Prefix every non-empty line of `code` with space_str * space_count,
    keeping empty lines empty, so the result stays PEP 8 friendly."""
    indent = space_str * space_count
    padded = [(indent + line) if line else line for line in code.split('\n')]
    return '\n'.join(padded)
class ClassItemStruct(object):
    """Model of one protobuf message or enum parsed from protoc-gen-doc JSON.

    Wraps the JSON description of a message ("class") or enum ("enums") and
    renders the unpack / return code fragments through the shared
    `template` engine.
    """

    def __init__(self, json_obj, class_type, pb_prefix_name):
        self.name = json_obj["longName"]
        self.full_name = json_obj["fullName"]
        self.description = json_obj["description"]
        self.json_obj = json_obj
        self.pb_prefix_name = pb_prefix_name
        # Either "enums" or "class"; decides how members are parsed below.
        self.class_type = class_type
        # Number of repeated (list-valued) fields; drives which template
        # is used for the generated unpack/return code.
        self.repeated_count = 0
        self.var_list = list()
        self.list_list = list()
        self.values = list()
        if class_type == "enums":
            for v in json_obj["values"]:
                s = EnumsItemStruct(v, self.name, self.pb_prefix_name)
                self.values.append(s)
        if class_type == "class":
            for v in json_obj["fields"]:
                s = ParameterItem(v, self)
                if s.label == "repeated":
                    self.repeated_count += 1
                self.values.append(s)

    def get_only_repeated_item(self):
        """Return the single repeated (list) field, or None if absent."""
        for item in self.values:
            if item.trim_type == "list" or item.trim_type == "code_list":
                return item

    def set_parameter_class(self, class_obj):
        """Attach the resolved class object to every field."""
        for item in self.values:
            item.set_class(class_obj)

    def get_function_return(self):
        """Render the function's return statement (translated from the
        original Chinese comment: builds the pandas frame / dict return
        and its parsing)."""
        var_names = list()
        for item in self.var_list:
            var_names.append(item.trim_name)
        if self.repeated_count == 1:
            # One repeated field: return a list/DataFrame of rows.
            str_code = '\'' + '\',\n\''.join(var_names) + '\''
            return template["list_return"].format(var_name=code_add_space(str_code, 4))
        elif self.repeated_count == 0:
            # No repeated fields: return a flat dict.
            str_code = '\'' + '\',\n\''.join(var_names) + '\''
            return template["dict_return"].format(var_name=code_add_space(str_code, 4))
        elif self.repeated_count > 1:
            # Several repeated fields: one sub-list per repeated field.
            str_code = ""
            for item in self.list_list:
                var_names.clear()
                col_list = item.col_list
                for v in col_list:
                    var_names.append(v.trim_name)
                var_name = '\'' + '\',\n\''.join(var_names) + '\''
                trim_name = item.trim_name
                str_code += template["list_list_item_return"].format(var_name=code_add_space(var_name, 4),
                                                                     trim_name=trim_name,
                                                                     name=trim_name,
                                                                     description=item.description)
            return template["list_list_return"].format(code=str_code)

    def get_list_head(self):
        """Render the header code naming the list's variables
        (translated from the original Chinese docstring)."""
        str_code = ""
        for item in self.values:
            str_code += item.get_unpack_var()
        return str_code

    def get_unpack_code(self):
        """Render the code that unpacks a pb response into ret values."""
        self.var_list.clear()
        if self.repeated_count == 1:
            # Note (translated): with more than one repeated field this
            # single-list approach no longer fits -- see the > 1 branch.
            unpack_add_code = ""
            list_name = "ret_list"
            code, self.var_list = self.get_only_repeated_item().sub_list(list_name)
            head_code = code_add_space("{} = list()\n".format(list_name), 1)
            head_code += self.get_list_head()
            unpack_add_code += code
            head_code = code_add_space(head_code, 1)
            return head_code + unpack_add_code
        elif self.repeated_count == 0:
            unpack_code = ""
            # Translated: iterate the fields and assign each into `ret`.
            for item in self.values:
                unpack_code += item.get_unpack_dict_code("ret")
                self.var_list.append(item)
            return template["class_unpack_var_add"].format(unpack_code=unpack_code.rstrip('\n'))
        elif self.repeated_count > 1:
            self.list_list.clear()
            code_str = ""
            head_code = "ret_dic = dict()\n" + self.get_list_head()
            for item in self.values:
                if item.trim_type == "list":
                    self.list_list.append(item)
                    list_name = "ret_list_" + item.trim_name
                    item.list_name = list_name
                    head_code += template["class_more_list_add"].format(description=item.description,
                                                                        list_name=list_name,
                                                                        var_name=item.trim_name)
            for item in self.list_list:
                s, l = item.sub_list(item.list_name)
                item.col_list = l
                code_str += s
            return code_add_space(head_code, 1) + code_str

    def get_unpack_return(self):
        """Render the final `return RET_OK, ...` statement for the chosen
        return container (list / dict / dict-of-lists)."""
        if self.repeated_count == 1:
            return "return RET_OK, \"\", ret_list"
        elif self.repeated_count == 0:
            return "return RET_OK, \"\", ret"
        elif self.repeated_count > 1:
            return "return RET_OK, \"\", ret_dic"
class BaseClass(ClassItemStruct):
    """Stand-in ClassItemStruct for primitive protobuf field types.

    Represents a leaf type (int32, string, ...) that has no message
    definition of its own, so every structural attribute is empty.
    """

    def __init__(self, full_type):
        # A primitive type is its own name and full name.
        self.name = full_type
        self.full_name = full_type
        self.description = "base type " + full_type
        # No backing JSON message and no pb module prefix for primitives.
        self.json_obj = None
        self.pb_prefix_name = None
        self.class_type = "class"
        # Primitives never contain repeated (list) members.
        self.repeated_count = 0
        self.var_list = []
        self.vars_dict = {}
        self.values = []
class EnumsItemStruct(object):
    """One member of a protobuf enum, normalized for code generation."""

    def __init__(self, obj, class_name, pb_prefix_name):
        self.obj = obj
        self.class_name = class_name  # name of the enum class this member belongs to
        self.pb_prefix_name = pb_prefix_name
        self.full_name = obj["name"]  # original member name from the proto JSON
        self.number = int(obj["number"])
        self.description = obj["description"].replace('\n', '')
        self.trim_name = self.full_name  # normalized name; the class-name prefix is stripped below
        self.pb_value = self.pb_prefix_name + self.full_name  # matching symbol in the generated pb2 module
        if len(self.class_name) > 0 and len(self.trim_name) > 0:
            self.trim_name = self.trim_name.replace(self.class_name + "_", "")
            # Identifiers cannot start with a digit; re-prefix with the first
            # two characters of the class name if stripping exposed one.
            if self.trim_name[0] in list(map(lambda x: str(x), list(range(10)))):
                self.trim_name = self.class_name[:2] + "_" + self.trim_name
        if not isinstance(self.description, str):
            self.description = ""
        # Member 0 with no description defaults to "未知" ("unknown").
        if len(self.description) == 0 and self.number == 0:
            self.description = "未知"
        self.underscore_name = change_variable_name(self.trim_name)  # CamelCase -> UPPER_SNAKE_CASE
        # Collapse the various (mis)spellings of "unknown" to NONE.
        if self.underscore_name in ["UNKNOW", "UNKONW", "UNKNOWN"]:
            self.underscore_name = "NONE"
        self.str = self.underscore_name  # string value used in the generated enum
        # By convention member 0 named NONE renders as "N/A".
        if self.number == 0 and self.underscore_name == "NONE":
            self.str = "N/A"
class ParameterItem(object):
    """One field of a protobuf message, plus the code-generation helpers
    that turn it into pack (request) / unpack (response) Python source."""

    def __init__(self, obj, class_owner):
        self.obj = obj
        self.class_owner = class_owner
        self.pb_prefix_name = class_owner.pb_prefix_name
        # Resolved later by set_class() once the field's message type is known.
        self.self_class = None
        # self.repeated_class = None
        self.name = obj["name"]  # original field name from the proto JSON
        self.full_type = obj["fullType"]  # fully qualified proto type
        self.trim_type = obj["longType"]  # working type name (rewritten by trim())
        self.python_type = obj["longType"]  # Python-facing type name (rewritten by trim())
        self.trim_name = change_variable_name(self.name).lower()  # normalized snake_case name
        self.description = obj["description"].replace('\n', '')
        self.label = obj["label"]
        self.trim()

    def trim(self):
        """Normalize this field's type and name according to SDK conventions."""
        if self.obj is None:
            return
        # Repeated fields become "list"; repeated Security becomes "code_list".
        if self.label == "repeated":
            if self.full_type == "Qot_Common.Security":
                self.trim_type = "code_list"
                if self.trim_name == "security_list":
                    self.trim_name = "code_list"
            else:
                self.trim_type = "list"
        # A single Security field is exposed as a stock-code string parameter.
        elif self.full_type == "Qot_Common.Security":
            self.trim_type = "code"
            if self.trim_name == "security":
                self.trim_name = "stock_code"
        # Timestamp doubles are marked so unpack code skips them.
        elif self.full_type == "double" and self.name.lower().find("timestamp") != -1:
            self.trim_type = "timestamp"
        # "time"/"code" alone are too generic: prefix with the owner class name.
        if self.trim_name == "time" or self.trim_name == "code":
            self.trim_name = "{}_{}".format(change_variable_name(self.class_owner.name).lower(), self.trim_name)
        # Descriptions that are too short are meaningless; prepend the owner's.
        if len(self.description) < 5:
            if len(self.class_owner.description) > 0:
                self.description = self.class_owner.description + " " + self.description
        # Very short names collide easily; prefix with the owner class name.
        if len(self.trim_name) < 5:
            owner_name = change_variable_name(self.class_owner.name).lower()
            if owner_name != "s2c" and owner_name != "c2s" and owner_name != "":
                self.trim_name = "{}_{}".format(owner_name, self.trim_name)
        # Primitive (and primitive-like) types get a BaseClass placeholder.
        if self.trim_type in ["int32", "int64", "double",
                              "float", "string", "bool", "bytes", "code_list",
                              "code", "timestamp"]:
            self.self_class = BaseClass(self.trim_type)
        elif self.full_type in ["int32", "int64", "double", "float", "string", "bool", "bytes"]:
            self.self_class = BaseClass(self.full_type)
        self.python_type = self.trim_type
        if self.trim_type in ["int32", "int64"]:
            self.python_type = "int"
        if self.trim_type in ["double", "float"]:
            self.python_type = "float"

    def get_args(self):
        """Return one `"name": name,` entry for the generated kargs dict."""
        return " \"{}\": {},".format(self.trim_name, self.trim_name)

    def get_warning_filter(self):
        """Return validation code inserted at the top of the generated API call."""
        warning_filter = ""
        # NOTE(review): template.get(key, 2) would return the int 2 if the key
        # were missing — presumably `template` always defines these keys; confirm.
        if self.trim_type == "code_list":
            warning_filter = template.get("code_list_warning_filter", 2)
        elif self.trim_type == "code":
            warning_filter = template.get("code_warning_filter", 2).format(trim_name=self.trim_name)
        return warning_filter

    def get_parameter_name(self):
        """Return the public parameter name used in the generated signature."""
        if self.trim_type == "code_list":
            return "code_list"
        elif self.trim_type == "code":
            return "stock_code"
        return self.trim_name

    def get_pack_req_filter(self):
        """Return pre-pack validation code (stock-code parsing) for this field."""
        if self.trim_type == "code_list":
            return template.get("pack_code_list_filter", 2)
        elif self.trim_type == "code":
            return template.get("pack_code_filter", 2)
        return ""

    def get_pack_req_add(self):
        """Return code that copies this parameter into req.c2s."""
        if self.trim_type == "code_list":
            return template.get("pack_code_list_add", 2).format(name=self.name)
        elif self.trim_type == "code":
            return " req.c2s.{0}.market = market_code\n" \
                   " req.c2s.{0}.code = stock_code".format(self.name)
        else:
            return " req.c2s.{0} = {1}\n".format(self.name, self.trim_name)

    def set_class(self, class_obj):
        """Bind `class_obj` to this field (and, recursively, to its children)
        when the field's full type matches the message's full name."""
        if (self.self_class is None) and (self.full_type == class_obj.full_name):
            self.self_class = class_obj
        # Recurse into already-resolved (non-primitive) children.
        """遍历问问子节点"""
        if (self.self_class is not None) and (not isinstance(self.self_class, BaseClass)):
            for item in self.self_class.values:
                item.set_class(class_obj)

    def get_unpack_var(self, parents_name="rsp_pb.s2c"):
        """Return code pulling this field out of `parents_name` into a local."""
        if self.trim_type == "timestamp":
            return ""
        else:
            if self.full_type == "Qot_Common.Security":
                s_code = template["class_unpack_code"].format(trim_name=self.trim_name,
                                                              name=self.name,
                                                              parents_name=parents_name)
            else:
                s_code = "# {description} type = {trim_type}\n" \
                         "{trim_name} = {parents_name}.{name}\n".\
                    format(description=self.description,
                           trim_type=self.full_type,
                           trim_name=self.trim_name,
                           parents_name=parents_name,
                           name=self.name)
            return s_code

    def sub_list(self, list_name, sub_item_name="item", add_code=""):
        """Generate a `for` loop that flattens this repeated field into
        `list_name`, returning (code, list-of-columns). Recurses one level
        when the child message itself contains a single repeated member."""
        if self.self_class is None or self.self_class.repeated_count > 1:
            raise Exception("不能支持子节点有多个列表的情况")
        str_code = for_code = ""
        ret_list = list()
        # Top-level call: also copy the parent (outermost) scalar fields into each row.
        if sub_item_name == "item":
            if self.name == "test":
                sub_item_name = "item"
            for item in self.class_owner.values:
                if item.trim_type != "list" and self.trim_type != "timestamp":
                    ret_list.append(item)
                    add_code += code_add_space(("data[\"{0}\"] = {0}\n".format(item.trim_name)), 1)
        if self.self_class.repeated_count == 1:
            # The child message still contains one more nested list: emit this
            # level's scalars, then recurse into the nested repeated member.
            for item in self.self_class.values:
                if self.trim_type == "timestamp":
                    continue
                """加入自己那层的数据"""
                str_code += code_add_space(item.get_unpack_var(sub_item_name), 1)
                if item.trim_type != "list":
                    ret_list.append(item)
                    add_code += code_add_space(("data[\"{0}\"] = {0}\n".format(item.trim_name)), 1)
            sub_sub_item_name = "sub_" + sub_item_name
            s, l = self.self_class.get_only_repeated_item().sub_list(list_name, sub_sub_item_name, add_code)
            str_code += s
            ret_list.extend(l)
        elif self.self_class.repeated_count == 0:
            # Innermost level: every field becomes a column of the row dict.
            """理解为最后一层"""
            for item in self.self_class.values:
                if self.trim_type == "timestamp":
                    continue
                str_code += code_add_space(item.get_item_unpack_add(sub_item_name), 1)
                ret_list.append(item)
            str_code += add_code
        for_code = "for {sub_item} in {trim_name}:\n". \
            format(trim_name=self.trim_name,
                   sub_item=sub_item_name)
        if self.self_class.repeated_count == 0:
            for_code += " {list_name}.append(data)\n". \
                format(list_name=list_name)
        return code_add_space(for_code + str_code, 1), ret_list

    def get_item_unpack_add(self, item_name=""):
        """Return code writing this field into the per-row dict `data`."""
        if self.trim_type == "timestamp" or self.trim_type == "list":
            return ""
        else:
            if len(item_name) > 0:
                item_name += "."
            if self.trim_type == "code" and item_name != "":
                # NOTE(review): item_name already ends with '.' here, so
                # "{parents_name}.{name}" renders a double dot ("item..field").
                # Verify against the actual generated output.
                s_code = "merge_qot_mkt_stock_str({parents_name}.{name}.market,{parents_name}.{name}.code)".\
                    format(name=self.name,
                           parents_name=item_name).rstrip('\n')
                return "# {description} type = {trim_type}\n" \
                       "data[\"{trim_name}\"] = {code}\n".\
                    format(description=self.description,
                           trim_type=self.trim_type,
                           trim_name=self.trim_name,
                           code=s_code)
            else:
                return "# {description} type = {trim_type}\n" \
                       "data[\"{trim_name}\"] = {item_name}{name}\n".\
                    format(description=self.description,
                           trim_type=self.trim_type,
                           trim_name=self.trim_name,
                           item_name=item_name,
                           name=self.name)

    def get_unpack_dict_code(self, ret_name):
        """Return code assigning this field into the top-level result dict."""
        if self.trim_type == "timestamp":
            return ""
        else:
            if self.full_type == "Qot_Common.Security":
                s_code = "# {description} type={trim_type}\n" \
                         "{ret_name}[\"{trim_name}\"]= merge_qot_mkt_stock_str({trim_name}.market, {trim_name}.code)\n". \
                    format(description=self.description,
                           trim_type=self.trim_type,
                           trim_name=self.trim_name,
                           ret_name=ret_name)
            else:
                s_code = "# {description} type={trim_type}\n" \
                         "{ret_name}[\"{trim_name}\"]=rsp_pb.s2c.{name}\n".\
                    format(description=self.description,
                           trim_type=self.trim_type,
                           trim_name=self.trim_name,
                           name=self.name,
                           ret_name=ret_name)
            return code_add_space(s_code, 2)
class GenerateCode(object):
    """Drives code generation for one proto file: loads its JSON
    description, then emits the API function (.py), enum classes and
    .rst documentation fragments under ./code/."""

    def __init__(self, class_name, prefix=""):
        print("GenerateCode class_name = {} | prefix = {}".format(class_name, prefix))
        self.local_path = os.path.dirname(os.path.realpath(__file__))
        self.common_pb_path = os.path.abspath(os.path.join(self.local_path, "../common/pb/"))
        self.out_put_path = os.path.join(self.local_path, "code")
        if not os.path.exists(self.out_put_path):
            os.makedirs(self.out_put_path)
        self.enums = list()
        self.class_name = class_name
        self.prefix = prefix
        # Proto JSON descriptions are named "<prefix>_<name>.proto.json" (e.g. Qot_*).
        if prefix != "":
            self.json_filename = "{}_{}.proto.json".format(prefix, class_name)
        else:
            self.json_filename = "{}.proto.json".format(class_name)
        self.pb_prefix_name = "{}_pb2.".format(class_name)
        self.obj = None
        self.c2s = self.s2c = None

    def load(self):
        """Parse the proto JSON and resolve nested message type references."""
        self.obj = None
        with open(os.path.join(self.common_pb_path, self.json_filename), 'r', encoding='UTF-8') as load_f:
            self.obj = json.load(load_f)
        self.load_enums()
        self.load_parameter()
        # Several passes so messages nested inside repeated elements resolve.
        for i in range(10):
            self.set_parameter_class(self.obj)  # nested repeated elements
        # Shared definitions live in the two *_Common protos.
        with open(os.path.join(self.common_pb_path, "Qot_Common.proto.json"), 'r', encoding='UTF-8') as load_f:
            self.set_parameter_class(json.load(load_f))
        with open(os.path.join(self.common_pb_path, "Trd_Common.proto.json"), 'r', encoding='UTF-8') as load_f:
            self.set_parameter_class(json.load(load_f))

    def load_enums(self):
        """Collect every enum declared by this proto file."""
        self.enums.clear()
        if self.obj is not None:
            files_json = self.obj["files"]
            enums_json = files_json[0]["enums"]
            for item in enums_json:
                c = ClassItemStruct(item, "enums", self.pb_prefix_name)
                self.enums.append(c)

    def load_parameter(self):
        """Locate the request (C2S) and response (S2C) messages."""
        self.c2s = self.s2c = None
        if self.obj is not None:
            files_json = self.obj["files"]
            messages_json = files_json[0]["messages"]
            for item in messages_json:
                long_name = item["longName"]
                if long_name == "C2S":
                    self.c2s = ClassItemStruct(item, "class", self.pb_prefix_name)
                if long_name == "S2C":
                    self.s2c = ClassItemStruct(item, "class", self.pb_prefix_name)

    def set_parameter_class(self, obj):
        """Bind every message defined in `obj` to matching C2S/S2C fields."""
        if obj is None or self.s2c is None or self.c2s is None:
            return
        files_json = obj["files"]
        messages_json = files_json[0]["messages"]
        for item in messages_json:
            class_obj = ClassItemStruct(item, "class", self.pb_prefix_name)
            self.s2c.set_parameter_class(class_obj)
            self.c2s.set_parameter_class(class_obj)

    def out_class(self):
        """Render the API function for this proto into code/<name>.py."""
        if self.c2s is None or self.s2c is None:
            return
        export_file = os.path.join(self.out_put_path, "{}.py".
                                   format(change_variable_name(self.class_name).lower()))
        # NOTE(review): files in this class are opened without `with`, so they
        # stay open if rendering raises; acceptable for a dev-time generator.
        code_file = open(export_file, 'w', encoding='utf-8')
        function_name = change_variable_name(self.class_name).lower()  # generated API function name
        kargs = parameter = warning_filter = ""
        notes = " " + self.class_name
        pack_req_filter = pack_req_add = ""
        if self.prefix != "":
            pb_file_name = "{}_{}".format(self.prefix, self.class_name)
        else:
            pb_file_name = "{}".format(self.class_name)
        # (original note: switch to a struct parameter when there are more than 5)
        for i in range(len(self.c2s.values)):
            v = self.c2s.values[i]
            parameter += v.get_parameter_name()
            warning_filter += v.get_warning_filter()
            kargs += v.get_args()
            s = v.get_pack_req_filter()
            pack_req_filter += ' \"\"\"check {1} {0}\"\"\"\n'.format(v.description, v.trim_name)
            pack_req_filter += s
            pack_req_add += v.get_pack_req_add()
            if i < (len(self.c2s.values) - 1):
                parameter += ","
                kargs += ",\n"
        # with open(os.path.join(self.local_path, __TemplateFileFunctionName__), 'r', encoding='UTF-8') as load_f:
        template_code = template["class_function"]
        unpack_code = code_add_space(self.s2c.get_unpack_code().rstrip('\n'),1)
        code_str = template_code.format(function_name=function_name,
                                        parameter=parameter,
                                        notes=notes,
                                        warning_filter=warning_filter,
                                        class_name=self.class_name,
                                        kargs=kargs,
                                        get_function_return=self.s2c.get_function_return(),
                                        pack_req_filter=pack_req_filter,
                                        pack_req_add=pack_req_add,
                                        pb_file_name=pb_file_name,
                                        get_unpack_code= unpack_code,
                                        get_unpack_return=self.s2c.get_unpack_return())
        code_file.write(code_str)
        code_file.close()

    def out_enums(self):
        """Render enum wrapper classes into code/<name>_enums.py."""
        if len(self.enums) == 0:
            return
        export_file = os.path.join(self.out_put_path, "{}_enums.py".format(change_variable_name(self.class_name).lower()))
        with open(os.path.join(self.local_path, __TemplateCodeFileName__), 'r', encoding='UTF-8') as load_f:
            template_code = load_f.read()
        code_file = open(export_file, 'w', encoding='utf-8')
        # Write the shared file header first.
        """写入文件头"""
        with open(os.path.join(self.local_path, __TemplateFileHeadName__), 'r', encoding='UTF-8') as f:
            code_file.write(f.read())
        for class_item in self.enums:
            class_description = class_item.description.replace("\n", ",")
            class_name = class_item.name
            variable_code = ""
            dic_code = ""
            code_file.write("\n\n\n'''-------------------------{}----------------------------'''\n\n\n"
                            .format(class_name))
            for item in class_item.values:
                variablename = item.underscore_name
                strvalue = item.str
                description = item.description
                pbvalue = item.pb_value
                kvcode = "{variablename} = \"{strvalue}\"".format(variablename=variablename, strvalue=strvalue)
                # ": <50" left-aligns the assignment to 50 columns
                code = " {kvcode: <50} # {description}\n".format(kvcode=kvcode, description=description)
                variable_code += code
                code = " self.{variablename}: {pbvalue},\n".format(
                    variablename=variablename, pbvalue=pbvalue)
                dic_code += code
            c = template_code.format(classdescription=class_description,
                                     classname=class_name,
                                     variablecode=variable_code.rstrip("\n,"),
                                     diccode=dic_code.rstrip("\n,"))
            code_file.write(c)
        code_file.close()

    def rst_enums(self):
        """Render enum documentation into code/<name>_enums.rst."""
        if len(self.enums) == 0:
            return
        export_file = os.path.join(self.out_put_path, "{}_enums.rst".format(change_variable_name(self.class_name).lower()))
        rst_template= ""
        with open(os.path.join(self.local_path, __TemplateRstName__), 'r', encoding='UTF-8') as load_f:
            rst_template = load_f.read()
        code_file = open(export_file, 'w', encoding='utf-8')
        for class_item in self.enums:
            classdescription = class_item.description.replace("\n", ",")
            classname = class_item.name
            dicinfo = ""
            for item in class_item.values:
                name = item.underscore_name
                description = item.description
                kvcode = " .. py:attribute:: {name}\n\n {description}\n\n".format(name=name, description=description)
                dicinfo += kvcode
            c = rst_template.format(classname=classname,
                                    classdescription=classdescription,
                                    dicinfo=dicinfo)
            code_file.write(c)
        code_file.close()

    def rst_class(self):
        """Render the S2C field table into code/<name>.rst."""
        if self.c2s is None or self.s2c is None:
            return
        export_file = os.path.join(self.out_put_path, "{}.rst".
                                   format(change_variable_name(self.class_name).lower()))
        code_file = open(export_file, 'w', encoding='utf-8')
        code_str = ""
        for i in range(len(self.s2c.values)):
            v = self.s2c.values[i]
            description = v.description
            python_type = v.python_type
            name = v.trim_name
            line = " {:<32}{:<15}{}\n".format(name, python_type, description)
            code_str += line
        code_file.write(code_str)
        code_file.close()

    def save(self):
        """Write all generated artifacts (enums, API function, rst docs)."""
        self.out_enums()
        self.out_class()
        self.rst_enums()
        self.rst_class()
class Generate(object):
    """Batch driver: runs protoc over every .proto under common/pb and
    then GenerateCode over the resulting JSON descriptions."""

    def __init__(self):
        self.local_path = os.path.dirname(os.path.realpath(__file__))
        self.common_pb_path = os.path.abspath(os.path.join(self.local_path, "../common/pb/"))
        self.pb_list = list()
        # Collect every .proto file up front.
        """统统载入进来再说"""
        f_list = os.listdir(self.common_pb_path)
        for f in f_list:
            if os.path.splitext(f)[1] == '.proto':
                self.pb_list.append(f)

    def find_pb_by_names(self, names):
        """Return the .proto files whose name contains any substring in `names`."""
        f_list = list()
        for f in self.pb_list:
            (_, file_name) = os.path.split(f)
            for s in names:
                if f.find(s) != -1:
                    f_list.append(f)
                    break
        return f_list

    def generate_json(self, names=None):
        """Run protoc to produce _pb2.py modules and .proto.json descriptions.

        Windows-only as written: uses `cd /d`, `&` chaining and protoc.exe.
        """
        if names is not None:
            f_list = self.find_pb_by_names(names)
        else:
            f_list = self.pb_list
        for f in f_list:
            (_, file_name) = os.path.split(f)
            cmd = '''cd /d {work_path} & \
protoc.exe -I=. --python_out=./ {file_name} & \
protoc.exe --doc_out=. --doc_opt=json,{file_name}.json ./{file_name} &'''\
                .format(file_name=file_name,
                        work_path=self.common_pb_path)
            os.system(cmd)
            print(cmd)

    def generate_code(self, names):
        """Run GenerateCode for each selected proto's JSON description."""
        if names is not None:
            pb_list = self.find_pb_by_names(names)
        else:
            pb_list = self.pb_list
        for f in pb_list:
            (_, file_name) = os.path.split(f)
            json_file_path = os.path.join(self.common_pb_path, file_name + ".json")
            if not os.path.exists(json_file_path):
                print(json_file_path + " not exists")
                continue
            (name, _) = os.path.splitext(file_name)
            # Qot_/Trd_ prefixes select the quote/trade namespaces.
            if name.find("Qot_") == 0:
                prefix = "Qot"
                class_name = name[4:]
            elif name.find("Trd_") == 0:
                prefix = "Trd"
                class_name = name[4:]
            else:
                prefix = ""
                class_name = name
            # Common/Sub protos are shared definitions, not API requests.
            if class_name in ["Common", "Sub"]:
                continue
            code = GenerateCode(class_name, prefix)
            code.load()
            code.save()
def generate(names):
    """Regenerate pb2 modules, JSON descriptions and API code for `names`.

    `names` may be a single substring or a list of substrings used to
    match .proto file names under common/pb.
    """
    targets = [names] if isinstance(names, str) else names
    runner = Generate()
    runner.generate_json(targets)
    runner.generate_code(targets)
# Manual entry point: regenerate code for a single proto while developing.
if __name__ =="__main__":
    generate(["Qot_StockFilter"])
    # c = GenerateCode("GetOwnerPlate", "Qot")
    # c.load()
    # c.save()
|
larryhou/py-futu-api | futu/examples/simple.py | <filename>futu/examples/simple.py
# -*- coding: utf-8 -*-
# Example script: connect to a local OpenD gateway and run a simple stock filter.
from futu import *
quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
"""-------------------------------------------------"""
from futu.quote.quote_get_warrant import Request
from futu.quote.quote_stockfilter_info import SimpleFilter
# Filter: current price between 100 and 1000, ascending, filter enabled.
field = SimpleFilter()
field.filter_min = 100
field.filter_max = 1000
field.stock_field = StockField.CUR_PRICE
field.is_no_filter = False
field.sort = SortDir.ASCEND
# field2 = SimpleFilter()
# field2.filter_min = 100
# field2.filter_max = 1000
# field2.stock_field = StockField.VOLUME_RATIO
# field2.sort = SortDir.ASCEND
# field2.is_no_filter = False
#
# field3 = SimpleFilter()
# field3.stock_field = StockField.CUR_PRICE_TO_HIGHEST52_WEEKS_RATIO
# field3.is_no_filter = True
ret, ls = quote_ctx.get_stock_filter(Market.HK, [field])
# NOTE(review): ret is not checked before unpacking — on error `ls` is a
# message string and this unpack raises.
last_page, all_count, ret_list = ls
print(ret_list)
"""-------------------------------------------------"""
# req = Request()
# req.sort_field = SortField.CODE
# req.ascend = True
# req.type_list = [WrtType.BEAR, WrtType.CALL]
# req .issuer_list = [Issuer.CS, Issuer.CT, Issuer.EA]
# print(quote_ctx.get_warrant("HK.00700", req))
quote_ctx.close()
|
n4ts/ha-notione | custom_components/notione/device_tracker.py | <filename>custom_components/notione/device_tracker.py
"""Support for notiOne® Bluetooth trackers."""
from datetime import datetime,timedelta
import logging
import requests,json
import voluptuous as vol
from homeassistant.components.device_tracker import PLATFORM_SCHEMA
from homeassistant.const import CONF_USERNAME, CONF_PASSWORD, CONF_SCAN_INTERVAL
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_utc_time_change
from homeassistant.util import slugify
from homeassistant.util import dt
import urllib3
urllib3.disable_warnings()
_LOGGER = logging.getLogger(__name__)

# Default polling period; can be overridden via CONF_SCAN_INTERVAL.
SCAN_INTERVAL = timedelta(seconds=300)
MDI_ICON = 'mdi:bluetooth-connect'

# notiOne cloud endpoints and OAuth2 client credentials.
token_url = 'https://auth.notinote.me/oauth/token'
list_url = 'https://api.notinote.me/secured/internal/devicelist'
auth_login = 'test-oauth-client-id'
# Fixed: the OAuth client secret was redacted from the published source,
# leaving the unquoted literal `<PASSWORD>'` — a syntax error that broke the
# whole module. Keep a quoted placeholder; supply the real secret before use.
auth_pass = '<PASSWORD>'
grant_type = 'password'
scope = 'NOTI'
# Extend the base device_tracker schema with the notiOne account settings.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_USERNAME): cv.string,
    vol.Required(CONF_PASSWORD): cv.string,
    vol.Optional(CONF_SCAN_INTERVAL):
        vol.All(cv.time_period, cv.positive_timedelta)
})
def setup_scanner(hass, config: dict, see, discovery_info=None):
    """Set up the notiOne scanner (legacy device_tracker platform hook)."""
    # The tracker schedules its own periodic updates from its __init__.
    NotiOneTracker(hass, config, see)
    return True
class NotiOneTracker:
    """Polls the notiOne cloud API and reports each tracker's last position
    to Home Assistant via the `see` callback."""

    def __init__(self, hass, config: dict, see) -> None:
        self.hass = hass
        self.username = config.get(CONF_USERNAME)
        self.password = config.get(CONF_PASSWORD)
        interval = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL)
        _LOGGER.info('Scan interval: %s', interval)
        self.see = see

        def update_interval(now):
            # Self-rescheduling poll loop: always re-arm, even if the update fails.
            try:
                self._update_info()
            finally:
                hass.helpers.event.track_point_in_utc_time(
                    update_interval, dt.utcnow() + interval)

        update_interval(None)

    def _update_info(self, now=None) -> None:
        """Update info from notiOne: authenticate, fetch the device list and
        report every tracker's last known position."""
        _LOGGER.info("Updating device info")
        # OAuth2 resource-owner-password grant with the configured account.
        data = {
            'grant_type': grant_type,
            'username': self.username,
            # Fixed: the published source had the redacted literal `<PASSWORD>`
            # here (a syntax error); the account password belongs here, matching
            # the 'username' field above.
            'password': self.password,
            'scope': scope
        }
        access_token_response = requests.post(token_url, data=data, verify=False, allow_redirects=False, auth=(auth_login, auth_pass))
        tokens = json.loads(access_token_response.text)
        access_token = tokens['access_token']
        api_call_headers = {'Authorization': 'Bearer ' + access_token}
        api_call_response = requests.get(list_url, headers=api_call_headers, verify=False)
        json_object = json.loads(api_call_response.text)
        for dev in json_object['deviceList']:
            tracker_id = dev['deviceId']
            dev_id = dev['name']
            _LOGGER.info('New device: %s', dev_id)
            if dev_id is None:
                dev_id = tracker_id
            lat = dev['lastPosition']['latitude']
            lon = dev['lastPosition']['longitude']
            beaconid = dev['deviceId']
            deviceVersion = dev['deviceVersion']
            # gpstime is reported in milliseconds since the epoch.
            gpstime = datetime.fromtimestamp(dev['lastPosition']['gpstime']/1000.0)
            entity_picture = dev['avatar']
            accuracy = dev['lastPosition']['accuracy']
            city = dev['lastPosition']['geocodeCity']
            street = dev['lastPosition']['geocodePlace']
            battery = dev['notiOneDetails']['battery']
            mac = dev['notiOneDetails']['mac']
            if city is None:
                city = ''
            if street is None:
                street = ''
            # `battery` truthy maps to 'low' — presumably a low-battery flag
            # from the API; confirm against the notiOne response schema.
            if battery:
                battery_status = 'low'
            else:
                battery_status = 'high'
            # Drop avatar values that are not absolute http(s) URLs.
            if entity_picture[0:4] != 'http':
                entity_picture = ''
            attrs = {
                'friendly_name': dev_id,
                'gpstime': gpstime,
                'entity_picture': entity_picture,
                'beaconid': beaconid,
                'location': street + ',' + city,
                'battery_status': battery_status,
                'deviceVersion': deviceVersion,
                'icon': MDI_ICON
            }
            self.see(
                dev_id=tracker_id, host_name=dev_id, mac=mac, gps=(lat, lon), gps_accuracy=accuracy, attributes=attrs
            )
|
n4ts/ha-notione | custom_components/notione/__init__.py | """The notione component."""
|
chemberger/bnil-graph | __init__.py | #!/usr/bin/env python
# Copyright 2017 <NAME> (@withzombies)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from binaryninja import *
from collections import defaultdict
import subprocess
import tempfile
# Show normal form instructions (non-SSA, non-mapped) in the output
show_normal = True
# Included MappedMediumLevelIL in the output
show_mapped = False
# Include SSA in the output
show_ssa = True
# Path to dot executable
dot = '/usr/local/bin/dot'
# Module-global handle to the current Graphviz output file (set by graph_bnil).
dotfile = None
def print_il_graphviz(name, il):
    """Recursively emit Graphviz nodes/edges for one IL operand tree.

    `name` is the dot node id for this operand; children are named
    "<name>_<i>". Output is written to the module-global `dotfile`.
    """
    if isinstance(il, MediumLevelILInstruction) or isinstance(il, LowLevelILInstruction):
        dotfile.write('{} [label="{}", style="rounded"];\n'.format(name, il.operation.name))
        for i, o in enumerate(il.operands):
            # ILOperations names each operand slot of this operation.
            edge_label = il.ILOperations[il.operation][i][0]
            child_name = "{}_{}".format(name, i)
            print_il_graphviz(child_name, o)
            # print edge
            dotfile.write('{} -> {} [label=" {}"];\n'.format(name, child_name, edge_label))
    elif isinstance(il, list):
        # A list operand becomes a diamond node with one edge per element.
        dotfile.write('{} [label="[{}]", shape="diamond"];'.format(name, len(il)))
        for i, item in enumerate(il):
            item_name = "{}_{}".format(name, i)
            print_il_graphviz(item_name, item)
            dotfile.write('{} -> {} [label=" {}"];'.format(name, item_name, i))
    else:
        # terminal
        # NOTE: `long` is Python 2 — this plugin targets py2-era Binary Ninja.
        if isinstance(il, long):
            # Reinterpret the unsigned constant as signed for display.
            (signed, ) = struct.unpack("l", struct.pack("L", il))
            il_str = "{: d} ({:#x})".format(signed, il)
        else:
            il_str = str(il)
        dotfile.write('{} [label="{}", shape="oval"];\n'.format(name, il_str))
def graph_il(il_type, il):
    # type: (str, LowLevelILInstruction) -> None
    """Emit a record-shaped header node for one IL instruction and link it
    to its rendered operand tree (written to the global `dotfile`)."""
    h = hash(il)
    name = "g_{}_{}".format(h, il.address)
    child_name = "{}c".format(name)
    # print head
    # Escape characters that are special inside a dot record label.
    il_str = str(il).replace("{", "\\{").replace("}", "\\}").replace(">", "\\>").replace("<", "\\<")
    dotfile.write('{} [label="{{ {} | {} @ {:#x} | {} }}", shape="record"];\n'.format(name, il_type, il.instr_index,
                                                                                     il.address, il_str))
    print_il_graphviz(child_name, il)
    #print edge
    dotfile.write('{} -> {};\n'.format(name, child_name))
def graph_addr(func, addr):
    """Write a digraph of every IL instruction (per enabled IL form) that
    lifts from the given address of `func` into the global `dotfile`."""
    # lookup[il_form][address] -> list of IL instructions at that address.
    lookup = defaultdict(lambda: defaultdict(list))
    llil = func.low_level_il
    mlil = func.medium_level_il
    if show_normal:
        for block in llil:
            for il in block:
                lookup['LowLevelIL'][il.address].append(il)
        for block in mlil:
            for mil in block:
                lookup['MediumLevelIL'][mil.address].append(mil)
    if show_ssa:
        for block in llil.ssa_form:
            for il in block:
                lookup['LowLevelILSSA'][il.address].append(il)
        for block in mlil.ssa_form:
            for mil in block:
                lookup['MediumLevelILSSA'][mil.address].append(mil)
    if show_mapped:
        mmlil = llil.mapped_medium_level_il
        for block in mmlil:
            for mil in block:
                lookup['MappedMediumLevelIL'][mil.address].append(mil)
        if show_ssa:
            for block in mmlil.ssa_form:
                for mil in block:
                    lookup['MappedMediumLevelILSSA'][mil.address].append(mil)
    dotfile.write("digraph G {\n")
    dotfile.write('node [shape="rect"];\n')
    # Emit forms in a stable order so repeated runs produce identical graphs.
    for il_type in sorted(lookup):
        ils = lookup[il_type][addr]
        for il in sorted(ils):
            graph_il(il_type, il)
    dotfile.write("}\n")
def graph_bnil(bv, addr):
    """Plugin entry point: render the BNIL graph for `addr` and show it.

    Writes a .dot temp file (global `dotfile`), renders it to PNG with the
    configured `dot` binary, and displays the image in an HTML report.
    """
    global dotfile
    # NamedTemporaryFile defaults to delete-on-close; keeping the handle in a
    # module global keeps the file alive while dot and the report read it.
    dotfile = tempfile.NamedTemporaryFile(mode="w", suffix=".dot")
    blocks = bv.get_basic_blocks_at(addr)
    func = blocks[0].function
    graph_addr(func, addr)
    dotfile.flush()
    subprocess.check_call([dot, '-Tpng', '-O', dotfile.name])
    bv.show_html_report("BNIL Graph", "<html><img src='{}.png'></html>".format(dotfile.name))
# Register the plugin command in Binary Ninja's UI (runs at import time).
PluginCommand.register_for_address("BNIL Instruction Graph", "View BNIL Instruction Information", graph_bnil)
|
AlperenCicek/uav-simulation | UAV-Code/object_detection_ss.py | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging
import tensorflow as tf
from object_detection.utils import label_map_util
from object_detection.utils import config_util
from object_detection.utils import visualization_utils as viz_utils
from object_detection.builders import model_builder
tf.get_logger().setLevel('ERROR')           # Suppress TensorFlow logging (2)
# Enable GPU dynamic memory allocation
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
# Load pipeline config and build a detection model
configs = config_util.get_configs_from_pipeline_file("exported-models/my_model_faster_rcnn_resnet50_v1_640x640_Unfinished6ClassedDataset106_RHF_RVF_4batches_13k/pipeline.config")
model_config = configs['model']
detection_model = model_builder.build(model_config=model_config, is_training=False)
# Restore checkpoint (expect_partial: inference needs only the model weights)
ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)
ckpt.restore(os.path.join("exported-models/my_model_faster_rcnn_resnet50_v1_640x640_Unfinished6ClassedDataset106_RHF_RVF_4batches_13k/checkpoint", 'ckpt-0')).expect_partial()
@tf.function
def detect_fn(image):
    """Detect objects in image.

    Runs the global `detection_model` pipeline (preprocess -> predict ->
    postprocess) and returns (detections, raw prediction dict, flattened
    true-shape tensor). Compiled by @tf.function on first call.
    """
    image, shapes = detection_model.preprocess(image)
    prediction_dict = detection_model.predict(image, shapes)
    detections = detection_model.postprocess(prediction_dict, shapes)
    return detections, prediction_dict, tf.reshape(shapes, [-1])
# %%
# Load label map data (for plotting)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Label maps correspond index numbers to category names, so that when our convolution network
# predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility
# functions, but anything that returns a dictionary mapping integers to appropriate string labels
# would be fine.
# Map class ids -> {'id': ..., 'name': ...} dicts used when labelling boxes.
category_index = label_map_util.create_category_index_from_labelmap("annotations/label_map.pbtxt",
                                                                    use_display_name=True)
# %%
# Define the video stream
# ~~~~~~~~~~~~~~~~~~~~~~~
# We will use `OpenCV <https://pypi.org/project/opencv-python/>`_ to capture the video stream
# generated by our webcam. For more information you can refer to the `OpenCV-Python Tutorials <https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html#capture-video-from-camera>`_
import cv2
#cap = cv2.VideoCapture(0)
# %%
# Putting everything together
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~
# The code shown below loads an image, runs it through the detection model and visualizes the
# detection results, including the keypoints.
#
# Note that this will take a long time (several minutes) the first time you run this code due to
# tf.function's trace-compilation --- on subsequent runs (e.g. on new images), things will be
# faster.
#
# Here are some simple things to try out if you are curious:
#
# * Modify some of the input images and see if detection still works. Some simple things to try out here (just uncomment the relevant portions of code) include flipping the image horizontally, or converting to grayscale (note that we still expect the input image to have 3 channels).
# * Print out `detections['detection_boxes']` and try to match the box locations to the boxes in the image. Notice that coordinates are given in normalized form (i.e., in the interval [0, 1]).
# * Set ``min_score_thresh`` to other values (between 0 and 1) to allow more detections in or to filter out more detections.
import numpy as np
import win32api
import pyautogui
from PIL import ImageGrab
import time
def click_coordinates():
    """Block until the left mouse button state changes, then return the
    cursor position at that moment (a pyautogui Point).

    Fixed: the original wrapped this in a pointless `for pos in range(2)`
    loop whose variable was immediately shadowed — the function always
    returned on the first state change, so the loop was dead code. The
    polling loop also gets a short sleep so it no longer pegs a CPU core.
    """
    state_prev = win32api.GetKeyState(0x01)  # 0x01 == VK_LBUTTON
    while True:
        state_current = win32api.GetKeyState(0x01)
        if state_current != state_prev:
            pos = pyautogui.position()
            print("**Positions set: ", pos)
            return pos
        time.sleep(0.01)  # yield the CPU between polls
def conversionOfCoordinates(coord, w, h):
    """Scale a normalized detection box to pixel coordinates.

    `coord` is presumably [ymin, xmin, ymax, xmax] in [0, 1] (TF detection
    box order — TODO confirm); `h` scales the y entries, `w` the x entries.
    Returns a 4-tuple of ints.
    """
    y_min, x_min, y_max, x_max = coord[0], coord[1], coord[2], coord[3]
    return int(y_min * h), int(x_min * w), int(y_max * h), int(x_max * w)
def set_pos():
    """Prompt the user to click two screen corners; return their coordinates.

    Returns (x1, y1, x2, y2): upper-corner then lower-corner click positions.
    """
    print("Set the area to process")
    print("Upper corner")
    top_x, top_y = click_coordinates()
    print("Lower corner")
    time.sleep(0.8)  # debounce so the first click is not captured twice
    bot_x, bot_y = click_coordinates()
    # NOTE(review): despite the names used by callers (w, h), the last two
    # values are the lower corner's absolute coordinates, not a width/height.
    return int(top_x), int(top_y), int(bot_x), int(bot_y)
x, y, w, h = set_pos()
# Main loop: grab the selected screen region, run detection, show results.
while True:
    # Read frame from camera
    wholeScreen = ImageGrab.grab((x, y, w, h))
    """
    cv2.imshow("temp", np.array(wholeScreen))
    cv2.waitKey(0)
    if cv2.waitKey(0) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
    """
    image_np = np.array(wholeScreen)
    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    # NOTE(review): image_np_expanded is computed but never used below.
    image_np_expanded = np.expand_dims(image_np, axis=0)
    # Things to try:
    # Flip horizontally
    # image_np = np.fliplr(image_np).copy()
    # Convert image to grayscale
    # image_np = np.tile(
    #     np.mean(image_np, 2, keepdims=True), (1, 1, 3)).astype(np.uint8)
    input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)
    detections, predictions_dict, shapes = detect_fn(input_tensor)
    # Model class ids are 0-based; the label map is 1-based.
    label_id_offset = 1
    image_np_with_detections = image_np.copy()
    image_np_with_detections = cv2.cvtColor(image_np_with_detections, cv2.COLOR_BGR2RGB)
    img, coordinates, classOfVehicles = viz_utils.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections,
        detections['detection_boxes'][0].numpy(),
        (detections['detection_classes'][0].numpy() + label_id_offset).astype(int),
        detections['detection_scores'][0].numpy(),
        category_index,
        use_normalized_coordinates=True,
        max_boxes_to_draw=500,
        min_score_thresh=.30,
        agnostic_mode=False)
    # NOTE(review): bare except silences all errors here, not just "no boxes".
    try:
        print("---CLASS & COORDINATES OF FRAME---")
        for i in range(0, len(classOfVehicles)):
            print(classOfVehicles[i], " : ", conversionOfCoordinates(coordinates[i], w, h))
    except:
        print(".....")
    # Display output
    cv2.imshow('object detection', image_np_with_detections)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows() |
ancabilloni/sea-vision | misc/edge_detection_example.py | <reponame>ancabilloni/sea-vision
#!/usr/bin/env python
"""
Edge detection with Sobel operator on webcam
"""
from cv_tools import sobel_thresh
import cv2
import numpy as np
# Open the default webcam and show live Sobel-edge output next to the raw
# frame until the user presses "q".
cap = cv2.VideoCapture(0)
while cap.isOpened():
    ok, frame = cap.read()
    if frame is None:
        # BUG FIX: a failed read returns (False, None); the original passed
        # None straight into sobel_thresh and crashed inside cv2.cvtColor.
        break
    sobel_x = sobel_thresh(frame, orient='x', sobel_kernel=9, thresh=(30,100))
    sobel_y = sobel_thresh(frame, orient='y', sobel_kernel=9, thresh=(30,100))
    # Combine both gradient directions into a single white-on-black mask.
    sobel = np.zeros_like(sobel_x)
    sobel[(sobel_x==1)|(sobel_y==1)] = 255
    mask_sobel = np.dstack((sobel, sobel, sobel))
    cv2.imshow('frame', np.hstack((frame,mask_sobel)))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    cv2.waitKey(20)  # extra pacing delay kept from the original
cap.release()
cv2.destroyAllWindows() |
ancabilloni/sea-vision | misc/cv_tools.py | <gh_stars>1-10
#!/usr/bin/env python
"""
Selections of computer vision tools/methods
"""
import numpy as np
import cv2
def sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0,255), gray=False):
    """Binary edge mask from a thresholded, 0-255-scaled Sobel derivative.

    orient selects the derivative axis ('x' or 'y'); thresh is the
    inclusive (low, high) band applied to the scaled gradient magnitude.
    Pass gray=True when img is already single-channel.
    """
    channel = img if gray else cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    if orient == 'x':
        sobel = cv2.Sobel(channel, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    elif orient == 'y':
        sobel = cv2.Sobel(channel, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    magnitude = np.absolute(sobel)
    scaled = np.uint8(255 * magnitude / np.max(magnitude))
    mask = np.zeros_like(scaled)
    lo, hi = thresh
    mask[(scaled >= lo) & (scaled <= hi)] = 1
    return mask
def increase_brightness(img, value=30):
    """Brighten a BGR image by adding `value` to the HSV V channel,
    saturating at 255 instead of wrapping around."""
    h, s, v = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    ceiling = 255 - value
    v[v > ceiling] = 255
    v[v <= ceiling] += value
    return cv2.cvtColor(cv2.merge((h, s, v)), cv2.COLOR_HSV2BGR)
def white_balance(img):
    """Adjust white balance via a gray-world-style shift in LAB color space.

    The a/b chroma channels are pulled toward neutral (128), weighted by
    luminance.  NOTE(review): `result` is uint8, so the float right-hand
    side is cast back on assignment; out-of-range values can wrap or error
    depending on the NumPy version -- confirm inputs are 8-bit BGR images.
    """
    result = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    avg_a = np.average(result[:, :, 1])
    avg_b = np.average(result[:, :, 2])
    # Shift chroma toward 128 (neutral), scaled by normalized luminance * 1.1.
    result[:, :, 1] = result[:, :, 1] - ((avg_a - 128) * (result[:, :, 0] / 255.0) * 1.1)
    result[:, :, 2] = result[:, :, 2] - ((avg_b - 128) * (result[:, :, 0] / 255.0) * 1.1)
    result = cv2.cvtColor(result, cv2.COLOR_LAB2BGR)
    return result
def histogram_equalization(image, gray=False):
    """Histogram-equalize an image.

    Converts BGR to grayscale first unless gray=True; always returns a
    single-channel equalized image.
    """
    channel = image if gray else cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return cv2.equalizeHist(channel)
def canny(img, low_threshold, high_threshold):
    """Run the Canny edge detector on a BGR image (grayscale-converted first)."""
    return cv2.Canny(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
    """Blur an image with a square Gaussian kernel of side `kernel_size`."""
    ksize = (kernel_size, kernel_size)
    return cv2.GaussianBlur(img, ksize, 0)
|
ancabilloni/sea-vision | misc/write_to_video.py | #!/usr/bin/env python
import cv2
# cap = cv2.VideoCapture("/path/to/your_video.mp4")
cap = cv2.VideoCapture(0) #Try out with your webcam
save_as = "/your/video/folder"
fourcc = cv2.VideoWriter_fourcc(*'mp4v') # set video extension type
video_writer = cv2.VideoWriter(save_as + '/output.mp4', fourcc, 20.0, (640,480)) # path_name, video type, frame rate, (image_width, image_height)
# Note: image_width and image_height must match the image information that feed to video writer
# Read frames, re-encode them to the output video and preview them until the
# stream ends or the user presses "q".
while cap.isOpened():
    grabbed, frame = cap.read()
    if frame is None:
        break
    image = frame  # apply any processing here to frame if desire
    video_writer.write(image)
    cv2.imshow('frame', image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
video_writer.release()
cv2.destroyAllWindows() |
ancabilloni/sea-vision | misc/grabbing_frames.py | <gh_stars>1-10
#!/usr/bin/env python
import cv2
cap = cv2.VideoCapture('path/to/your/video/project_video.mp4')
save_as = "/path/to/your/save_folder"
count = 0
# Dump every frame of the video to numbered JPEG files until the stream ends
# or the user presses "q".
while cap.isOpened():
    grabbed, frame = cap.read()
    if frame is None:
        break
    cv2.imwrite("%s/frame%d.jpg" % (save_as, count), frame)
    count += 1
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows() |
BrightBurningPark/Naruda-office_support_robot | robot/lib/pathengine.py | <reponame>BrightBurningPark/Naruda-office_support_robot
from gridmap import OccupancyGridMap as ogm
from a_star import a_star
from utils import plot_path
from rpslam import *
import matplotlib.pyplot as plt
class navigation:
    """A*-based path planner over an occupancy-grid map loaded from a PNG.

    Coordinates handed to/returned from this class are in millimeters;
    internally the grid works in pixels (PPM = pixels per millimeter,
    imported via rpslam).
    """
    def __init__(self, map_path_with_name):
        self.gmap = ogm.from_png(map_path_with_name, 1)
        self.start_node = (0.0, 0.0)  # start position, in pixels
        self.goal_node = (0.0, 0.0)   # goal position, in pixels
        self.path = []                # raw A* path, in pixels
        self.path_px = []             # path as plotted pixel coordinates
        self.path_rally = []          # only this one is milimeter scale cordination
    def search(self, start_milimeter, goal_milimeter):
        """Run A* from start to goal (both in millimeters) and plot the result."""
        self.start_node = (start_milimeter[0]*PPM, start_milimeter[1]*PPM)
        self.goal_node = (goal_milimeter[0]*PPM, goal_milimeter[1]*PPM)
        self.path, self.path_px = a_star(self.start_node, self.goal_node, self.gmap, movement='8N')
        self.gmap.plot()
        if self.path:
            plot_path(self.path_px)
        else:
            print('goal is not reachable')
        plt.show()
    @staticmethod
    def switch(x, y):
        """Map a unit step (dx, dy) to a numeric-keypad-style direction code.

        (-1,-1)->1 ... (1,1)->9; anything else (including no movement) -> 5.
        BUG FIX: declared @staticmethod so calling it on an instance no
        longer silently passes `self` as `x` (class-level calls unchanged).
        """
        return {(-1, -1):1, (0, -1):2, (1, -1):3, (-1, 0):4, (1, 0):6, (-1, 1):7, (0, 1):8, (1, 1):9}.get((x, y), 5)
    def extract_rally(self):
        """Compress self.path into turn-point waypoints in millimeters.

        Each turn point is shifted 50 mm away from the goal's coordinate --
        presumably to compensate for drive overshoot (confirm with testing).
        """
        old_direction = 5  # 5 == "no movement" start sentinel (keypad center)
        for i in range(len(self.path)):
            if i == len(self.path)-1:
                # Always keep the final goal point itself.
                rally_milimeter = (self.path[i][0]/PPM, self.path[i][1]/PPM)
                self.path_rally.append(rally_milimeter)
            else:
                diff_x = self.path[i][0] - self.path[i+1][0]
                diff_y = self.path[i][1] - self.path[i+1][1]
                direction = navigation.switch(diff_x, diff_y)
                if old_direction != direction:
                    rally_milimeter_x = self.path[i][0]/PPM
                    rally_milimeter_y = self.path[i][1]/PPM
                    if rally_milimeter_x > self.path[-1][0]/PPM:
                        rally_milimeter_x = rally_milimeter_x + 50
                    elif rally_milimeter_x < self.path[-1][0]/PPM:
                        rally_milimeter_x = rally_milimeter_x - 50
                    if rally_milimeter_y > self.path[-1][1]/PPM:
                        rally_milimeter_y = rally_milimeter_y + 50
                    elif rally_milimeter_y < self.path[-1][1]/PPM:
                        rally_milimeter_y = rally_milimeter_y - 50
                    rally_milimeter = (rally_milimeter_x, rally_milimeter_y)
                    self.path_rally.append(rally_milimeter)
                    old_direction = direction
|
BrightBurningPark/Naruda-office_support_robot | robot/lib/utils.py | import math
import png
import numpy
import matplotlib.pyplot as plt
from PIL import Image, ImageOps
def dist2d(point1, point2):
    """Return the Euclidean distance between two points.

    Only the first two coordinates of each point are used; any extra
    dimensions are ignored.
    """
    ax, ay = point1[0:2]
    bx, by = point2[0:2]
    return math.sqrt((ax - bx)**2 + (ay - by)**2)
def png_to_ogm(filename, normalized=False, origin='lower'):
    """
    Convert a png image to occupancy data.

    The image is first color-inverted (SLAM-produced maps use the opposite
    black/white convention) and written to ./maps/inverted_map.png, which
    is then re-read with pypng to extract per-pixel values.

    :param filename: the image filename
    :param normalized: whether the data should be normalised, i.e. to be in value range [0, 1]
    :param origin: 'lower' reverses row order so row 0 is the bottom
    :return: 2-D list of occupancy values (first channel of each pixel)
    """
    # additional edit: it inverses image. because it has opposite representation with SLAM driven map
    image = Image.open(filename)
    if image.mode == 'RGBA':
        # Invert only the RGB channels; keep the alpha channel untouched.
        r, g, b, a = image.split()
        rgb_image = Image.merge('RGB', (r, g, b))
        inverted_image = ImageOps.invert(rgb_image)
        r2, g2, b2 = inverted_image.split()
        final_transparent_image = Image.merge('RGBA', (r2, g2, b2, a))
        final_transparent_image.save('./maps/inverted_map.png')
    else:
        inverted_image = ImageOps.invert(image)
        inverted_image.save('./maps/inverted_map.png')
    # NOTE(review): this path is relative to the current working directory and
    # ./maps must already exist -- confirm callers always run from robot root.
    r = png.Reader('./maps/inverted_map.png')
    img = r.read()
    img_data = list(img[2])
    out_img = []
    bitdepth = img[3]['bitdepth']
    for i in range(len(img_data)):
        out_img_row = []
        for j in range(len(img_data[0])):
            # Keep only the first channel of each pixel (every `planes`-th value).
            if j % img[3]['planes'] == 0:
                if normalized:
                    out_img_row.append(img_data[i][j]*1.0/(2**bitdepth))
                else:
                    out_img_row.append(img_data[i][j])
        out_img.append(out_img_row)
    if origin == 'lower':
        out_img.reverse()
    return out_img
def plot_path(path):
    """Plot a pixel path in yellow with a red start and a green goal marker."""
    pts = numpy.array(path)
    plt.plot(pts[:, 0], pts[:, 1], 'y')
    sx, sy = path[0]
    gx, gy = path[-1]
    plt.plot(sx, sy, 'ro')
    plt.plot(gx, gy, 'go')
    plt.show()
|
BrightBurningPark/Naruda-office_support_robot | robot/lib/pgm_utils.py | '''
pgm_utils.py : Python utilties for PGM files
Copyright (C) 2014 <NAME>
This code is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this code. If not, see <http://www.gnu.org/licenses/>.
Change log:
20-APR-2014 - <NAME> - Get params from command line
'''
def pgm_load(filename):
    """Load a plain (P2) PGM image written by pgm_save.

    Returns (imgbytes, imgsize) where imgbytes is a bytearray of pixel
    values in row-major order and imgsize is [width, height].
    """
    print('Loading image from file %s...' % filename)
    # Use a context manager so the file is closed even on a parse error.
    with open(filename, 'rt') as fd:
        # Skip the constant "P2" magic-number header line.
        fd.readline()
        # The second line is "<width> <height> <maxval>"; keep only the two
        # dimensions.  BUG FIX: the original swallowed maxval into imgsize,
        # producing a 3-element size that broke pgm_save round-trips
        # (`wid, hgt = imgsize` would raise on three values).
        imgsize = [int(tok) for tok in fd.readline().split()[:2]]
        # Read the remaining lines and accumulate pixel values.
        imglist = []
        for line in fd:
            imglist.extend(int(tok) for tok in line.split())
    imgbytes = bytearray(imglist)
    return imgbytes, imgsize
def pgm_save(filename, imgbytes, imgsize):
    """Write pixel values as a plain (P2) PGM image with maxval 255.

    :param imgbytes: row-major pixel values (bytearray or any indexable ints)
    :param imgsize: (width, height) pair; lists are accepted too (the
        original's `'%d %d 255' % imgsize` raised TypeError on a list).
    """
    print('\nSaving image to file %s' % filename)
    wid, hgt = imgsize
    # Use a context manager so the file is flushed/closed on any exit path.
    with open(filename, 'wt') as output:
        output.write('P2\n%d %d 255\n' % (wid, hgt))
        for y in range(hgt):
            for x in range(wid):
                output.write('%d ' % imgbytes[y * wid + x])
            output.write('\n')
|
BrightBurningPark/Naruda-office_support_robot | robot/use_map_autonomous_drive.py | <reponame>BrightBurningPark/Naruda-office_support_robot<filename>robot/use_map_autonomous_drive.py<gh_stars>1-10
'''
Naruda: 2019-1 AJOU Univ. major of Software department Capstone project
Robot main firmware made by "<NAME>" (github nickname 'BrightBurningPark').
Robot can drive by itself, localize position and direction in given map.
it can also build the map from zero.
I Love my school and the Capstone Program SO MUCH. it's true story ^^.
'''
# python basic or pip-installed library import
import sys
import math
import time
import signal
import threading
# adding ./lib dir to use modules
import sys
sys.path.append('./lib')
# modules under lib directory
import rpslam # BreezySLAM(tinySLAM Implementation) with RPLidar A1
import pathengine # shortest path finding engine
import ntdriver # network driver set
# General variables like Path, Var, Name, etc...
PATH_ROBOT = "/home/odroid/capdi/robot" # robot SW top path
PATH_MAP = PATH_ROBOT + "/maps" # map directory
PATH_LIB = PATH_ROBOT + "/lib" # libraries
MAP_NAME_NO_SLAM = 'MAP_NO_SLAM.pgm' # map name generated by no_map_slam
MAP_NAME_YES_SLAM = 'MAP_YES_SLAM.pgm' # map name pre-drawn
MAP_NAME_PATH_PLANNING = 'MAP_PATH_PLANNING.png' # map name used by pathplanning algorithm. this one is the only png file
def auto_drive(dest):
    """Drive the robot from its current SLAM pose to `dest`.

    dest is an (x, y) pair in millimeters.  Uses the module-level `narslam`
    (pose source) and `nxt` (motor command channel).  Blocks until the
    robot is within 50 mm of the destination, then stops the motors.
    """
    print('current position / ', narslam.x, narslam.y)
    dest_x = dest[0]#int(input('x>> '))
    dest_y = dest[1]#int(input('y>> '))
    # Bang-bang steering loop: runs until within 50 mm of the target.
    while math.hypot(dest_x - narslam.x, dest_y - narslam.y) > 50:
        print('DISTANCE: ', math.hypot(dest_x - narslam.x, dest_y - narslam.y), '| while entered', )
        dx = dest_x - narslam.x
        dy = dest_y - narslam.y
        # Deadband: ignore sub-10 mm offsets on either axis.
        if abs(dx) <= 10:
            dx = 0
        if abs(dy) <= 10:
            dy = 0
        # Bearing to target; atan2(dx, dy) measures from the +y axis.
        rad = math.atan2(dx, dy)
        deg = math.degrees(rad)
        if deg < 0:
            deg = 360 + deg
        #add 180 and %360 here
        #deg = (deg + 180) % 360
        # NOTE(review): the +90 offset aligns the bearing with the map's
        # heading convention -- confirm against rpslam's theta definition.
        deg = (deg+90)%360
        print('degree: ', deg, ' | ', narslam.theta, ' | (', narslam.x, ', ', narslam.y, ')')
        # Turn toward the bearing via the shorter direction (7.5 deg tolerance).
        if abs(deg - narslam.theta) <= 180:
            if narslam.theta - 7.5 > deg:
                nxt.send(ntdriver.LEFT)
            elif narslam.theta + 7.5 < deg:
                nxt.send(ntdriver.RIGHT)
            else:
                nxt.send(ntdriver.FORWARD)
        else:
            # Wrap-around case: the turn directions are mirrored.
            if narslam.theta - 7.5 > deg:
                nxt.send(ntdriver.RIGHT)
            elif narslam.theta + 7.5 < deg:
                nxt.send(ntdriver.LEFT)
            else:
                nxt.send(ntdriver.FORWARD)
        time.sleep(0.2)
    nxt.send(ntdriver.STOP)
    print('arrived to destination')
    print('(', narslam.x, narslam.y, narslam.theta, ')')
def testcode(x, y):
    """Plan a path from the robot's current SLAM pose to (x, y) and drive it.

    Coordinates are in millimeters.  x/y may arrive as strings (they come
    straight from input() in the main loop), so they are coerced first.
    Uses the module-level `narslam` for the current pose.
    """
    print(narslam.x, narslam.y, narslam.theta)
    print('input destination cordination in milimeter here')
    # BUG FIX: coerce to numbers -- the planner multiplies these by PPM and
    # auto_drive feeds them to math.hypot, both of which fail on str input.
    dest_x_milimeter = float(x)
    dest_y_milimeter = float(y)
    dest_milimeter = (dest_x_milimeter, dest_y_milimeter)
    start_milimeter = (narslam.x, narslam.y)
    navi = pathengine.navigation(PATH_MAP + '/' + MAP_NAME_PATH_PLANNING)
    navi.search(start_milimeter, dest_milimeter)
    navi.extract_rally()
    print(navi.path_rally)
    # Drive through the waypoints one leg at a time.
    for point in navi.path_rally:
        auto_drive(point)
        print('drive done')
    print('arrived on final destination')
def handler(signum, frame):
    """SIGTSTP (Ctrl+Z) handler: stop motors, stop the SLAM thread, exit.

    Relies on the module-level `nxt`, `narslam` and `t_slam` created in
    the __main__ block.
    """
    nxt.send('0')      # '0' is the stop command for the NXT motors
    narslam.flag = 1   # ask the SLAM loop to terminate
    t_slam.join()      # wait for it to release the lidar
    print('ctrl+Z handling called')
    sys.exit(0)
if __name__ == "__main__":
    # Ctrl+Z (SIGTSTP) is repurposed as the clean-shutdown hook.
    signal.signal(signal.SIGTSTP, handler)
    print ('firmware started')
    narslam = rpslam.narlam()
    # Localization-only SLAM thread against the pre-drawn map.
    t_slam = threading.Thread(target=narslam.slam_yes_map, args=(PATH_MAP, MAP_NAME_YES_SLAM))
    t_slam.start()
    print('SLAM activated')
    nxt = ntdriver.lego_nxt()
    nxt.connect()
    nxt.send('s40')  # NOTE(review): presumably sets drive speed to 40 -- confirm NXT protocol
    print('nxt connected')
    while(1):
        cmd = input("please give me order\n(\"goto\": run testcode | 0,1,2,3,4: move)\n>> ")
        if cmd == 'goto':
            # BUG FIX: input() returns strings; testcode/pathengine do
            # arithmetic on these, so convert before calling.
            x = int(input('x>> '))
            y = int(input('y>> '))
            testcode(x, y)
            print('testcode done')
        elif cmd == 'run':
            # Endless patrol between fixed waypoints (millimeters).
            while True:
                testcode(1800, 2200)
                testcode(2300, 1800)
                testcode(900, 900)
                testcode(1400, 1400)
                testcode(900, 1400)
        elif cmd == 'exit':
            print('exit')
            nxt.send('0')    # stop motors before shutting down
            narslam.flag = 1
            t_slam.join()
            sys.exit(0)
        else:
            # Anything else is forwarded verbatim to the NXT (e.g. 0..4 motion codes).
            nxt.send(cmd)
            print('(', narslam.x, '|', narslam.y, '| Angle: ', narslam.theta, ')')
|
BrightBurningPark/Naruda-office_support_robot | robot/build_map_by_manual.py | '''
Naruda: 2019-1 AJOU Univ. major of Software department Capstone project
Robot main firmware made by "<NAME>" (github nickname 'BrightBurningPark').
Robot can drive by itself, localize position and direction in given map.
it can also build the map from zero.
I Love my school and the Capstone Program SO MUCH. it's true story ^^.
'''
# python basic or pip-installed library import
import os
import math
import time
import signal
import threading
# adding ./lib dir to use modules
import sys
sys.path.append('./lib')
# modules under lib directory
import ntdriver # network driver
import pathengine # shortest path finding engine
import rpslam # BreezySLAM(tinySLAM Implementation) with RPLidar A1
# General variables like Path, Var, Name, etc...
PATH_ROBOT = "/home/odroid/capdi/robot" # robot SW top path
PATH_MAP = PATH_ROBOT + "/maps" # map directory
PATH_LIB = PATH_ROBOT + "/lib" # libraries
MAP_NAME_NO_SLAM = 'MAP_NO_SLAM.pgm' # map name generated by no_map_slam
MAP_NAME_YES_SLAM = 'MAP_YES_SLAM.pgm' # map name pre-drawn
MAP_NAME_PATH_PLANNING = 'MAP_PATH_PLANNING.png' #png map name, for pathplanning
def handler(signum, frame):
    """SIGTSTP handler: stop the SLAM thread (saving the map) and exit.

    Relies on the module-level `narslam` and `t_slam` created in __main__.
    """
    narslam.flag = 1   # ask the SLAM loop to terminate
    t_slam.join()      # wait for it to save the map and release the lidar
    sys.exit(-1)
if __name__ == "__main__":
    print ('firmware started')
    narslam = rpslam.narlam()
    # Full SLAM: builds the map from scratch while localizing.
    t_slam = threading.Thread(target=narslam.slam_no_map, args=(PATH_MAP, MAP_NAME_NO_SLAM, MAP_NAME_PATH_PLANNING))
    t_slam.start()
    nxt = ntdriver.lego_nxt()
    nxt.connect()
    signal.signal(signal.SIGTSTP, handler)
    # Manual-drive loop: commands are forwarded to the NXT while SLAM maps.
    while True:
        cmd = input('Command>> ')
        if cmd == 'exit':
            narslam.flag = 1
            t_slam.join()
            #os.rename(PATH_MAP+'/'+MAP_NAME_NO_SLAM, PATH_MAP+'/'+MAP_NAME_YES_SLAM)
            print(narslam.x, narslam.y, narslam.theta)
            sys.exit(0)
        nxt.send(cmd)
        # BUG FIX: y was printed in raw millimeters while x was scaled to
        # meters on the same line; scale both consistently.
        print('(', narslam.x/1000, '|', narslam.y/1000, '|', narslam.theta, ')')
|
BrightBurningPark/Naruda-office_support_robot | robot/lib/ntdriver.py | import usb.core
import usb.util
import array
import sys
import os
import socketio
import time
SERVER_ADDR = 'http://172.16.31.10:3010'
class server:
    """Socket.IO client wrapper for talking to the control server.

    NOTE: `sio` and `request` are CLASS attributes -- one shared Socket.IO
    client and one shared pending-request slot across all instances.  The
    @sio.on handlers below are registered once, at class-definition time.
    """
    sio = socketio.Client()
    request = None  # last 'start_move' payload received from the server
    def __init__(self):
        self.flag = 0  # set to 1 (from another thread) to stop report_position()
    def connect(self):
        # Open the Socket.IO connection to the configured server address.
        server.sio.connect(SERVER_ADDR)
        print('server connect called')
    def report_position(self, narslam):
        """Emit the robot's SLAM pose once per second until self.flag is 1."""
        self.flag = 0
        while True:
            if self.flag == 1:
                break
            server.sio.emit('position', [narslam.x, narslam.y])
            print('position emitted')
            time.sleep(1)
    def report_progress(self):
        # Tell the server the robot is ready for its next move order.
        server.sio.emit('ready_to_move')
        print('ready to move emitted')
    @sio.on('connect')
    def on_connect():
        print('connected to server')
    @sio.on('start_move')
    def on_start_move(data):
        # Stash the request; the main loop is expected to poll server.request.
        print(data)
        server.request = data
        print('start move is called')
    @sio.on('disconnect')
    def on_disconnect():
        print('disconnected from server')
# Vendor and product identifiers for the brick
ID_VENDOR_LEGO = 0x0694
ID_PRODUCT_NXT = 0x0002
#NXT Protocol
# Sends a command to the brick and expects a reply
SYSTEM_COMMAND_REPLY = 0x01
# Replied command after SYSTEM_COMMAND_REPLY
REPLY_COMMAND = 0x02
# Signals to the brick that the remote is
# operating in robot mode
USB_ECROBOT_MODE = 0xFF
# Signature from the brick that acknowledges
# the robo mode
USB_ECROBOT_SIGNATURE = 'ECROBOT'
# Remote wants to disconnect from the brick
DISCONNECT_REQ = 0xFF
# Next bytes belong to the string
COMM_STRING = 0x01
# Acknowledgment from USBLoopBack
ACK_STRING = 0x02
#command protocol for robot harware control automation
STOP = '0'
FORWARD = '1'
BACKWARD = '2'
LEFT = '3'
RIGHT = '4'
class lego_nxt:
    """USB driver for a LEGO NXT brick running the ECRobot firmware.

    connect() finds the first NXT on the USB bus, claims it and performs
    the ECRobot handshake; send() transmits a command string and waits for
    the brick's 'ok' acknowledgment.
    """
    def __init__(self):
        self.NXTdevice = None

    def connect(self):
        """Find, open and handshake with the first NXT brick on the bus.

        Exits the process (-1) if no brick is found or the handshake
        signature is wrong.
        """
        print ('Seeking for the first NXT block')
        # Pick the first device matching LEGO's vendor/product ids.
        for bus in usb.busses():
            for device in bus.devices:
                if device.idVendor == ID_VENDOR_LEGO and device.idProduct == ID_PRODUCT_NXT:
                    self.NXTdevice = device
                    break
        if self.NXTdevice is None:
            print ('Device not found')
            sys.exit( -1 )
        # First (only) configuration / interface, and its out+in endpoints.
        self.config = self.NXTdevice.configurations[0]
        self.iface = self.config.interfaces[0][0]
        self.NXTout, self.NXTin = self.iface.endpoints
        self.handle = self.NXTdevice.open()
        self.handle.claimInterface( 0 )
        if os.name != 'nt':
            # see http://code.google.com/p/nxt-python/issues/detail?id=33
            self.handle.reset()
        # ECRobot handshake: announce robot mode, expect the signature back.
        self.data = array.array( 'B', [SYSTEM_COMMAND_REPLY, USB_ECROBOT_MODE] )
        self.handle.bulkWrite( self.NXTout.address, self.data )
        self.data = self.handle.bulkRead( self.NXTin.address, len( USB_ECROBOT_SIGNATURE ) + 1 )
        # BUG FIX: array.tostring() was removed in Python 3.9 -- use tobytes()
        # (identical output, available since 3.2).
        if self.data[0] != REPLY_COMMAND or self.data[1:].tobytes().decode('utf-8') != USB_ECROBOT_SIGNATURE:
            print ('Invalid NXT signature (', self.data[1:].tobytes(), ')')
            sys.exit( -1 )
        print('connection established')

    def send(self, message):
        """Send a command string to the brick and wait for its 'ok' ack.

        The special string 'terminate' asks the brick to disconnect.
        Exits the process (-1) when the acknowledgment is missing/invalid.
        """
        self.msg = message
        if self.msg == 'terminate':
            # BUG FIX: chr(DISCONNECT_REQ).encode() UTF-8-encoded 0xFF into
            # TWO bytes (0xC3 0xBF); send the single raw protocol byte.
            self.data = array.array( 'B', [DISCONNECT_REQ] )
            self.handle.bulkWrite( self.NXTout.address, self.data )
            return
        # Frame: one COMM_STRING marker byte followed by the message bytes.
        self.data = array.array( 'B', bytes([COMM_STRING]) + self.msg.encode() )
        self.handle.bulkWrite( self.NXTout.address, self.data )
        # Expect 4 bytes back: ACK_STRING marker + 'ok' payload.
        self.data = self.handle.bulkRead( self.NXTin.address, 4, timeout = 5000)
        if self.data[0] == ACK_STRING and self.data[1:3].tobytes().decode('utf-8') == 'ok':
            print ('Acknowledgment string received from the brick')
        else:
            print (self.data[1:3])
            print ('No acknowledgment')
            sys.exit( -1 )
|
BrightBurningPark/Naruda-office_support_robot | robot/lib/rpslam.py | from breezyslam.algorithms import RMHC_SLAM
from breezyslam.sensors import RPLidarA1 as LaserModel
from rplidar import RPLidar as Lidar
#from roboviz import MapVisualizer
from pgm_utils import *
from scipy.interpolate import interp1d
import numpy as np
from PIL import Image
import io
import os
MAP_SIZE_PIXELS = 1000
MAP_SIZE_METERS = 3
PPM = MAP_SIZE_PIXELS / (MAP_SIZE_METERS * 1000) #pixel per milimeter
LIDAR_DEVICE = '/dev/ttyUSB0'
MIN_SAMPLES = 80
class narlam:
    """RPLidar A1 + BreezySLAM (RMHC) wrapper exposing the robot pose.

    x/y are in millimeters within the MAP_SIZE_METERS square map; theta is
    in degrees, normalized to [0, 360).  Set self.flag = 1 from another
    thread to stop a running SLAM loop; self.pause = 1 suspends updates.
    """
    def __init__(self):
        self.flag = 0
        self.pause = 0
        self.lidar = Lidar(LIDAR_DEVICE)
        self.slam = RMHC_SLAM(LaserModel(), MAP_SIZE_PIXELS, MAP_SIZE_METERS)
        self.trajectory = []
        self.mapbytes = bytearray(MAP_SIZE_PIXELS * MAP_SIZE_PIXELS)
        self.iterator = self.lidar.iter_scans()
        self.previous_distances = None
        self.x = 0.0
        self.y = 0.0
        self.theta = 0.0
    def slam_no_map(self, path_map, map_name_pgm, map_name_png):
        """Full SLAM: build the map while localizing; on exit save it as
        PNG (for path planning) and PGM (for reuse by slam_yes_map)."""
        self.flag = 0
        self.pause = 0
        path_map_name = path_map + '/' + map_name_pgm  # map for reusing
        next(self.iterator)  # discard the first, possibly partial, scan
        while True:
            if self.flag == 1:
                break
            if self.pause == 1:
                continue
            items = [item for item in next(self.iterator)]
            distances = [item[2] for item in items]
            angles = [item[1] for item in items]
            if len(distances) > MIN_SAMPLES:
                # Resample the scan onto whole degrees 0..359.
                f = interp1d(angles, distances, fill_value='extrapolate')
                distances = list(f(np.arange(360)))
                self.slam.update(distances)
                self.previous_distances = distances.copy()
            elif self.previous_distances is not None:
                # Sparse scan: repeat the last good one to keep SLAM stable.
                self.slam.update(self.previous_distances)
            self.x, self.y, local_theta = self.slam.getpos()
            local_theta = local_theta % 360
            if local_theta < 0:
                # BUG FIX: was `360 + local.theta` (NameError on `local`).
                # Unreachable with Python's % on a positive modulus; kept as
                # a defensive guard.
                self.theta = 360 + local_theta
            else:
                self.theta = local_theta
            self.slam.getmap(self.mapbytes)
        # On SLAM thread termination, save map image in the map directory
        # PNG format (to see / path planning)
        image = Image.frombuffer('L', (MAP_SIZE_PIXELS, MAP_SIZE_PIXELS), self.mapbytes, 'raw', 'L', 0, 1)
        image.save(path_map + '/' + map_name_png)
        # PGM format (to reuse map)
        pgm_save(path_map_name, self.mapbytes, (MAP_SIZE_PIXELS, MAP_SIZE_PIXELS))
        self.lidar.stop()
        self.lidar.disconnect()
    def slam_yes_map(self, path_map, map_name):
        """Localization only, against a pre-built PGM map (map not updated)."""
        self.flag = 0
        self.pause = 0
        path_map_name = path_map + '/' + map_name
        map_bytearray, map_size = pgm_load(path_map_name)
        self.slam.setmap(map_bytearray)
        next(self.iterator)
        while True:
            if self.flag == 1:
                break
            if self.pause == 1:
                # NOTE(review): pause is a no-op here, unlike slam_no_map's
                # `continue` -- confirm which behavior is intended.
                pass
            items = [item for item in next(self.iterator)]
            distances = [item[2] for item in items]
            angles = [item[1] for item in items]
            if len(distances) > MIN_SAMPLES:
                f = interp1d(angles, distances, fill_value='extrapolate')
                distances = list(f(np.arange(360)))
                self.slam.update(distances, should_update_map = False)
                self.previous_distances = distances.copy()
            elif self.previous_distances is not None:
                self.slam.update(self.previous_distances, should_update_map = False)
            self.x, local_y, local_theta = self.slam.getpos()
            # The pre-drawn map's y axis is flipped relative to SLAM output.
            self.y = MAP_SIZE_METERS * 1000 - local_y
            local_theta = local_theta % 360
            if local_theta < 0:
                # BUG FIX: was `360 + local.theta` (NameError on `local`);
                # also dropped the no-op `else: local_theta = local_theta`.
                local_theta = 360 + local_theta
            # 6/11 -> we found that the vehicle's angle was reversed on the map
            self.theta = (local_theta+180)%360
            self.slam.getmap(self.mapbytes)
        self.lidar.stop()
        self.lidar.disconnect()
|
BrightBurningPark/Naruda-office_support_robot | robot/garage/firmware_final.py | '''
Naruda: 2019-1 AJOU Univ. major of Software department Capstone project
Robot main firmware made by "<NAME>" (github nickname 'BrightBurningPark').
Robot can drive by itself, localize position and direction in given map.
it can also build the map from zero.
I Love my school and the Capstone Program SO MUCH. it's true story ^^.
'''
'''
the final version of the narumi firmware.
integrated version of Mapping, Localization, Path Planning, Autonomous Driving, Network Communication.
'''
# python basic or pip-installed library import
import sys
import math
import time
import threading
# adding ./lib dir to use modules
import sys
sys.path.append('./lib')
# modules under lib directory
import ntdriver # network driver
import pathengine # shortest path finding engine
import rpslam # BreezySLAM(tinySLAM Implementation) with RPLidar A1
# General variables like Path, Var, Name, etc...
PATH_ROBOT = "/home/odroid/capdi/robot" # robot SW top path
PATH_MAP = PATH_ROBOT + "/maps" # map directory
PATH_LIB = PATH_ROBOT + "/lib" # libraries
MAP_NAME_NO_SLAM = 'MAP_NO_SLAM.png' # map name generated by no_map_slam
MAP_NAME_YES_SLAM = 'MAP_YES_SLAM.png' # map name pre-drawn
# flag_slam_yn = None # this variable is under the __main__ code
def autonomous_driving(dest):
    """Drive the robot from its current SLAM pose to `dest` (rally-to-rally).

    dest is an (x, y) pair in millimeters.  Uses the module-level `narslam`
    (pose source) and `nxt` (motor command channel).  Blocks until the
    robot is within 50 mm of the destination, then stops the motors.
    """
    # drive robot to the selected position from current position.
    # this would be used for driving robot from rally to rally
    print('current position / ', narslam.x, narslam.y)
    dest_x = dest[0] #int(input('x>> '))
    dest_y = dest[1] #int(input('y>> '))
    # Bang-bang steering loop: runs until within 50 mm of the target.
    while math.hypot(dest_x - narslam.x, dest_y - narslam.y) > 50:
        print('DISTANCE: ', math.hypot(dest_x - narslam.x, dest_y - narslam.y), '| while entered', )
        dx = dest_x - narslam.x
        dy = dest_y - narslam.y
        # Deadband: ignore sub-10 mm offsets on either axis.
        if abs(dx) <= 10:
            dx = 0
        if abs(dy) <= 10:
            dy = 0
        # Bearing to target; atan2(dx, dy) measures from the +y axis.
        rad = math.atan2(dx, dy)
        deg = math.degrees(rad)
        if deg < 0:
            deg = 360 + deg
        print('degree: ', deg, ' | ', narslam.theta, ' | (', narslam.x, ', ', narslam.y, ')')
        # Turn toward the bearing via the shorter direction (7.5 deg tolerance).
        if abs(deg - narslam.theta) <= 180:
            if narslam.theta - 7.5 > deg:
                nxt.send(ntdriver.LEFT)
            elif narslam.theta + 7.5 < deg:
                nxt.send(ntdriver.RIGHT)
            else:
                nxt.send(ntdriver.FORWARD)
        else:
            # Wrap-around case: the turn directions are mirrored.
            if narslam.theta - 7.5 > deg:
                nxt.send(ntdriver.RIGHT)
            elif narslam.theta + 7.5 < deg:
                nxt.send(ntdriver.LEFT)
            else:
                nxt.send(ntdriver.FORWARD)
        time.sleep(0.2)
    nxt.send(ntdriver.STOP)
    print('arrived to destination')
    print("done")
    print(narslam.x, narslam.y, narslam.theta)
def server_setup():
    """Set up server communication (stub, not implemented yet).

    Intended to instantiate/connect ntdriver's server class; currently
    returns None, so __main__'s `server` variable is always None.
    """
    #TODO: server setup code. function call from ntdriver
    pass
if __name__ == "__main__":
    print ('firmware start')
    narslam = rpslam.narlam()
    flag_slam_yn = input('select SLAM mode (y: Do SLAM with pre-drawn map / n: Do full SLAM) >> ')
    if flag_slam_yn == 'y':
        path_map_name = PATH_MAP + '/' + MAP_NAME_YES_SLAM
        # NOTE(review): lib/rpslam.narlam.slam_yes_map takes (path_map,
        # map_name); a single joined path only matches the older backup
        # implementation -- confirm which rpslam this file is meant to use.
        t_slam = threading.Thread(target=narslam.slam_yes_map, args=(path_map_name,))
    elif flag_slam_yn == 'n':
        path_map_name = PATH_MAP + '/' + MAP_NAME_NO_SLAM
        t_slam = threading.Thread(target=narslam.slam_no_map, args=(path_map_name,))
    else:
        print('error: invalid selection')
        sys.exit(-1)
    t_slam.start()
    #setup LEGO NXT
    nxt = ntdriver.lego_nxt()
    nxt.connect()
    #server communication setup
    server = server_setup() #this function is defined in this file. it calls functions in the ntdriver file.
    print('Basic Robot Firmware Setup Finished')
    while True:
        # BUG FIX: the loop body was only a comment, which is a syntax error
        # (a block cannot consist solely of comments).  Idle politely until
        # server polling is implemented.
        #TODO: check the server instance, to see there's request or not.
        time.sleep(0.5)
BrightBurningPark/Naruda-office_support_robot | robot/garage/USBLoopBackTester_python3.py | import usb.core
import usb.util
import array
import sys
import os
# Vendor and product identifiers for the brick
ID_VENDOR_LEGO = 0x0694
ID_PRODUCT_NXT = 0x0002
#NXT Protocol
# Sends a command to the brick and expects a reply
SYSTEM_COMMAND_REPLY = 0x01
# Replied command after SYSTEM_COMMAND_REPLY
REPLY_COMMAND = 0x02
# Signals to the brick that the remote is
# operating in robot mode
USB_ECROBOT_MODE = 0xFF
# Signature from the brick that acknowledges
# the robo mode
USB_ECROBOT_SIGNATURE = 'ECROBOT'
# Remote wants to disconnect from the brick
DISCONNECT_REQ = 0xFF
# Next bytes belong to the string
COMM_STRING = 0x01
# Acknowledgment from USBLoopBack
ACK_STRING = 0x02
if __name__ == "__main__":
    # find our device
    print ('Seeking for the first NXT block')
    # seek amongst the connected USB devices and pick the first brick
    NXTdevice = None
    for bus in usb.busses():
        for device in bus.devices:
            if device.idVendor == ID_VENDOR_LEGO and device.idProduct == ID_PRODUCT_NXT:
                NXTdevice = device
                break
    # Check if an NXT brick was found
    if NXTdevice is None:
        print ('Device not found')
        sys.exit( -1 )
    # get the first (and only?) configuration for the brick
    config = NXTdevice.configurations[0]
    # get the the appropriate brick interface
    iface = config.interfaces[0][0]
    # and get the data source/sinks for the brick
    NXTout, NXTin = iface.endpoints
    # let's open the device and get interface 0 all for us
    handle = NXTdevice.open()
    handle.claimInterface( 0 )
    # http://code.google.com/p/nxt-python/issues/detail?id=33
    if os.name != 'nt':
        handle.reset()
    # ECRobot handshake (bulk transfer): announce robot mode, then expect
    # the ECROBOT signature back from the brick.
    data = array.array( 'B', [SYSTEM_COMMAND_REPLY, USB_ECROBOT_MODE] )
    handle.bulkWrite( NXTout.address, data )
    data = handle.bulkRead( NXTin.address, len( USB_ECROBOT_SIGNATURE ) + 1 )
    # BUG FIX: array.tostring() was removed in Python 3.9 -- use tobytes().
    if data[0] != REPLY_COMMAND or data[1:].tobytes().decode('utf-8') != USB_ECROBOT_SIGNATURE:
        print ('Invalid NXT signature (', data[1:].tobytes(), ')')
        sys.exit( -1 )
    # now send the string we want to be processed by the device
    while True:
        print ('Enter the string to send to the NXT block (followed by return)')
        print ('Enter stop to terminate')
        msg = input( '>> ' )
        if msg == 'stop':
            # ask USBLoopBack to disconnect.
            # BUG FIX: array('b', list(chr(DISCONNECT_REQ))) raised TypeError
            # (list of str, and 0xFF does not fit a signed byte); send the
            # single raw protocol byte instead.
            data = array.array( 'B', [DISCONNECT_REQ] )
            handle.bulkWrite( NXTout.address, data )
            break
        # Frame: one COMM_STRING marker byte followed by the ASCII message.
        data = array.array( 'B', bytes([COMM_STRING]) + msg.encode('ascii', 'strict') )
        handle.bulkWrite( NXTout.address, data )
        # now let's wait for the brick to respond expecting 'ok' string
        # we are expecting 4 bytes in total
        data = handle.bulkRead( NXTin.address, 4 )
        if data[0] == ACK_STRING and data[1:3].tobytes().decode('utf-8') == 'ok':
            print ('Acknowledgment string received from the brick')
        else:
            print (data[1:3])
            print ('No acknowledgment')
            sys.exit( -1 )
|
BrightBurningPark/Naruda-office_support_robot | robot/garage/backup_rpslam.py | <gh_stars>1-10
from breezyslam.algorithms import RMHC_SLAM
from breezyslam.sensors import RPLidarA1 as LaserModel
from rplidar import RPLidar as Lidar
from roboviz import MapVisualizer # this occurs error when there's no display connected on odroid
from PIL import Image
import io
import os
MAP_SIZE_PIXELS = 3000
MAP_SIZE_METERS = 3 #10m * 10m plain
LIDAR_DEVICE = '/dev/ttyUSB0'
MIN_SAMPLES = 120 #default value 200, maximum 250, odroid maximum 140
class narlam:
    """Backup SLAM wrapper (RPLidar A1 + BreezySLAM) with on-screen display.

    Pose is exposed as x/y (millimeters) and theta (degrees, [0, 360)).
    Set self.flag = 1 from another thread to stop a running loop.
    """
    def __init__(self):
        self.flag = 0
        self.lidar = Lidar(LIDAR_DEVICE)
        self.slam = RMHC_SLAM(LaserModel(), MAP_SIZE_PIXELS, MAP_SIZE_METERS)
        # Raises when no display is connected on the odroid.
        self.viz = MapVisualizer(MAP_SIZE_PIXELS, MAP_SIZE_METERS, 'SLAM MAP')
        self.trajectory = []
        self.mapbytes = bytearray(MAP_SIZE_PIXELS * MAP_SIZE_PIXELS)
        self.iterator = self.lidar.iter_scans()
        self.previous_distances = None
        self.previous_angles = None
        self.x = 0.0
        self.y = 0.0
        self.theta = 0.0
    def slam_no_map(self, path_map_name):
        """Build a map from scratch while localizing; save it as PNG on exit."""
        next(self.iterator)  # discard the first, possibly partial, scan
        while True:
            if self.flag == 1:
                break
            items = [item for item in next(self.iterator)]
            distances = [item[2] for item in items]
            angles = [item[1] for item in items]
            if len(distances) > MIN_SAMPLES:
                self.slam.update(distances, scan_angles_degrees=angles)
                self.previous_distances = distances.copy()
                self.previous_angles = angles.copy()
            elif self.previous_distances is not None:
                # Sparse scan: reuse the previous full scan.
                self.slam.update(self.previous_distances, scan_angles_degrees=self.previous_angles)
            # NOTE(review): local_y is discarded, so self.y is never updated
            # in this mode -- confirm whether that is intentional.
            self.x, local_y, local_theta = self.slam.getpos()
            local_theta = local_theta % 360
            if local_theta < 0:
                # BUG FIX: was `360 + local.theta` (NameError on `local`).
                # Unreachable with Python's %, kept as a defensive guard.
                self.theta = 360 + local_theta
            else:
                self.theta = local_theta
            self.slam.getmap(self.mapbytes)
        # save map generated by slam
        image = Image.frombuffer('L', (MAP_SIZE_PIXELS, MAP_SIZE_PIXELS), self.mapbytes, 'raw', 'L', 0, 1)
        image.save(path_map_name)
        self.lidar.stop()
        self.lidar.disconnect()
    def slam_yes_map(self, path_map_name):
        """Localization only, with a pre-built map image (map not updated).

        NOTE(review): the raw PNG file bytes are handed to slam.setmap(),
        which expects an uncompressed pixel buffer -- confirm this worked.
        """
        with open(path_map_name, "rb") as map_img:
            f = map_img.read()
            b = bytearray(f)
        self.slam.setmap(b)
        next(self.iterator)
        while True:
            if self.flag == 1:
                break
            items = [item for item in next(self.iterator)]
            distances = [item[2] for item in items]
            angles = [item[1] for item in items]
            if len(distances) > MIN_SAMPLES:
                self.slam.update(distances, scan_angles_degrees=angles, should_update_map = False)
                self.previous_distances = distances.copy()
                self.previous_angles = angles.copy()
            elif self.previous_distances is not None:
                self.slam.update(self.previous_distances, scan_angles_degrees=self.previous_angles, should_update_map = False)
            self.x, self.y, self.theta = self.slam.getpos()
        self.lidar.stop()
        self.lidar.disconnect()
|
sarmadgulzar/pyutilx | pyutilx/__init__.py | from pyutilx.utils import *
|
sarmadgulzar/pyutilx | pyutilx/utils.py | def list_of_lists(l, n):
q = len(l) // n
r = len(l) % n
if r == 0:
return [l[i * n:(i + 1) * n] for i in range(q)]
else:
return [l[i * n:(i + 1) * n] if i <= q else l[i * n:i * n + r] for i in range(q + 1)]
|
sarmadgulzar/pyutilx | setup.py | from distutils.core import setup
setup(
name = 'pyutilx',
packages = ['pyutilx'],
version = '0.1',
license = 'MIT',
description = 'Useful Python Functions',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/sarmadgulzar/pyutilx',
download_url = 'https://github.com/sarmadgulzar/pyutilx/archive/v_01.tar.gz',
keywords = ['PYTHON', 'UTILITIES'],
install_requires=[
],
) |
wagnerandreoli/clitools | src/clib/docker.py | <gh_stars>0
# -*- coding: utf-8 -*-
"""Docker module."""
import argparse
import json
from pathlib import Path
from subprocess import PIPE
from typing import List
from clib.config import JsonConfig
from clib.files import existing_directory_type, existing_file_type, shell, shell_find
from clib.types import JsonDict
YML_DIRS = JsonConfig("docker-find-yml-dirs.json")
YML_FILES = JsonConfig("docker-find-yml-files.json")
class DockerContainer:
    """A helper for Docker containers."""

    def __init__(self, container_name: str) -> None:
        """Init instance."""
        self.container_name = container_name
        # Cached output of ``docker inspect`` (one dict per inspected container).
        self.inspect_json: List[JsonDict] = []

    def inspect(self) -> "DockerContainer":
        """Inspect a Docker container and cache its JSON info (docker runs only once)."""
        if not self.inspect_json:
            raw_info = shell(f"docker inspect {self.container_name}", quiet=True, capture_output=True).stdout
            self.inspect_json = json.loads(raw_info)
        return self

    def replace_mount_dir(self, path: Path) -> Path:
        """Translate a host path into the equivalent path inside the container.

        BUG FIX: the original used string ``startswith``, so a mount at
        ``/data`` wrongly matched a host path like ``/database/x``.  Using
        ``Path.relative_to`` matches on whole path components only.
        Returns *path* unchanged when it lies under no mount.
        """
        self.inspect()
        for mount in self.inspect_json[0]["Mounts"]:
            try:
                relative = path.relative_to(mount["Source"])
            except ValueError:
                continue  # path is not inside this mount
            return Path(mount["Destination"]) / relative
        return path
def rescan_files(dirs):
    """Persist the directory list, then rescan each one for docker-compose.yml files."""
    directories = sorted(dirs)
    YML_DIRS.dump(directories)
    found = set()
    for directory in directories:
        print(f"Files on {directory}")
        for yml_path in shell_find(f"{directory} -name docker-compose.yml"):
            print(f"  {yml_path}")
            found.add(str(yml_path))
    YML_FILES.dump(sorted(found))
def scan_command(parser, args):
    """Scan directories and add them to the list."""
    known_dirs = YML_DIRS.load_set()
    if not args.dir:
        # No new directories on the command line: just refresh what we have.
        print("Rescanning existing directories")
    for new_dir in args.dir:
        known_dirs.add(str(new_dir))
        print(f"Directory added: {new_dir}")
    rescan_files(known_dirs)
def rm_command(parser, args):
    """Remove directories from the list."""
    known_dirs = YML_DIRS.load_set()
    for candidate in args.dir:
        as_string = str(candidate)
        if as_string not in known_dirs:
            print(f"Directory was not configured: {candidate}")
            continue
        known_dirs.remove(as_string)
        print(f"Directory removed: {candidate}")
    rescan_files(known_dirs)
def ls_command(parser, args):
    """List registered yml files, one per line, sorted alphabetically."""
    for registered in sorted(YML_FILES.load_set()):
        print(registered)
def yml_command(parser, args):
    """Run a docker-compose command on one of the yml files."""
    partial_name = args.yml_file
    # Keep every registered file whose path contains the partial name.
    found = {candidate for candidate in YML_FILES.load_set() if partial_name in candidate}
    if not found:
        print(f"No .yml file was found with the string '{partial_name}'")
        exit(1)
    sorted_found = sorted(found)
    if len(sorted_found) == 1:
        chosen_yml = sorted_found[0]
    else:
        # Multiple candidates: let the user pick one interactively with fzf.
        choices = "\n".join(sorted_found)
        chosen_yml = shell(
            f"echo '{choices}' | fzf --height={len(sorted_found) + 2} --cycle --tac", quiet=True, stdout=PIPE
        ).stdout.strip()
        if not chosen_yml:
            print("No .yml file was chosen")
            exit(2)
    shell(f"docker-compose -f {chosen_yml} {' '.join(args.docker_compose_arg)}")
# TODO: Convert to click
def docker_find():
    """Find docker.compose.yml files.

    CLI entry point: builds an argparse parser with the subcommands
    scan / rm / ls / yml and dispatches to the matching *_command handler.
    """
    parser = argparse.ArgumentParser(description="find docker.compose.yml files")
    # chosen_function stays None when no subcommand matched; each subparser
    # overrides it via set_defaults below.
    parser.set_defaults(chosen_function=None)
    subparsers = parser.add_subparsers(title="commands")
    parser_scan = subparsers.add_parser("scan", help="scan directories and add them to the list")
    parser_scan.add_argument("dir", nargs="*", help="directory to scan", type=existing_directory_type)
    parser_scan.set_defaults(chosen_function=scan_command)
    parser_rm = subparsers.add_parser("rm", help="remove directories from the list")
    parser_rm.add_argument("dir", nargs="+", help="directory to remove", type=existing_directory_type)
    parser_rm.set_defaults(chosen_function=rm_command)
    parser_ls = subparsers.add_parser("ls", help="list yml files")
    parser_ls.set_defaults(chosen_function=ls_command)
    parser_yml = subparsers.add_parser("yml", help="choose one of the yml files to call docker-compose on")
    parser_yml.add_argument("yml_file", help="partial name of the desired .yml file")
    parser_yml.add_argument("docker_compose_arg", nargs=argparse.REMAINDER, help="docker-compose arguments")
    parser_yml.set_defaults(chosen_function=yml_command)
    args = parser.parse_args()
    if not args.chosen_function:
        # No subcommand given: show usage instead of failing.
        parser.print_help()
        return
    args.chosen_function(parser, args)
    return
def backup(parser, args):
    """Backup a Docker volume."""
    for volume in args.volume_name:
        # TODO: when piping from stdin, stdout is printed only at the end (buffered)
        # Mount the host volume store plus the backup dir, then tar the volume.
        shell(
            f"docker run --rm -i -v /var/lib/docker/volumes:/volumes -v {args.backup_dir}:/backup busybox "
            f"tar czf /backup/{volume}.tgz /volumes/{volume}"
        )
def restore(parser, args):
    """Restore a Docker volume."""
    tgz_file: Path = args.tgz_file
    backup_dir = tgz_file.parent
    # Default the volume name to the .tgz basename when none was given.
    new_volume_name = args.volume_name or tgz_file.stem
    busybox = f"docker run --rm -i -v /var/lib/docker:/docker -v {backup_dir}:/backup busybox "
    # Delete the destination directory before restoring
    shell(busybox + f"rm -rf /docker/volumes/{new_volume_name}")
    # Create the full path
    shell(busybox + f"mkdir /docker/volumes/{new_volume_name}")
    # Restore the .tgz file in the new empty directory
    shell(busybox + f"tar xzf /backup/{tgz_file.name} -C /docker/volumes/{new_volume_name}/ --strip-components 2")
# TODO: Convert to click
def docker_volume():
    """Backup and restore Docker volumes.

    See also https://stackoverflow.com/a/23778599/1391315.
    """
    parser = argparse.ArgumentParser(description="backup and restore Docker volumes")
    # chosen_function stays None unless a subcommand's set_defaults overrides it.
    parser.set_defaults(chosen_function=None)
    subparsers = parser.add_subparsers(title="commands")
    parser_backup = subparsers.add_parser("backup", aliases=["b"], help="backup a Docker volume")
    parser_backup.add_argument("backup_dir", type=existing_directory_type, help="directory to store the backups")
    parser_backup.add_argument("volume_name", nargs="+", help="Docker volume name")
    parser_backup.set_defaults(chosen_function=backup)
    parser_restore = subparsers.add_parser("restore", aliases=["r"], help="restore a Docker volume")
    parser_restore.add_argument(
        "tgz_file", type=existing_file_type, help="full path of the .tgz file created by the 'backup' command"
    )
    parser_restore.add_argument("volume_name", nargs="?", help="volume name (default: basename of .tgz file)")
    parser_restore.set_defaults(chosen_function=restore)
    args = parser.parse_args()
    if not args.chosen_function:
        # No subcommand given: show usage instead of failing.
        parser.print_help()
        return
    args.chosen_function(parser, args)
    return
|
wagnerandreoli/clitools | src/clib/constants.py | """Constants."""
TIME_FORMAT = "%H:%M:%S"
# Mimick Ansible colors to simulate "ok" (nothing changed) and "changed" in CLI scripts.
COLOR_OK = "green"
COLOR_CHANGED = "yellow"
|
wagnerandreoli/clitools | src/clib/dev/__init__.py | # -*- coding: utf-8 -*-
"""Development helpers."""
import os
import re
from pathlib import Path
from typing import List, Tuple
import click
from plumbum import FG, RETCODE
from requests_html import HTMLSession
from clib.files import shell
# Possible formats for tests:
# ___ test_name ___
# ___ Error on setup of test_name ___
# ___ test_name[Parameter] ___
TEST_NAMES_REGEX = re.compile(r"___ .*(test[^\[\] ]+)[\[\]A-Za-z]* ___")
# https://www.jetbrains.com/help/pycharm/directories-used-by-the-ide-to-store-settings-caches-plugins-and-logs.html
PYCHARM_MACOS_APP_PATH = Path("/Applications/PyCharm.app/Contents/MacOS/pycharm")
LIBRARY_LOGS_DIR = Path.home() / "Library/Logs/JetBrains"
@click.group()
def pycharmx():
    """Extra commands for PyCharm."""
    # Click group placeholder: the subcommands (open, logs) are attached
    # below via @pycharmx.command().
    pass
@pycharmx.command()
@click.argument("files", nargs=-1)
def open(files):
    """Invoke PyCharm on the command line.

    If a file doesn't exist, call `which` to find out the real location.
    """
    # NOTE: the function name shadows the builtin open(); it is the CLI
    # command name, so it must stay.
    resolved: List[str] = []
    had_error = False
    for candidate in files:
        as_path = Path(candidate).absolute()
        if as_path.is_file():
            resolved.append(str(as_path))
            continue
        located = shell(f"which {candidate}", quiet=True, return_lines=True)
        if located:
            resolved.append(located[0])
        else:
            click.secho(f"File not found on $PATH: {candidate}", fg="red")
            had_error = True
    if resolved:
        shell(f"{PYCHARM_MACOS_APP_PATH} {' '.join(resolved)}")
    exit(1 if had_error else 0)
@pycharmx.command()
def logs():
    """Tail the logs on PyCharm's latest version."""
    # Directory names sort lexicographically by version, so reverse order
    # puts the newest PyCharm release first.
    newest_first = sorted(LIBRARY_LOGS_DIR.glob("PyCharm20*"), reverse=True)
    if not newest_first:
        click.echo(f"No PyCharm logs found on {str(LIBRARY_LOGS_DIR)}")
        exit(0)
    latest_dir = LIBRARY_LOGS_DIR / newest_first[0]
    shell(f"tail -f {str(latest_dir)}/*.log")
@click.group()
def pytestx():
    """Extra commands for py.test."""
    # Click group placeholder: the subcommands (run, results) are attached
    # below via @pytestx.command().
    pass
@pytestx.command()
@click.option("--delete", "-d", default=False, is_flag=True, help="Delete pytest directory first")
@click.option("--failed", "-f", default=False, is_flag=True, help="Run only failed tests")
@click.option("--count", "-c", default=0, help="Repeat the same test several times")
@click.option("--reruns", "-r", default=0, help="Re-run a failed test several times")
@click.argument("class_names_or_args", nargs=-1)
def run(delete: bool, failed: bool, count: int, reruns: int, class_names_or_args: Tuple[str]):
    """Run pytest with some shortcut options."""
    # Import locally, so we get an error only in this function, and not in other functions of this module.
    from plumbum.cmd import rm, time as time_cmd

    if delete:
        click.secho("Removing .pytest directory", fg="green", bold=True)
        rm["-rf", ".pytest"] & FG
    command = ["pytest", "-vv", "--run-intermittent"]
    if reruns:
        command += ["--reruns", str(reruns)]
    if failed:
        command.append("--failed")
    if count:
        command += ["--count", str(count)]
    if class_names_or_args:
        targets = []
        for name in class_names_or_args:
            if "." not in name:
                # It might be an extra argument, let's just append it
                targets.append(name)
            else:
                # "pkg.module.TestClass" -> "pkg/module.py::TestClass"
                *modules, class_name = name.split(".")
                targets.append("{}.py::{}".format("/".join(modules), class_name))
        command.append("-s")
        command.extend(targets)
    click.secho(f"Running tests: time {' '.join(command)}", fg="green", bold=True)
    exit(time_cmd[command] & RETCODE(FG=True))
@pytestx.command()
@click.option("-f", "--result-file", type=click.File())
@click.option("-j", "--jenkins-url", multiple=True)
@click.option("-s", "dont_capture", flag_value="-s", help="Don't capture output")
@click.pass_context
def results(ctx, result_file, jenkins_url: Tuple[str, ...], dont_capture):
    """Parse a file with the output of failed tests, then re-run only those failed tests."""
    if result_file:
        contents = result_file.read()
    elif jenkins_url:
        # Fetch every distinct Jenkins page using the credentials from the env.
        pages = [
            HTMLSession().get(url, auth=(os.environ["JENKINS_USERNAME"], os.environ["JENKINS_PASSWORD"])).html.html
            for url in set(jenkins_url)
        ]
        contents = "\n".join(pages)
    else:
        click.echo(ctx.get_help())
        return
    # A login failure page would otherwise be silently parsed as "no tests".
    failed_login = re.search(r"<title>(?P<error>.+Invalid password.+)</title>", contents)
    if failed_login:
        click.secho(failed_login.group("error"), fg="red")
        exit(1)
    expression = " or ".join(set(TEST_NAMES_REGEX.findall(contents)))
    shell(f"pytest -vv {dont_capture or ''} -k '{expression}'")
|
wagnerandreoli/clitools | src/clib/files.py | """Files, symbolic links, operating system utilities."""
import os
import re
import sys
import unicodedata
from argparse import ArgumentTypeError
from functools import partial
from pathlib import Path
from shlex import split
from subprocess import PIPE, run
from time import sleep
from typing import Any, List, Optional, Set, Union
import click
import pendulum
from plumbum import FG
from slugify import slugify
from clib import dry_run_option, verbose_option, yes_option
from clib.constants import COLOR_OK
from clib.types import PathOrStr
from clib.ui import echo_dry_run
REMOVE_CHARS_FROM_DIR = "/ \t\n"
SLUG_SEPARATOR = "_"
REGEX_EXISTING_TIME = re.compile(r"(-[0-9]{2})[ _]?[Aa]?[Tt][ _]?([0-9]{2}[-._])")
REGEX_UPPER_CASE_LETTER = re.compile(r"([a-z])([A-Z]+)")
REGEX_UNDERLINE_LOWER_CASE = re.compile("_[a-z]")
REGEX_DATE_TIME = re.compile(r"([0-9][0-9-_\.]+[0-9])")
REGEX_MULTIPLE_SEPARATORS = re.compile("_+")
REGEX_UNIQUE_FILE = re.compile(r"(?P<original_stem>.+)_copy(?P<index>\d+)?", re.IGNORECASE)
IGNORE_FILES_ON_MERGE = {".DS_Store"}
POSSIBLE_FORMATS = (
# Human formats first
"MM_YYYY",
"DD_MM_YYYY",
"DD_MM_YY",
"DDMMYYYY",
"DDMMYY",
"DD_MM_YYYY_HH_mm_ss",
"DD_MM_YY_HH_mm_ss",
"DDMMYYYYHHmm",
# Then inverted formats
"YYYY_MM",
"YYYY_MM_DD",
"YYYYMMDD",
"YY_MM_DD_HH_mm_ss",
"YYYY_MM_DD_HH_mm_ss",
"YYYYMMDDHHmmss",
"YYYYMMDD_HHmmss",
)
def sync_dir(source_dirs: List[str], destination_dirs: List[str], dry_run: bool = False, kill: bool = False):
    """Synchronize a source directory with a destination."""
    # Import locally, so we get an error only in this function, and not in other functions of this module.
    from plumbum.cmd import rsync

    from clib.environments import RSYNC_EXCLUDE

    # These flags are the same for every src/dest pair, so build them once.
    dry_run_flag = "-n " if dry_run else ""
    kill_flag = "--del " if kill else ""
    exclude_flags = " ".join([f"--exclude={pattern}" for pattern in RSYNC_EXCLUDE])
    for dest_dir in destination_dirs:
        if not dest_dir:
            continue
        for src_dir in source_dirs:
            if not src_dir:
                continue
            # Remove the user home and concatenate the source after the destination
            full_dest_dir = os.path.join(dest_dir, src_dir.replace(os.path.expanduser("~"), "")[1:])
            rsync_args = (
                f"{dry_run_flag}{kill_flag}-trOlhDuzv --modify-window=2 --progress "
                f"{exclude_flags} {src_dir}/ {full_dest_dir}/"
            )
            click.secho(f"rsync {rsync_args}", fg="green")
            os.makedirs(full_dest_dir, exist_ok=True)
            rsync[split(rsync_args)] & FG
@click.command()
@dry_run_option
@click.option("--kill", "-k", default=False, is_flag=True, help="Kill files when using rsync (--del)")
@click.option("--pictures", "-p", default=False, is_flag=True, help="Backup pictures")
@click.pass_context
def backup_full(ctx, dry_run: bool, kill: bool, pictures: bool):
    """Perform all backups in a single script."""
    # Guard clause: without an explicit backup kind there is nothing to do.
    if not pictures:
        click.secho("Choose one of the options below.", fg="red")
        print(ctx.get_help())
        return
    from clib.environments import BACKUP_DIRS, PICTURE_DIRS

    click.secho("Pictures backup", bold=True, fg="green")
    sync_dir(PICTURE_DIRS, BACKUP_DIRS, dry_run, kill)
def shell(
command_line,
quiet=False,
exit_on_failure: bool = False,
return_lines=False,
dry_run=False,
header: str = "",
**kwargs,
):
"""Print and run a shell command.
:param quiet: Don't print the command line that will be executed.
:param exit_on_failure: Exit if the command failed (return code is not zero).
:param return_lines: Return a list of lines instead of a ``CompletedProcess`` instance.
:param dry_run: Only print the command that would be executed, and return.
:param header: Print a header before the command.
"""
if not quiet or dry_run:
if header:
click.secho(f"\n# {header}", fg="bright_white")
click.secho("$ ", fg="magenta", nl=False)
click.secho(command_line, fg="yellow")
if dry_run:
return
if return_lines:
kwargs.setdefault("stdout", PIPE)
completed_process = run(command_line, shell=True, universal_newlines=True, **kwargs)
if exit_on_failure and completed_process.returncode != 0:
sys.exit(completed_process.returncode)
if not return_lines:
return completed_process
stdout = completed_process.stdout.strip().strip("\n")
return stdout.split("\n") if stdout else []
def shell_find(command_line, **kwargs) -> List[str]:
    """Run a find command using the shell, and return its output as a list."""
    # Allow callers to omit the leading "find".
    full_command = command_line if command_line.startswith("find") else f"find {command_line}"
    for option, default in (("quiet", True), ("check", True)):
        kwargs.setdefault(option, default)
    return shell(full_command, return_lines=True, **kwargs)
def fzf(
    items: List[Any], *, reverse=False, query: str = None, auto_select: bool = None, exit_no_match: bool = None
) -> Optional[str]:
    """Run fzf to select among multiple choices."""
    choices = "\n".join(str(item) for item in items)
    query_opt = f" --query={query}" if query else ""
    if query:
        # A query implies the auto-select flags, unless explicit booleans were passed.
        auto_select = True if auto_select is None else auto_select
        exit_no_match = True if exit_no_match is None else exit_no_match
    tac_opt = " --tac" if reverse else ""
    select_one_opt = " --select-1" if auto_select else ""
    exit_zero_opt = " --exit-0" if exit_no_match else ""
    command = (
        f'echo "{choices}" | fzf --height 40% --reverse --inline-info '
        f"{query_opt}{tac_opt}{select_one_opt}{exit_zero_opt} --cycle"
    )
    # min(..., default=None) turns "no lines selected" into None.
    return min(shell(command, quiet=True, return_lines=True), default=None)
def relative_to_home(full_path: Union[str, Path]):
    """Return a directory with ``~`` instead of printing the home dir full path."""
    as_path = Path(full_path)
    return f"~/{as_path.relative_to(Path.home())}"
def _check_type(full_path, method, msg):
"""Check a path, raise an error if it's not valid."""
obj = Path(full_path)
if not method(obj):
raise ArgumentTypeError(f"{full_path} is not a valid existing {msg}")
return obj
def existing_directory_type(directory):
    """Convert the string to a Path object, raising an error if it's not a directory. Use with argparse."""
    # Delegates to _check_type so both validators share one error-message format.
    return _check_type(directory, Path.is_dir, "directory")
def existing_file_type(file):
    """Convert the string to a Path object, raising an error if it's not a file. Use with argparse."""
    # Delegates to _check_type so both validators share one error-message format.
    return _check_type(file, Path.is_file, "file")
def wait_for_process(process_name: str) -> None:
    """Wait for a process to finish.

    https://stackoverflow.com/questions/1058047/wait-for-any-process-to-finish
    """
    pid = shell(f"pidof {process_name}", quiet=True, stdout=PIPE).stdout.strip()
    if not pid:
        return  # process is not running at all
    # Poll /proc until the kernel removes the process entry.
    proc_entry = Path(f"/proc/{pid}")
    while proc_entry.exists():
        sleep(0.5)
@click.command()
@dry_run_option
@click.argument("directories", nargs=-1, required=True, type=click.Path(exists=True), metavar="[DIR1 [DIR2]...]")
def rm_broken_symlinks(dry_run: bool, directories):
    """Remove broken symlinks from directories (asks for confirmation)."""
    clean_dirs = [dir_str.rstrip("/") for dir_str in directories]
    # find: select symlinks (-type l) whose target does not exist (test -e fails).
    # "extra" stays empty for the listing pass and becomes " -delete" for removal.
    base_command = r"find {dir} -type l ! -exec test -e {{}} \; -print{extra}"
    all_broken_links = []
    for clean_dir in clean_dirs:
        broken_links = shell_find(base_command.format(dir=clean_dir, extra=""), quiet=False)
        all_broken_links.extend(broken_links)
        for file in broken_links:
            echo_dry_run(file, dry_run=dry_run)
    if not all_broken_links:
        echo_dry_run("There are no broken links to be removed", dry_run=dry_run, fg="green")
        exit(0)
    if dry_run:
        # Dry run: listing above is all we do.
        exit(0)
    click.confirm("These broken links will be removed. Continue?", default=False, abort=True)
    for clean_dir in clean_dirs:
        click.secho(f"Removing broken symlinks in {click.format_filename(clean_dir)}...", fg="green")
        # Second pass re-runs the same find with -delete appended.
        shell(base_command.format(dir=clean_dir, extra=" -delete"))
def slugify_camel_iso(old_string: str) -> str:
    """Slugify a string with camel case, underscores and ISO date/time formats.

    >>> slugify_camel_iso("some name Here 2017_12_30")
    'Some_Name_Here_2017-12-30'
    >>> slugify_camel_iso("DONT_PAY_this-bill-10-05-2015")
    'Dont_Pay_This_Bill_2015-05-10'
    >>> slugify_camel_iso("normal DATE 01012019 with no DASHES")
    'Normal_Date_2019-01-01_With_No_Dashes'
    >>> slugify_camel_iso("normal DATE 23_05_2019 with underscores")
    'Normal_Date_2019-05-23_With_Underscores'
    >>> slugify_camel_iso("inverted DATE 20191020 with no DASHES")
    'Inverted_Date_2019-10-20_With_No_Dashes'
    >>> slugify_camel_iso("blablabla-SCREAM LOUD AGAIN - XXX UTILIZAÇÃO 27.11.17")
    'Blablabla_Scream_Loud_Again_Xxx_Utilizacao_2017-11-27'
    >>> slugify_camel_iso("something-614 ATA AUG IN 25-04-17")
    'Something_614_Ata_Aug_In_2017-04-25'
    >>> slugify_camel_iso("inverted 2017_12_30_10_44_56 bla")
    'Inverted_2017-12-30T10-44-56_Bla'
    >>> slugify_camel_iso("normal 30.12.2017_10_44_56 bla")
    'Normal_2017-12-30T10-44-56_Bla'
    >>> slugify_camel_iso(" no day inverted 1975 08 ")
    'No_Day_Inverted_1975-08'
    >>> slugify_camel_iso(" no day normal 08 1975 ")
    'No_Day_Normal_1975-08'
    >>> slugify_camel_iso(" CamelCase pascalCase JSONfile WhatsApp")
    'Camel_Case_Pascal_Case_Jsonfile_Whats_App'
    >>> slugify_camel_iso(" 2019-08-22T16-01-22 keep formatted times ")
    '2019-08-22T16-01-22_Keep_Formatted_Times'
    >>> slugify_camel_iso("WhatsApp Ptt 2019-08-21 at 14.24.19")
    'Whats_App_Ptt_2019-08-21T14-24-19'
    >>> slugify_camel_iso("Whats_App_Image_2019-08-23_At_12_34_55 fix times on whatsapp files")
    'Whats_App_Image_2019-08-23T12-34-55_Fix_Times_On_Whatsapp_Files'
    >>> slugify_camel_iso("Whats_App_Zip_2019-08-23_At_13_23.36")
    'Whats_App_Zip_2019-08-23T13-23-36'
    >>> slugify_camel_iso("fwdConsultaCognicao")
    'Fwd_Consulta_Cognicao'
    >>> slugify_camel_iso("bla Bancários - Atenção ble")
    'Bla_Bancarios_Atencao_Ble'
    >>> slugify_camel_iso(" 240819 human day month year 290875 ")
    '2019-08-24_Human_Day_Month_Year_1975-08-29'
    >>> slugify_camel_iso("2019-08-23T12-48-26words with numbers")
    '2019-08-23T12-48-26_Words_With_Numbers'
    >>> slugify_camel_iso("some 20180726_224001 thing")
    'Some_2018-07-26T22-40-01_Thing'
    >>> slugify_camel_iso("glued14092019")
    'Glued_2019-09-14'
    >>> slugify_camel_iso("glued2019-08-23T12-48-26")
    'Glued_2019-08-23T12-48-26'
    >>> slugify_camel_iso("yeah-1975-08")
    'Yeah_1975-08'
    >>> slugify_camel_iso("xxx visa-2013-07 yyy")
    'Xxx_Visa_2013-07_Yyy'
    >>> slugify_camel_iso("date without seconds 101020191830 ")
    'Date_Without_Seconds_2019-10-10T18-30-00'
    >>> slugify_camel_iso(" p2p b2b 1on1 P2P B2B 1ON1 ")
    'P2p_B2b_1on1_P2p_B2b_1on1'
    """
    # Normalize Unicode first so slugify sees composed characters.
    temp_string = unicodedata.normalize("NFKC", old_string)
    # Insert separator in these cases
    for regex in (REGEX_EXISTING_TIME, REGEX_UPPER_CASE_LETTER):
        temp_string = regex.sub(r"\1_\2", temp_string)
    slugged = slugify(temp_string, separator=SLUG_SEPARATOR).capitalize()
    # Two-digit years more than 10 years in the future are assumed to be 19xx.
    next_ten_years = pendulum.today().year + 10

    def try_date(matchobj):
        # Try to parse each digit run as a date; leave it untouched when no
        # format of the same length matches.
        original_string = matchobj.group(0)
        actual_date = None
        which_format = "YYYY-MM-DD"
        for date_format in POSSIBLE_FORMATS:
            # Only try formats with the same size; Pendulum is too permissive and returns wrong dates.
            if len(original_string) != len(date_format):
                continue
            try:
                actual_date = pendulum.from_format(original_string, date_format)
                # If the year has only 2 digits, consider it as between 1929 and 2029
                format_has_century = "YYYY" in date_format
                if not format_has_century and actual_date.year > next_ten_years:
                    actual_date = actual_date.subtract(years=100)
                if "HH" in date_format:
                    which_format = "YYYY-MM-DDTHH-mm-ss"
                elif "DD" not in date_format:
                    which_format = "YYYY-MM"
                break
            except ValueError:
                continue
        new_date = actual_date.format(which_format) if actual_date else original_string
        return f"{SLUG_SEPARATOR}{new_date}{SLUG_SEPARATOR}"

    # Replace date-like runs, then collapse the extra separators try_date added.
    replaced_dates_multiple_seps = REGEX_DATE_TIME.sub(try_date, slugged)
    single_seps = REGEX_MULTIPLE_SEPARATORS.sub(SLUG_SEPARATOR, replaced_dates_multiple_seps)
    # Re-capitalize the letter after each underscore (capitalize() lowered it).
    corrected_case = REGEX_UNDERLINE_LOWER_CASE.sub(lambda match_obj: match_obj.group(0).upper(), single_seps)
    return corrected_case.strip(SLUG_SEPARATOR)
def rename_batch(yes: bool, dry_run: bool, is_dir: bool, root_dir: Path, items: Set[Path]) -> bool:
    """Rename a batch of items (directories or files).

    Shows each planned "from/to" pair, asks for confirmation (unless *yes*
    or *dry_run*) and performs the renames.

    :param yes: Skip the confirmation prompt.
    :param dry_run: Only display what would be renamed.
    :param is_dir: True when *items* are directories (affects slugging and messages).
    :param root_dir: Base directory used to display relative paths.
    :param items: Directories or files to rename.
    :return: True if at least one item needed renaming.
    """
    import errno  # local import keeps the module's top-level import surface unchanged

    which_type = "directories" if is_dir else "files"
    pairs = []
    for item in sorted(items):
        if is_dir:
            new_name = slugify_camel_iso(item.name)
        else:
            # Slug only the stem; extensions are just lowercased.
            new_name = slugify_camel_iso(item.stem) + item.suffix.lower()
        if item.name == new_name:
            continue
        relative_dir = str(item.parent.relative_to(root_dir))
        echo_dry_run(f"from: {relative_dir}/{item.name}", dry_run=dry_run)
        echo_dry_run(f"  to: {relative_dir}/", nl=False, dry_run=dry_run)
        click.secho(new_name, fg="yellow")
        pairs.append((item, item.with_name(new_name)))
    if not dry_run and pairs:
        pretty_root = relative_to_home(root_dir)
        if not yes:
            click.confirm(f"{pretty_root}: Rename these {which_type}?", default=False, abort=True)
        for original, new in pairs:
            if str(original) == str(new) and new.exists():
                # Don't rename files with the exact same name that already exist
                click.secho(f"New file already exists! {new}", err=True, fg="red")
            else:
                try:
                    os.rename(original, new)
                except OSError as err:
                    # BUG FIX: the original compared err.errno to the literal 66,
                    # which is ENOTEMPTY only on macOS/BSD (it is 39 on Linux).
                    if err.errno == errno.ENOTEMPTY:
                        # Target dir already exists and is not empty: merge into it.
                        merge_directories(new, original)
                    else:
                        raise  # bare raise preserves the original traceback
        click.secho(f"{pretty_root}: {which_type.capitalize()} renamed successfully.", fg="yellow")
    return bool(pairs)
@click.command()
@click.option(
    "-x",
    "--exclude",
    # resolve_path doesn't expand the tilde (~) to the home dir
    type=click.Path(exists=False, resolve_path=False),
    multiple=True,
    help="Exclude one or more directories",
)
@yes_option
@dry_run_option
@verbose_option
@click.argument("directories", nargs=-1, type=click.Path(exists=True, file_okay=False, dir_okay=True), required=True)
def rename_slugify(exclude, yes: bool, dry_run: bool, verbose: bool, directories):
    """Rename files recursively, slugifying them. Format dates in file names as ISO. Ignore hidden dirs/files."""
    # Split the --exclude arguments into directories and plain files;
    # non-existing paths are silently dropped.
    excluded_dirs = set()
    excluded_files = set()
    for file_system_object in exclude:
        path = Path(file_system_object).expanduser()
        if not path.exists():
            continue
        if path.is_dir():
            excluded_dirs.add(path)
        else:
            excluded_files.add(path)
    if excluded_dirs and verbose:
        pretty_dirs = sorted({relative_to_home(path) for path in excluded_dirs})
        click.echo(f"Excluding directories: {', '.join(pretty_dirs)}")
    if excluded_files and verbose:
        pretty_files = sorted({relative_to_home(path) for path in excluded_files})
        click.echo(f"Excluding files: {', '.join(pretty_files)}")
    for directory in directories:
        original_dir = Path(directory).expanduser()
        dirs_to_rename = set()
        files_to_rename = set()
        for child in original_dir.glob("**/*"):
            # Skip hidden entries and anything inside a hidden directory.
            if child.name.startswith(".") or "/." in str(child):
                if verbose:
                    click.echo(f"Ignoring hidden {relative_to_home(child)}")
                continue
            add = True
            for dir_to_exclude in excluded_dirs:
                if str(child).startswith(str(dir_to_exclude)):
                    if verbose:
                        click.echo(f"Ignoring {relative_to_home(child)}")
                    add = False
                    break
            if not add:
                continue
            if child.is_dir():
                dirs_to_rename.add(child)
            else:
                if child not in excluded_files:
                    files_to_rename.add(child)
                elif verbose:
                    click.echo(f"Ignoring file {relative_to_home(child)}")
        # Rename directories first
        rename_batch(yes, dry_run, True, original_dir, dirs_to_rename)
        # Glob the renamed directories for files
        # NOTE(review): files_to_rename was collected BEFORE the directory
        # renames, so paths under a renamed directory may be stale here —
        # TODO confirm and re-glob if needed.
        files_found = rename_batch(yes, dry_run, False, original_dir, files_to_rename)
        if not files_found:
            click.secho(f"{relative_to_home(directory)}: All files already have correct names.", fg=COLOR_OK)
@click.command()
@dry_run_option
@click.argument(
    "target_directory", nargs=1, type=click.Path(exists=True, file_okay=False, dir_okay=True), required=True
)
@click.argument(
    "source_directories", nargs=-1, type=click.Path(exists=True, file_okay=False, dir_okay=True), required=True
)
def merge_dirs(dry_run: bool, target_directory, source_directories):
    """Merge directories into one, keeping sub-directories and renaming files with the same name."""
    # Thin CLI wrapper: all the work happens in merge_directories().
    merge_directories(target_directory, *source_directories, dry_run=dry_run)
def merge_directories(target_dir: PathOrStr, *source_dirs: PathOrStr, dry_run: bool = False):
    """Merge directories into one, keeping sub-directories and renaming files with the same name.

    Moves every file from each source into the target, preserving the
    relative sub-directory layout.  Name clashes are resolved by
    unique_file_name() (appending _Copy/_CopyN).
    NOTE(review): returns False for an invalid target but None on success —
    callers should treat any falsy result accordingly; confirm before relying on it.
    """
    echo = partial(echo_dry_run, dry_run=dry_run)
    target_color = "green"
    source_color = "bright_blue"
    echo(f"Target: {target_dir}", fg=target_color)
    if not Path(target_dir).is_dir():
        click.secho("Target is not a directory", err=True, fg="red")
        return False
    for source_dir in source_dirs:
        echo(f"Source: {source_dir}", fg=source_color)
        if not Path(source_dir).is_dir():
            click.secho("Source is not a directory", err=True, fg="red")
            continue
        for path in sorted(Path(source_dir).rglob("*")):
            # Skip directories themselves (rglob visits their files anyway)
            # and junk files such as .DS_Store.
            if path.is_dir() or path.stem in IGNORE_FILES_ON_MERGE:
                continue
            new_path = unique_file_name(target_dir / path.relative_to(source_dir))
            # The four echo/secho calls below build one colored line.
            echo(f"Moving {dir_with_end_slash(source_dir)}", nl=False)
            click.secho(str(path.relative_to(source_dir)), fg=source_color, nl=False)
            click.secho(f" to {dir_with_end_slash(target_dir)}", nl=False)
            click.secho(str(new_path.relative_to(target_dir)), fg=target_color)
            if not dry_run:
                new_path.parent.mkdir(parents=True, exist_ok=True)
                path.rename(new_path)
def dir_with_end_slash(path: PathOrStr) -> str:
    r"""Always add a slash at the end of a directory.

    >>> dir_with_end_slash('/tmp/dir \t\n')
    '/tmp/dir/'
    >>> dir_with_end_slash(Path('/tmp/dir'))
    '/tmp/dir/'
    >>> dir_with_end_slash('/tmp/dir/file.txt')
    '/tmp/dir/file.txt/'
    >>> dir_with_end_slash(Path('/tmp/dir/file.txt'))
    '/tmp/dir/file.txt/'
    """
    # Only plain strings get trailing whitespace/slashes stripped first.
    cleaned = path.rstrip(REMOVE_CHARS_FROM_DIR) if isinstance(path, str) else path
    return f"{Path(cleaned)}{os.sep}"
def unique_file_name(path_or_str: PathOrStr) -> Path:
    """Unique file name: append a number to the file name until the file is not found."""
    candidate = Path(path_or_str)
    while candidate.exists():
        stem_base = None
        copy_index = None
        # REGEX_UNIQUE_FILE splits "<stem>_copy<N>"; keep the last match.
        for match in REGEX_UNIQUE_FILE.finditer(candidate.stem):
            stem_base = match.group("original_stem")
            copy_index = int(match.group("index") or 0) + 1
        if not stem_base:
            # No _copy suffix yet: start from the current stem.
            stem_base = candidate.stem
        suffix_number = copy_index if copy_index else ""
        candidate = candidate.with_name(f"{stem_base}_Copy{suffix_number}{candidate.suffix}")
    return candidate
|
wagnerandreoli/clitools | src/clib/git.py | # -*- coding: utf-8 -*-
"""Git tools."""
from shlex import split
from plumbum import ProcessExecutionError
from plumbum.cmd import git
DEVELOPMENT_BRANCH = "develop"
def run_git(*args, dry_run=False, quiet=False):
    """Run a git command, print it before executing and capture the output."""
    command = git[split(" ".join(args))]
    if not quiet:
        prefix = "[DRY-RUN] " if dry_run else ""
        print(f"{prefix}{command}")
    if dry_run:
        return ""
    output = command()
    if output and not quiet:
        print(output)
    return output
def branch_exists(branch):
    """Return True if the branch exists."""
    try:
        # rev-parse fails (non-zero exit) for unknown refs.
        run_git(f"rev-parse --verify {branch}", quiet=True)
    except ProcessExecutionError:
        return False
    return True
def get_current_branch():
    """Get the current branch name."""
    raw_name = run_git("rev-parse --abbrev-ref HEAD", quiet=True)
    return raw_name.strip()
|
wagnerandreoli/clitools | src/clib/iter.py | <reponame>wagnerandreoli/clitools<filename>src/clib/iter.py
"""Tools for iterators and iterables."""
from itertools import cycle, islice
def roundrobin(*iterables):
    """Get one item from each iterable in turn, dropping exhausted ones.

    Equivalent to the classic itertools recipe, written as explicit rounds.

    >>> list(roundrobin('ABC', 'D', 'EF'))
    ['A', 'D', 'E', 'B', 'F', 'C']
    """
    pending = [iter(iterable) for iterable in iterables]
    while pending:
        still_alive = []
        for iterator in pending:
            try:
                value = next(iterator)
            except StopIteration:
                continue  # this iterable is exhausted; drop it from the rotation
            still_alive.append(iterator)
            yield value
        pending = still_alive
|
wagnerandreoli/clitools | src/clib/environments.py | <gh_stars>0
# -*- coding: utf-8 -*-
"""Environment variables."""
import os
from typing import List
from prettyconf import config
from clib.config import cast_to_directory_list
# Configuration files are looked up starting from ~/.config/clib (prettyconf).
config.starting_path = os.path.expanduser("~/.config/clib")

# Comma-separated patterns to exclude from rsync runs.
RSYNC_EXCLUDE: List[str] = config(
    "RSYNC_EXCLUDE", cast=config.list, default="lost+found/,.dropbox.cache,.Trash-*,.DS_Store"
)
# Colon-separated directory lists; the cast expands "~" and validates that each directory exists.
BACKUP_DIRS: List[str] = config("BACKUP_DIRS", cast=cast_to_directory_list(), default="")
PICTURE_DIRS: List[str] = config("PICTURE_DIRS", cast=cast_to_directory_list(), default="")
|
wagnerandreoli/clitools | src/clib/dev/packaging.py | # -*- coding: utf-8 -*-
"""Packaging tools to publish projects on PyPI and GitHub."""
import os
from pathlib import Path
from shutil import rmtree
from typing import List, Optional, Tuple
import click
from clib import dry_run_option, verbose_option
from clib.files import shell
from clib.ui import prompt
HeaderCommand = Tuple[str, str]
class Publisher:
    """Helper to publish packages.

    Orchestrates external CLI tools (bumpversion, conventional-changelog, poetry,
    git, hub, twine, conventional-github-releaser) to bump the version, generate
    the changelog, build and upload a package, and create a GitHub release.
    """

    TOOL_BUMPVERSION = "bumpversion"
    TOOL_CONVENTIONAL_CHANGELOG = "conventional-changelog"
    TOOL_POETRY = "poetry"
    TOOL_GIT = "git"
    TOOL_HUB = "hub"
    TOOL_TWINE = "twine"
    TOOL_CONVENTIONAL_GITHUB_RELEASER = "conventional-github-releaser"

    # Maps each required executable to installation instructions shown when it is missing.
    NEEDED_TOOLS = {
        TOOL_BUMPVERSION: "Install from https://github.com/peritus/bumpversion#installation and configure setup.cfg",
        TOOL_CONVENTIONAL_CHANGELOG: (
            "Install from https://github.com/conventional-changelog/conventional-changelog/tree/master"
            + "/packages/conventional-changelog-cli#quick-start"
        ),
        TOOL_POETRY: "Install from https://github.com/sdispater/poetry#installation",
        TOOL_GIT: "Install using your OS package tools",
        TOOL_HUB: "Install from https://github.com/github/hub#installation",
        TOOL_TWINE: "Install from https://github.com/pypa/twine#installation",
        TOOL_CONVENTIONAL_GITHUB_RELEASER: (
            "Install from https://github.com/conventional-changelog/releaser-tools/tree"
            + "/master/packages/conventional-github-releaser#quick-start and configure a GitHub Access token"
        ),
    }
    # Files that must exist in the project before publishing.
    NEEDED_FILES = {
        "package.json": (
            f"Used by {TOOL_CONVENTIONAL_CHANGELOG}. See https://github.com/conventional-changelog/"
            + "conventional-changelog/blob/master/packages/conventional-changelog-cli/package.json"
        )
    }

    # https://github.com/peritus/bumpversion
    CMD_BUMP_VERSION = TOOL_BUMPVERSION + " {allow_dirty} {part}"
    CMD_BUMP_VERSION_SIMPLE_CHECK = f"{CMD_BUMP_VERSION} --dry-run"
    CMD_BUMP_VERSION_VERBOSE = f"{CMD_BUMP_VERSION_SIMPLE_CHECK} --verbose 2>&1"
    CMD_BUMP_VERSION_VERBOSE_FILES = f"{CMD_BUMP_VERSION_VERBOSE} | grep -i -E -e '^would'"
    # Greps the bumpversion dry-run output for the new version and commit message (single-quoted strings).
    CMD_BUMP_VERSION_GREP = f'{CMD_BUMP_VERSION_VERBOSE} | grep -i -E -e "would commit to git.+bump" -e "^new version" | grep -E -o "\'(.+)\'"'
    # https://github.com/conventional-changelog/conventional-changelog/tree/master/packages/conventional-changelog-cli
    CMD_CHANGELOG = f"{TOOL_CONVENTIONAL_CHANGELOG} -i CHANGELOG.md -p angular"
    CMD_BUILD_SETUP_PY = "python setup.py sdist bdist_wheel --universal"
    # https://poetry.eustace.io/
    CMD_POETRY_BUILD = f"{TOOL_POETRY} build"
    CMD_GIT_ADD_AND_COMMIT = TOOL_GIT + " add . && git commit -m'{}' --no-verify"
    CMD_GIT_PUSH = f"{TOOL_GIT} push"
    # "echo" prefix: these are printed for the user to run manually, not executed.
    CMD_GIT_CHECKOUT_MASTER = f"echo {TOOL_GIT} checkout master && echo {TOOL_GIT} pull"
    # https://github.com/pypa/twine
    # I tried using "poetry publish -u $TWINE_USERNAME -p $TWINE_PASSWORD"; the command didn't fail,
    # but nothing was uploaded
    # I also tried setting $TWINE_USERNAME and $TWINE_PASSWORD on the environment,
    # but then "twine upload" didn't work for some reason.
    CMD_TWINE_UPLOAD = TOOL_TWINE + " upload {repo} dist/*"
    # https://www.npmjs.com/package/conventional-github-releaser
    CMD_GITHUB_RELEASE = TOOL_CONVENTIONAL_GITHUB_RELEASER + " -p angular -v --token {}"
    CMD_MANUAL_GITHUB_RELEASE = f"echo {TOOL_HUB} browse"
    CMD_GITHUB_RELEASE_ENVVAR = "CONVENTIONAL_GITHUB_RELEASER_TOKEN"

    def __init__(self, dry_run: bool):
        """Store the dry-run flag; the GitHub token is resolved later by check_tools()."""
        self.dry_run = dry_run
        self.github_access_token: Optional[str] = None

    @classmethod
    def part_option(cls):
        """Add a --part option."""
        return click.option(
            "--part",
            "-p",
            default="minor",
            type=click.Choice(["major", "minor", "patch"]),
            help="Which part of the version number to bump",
        )

    @classmethod
    def allow_dirty_option(cls):
        """Add a --allow-dirty option."""
        return click.option(
            "--allow-dirty",
            "-d",
            default=False,
            is_flag=True,
            type=bool,
            help="Allow bumpversion to run on a dirty repo",
        )

    @classmethod
    def github_access_token_option(cls):
        """Add a --github-access-token option."""
        return click.option(
            "--github-access-token",
            "-t",
            help=(
                f"GitHub access token used by {cls.TOOL_CONVENTIONAL_GITHUB_RELEASER}. If not defined, will use the value"
                + f" from the ${cls.CMD_GITHUB_RELEASE_ENVVAR} environment variable"
            ),
        )

    def check_tools(self, github_access_token: str = None) -> None:
        """Check if all needed tools and files are present.

        Also resolves the GitHub access token: explicit argument first, then the
        well-known environment variable, then any single env var whose name
        contains "github_access_token" (case-insensitive).
        Exits with code 1 when something is missing (unless in dry-run mode).
        """
        all_ok = True
        for executable, help_text in self.NEEDED_TOOLS.items():
            # ``which`` prints nothing when the executable is absent.
            output = shell(f"which {executable}", quiet=True, return_lines=True)
            if not output:
                click.secho(f"Executable not found on the $PATH: {executable}. {help_text}", fg="bright_red")
                all_ok = False
        for file, help_text in self.NEEDED_FILES.items():
            path = Path(file)
            if not path.exists():
                click.secho(f"File not found: {path}. {help_text}", fg="bright_red")
                all_ok = False
        if github_access_token:
            self.github_access_token = github_access_token
        else:
            error_message = "Missing access token"
            if self.CMD_GITHUB_RELEASE_ENVVAR in os.environ:
                variable = self.CMD_GITHUB_RELEASE_ENVVAR
            else:
                # Fall back to any env var that looks like a GitHub token, but only if unambiguous.
                token_keys = {k for k in os.environ.keys() if "github_access_token".casefold() in k.casefold()}
                if len(token_keys) == 1:
                    variable = token_keys.pop()
                else:
                    variable = ""
                    error_message = f"You have multiple access tokens: {', '.join(token_keys)}"
            if variable:
                self.github_access_token = os.environ[variable]
                click.echo(f"Using environment variable {variable} as GitHub access token")
            else:
                click.secho(f"{error_message}. ", fg="bright_red", nl=False)
                click.echo(
                    f"Set the variable ${self.CMD_GITHUB_RELEASE_ENVVAR} or use"
                    + " --github-access-token to define a GitHub access token"
                )
                all_ok = False
        if self.dry_run:
            return
        if all_ok:
            click.secho("All the necessary tools are installed.", fg="bright_white")
        else:
            click.secho("Install the tools and create the missing files.")
            exit(1)

    @classmethod
    def _bump(cls, base_command: str, part: str, allow_dirty: bool):
        """Prepare the bump command."""
        return base_command.format(allow_dirty="--allow-dirty" if allow_dirty else "", part=part)

    def check_bumped_version(self, part: str, allow_dirty: bool) -> Tuple[str, str]:
        """Check the version that will be bumped.

        :return: Tuple of (commit message prefixed with "build: ", new version string).
        """
        shell(
            self._bump(self.CMD_BUMP_VERSION_SIMPLE_CHECK, part, allow_dirty),
            exit_on_failure=True,
            header="Check the version that will be bumped",
        )
        bump_cmd = self._bump(self.CMD_BUMP_VERSION_VERBOSE_FILES, part, allow_dirty)
        shell(bump_cmd, dry_run=self.dry_run, header="Display what files would be changed", exit_on_failure=True)
        if not self.dry_run:
            # The grep pipeline yields the new version first, then the would-be commit message.
            chosen_lines = shell(self._bump(self.CMD_BUMP_VERSION_GREP, part, allow_dirty), return_lines=True)
            new_version = chosen_lines[0].strip("'")
            commit_message = chosen_lines[1].strip("'").lower()
            click.echo(f"New version: {new_version}\nCommit message: {commit_message}")
            prompt("Were all versions correctly displayed?")
        else:
            # Placeholders so the rest of the dry-run flow has something to show.
            commit_message = "bump version from X to Y"
            new_version = "<new version here>"
        return f"build: {commit_message}", new_version

    def actually_bump_version(self, part: str, allow_dirty: bool) -> None:
        """Actually bump the version."""
        shell(self._bump(self.CMD_BUMP_VERSION, part, allow_dirty), dry_run=self.dry_run, header="Bump versions")

    def generate_changelog(self) -> None:
        """Generate the changelog."""
        shell(f"{Publisher.CMD_CHANGELOG} -s", dry_run=self.dry_run, header="Generate the changelog")

    def build_with_poetry(self) -> None:
        """Build the project with poetry, removing any previous dist/ contents first."""
        if not self.dry_run:
            remove_previous_builds()
        shell(
            Publisher.CMD_POETRY_BUILD, dry_run=self.dry_run, header=f"Build the project with {Publisher.TOOL_POETRY}"
        )
        if not self.dry_run:
            shell("ls -l dist")
            prompt("Was a dist/ directory created with a .tar.gz and a wheel?")

    def show_diff(self) -> None:
        """Show the diff of changed files so far and ask for final confirmation."""
        diff_command = f"{Publisher.TOOL_GIT} diff"
        shell(diff_command, dry_run=self.dry_run, header="Show a diff of the changes, as a sanity check")
        if self.dry_run:
            return
        prompt(f"Is the {diff_command} correct?")
        shell(f"{Publisher.TOOL_GIT} status", dry_run=self.dry_run, header="Show the list of changed files")
        prompt(
            "Last confirmation (point of no return):\n"
            + "Changes will be committed, files will be uploaded, a GitHub release will be created"
        )

    @classmethod
    def commit_push_tag(cls, commit_message: str, new_version: str, manual_release: bool) -> List[HeaderCommand]:
        """Prepare the commands to commit, push and tag."""
        commands = [
            ("Add all files and commit (skipping hooks)", Publisher.CMD_GIT_ADD_AND_COMMIT.format(commit_message)),
            ("Push", Publisher.CMD_GIT_PUSH),
        ]
        if manual_release:
            # Manual flow: only echo the tag/push commands for the user to run.
            commands.extend(
                [
                    (
                        "Approve the pull request on GitHub, then return here and run the following commands",
                        Publisher.CMD_GIT_CHECKOUT_MASTER,
                    ),
                    ("Create the tag manually", cls.cmd_tag(new_version, echo=True)),
                    ("Push the tags manually", cls.cmd_push_tags()),
                ]
            )
        else:
            commands.append(
                (
                    f"Create the tag but don't push it yet ({Publisher.TOOL_CONVENTIONAL_GITHUB_RELEASER} will do that)",
                    cls.cmd_tag(new_version),
                )
            )
        return commands

    @classmethod
    def cmd_tag(cls, version: str, echo=False) -> str:
        """Command to create a Git tag (prefixed with "echo" when only displaying it)."""
        return f"{'echo ' if echo else ''}{cls.TOOL_GIT} tag v{version}"

    @classmethod
    def cmd_push_tags(cls) -> str:
        """Command to push tags."""
        return f"echo {cls.TOOL_GIT} push --tags"

    @classmethod
    def upload_pypi(cls) -> List[HeaderCommand]:
        """Prepare commands to upload to PyPI (TestPyPI first, then the real index)."""
        return [
            ("Test upload the files to TestPyPI via Twine", Publisher.CMD_TWINE_UPLOAD.format(repo="-r testpypi")),
            ("Upload the files to PyPI via Twine", Publisher.CMD_TWINE_UPLOAD.format(repo="")),
        ]

    def release(self, manual_release) -> List[HeaderCommand]:
        """Prepare release commands."""
        if manual_release:
            return [
                (
                    "Open GitHub and create a GitHub release manually, copying the content from CHANGELOG.md",
                    Publisher.CMD_MANUAL_GITHUB_RELEASE,
                )
            ]
        return [("Create a GitHub release", Publisher.CMD_GITHUB_RELEASE.format(self.github_access_token))]

    def run_commands(self, commands: List[HeaderCommand]):
        """Run a list of (header, command) pairs, retrying each command until it succeeds."""
        for header, command in commands:
            while True:
                process = shell(command, dry_run=self.dry_run, header=header)
                if self.dry_run or process.returncode == 0:
                    break
                prompt("Something went wrong, hit ENTER to run the same command again.", fg="red")

    def success(self, new_version: str, upload_destination: str):
        """Display a success message."""
        if self.dry_run:
            return
        click.secho(f"The new version {new_version} was uploaded to {upload_destination}! ✨ 🍰 ✨", fg="bright_white")

    def publish(
        self,
        pypi: bool,
        ctx,
        part: str,
        allow_dirty: bool,
        github_access_token: str = None,
        manual_release: bool = False,
    ):
        """Publish a package: check tools, bump, changelog, build, commit, upload, release.

        NOTE(review): ``ctx`` (the click context) is accepted but currently unused.
        """
        self.check_tools(github_access_token)
        commit_message, new_version = self.check_bumped_version(part, allow_dirty)
        self.actually_bump_version(part, allow_dirty)
        self.generate_changelog()
        self.build_with_poetry()
        self.show_diff()
        commands = self.commit_push_tag(commit_message, new_version, manual_release)
        if pypi:
            commands.extend(self.upload_pypi())
        commands.extend(self.release(manual_release))
        self.run_commands(commands)
        self.success(new_version, "PyPI" if pypi else "GitHub")
def remove_previous_builds() -> bool:
    """Remove previous builds under the /dist directory.

    :return: True when the directory existed and was removed, False otherwise.
    """
    dist_dir = (Path(os.curdir) / "dist").resolve()
    if dist_dir.exists():
        click.echo(f"Removing previous builds on {dist_dir}")
        try:
            rmtree(str(dist_dir))
        except OSError:
            return False
        return True
    return False
@click.group()
def pypub():
    """Commands to publish packages on PyPI."""
    # Click group entry point; subcommands are registered below via @pypub.command().
    pass
@pypub.command()
@Publisher.github_access_token_option()
def check(github_access_token: str = None):
    """Check if all needed tools and files are present."""
    # dry_run=False so a missing tool/file makes check_tools() exit with code 1.
    Publisher(False).check_tools(github_access_token)
@pypub.command()
@verbose_option
def tools(verbose: bool):
    """Show needed tools and files for the deployment."""
    for tool, help_text in Publisher.NEEDED_TOOLS.items():
        if verbose:
            click.echo("")
        click.echo(click.style(tool, "bright_green") + f": {help_text}")
        if verbose:
            # Also run each tool's own --help to show its usage.
            shell(f"{tool} --help")
    for file, help_text in Publisher.NEEDED_FILES.items():
        click.echo(click.style(file, "bright_green") + f": {help_text}")
@pypub.command()
@dry_run_option
@Publisher.part_option()
@Publisher.allow_dirty_option()
@Publisher.github_access_token_option()
@click.pass_context
def pypi(ctx, dry_run: bool, part: str, allow_dirty: bool, github_access_token: str = None):
    """Package and upload to PyPI (bump version, changelog, package, upload)."""
    # First positional argument True = also upload to PyPI (not only GitHub).
    Publisher(dry_run).publish(True, ctx, part, allow_dirty, github_access_token)
@pypub.command()
@dry_run_option
@Publisher.part_option()
@Publisher.allow_dirty_option()
@Publisher.github_access_token_option()
@click.option(
    "--manual-release",
    "-r",
    default=False,
    is_flag=True,
    type=bool,
    help="Run commands up until tagging. Tag, merge, create the release: all have to be done manually",
)
@click.pass_context
def github(
    ctx, dry_run: bool, part: str, allow_dirty: bool, github_access_token: str = None, manual_release: bool = False
):
    """Release to GitHub only (bump version, changelog, package, upload)."""
    # First positional argument False = skip the PyPI upload step.
    Publisher(dry_run).publish(False, ctx, part, allow_dirty, github_access_token, manual_release)
@pypub.command()
def changelog():
    """Preview the changelog."""
    # -u shows unreleased changes only; output is paged through less.
    shell(f"{Publisher.CMD_CHANGELOG} -u | less")
|
wagnerandreoli/clitools | src/clib/db.py | # -*- coding: utf-8 -*-
"""Database module."""
import argparse
from pathlib import Path
from subprocess import PIPE
from typing import List, Optional
from clib.docker import DockerContainer
from clib.files import existing_directory_type, existing_file_type, shell
POSTGRES_DOCKER_CONTAINER_NAME = "postgres12"
class DatabaseServer:
    """A database server URI parser.

    Splits a URI of the form ``protocol://user:password@server:port`` into its
    components. User/password and port are optional; when the credential part
    has no colon, both user and password are left as None.
    """

    uri: str
    protocol: str
    user: Optional[str]
    password: Optional[str]
    server: str
    port: Optional[int]

    def __init__(self, uri):
        """Parse the server URI and extract the needed parts."""
        self.uri = uri
        credentials_part, host_part = uri.split("@")
        self.protocol, user_password = credentials_part.split("://")
        if ":" in user_password:
            self.user, self.password = user_password.split(":")
        else:
            self.user = None
            self.password = None
        if ":" in host_part:
            host_name, raw_port = host_part.split(":")
            self.server = host_name
            self.port = int(raw_port)
        else:
            self.server = host_part
            self.port = None

    @property
    def uri_without_port(self):
        """Return the URI without the port."""
        pieces = self.uri.split(":")
        # A full URI with port has exactly 4 colon-separated pieces
        # (protocol, //user, password@server, port); anything else has no port.
        if len(pieces) != 4:
            return self.uri
        return ":".join(pieces[:-1])
class PostgreSQLServer(DatabaseServer):
    """A PostgreSQL database server URI parser and more stuff.

    Locates the native ``psql``/``pg_dump`` executables; when either is missing,
    falls back to the corresponding Docker wrapper and flags ``inside_docker``.
    """

    databases: List[str] = []
    inside_docker = False
    psql: str = ""
    pg_dump: str = ""

    def __init__(self, *args, **kwargs):
        """Determine which psql/pg_dump executables exist on this machine."""
        super().__init__(*args, **kwargs)
        self.psql = self._locate("psql")
        self.pg_dump = self._locate("pg_dump")

    def _locate(self, executable: str) -> str:
        """Return the native path of an executable, or its Docker wrapper name as fallback."""
        lines = shell(f"which {executable}", quiet=True, return_lines=True)
        # ``which`` produces no output lines when the executable is absent;
        # indexing [0] unconditionally (as before) raised IndexError in that case.
        found = lines[0] if lines else ""
        if found:
            return found
        self.inside_docker = True
        return f"{executable}_docker"

    @property
    def docker_uri(self):
        """Return a URI without port if we are inside Docker."""
        return self.uri_without_port if self.inside_docker else self.uri

    def list_databases(self) -> "PostgreSQLServer":
        """List databases (non-template only), storing them sorted in ``self.databases``.

        Exits with code 10 when the psql query fails.
        """
        process = shell(
            f"{self.psql} -c 'SELECT datname FROM pg_database WHERE datistemplate = false' "
            f"--tuples-only {self.docker_uri}",
            quiet=True,
            stdout=PIPE,
        )
        if process.returncode:
            print(f"Error while listing databases.\nstdout={process.stdout}\nstderr={process.stderr}")
            exit(10)
        self.databases = sorted(db.strip() for db in process.stdout.strip().split())
        return self
def backup(parser, args):
    """Backup PostgreSQL databases.

    Dumps every database of the server into
    ``<backup_dir>/<protocol>_<server>_<port>/<database>.sql``.
    """
    pg = PostgreSQLServer(args.server_uri).list_databases()
    container = DockerContainer(POSTGRES_DOCKER_CONTAINER_NAME)
    for database in pg.databases:
        sql_file: Path = Path(args.backup_dir) / f"{pg.protocol}_{pg.server}_{pg.port}" / f"{database}.sql"
        sql_file.parent.mkdir(parents=True, exist_ok=True)
        if pg.inside_docker:
            # pg_dump runs inside the container, so map the host path to the mounted path.
            sql_file = container.replace_mount_dir(sql_file)
        shell(f"{pg.pg_dump} --clean --create --if-exists --file={sql_file} {pg.docker_uri}/{database}")
def restore(parser, args):
    """Restore PostgreSQL databases.

    Refuses to overwrite: the target database must not already exist on the server.
    Restoring under a different name than the .sql file stem is not implemented yet.
    """
    pg = PostgreSQLServer(args.server_uri).list_databases()
    new_database = args.database_name or args.sql_file.stem
    if new_database in pg.databases:
        print(f"The database {new_database!r} already exists in the server. Provide a new database name.")
        exit(1)
    if new_database != args.sql_file.stem:
        # TODO Optional argument --owner to set the database owner
        print(f"TODO: Create a user named {new_database!r} if it doesn't exist (or raise an error)")
        print(f"TODO: Parse the .sql file and replace DATABASE/OWNER {args.sql_file.stem!r} by {new_database!r}")
        exit(2)
    shell(f"{pg.psql} {args.server_uri} < {args.sql_file}")
# TODO: Convert to click
def postgresx():
    """Extra PostgreSQL tools like backup, restore, user creation, etc.

    Argparse-based CLI entry point; each subcommand stores its handler in
    ``chosen_function`` and receives ``(parser, args)``.
    """
    parser = argparse.ArgumentParser(description="PostgreSQL helper tools")
    parser.add_argument("server_uri", help="database server URI (postgresql://user:password@server:port)")
    parser.set_defaults(chosen_function=None)
    subparsers = parser.add_subparsers(title="commands")

    parser_backup = subparsers.add_parser("backup", help="backup a PostgreSQL database to a SQL file")
    parser_backup.add_argument("backup_dir", type=existing_directory_type, help="directory to store the backups")
    parser_backup.set_defaults(chosen_function=backup)

    parser_restore = subparsers.add_parser("restore", help="restore a PostgreSQL database from a SQL file")
    parser_restore.add_argument(
        "sql_file", type=existing_file_type, help="full path of the .sql file created by the 'backup' command"
    )
    parser_restore.add_argument("database_name", nargs="?", help="database name (default: basename of .sql file)")
    parser_restore.set_defaults(chosen_function=restore)

    # TODO Subcommand create-user new-user-name or alias user new-user-name to create a new user
    # TODO postgresx user myuser [mypass]
    args = parser.parse_args()
    if not args.chosen_function:
        # No subcommand given: show usage instead of crashing.
        parser.print_help()
        return
    args.chosen_function(parser, args)
    return
|
wagnerandreoli/clitools | src/clib/ui.py | # -*- coding: utf-8 -*-
"""User interface."""
import sys
import time
from pathlib import Path
from subprocess import PIPE, CalledProcessError
import click
def notify(title, message):
    """If terminal-notifier is installed, use it to display a notification."""
    from clib.files import shell

    # Pick the executable-lookup command for the current platform.
    locator = "which" if sys.platform == "linux" else "command -v"
    try:
        notifier_path = shell("{} terminal-notifier".format(locator), check=True, stdout=PIPE).stdout.strip()
    except CalledProcessError:
        notifier_path = ""
    if not notifier_path:
        return
    shell(
        'terminal-notifier -title "{}: {} complete" -message "Successfully {} dev environment."'.format(
            Path(__file__).name, title, message
        )
    )
def prompt(message: str, fg: str = "bright_white") -> None:
    """Display a prompt with a message. Wait a little bit before, so stdout is flushed before the input message."""
    print()
    click.secho(message, fg=fg)
    # Short pause so prior output is flushed before input() writes its own prompt.
    time.sleep(0.2)
    input("Press ENTER to continue or Ctrl-C to abort: ")
def success(message: str) -> None:
    """Display a success message (bright green)."""
    click.secho(message, fg="bright_green")
def failure(message: str, exit_code: int = None) -> None:
    """Display an error message on stderr and optionally exit with the given code."""
    click.secho(message, fg="bright_red", err=True)
    if exit_code is not None:
        sys.exit(exit_code)
def echo_dry_run(message: str, *, nl: bool = True, dry_run: bool = False, **styles) -> None:
    """Display a message, prefixed once with "[dry-run] " when dry_run is True."""
    if dry_run:
        click.secho("[dry-run] ", fg="bright_cyan", nl=False)
    click.secho(message, nl=nl, **styles)
class AliasedGroup(click.Group):
    """A click group that allows aliases.

    Taken from ``click``'s documentation: `Command Aliases <https://click.palletsprojects.com/en/7.x/advanced/#command-aliases>`_.
    """

    def get_command(self, ctx, cmd_name):
        """Get a click command, accepting any unambiguous prefix as an alias."""
        exact_match = click.Group.get_command(self, ctx, cmd_name)
        if exact_match is not None:
            return exact_match
        candidates = [name for name in self.list_commands(ctx) if name.startswith(cmd_name)]
        if not candidates:
            return None
        if len(candidates) == 1:
            return click.Group.get_command(self, ctx, candidates[0])
        # More than one command shares this prefix: refuse to guess.
        ctx.fail("Too many matches: %s" % ", ".join(sorted(candidates)))
|
wagnerandreoli/clitools | src/clib/config.py | # -*- coding: utf-8 -*-
"""Configuration helpers."""
import json
import os
from pathlib import Path
from typing import List
CONFIG_DIR = Path("~/.config/dotfiles/").expanduser()
class JsonConfig:
    """A JSON config file stored under the config directory."""

    def __init__(self, partial_path):
        """Create or get a JSON config file inside the config directory."""
        self.full_path = CONFIG_DIR / partial_path
        self.full_path.parent.mkdir(parents=True, exist_ok=True)

    def _generic_load(self, default):
        """Try to load file data, and use a default when there is no data."""
        try:
            return json.loads(self.full_path.read_text())
        except (json.decoder.JSONDecodeError, FileNotFoundError):
            # Missing or corrupt file: behave as if it were empty.
            return default

    def load_set(self):
        """Load file data as a set."""
        return set(self._generic_load(set()))

    def dump(self, new_data):
        """Dump new JSON data in the config file (sets become JSON lists)."""
        payload = list(new_data) if isinstance(new_data, set) else new_data
        self.full_path.write_text(json.dumps(payload))
def cast_to_directory_list(check_existing: bool = True):
    """Cast from a string of directories separated by colons.

    Useful functions for the prettyconf module.

    Optional check existing directories: throw an error if any directory does not exist.
    """

    def cast_function(value) -> List[str]:
        """Cast function expected by prettyconf."""
        # Expand "~" and drop trailing slashes from every entry.
        directories = [os.path.expanduser(raw_dir).rstrip("/") for raw_dir in value.split(":")]
        if check_existing:
            missing = [directory for directory in directories if directory and not os.path.isdir(directory)]
            if missing:
                raise RuntimeError(
                    "Some directories were not found or are not directories: {}".format(":".join(missing))
                )
        return directories

    return cast_function
|
wagnerandreoli/clitools | tests/test_files.py | """File tests."""
from pathlib import Path
from textwrap import dedent
from testfixtures import compare
from clib.files import merge_directories, unique_file_name
def test_unique_file_name(tmp_path):
    """Test unique file names with numeric index.

    A non-existing path is returned unchanged; the first clash appends "_Copy",
    and later clashes append "_Copy<n>" with n starting at 1.
    """
    path = tmp_path / "file.txt"
    assert unique_file_name(path) == path
    path.touch()
    first = path.with_name("file_Copy.txt")
    assert unique_file_name(path) == first
    first.touch()
    second = path.with_name("file_Copy1.txt")
    assert unique_file_name(first) == second
    second.touch()
    third = path.with_name("file_Copy2.txt")
    assert unique_file_name(second) == third
def create(file: Path):
    """Create an empty file and its parent dirs."""
    parent_dir = file.parent
    parent_dir.mkdir(parents=True, exist_ok=True)
    file.touch()
def test_merge_directories(tmp_path):
    """Test merge directories.

    Merging ``other`` and ``another`` into ``tmp_path`` keeps the relative tree
    structure; clashing file names get "_Copy"/"_Copy<n>" suffixes.
    """
    create(tmp_path / "2020" / "12" / "one.txt")
    create(tmp_path / "2020" / "root.txt")

    other = Path(tmp_path / "other")
    create(other / "2020" / "root_Copy.txt")
    create(other / "2020" / "12" / "one.txt")
    create(other / "2020" / "12" / "two.txt")
    create(other / "2021" / "01" / "three.txt")

    another = Path(tmp_path / "another")
    create(another / "2020" / "12" / "one.txt")
    create(another / "2020" / "12" / "two.txt")
    create(another / "2020" / "root_Copy.txt")

    merge_directories(tmp_path, other, another)

    expected = """
        2020/12/one.txt
        2020/12/one_Copy.txt
        2020/12/one_Copy1.txt
        2020/12/two.txt
        2020/12/two_Copy.txt
        2020/root.txt
        2020/root_Copy.txt
        2020/root_Copy1.txt
        2021/01/three.txt
    """
    actual = sorted(str(path.relative_to(tmp_path)) for path in tmp_path.rglob("*") if path.is_file())
    compare(actual=actual, expected=dedent(expected).strip().splitlines())
|
wagnerandreoli/clitools | src/clib/__init__.py | <reponame>wagnerandreoli/clitools
# -*- coding: utf-8 -*-
"""Main module for clib."""
import logging
import os
from configparser import ConfigParser
import click
from colorlog import ColoredFormatter
__author__ = "<NAME>"
__email__ = "<EMAIL>"
# Per-user config directory named after this package (e.g. ~/.config/clib/).
CONFIG_DIR = os.path.expanduser(os.path.join("~/.config", os.path.basename(os.path.dirname(__file__)), ""))
os.makedirs(CONFIG_DIR, exist_ok=True)
CONFIG_FILENAME = os.path.join(CONFIG_DIR, "config.ini")

CONFIG = ConfigParser()
# http://stackoverflow.com/questions/19359556/configparser-reads-capital-keys-and-make-them-lower-case
CONFIG.optionxform = str  # type: ignore
CONFIG.read(CONFIG_FILENAME)

LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
if not LOGGER.hasHandlers():
    # Attach a single colored stream handler; the guard avoids duplicates on re-import.
    CHANNEL = logging.StreamHandler()
    CHANNEL.setFormatter(
        ColoredFormatter(
            "%(log_color)s%(levelname)-8s%(reset)s %(blue)s%(message)s",
            datefmt=None,
            reset=True,
            log_colors={
                "DEBUG": "cyan",
                "INFO": "green",
                "WARNING": "yellow",
                "ERROR": "red",
                "CRITICAL": "red,bg_white",
            },
            secondary_log_colors={},
        )
    )
    LOGGER.addHandler(CHANNEL)

# Options to use as decorators on commands.
yes_option = click.option("--yes", "-y", default=False, is_flag=True, help="Answer yes on all prompts")
dry_run_option = click.option(
    "--dry-run", "-n", default=False, is_flag=True, help="Only show what would be done, without actually doing it"
)
verbose_option = click.option("--verbose", "-v", default=False, is_flag=True, type=bool, help="Verbose display")
def read_config(section_name, key_name, default=None):
    """Read a value from the config file.

    Create section and key in the config object, if they don't exist.
    The config must be saved with save_config(), to persist the values.

    :param section_name: Name of the section in the .ini file.
    :param key_name: Name of the key to read the value from.
    :param default: Default value in case the key doesn't exist.
    :return: Section if key_name is empty; otherwise, return the key value or the default.
    """
    if section_name not in CONFIG:
        CONFIG[section_name] = {}
    section = CONFIG[section_name]
    if not key_name:
        return section
    if key_name not in section:
        section[key_name] = default
    return section[key_name]
def save_config():
    """Save the config file, creating its directory when needed."""
    config_dir = os.path.dirname(CONFIG_FILENAME)
    os.makedirs(config_dir, exist_ok=True)
    with open(CONFIG_FILENAME, "w") as file_handle:
        CONFIG.write(file_handle)
|
wagnerandreoli/clitools | src/clib/contacts.py | <filename>src/clib/contacts.py
"""Utilities to deal with contact data (people and places with name, address, phone)."""
from collections import defaultdict
from pathlib import Path
from tempfile import NamedTemporaryFile
from textwrap import dedent
from typing import DefaultDict, List, Optional, Set, Tuple, Union
import click
import phonenumbers
from phonenumbers import NumberParseException
from ruamel.yaml import YAML, YAMLError
from ruamel.yaml.scalarstring import LiteralScalarString
from clib.files import shell
from clib.types import JsonDict
CONTACT_SEPARATOR = "---"
URL_PREFIX = "http"
KEY_CONTACTS = "contacts"
@click.group()
def contacts():
    """Utilities to deal with contact data (people and places with name, address, phone)."""
    # Click group entry point; subcommands (e.g. ``parse``) are registered below.
    pass
class Contact:
    """A contact with name, address, phones and notes.

    Can be built either from a raw text blob (``raw_original``) that is parsed
    for phones/emails/links and a postal address, or from an already-structured
    dict (``contact_dict``) as produced by :meth:`as_dict`.
    """

    def __init__(self, raw_original: Optional[str], contact_dict: JsonDict = None) -> None:
        """Initialise fields from the dict (if any), then parse the raw text (if any)."""
        data = contact_dict.copy() if contact_dict else {}
        self.name: Union[str, LiteralScalarString] = data.pop("name", "")
        self.address: LiteralScalarString = LiteralScalarString(data.pop("address", ""))
        self.notes: LiteralScalarString = LiteralScalarString(data.pop("notes", ""))
        self.phones: Set[str] = set(data.pop("phones", []))
        self.emails: Set[str] = set(data.pop("emails", []))
        self.links: Set[str] = set(data.pop("links", []))
        self.raw_original: LiteralScalarString = LiteralScalarString(
            raw_original.strip() if raw_original else data.pop("raw_original", "")
        )
        # Whatever keys remain in the dict are preserved as-is on output.
        self.existing_data = data
        self.parse_contact()

    def parse_contact(self):
        """Parse contact data from a string.

        Each line is first tested as a phone, then email, then link; anything
        left over is fed to libpostal's address parser.
        """
        if not self.raw_original:
            return
        contact_lines = []
        for line in self.raw_original.split("\n"):
            clean_line = line.strip()
            if not clean_line:
                continue
            if self.parse_phone(clean_line):
                continue
            if self.parse_email(clean_line):
                continue
            if self.parse_link(clean_line):
                continue
            contact_lines.append(clean_line)

        # Imported lazily: libpostal is heavy to load.
        from postal.parser import parse_address

        tokens: List[Tuple[str, str]] = parse_address("\n".join(contact_lines))
        result = defaultdict(list)
        for value, variable in tokens:
            result[variable].append(value)
        self.format_address(result)

    def format_address(self, address_dict: DefaultDict):
        """Format an address as a multiline string (road/number, postcode/city, country)."""
        valid = {}
        for key in ("road", "house_number", "postcode", "city", "country"):
            flat_value = " ".join(address_dict.pop(key, [])).strip()
            valid[key] = flat_value
        templated_address = dedent(
            f"""
            {valid["road"]} {valid["house_number"]}
            {valid["postcode"]} {valid["city"]}
            {valid["country"]}
            """
        )
        # Remove empty lines
        valid_lines = []
        for line in templated_address.split("\n"):
            clean_line = line.strip(" ,\n").title()
            if not clean_line:
                continue
            valid_lines.append(clean_line)
        self.address = LiteralScalarString("\n".join(valid_lines))
        # libpostal's "house" tokens become the contact name (first line) and notes (rest).
        notes = address_dict.pop("house", [])
        if notes:
            first_line = notes[0].title()
            # Long names are stored as YAML literal blocks to keep the file readable.
            self.name = LiteralScalarString(first_line) if len(first_line) > 80 else first_line
            self.notes = LiteralScalarString("\n".join(notes[1:]).title())
        # Keep any unconsumed address tokens so no information is lost.
        self.existing_data.update(address_dict)

    def as_dict(self):
        """Return the contact as a dict, omitting empty fields and sorting the sets."""
        rv = {}
        for key in ("name", "address", "notes", "phones", "emails", "links", "raw_original"):
            value = getattr(self, key)
            if isinstance(value, set):
                value = sorted(value)
            if value:
                rv[key] = value
        rv.update(self.existing_data)
        return rv

    def parse_phone(self, clean_line: str) -> bool:
        """Parse a phone number; returns True when the line is a valid (DE-default) number."""
        try:
            phone_obj = phonenumbers.parse(clean_line, "DE")
        except NumberParseException:
            return False
        if not phonenumbers.is_valid_number(phone_obj):
            return False
        formatted_phone = phonenumbers.format_number(phone_obj, phonenumbers.PhoneNumberFormat.INTERNATIONAL)
        self.phones.add(formatted_phone)
        return True

    def parse_link(self, clean_line: str) -> bool:
        """Parse a URL link (starts with "http" or contains "www")."""
        if clean_line.startswith(URL_PREFIX) or "www" in clean_line:
            self.links.add(clean_line)
            return True
        return False

    def parse_email(self, clean_line: str) -> bool:
        """Parse an email; every "@"-containing word on the line is collected."""
        if "@" in clean_line:
            found = False
            for possible_email in clean_line.split(" "):
                if "@" in possible_email:
                    self.emails.add(possible_email)
                    found = True
            return found
        return False
@contacts.command()
@click.option("--strict", "-s", is_flag=True, default=False, help="Strict mode, don't ignore case nor whitespace")
@click.argument("files", nargs=-1, type=click.Path(exists=True, file_okay=True, dir_okay=False), required=True)
def parse(strict: bool, files):
    """Parse a file with contacts and structure data in a YAML file.

    Text files are split on "---" into raw contacts; existing .yaml files are
    re-parsed contact by contact. Output goes to ``<file>.yaml``; when that is
    the input file itself, a diff is shown and confirmation is requested before
    replacing it.
    """
    yaml = YAML()
    yaml.indent(mapping=2, sequence=4, offset=2)
    for arg_file in files:
        output_dict: JsonDict = {}
        original_file = Path(arg_file)
        structured_contacts = []
        yaml_content: JsonDict = {}
        if original_file.suffix == ".yaml":
            try:
                yaml_content = yaml.load(original_file)
            except YAMLError:
                click.secho(f"Not a valid YAML file: {original_file}. it will be read as a .txt file", fg="red")
        if yaml_content:
            if KEY_CONTACTS not in yaml_content:
                click.secho(f"Not a valid contacts file: {original_file}. Missing 'contacts' root key.", fg="red")
                continue
            click.echo(f"Reading contacts from YAML file {original_file}")
            for contact_dict in yaml_content[KEY_CONTACTS] or []:
                contact = Contact(None, contact_dict)
                structured_contacts.append(contact.as_dict())
            # Preserve existing extra YAML content
            output_dict.update(yaml_content)
        else:
            click.echo(f"Reading contacts from text file {original_file}")
            for raw_contact_string in original_file.read_text().split(CONTACT_SEPARATOR):
                contact = Contact(raw_contact_string)
                structured_contacts.append(contact.as_dict())
        output_file = original_file.with_suffix(".yaml")
        output_dict[KEY_CONTACTS] = structured_contacts
        if output_file == original_file:
            if not strict:
                # Relaxed diff: ignore case/whitespace noise when comparing.
                flags = (
                    "ignore-case",
                    "ignore-tab-expansion",
                    "ignore-space-change",
                    "ignore-all-space",
                    "ignore-blank-lines",
                )
                ignore_flags = " --" + " --".join(flags)
            else:
                ignore_flags = ""
            # Dump to a temp file and diff against the original before overwriting.
            with NamedTemporaryFile() as fp:
                yaml.dump(output_dict, fp)
                diff = shell(f"colordiff --unified{ignore_flags} {original_file} {fp.name}", quiet=True)
            if diff.returncode == 0:
                click.secho("Skipping file, content has not changed", fg="green")
                continue
            if not click.confirm("Replace this file?", default=False):
                continue
            verb = "Replacing"
        else:
            verb = "Creating"
        click.secho(f"{verb} contacts on file {output_file}", fg="yellow")
        yaml.dump(output_dict, output_file)
|
wagnerandreoli/clitools | src/clib/types.py | """Types."""
from pathlib import Path
from typing import Any, Dict, Union
# A JSON-compatible mapping: string keys to arbitrary values.
JsonDict = Dict[str, Any]
# Anything usable as a filesystem path: a pathlib.Path or a plain string.
PathOrStr = Union[Path, str]
|
adellewigley/Spooky-Southampton | ghostmap.py | from flask import Flask
from flask import render_template
app = Flask(__name__)
@app.route("/")
def index():
return render_template('index.html')
@app.route("/404")
def page2():
return render_template('page2.html')
if __name__ == "__main__":
app.run()
|
WilliamKMLai/ansible-scGalaxy | galaxy/files/galaxy/tools/scATAC_debarcode.py | <reponame>WilliamKMLai/ansible-scGalaxy<filename>galaxy/files/galaxy/tools/scATAC_debarcode.py
#!/usr/bin/env python
#https://raw.githubusercontent.com/epigen-UCSD/snATAC_pipeline/master/bin/scATAC_debarcode
import gzip
import bz2
import sys
import collections
import os
import operator
import os.path
import optparse
def main():
    """Demultiplex scATAC-seq FASTQ: prefix each R1 read name with its cell barcode.

    Reads three gzip-compressed FASTQ files in lockstep (I1, I2, R1). The
    32 bp combined barcode is built from the two index reads
    (r7 = I1[:8], i7 = I1[-8:], i5 = I2[:8], r5 = I2[-8:]) and written,
    followed by ':', in front of the original R1 read name on stdout.
    Records whose barcode contains 12 or more uncalled bases are dropped.
    """
    parser = optparse.OptionParser(usage='%prog [-h] [-a I1.fastq] [-b I2.fastq] [-c R1.fastq]',
                                   description='Decomplex single-cell ATAC-seq barcode allowing mismatch.')
    parser.add_option('-a', dest="I1", help='I1.fastq.gz')
    parser.add_option('-b', dest='I2', help='I2.fastq.gz')
    parser.add_option('-c', dest='R1', help='R1.fastq.gz')
    parser.add_option('--version', dest="version", default=1.0, type="float")
    # Crude arity check: all three input files must be supplied.
    if len(sys.argv) < 6:
        parser.print_help()
        exit('error: too few arguments')
    args = parser.parse_args()[0]
    fi1_name = args.I1
    fi2_name = args.I2
    fr1_name = args.R1
    if not os.path.isfile(fi1_name): exit("error: \'%s\' not exist" % fi1_name)
    if not os.path.isfile(fi2_name): exit("error: \'%s\' not exist" % fi2_name)
    if not os.path.isfile(fr1_name): exit("error: \'%s\' not exist" % fr1_name)
    # Galaxy guarantees gzip-compressed input, so open in text mode directly.
    fi1 = gzip.open(fi1_name, 'rt')
    fi2 = gzip.open(fi2_name, 'rt')
    fr1 = gzip.open(fr1_name, 'rt')
    while True:
        # Consume one 4-line FASTQ record per stream; '[1:]' strips the leading '@'.
        cur_i1_name = fi1.readline().strip()[1:]
        cur_i1_read = fi1.readline().strip()
        fi1.readline()  # '+' separator (unused, but must be consumed)
        fi1.readline()  # I1 quality string (unused)
        cur_i2_name = fi2.readline().strip()[1:]
        cur_i2_read = fi2.readline().strip()
        fi2.readline()  # '+' separator (unused)
        fi2.readline()  # I2 quality string (unused)
        cur_r1_name = fr1.readline().strip()[1:]
        cur_r1_read = fr1.readline().strip()
        fr1.readline()  # '+' separator (unused)
        cur_r1_qual = fr1.readline().strip()
        # Stop at end-of-file on any of the three streams.
        if cur_i1_name == "" or cur_i2_name == "" or cur_r1_name == "":
            break
        # The three streams must stay in sync record-by-record.
        if not (cur_i1_name.split()[0] == cur_i2_name.split()[0] == cur_r1_name.split()[0]):
            sys.exit("error(main): read name not matched")
        # Combined barcode: r7/i7 are the two ends of I1, i5/r5 the two ends of I2.
        cur_r7 = cur_i1_read[:8]
        cur_i7 = cur_i1_read[-8:]
        cur_i5 = cur_i2_read[:8]
        cur_r5 = cur_i2_read[-8:]
        cur_barcode = cur_r7 + cur_i7 + cur_i5 + cur_r5
        # Skip reads whose barcode is mostly uncalled (>= 12 of 32 bases are N).
        if cur_barcode.count('N') >= 12:
            continue
        try:
            print('@' + cur_barcode + ':' + cur_r1_name)
            print(cur_r1_read)
            print('+')
            print(cur_r1_qual)
        except IOError:
            # Downstream pipe was closed (e.g. piped into `head`). Close our
            # streams quietly and stop. BUG FIX: the original kept looping,
            # so the next print on the now-closed stdout raised an unhandled
            # "I/O operation on closed file" error.
            try:
                sys.stdout.close()
            except IOError:
                pass
            try:
                sys.stderr.close()
            except IOError:
                pass
            break
    fi1.close()
    fi2.close()
    fr1.close()


if __name__ == '__main__':
    main()
|
WilliamKMLai/ansible-scGalaxy | galaxy/files/galaxy/tools/scATAC_barcode_err_correct.py | #!/usr/bin/env python
import sys
import collections
import os
import optparse
def min_dist(s, sl):
    """Return the closest and second-closest strings to *s* in *sl* by Hamming distance.

    Returns a 4-tuple ``(best, best_dist, second_best, second_dist)``.
    Ties are broken by position: the earliest entry in *sl* wins.
    All strings must share the same length, and *sl* needs at least two
    entries so a runner-up exists.
    """
    ss = sl[:]  # work on a copy; callers reuse their whitelist tables
    if len(s) == 0: sys.exit("error(min_dist): inquiry string has length 0")
    if len(ss) == 0: sys.exit("error(min_dist): ref string lib has 0 elements")
    # BUG FIX: with exactly one reference string the original crashed with
    # "min() arg is an empty sequence" after removing the best hit; fail
    # with an explicit message instead.
    if len(ss) < 2: sys.exit("error(min_dist): ref string lib needs at least 2 elements")
    if any(len(sj) != len(s) for sj in ss): sys.exit("error(min_dist): different string length")
    # Hamming distance: number of mismatching positions.
    dists = [sum(a != b for a, b in zip(s, sj)) for sj in ss]
    min_value = min(dists)
    min_index = dists.index(min_value)
    min_s = ss[min_index]
    # Remove the winner, then find the runner-up among the rest.
    del dists[min_index]
    del ss[min_index]
    min_value2 = min(dists)
    min_s2 = ss[dists.index(min_value2)]
    return (min_s, min_value, min_s2, min_value2)
def main():
    """Error-correct the 32 bp cell barcode prefixed to SAM read names.

    SAM lines arrive on stdin; the read-name prefix (before the first ':')
    holds four 8 bp segments (r7, i7, i5, r5). Each segment is looked up in
    its whitelist; a segment within -m mismatches of an unambiguous best
    hit is replaced by that hit, otherwise the read is discarded. Corrected
    lines are echoed to stdout, and the set of distinct corrected barcodes
    is written to the -o file.
    """
    parser = optparse.OptionParser(usage='%prog [-h] [-m mismatches allowed] [-a r7_ATAC] [-b i7_ATAC] [-c i5_ATAC] [-d r5_ATAC] [-o Output Barcode ID]',
                                   description='Barcode error correction single-cell ATAC-seq allowing mismatch.')
    parser.add_option('-m',
                      dest="mismatch",
                      help='Mismatches allowed',
                      type="int",
                      )
    parser.add_option('-a',
                      dest="r7",
                      help='r7 Barcodes'
                      )
    parser.add_option('-b',
                      dest="i7",
                      help='i7 Barcodes'
                      )
    parser.add_option('-c',
                      dest="i5",
                      help='i5 Barcodes'
                      )
    parser.add_option('-d',
                      dest="r5",
                      help='r5 Barcodes'
                      )
    parser.add_option('-o',
                      dest="output",
                      help='Uniq barcode list'
                      )
    # Crude arity check: all six options with values must be present.
    if len(sys.argv) < 12:
        parser.print_help()
        exit('error: too few arguments')
    args = parser.parse_args()[0]
    max_mm = args.mismatch
    r7_ATAC = args.r7
    i7_ATAC = args.i7
    i5_ATAC = args.i5
    r5_ATAC = args.r5
    output = args.output
    # Whitelist tables: one 8 bp barcode per line.
    table_r7 = [x.strip() for x in open(r7_ATAC).readlines()]
    table_i7 = [x.strip() for x in open(i7_ATAC).readlines()]
    table_i5 = [x.strip() for x in open(i5_ATAC).readlines()]
    table_r5 = [x.strip() for x in open(r5_ATAC).readlines()]
    if len(table_r7) == 0: sys.exit("error(main): r7 table has 0 elements")
    if len(table_i7) == 0: sys.exit("error(main): i7 table has 0 elements")
    if len(table_r5) == 0: sys.exit("error(main): r5 table has 0 elements")
    if len(table_i5) == 0: sys.exit("error(main): i5 table has 0 elements")
    uniqBarcode = set()
    for line in sys.stdin:
        # SAM header lines pass through untouched.
        if line[0] == '@':
            try:
                print(line,end='')
            except IOError:
                # Downstream pipe closed: shut our streams down quietly (best effort).
                try:
                    sys.stdout.close()
                except IOError:
                    pass
                try:
                    sys.stderr.close()
                except IOError:
                    pass
            continue
        # Barcode is the read-name prefix up to the first ':'.
        barcode = line.split()[0].split(':')[0]
        # Split the 32 bp barcode into its four 8 bp segments.
        cur_r7 = barcode[:8]
        cur_i7 = barcode[8:16]
        cur_i5 = barcode[16:24]
        cur_r5 = barcode[24:]
        # For each segment: an exact whitelist hit passes through; otherwise take
        # the closest whitelist entry, but only when it is within max_mm
        # mismatches AND at least 2 mismatches better than the runner-up (the
        # correction must be unambiguous). Otherwise the whole read is dropped.
        if not cur_r7 in table_r7: # if not perfectly matched
            (opt_match, num_mm, opt_match2, num_mm2) = min_dist(cur_r7, table_r7)
            if num_mm <= max_mm and abs(num_mm2 - num_mm) > 1:
                cur_r7 = opt_match
            else:
                continue
        if not cur_r5 in table_r5:
            (opt_match, num_mm, opt_match2, num_mm2) = min_dist(cur_r5, table_r5)
            if num_mm <= max_mm and abs(num_mm2 - num_mm) > 1:
                cur_r5 = opt_match
            else:
                continue
        if cur_i5 not in table_i5:
            (opt_match, num_mm, opt_match2, num_mm2) = min_dist(cur_i5, table_i5)
            if num_mm <= max_mm and abs(num_mm2 - num_mm) > 1:
                cur_i5 = opt_match
            else:
                continue
        if cur_i7 not in table_i7:
            (opt_match, num_mm, opt_match2, num_mm2) = min_dist(cur_i7, table_i7)
            if num_mm <= max_mm and abs(num_mm2 - num_mm) > 1:
                cur_i7 = opt_match
            else:
                continue
        # Reassemble the corrected barcode and splice it back into the line.
        barcode = cur_r7 + cur_i7 + cur_i5 + cur_r5
        uniqBarcode.add(barcode)
        try:
            print(barcode + line[len(barcode):],end='')
        except IOError:
            # Downstream pipe closed: shut our streams down quietly (best effort).
            try:
                sys.stdout.close()
            except IOError:
                pass
            try:
                sys.stderr.close()
            except IOError:
                pass
    # Persist the distinct corrected barcodes, one per line.
    with open(output, 'w') as fout:
        for elem in uniqBarcode:
            fout.write(elem + "\n")


if __name__ == '__main__':
    main()
|
WilliamKMLai/ansible-scGalaxy | galaxy/files/galaxy/tools/scATAC_summary.py | #!/usr/bin/env python
import sys
import optparse
import gzip
import pysam
import statistics
def main():
    """Print summary statistics for an scATAC-seq processing pipeline run.

    Pulls read counts from a FASTQC report, several BAM stages (via pysam),
    and two barcode lists, and prints a human-readable summary to stdout.
    """
    parser = optparse.OptionParser(usage='%prog [-h] [-a Raw Reads] [-b Unique BAM] [-c Barcode-corrected BAM] [-d Unique Barcodes][-e PCR duplicated BAM] [-f Final BAM] [-g Minimum read threshold] [-i Final Barcode count]',
                                   description='Generate scATAC-seq summary statistics.')
    parser.add_option('-a', dest="fastqc_raw", help='FASTQC raw file')
    parser.add_option('-b', dest="uniq_bam", help='Uniquely-mapped BAM file')
    parser.add_option('-c', dest="barcode_bam", help='Barcode-corrected BAM file')
    parser.add_option('-d', dest="uniq_bar", help='List of unique barcodes')
    parser.add_option('-e', dest="pcr_bam", help='PCR de-duplicated BAM file')
    parser.add_option('-f', dest="final_bam", help='Final processed BAM file')
    parser.add_option('-g', dest="min_reads", help='Minimum read threshold')
    parser.add_option('-i', dest="final_barcode", help='Final barcode utilization statistics file')
    # Crude arity check: all eight options with values must be present.
    if len(sys.argv) < 16:
        parser.print_help()
        exit('error: too few arguments')
    args = parser.parse_args()[0]
    raw_reads = args.fastqc_raw
    uniq_bam = args.uniq_bam
    barcode_bam = args.barcode_bam
    uniq_bar = args.uniq_bar
    pcr_bam = args.pcr_bam
    final_bam = args.final_bam
    min_reads = int(args.min_reads)
    final_barcode = args.final_barcode
    print("================================ Summary ==================================")
    # Raw read count comes from the 'Total Sequences' line of the FASTQC report.
    readCount_RAW = 0
    with open(raw_reads) as fb:
        for line in fb:
            if line.startswith('Total Sequences'):
                readCount_RAW = int(line.split()[2])
    print("Total number of raw reads: " + str(readCount_RAW))
    readCount = pysam.AlignmentFile(uniq_bam, "rb").mapped
    print("Uniquely mapped reads (MAPQ>=30): " + str(readCount))
    readCount_BAR = pysam.AlignmentFile(barcode_bam, "rb").mapped
    print("Reads left after barcode correction: " + str(readCount_BAR))
    # Count barcode lines. BUG FIX: the original referenced the loop variable
    # after the loop and raised NameError when the barcode file was empty.
    barcount = 0
    with open(uniq_bar) as fb:
        for barcount, _line in enumerate(fb, start=1):
            pass
    print("Unique barcode combinations detected: " + str(barcount))
    readCount_PCR = pysam.AlignmentFile(pcr_bam, "rb").mapped
    # BUG FIX: guard the denominator — zero barcoded reads now reports a 0.0%
    # duplication rate instead of raising ZeroDivisionError.
    if readCount_BAR:
        dupRate = round(((readCount_BAR - readCount_PCR) / readCount_BAR) * 100, 2)
    else:
        dupRate = 0.0
    print("Estimated PCR duplication rate: " + str(dupRate))
    readCount = pysam.AlignmentFile(final_bam, "rb").mapped
    print("Total number of reads left: " + str(readCount))
    # Per-cell read counts above the threshold (file format: "<barcode> <count>").
    readStats = []
    with open(final_barcode) as fb:
        for line in fb:
            if int(line.split()[1]) > min_reads:
                readStats.append(int(line.split()[1]))
    print("Number of cells with more than " + str(min_reads) + " reads: " + str(len(readStats)))
    # BUG FIX: min()/max()/median() all raise on an empty list, so only report
    # the per-cell statistics when at least one cell passed the threshold.
    if readStats:
        print("Min number of reads for selected cells: " + str(min(readStats)))
        print("Median number of reads for selected cells: " + str(statistics.median(readStats)))
        print("Max number of reads for selected cells: " + str(max(readStats)))


if __name__ == '__main__':
    main()
|
WilliamKMLai/ansible-scGalaxy | galaxy/files/galaxy/tools/scATAC_parseDedup.py | #!/usr/bin/env python
import os
import sys
import collections
import optparse
import pysam
def main():
    """Split a barcode-sorted SAM stream (stdin) into per-cell BAM files.

    Consecutive reads with the same barcode prefix (read name up to the
    first ':') form one cell. Cells with fewer than -m reads are dropped;
    each kept cell is written to output/<barcode>.sam, then fixmated,
    sorted, and PCR-deduplicated with pysam into output/<barcode>.bam.
    """
    parser = optparse.OptionParser(usage='%prog [-h] [-m Min reads per cell]',
                                   description='Parse individual cells from scATAC-seq data.')
    parser.add_option('-m',
                      dest="min",
                      help='Minimum number of reads',
                      type="int"
                      )
    if len(sys.argv) < 2:
        parser.print_help()
        exit('error: too few arguments')
    args = parser.parse_args()[0]
    min_reads = args.min
    os.mkdir('output')

    def _flush_group(barcode, reads, headers):
        # Write one cell's SAM file when it has enough reads.
        if barcode != "" and len(reads) >= min_reads:
            with open(os.path.join('output', barcode + '.sam'), 'w') as fout:
                fout.writelines(headers)
                fout.writelines(reads)

    prev_barcode = ""
    instances = []
    heads = []
    for line in sys.stdin:
        # NOTE(review): only @SQ/@PG header lines are retained; @HD/@RG/@CO lines
        # would fall through to the barcode branch — confirm upstream never emits them.
        if line.split()[0] == '@SQ' or line.split()[0] == '@PG':
            heads.append(line)
            continue
        cur_barcode = line.strip().split()[0].split(':')[0]
        if cur_barcode != prev_barcode:
            _flush_group(prev_barcode, instances, heads)
            prev_barcode = cur_barcode
            instances = []
        instances.append(line)
    # BUG FIX: the original never wrote out the final barcode group after the
    # stream ended, silently dropping the last cell.
    _flush_group(prev_barcode, instances, heads)

    os.chdir('output')
    for file in os.listdir():
        if file.endswith('.sam'):
            barID = file.split(".")[0]
            # samtools-equivalent pipeline: fixmate -> sort -> markdup -r.
            pysam.fixmate("-m", file, barID + "_fixmate.bam")
            pysam.sort("-o", barID + "_sort.bam", barID + "_fixmate.bam")
            pysam.markdup("-r", barID + "_sort.bam", barID + ".bam")
            # Remove intermediates, keeping only the deduplicated BAM.
            os.remove(file)
            os.remove(barID + "_fixmate.bam")
            os.remove(barID + "_sort.bam")


if __name__ == '__main__':
    main()
|
WilliamKMLai/ansible-scGalaxy | galaxy/files/galaxy/tools/scATAC_filterCellnumber.py | #!/usr/bin/env python
import sys
import optparse
def main():
    """Filter a SAM stream on stdin, keeping only reads from well-covered cells.

    The -f file lists "<barcode> <read count>" pairs. Reads whose barcode
    (the read-name prefix before the first ':') has at least -m associated
    reads are echoed to stdout, along with all header lines.
    """
    parser = optparse.OptionParser(usage='%prog [-h] [-m Minimum Reads] [-f Barcode Frequency]',
                                   description='Filter BAM file, retaining cells with minimum number of associated reads.')
    parser.add_option('-m',
                      dest="min_reads",
                      help='Minimum reads to retain cell',
                      type="int",
                      )
    parser.add_option('-f',
                      dest="freq",
                      help='Barcode frequency'
                      )
    if len(sys.argv) < 4:
        parser.print_help()
        exit('error: too few arguments')
    args = parser.parse_args()[0]
    min_cov = args.min_reads
    fname = args.freq

    def _emit(sam_line):
        # Best-effort write: a closed downstream pipe is not a fatal error.
        try:
            print(sam_line, end='')
        except IOError:
            try:
                sys.stdout.close()
            except IOError:
                pass
            try:
                sys.stderr.close()
            except IOError:
                pass

    # Load the whitelist of sufficiently-covered barcodes.
    # PERF FIX: a set gives O(1) membership checks; the original list made
    # every per-read lookup O(number of cells).
    kept_barcodes = set()
    with open(fname) as fin:
        for line in fin:
            [barcode, num] = line.split()
            if int(num) >= min_cov:
                kept_barcodes.add(barcode)

    # Stream the SAM input: headers pass through, reads are filtered by barcode.
    for line in sys.stdin:
        if line[0] == '@':
            _emit(line)
            continue
        if line.split()[0].split(":")[0] in kept_barcodes:
            _emit(line)


if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.