repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
Stimela | Stimela-master/stimela/cargo/cab/tigger_tag/src/run.py | import os
import sys
import glob
import subprocess
import yaml
import shutil
import shlex
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
params = {}
for param in cab['parameters']:
name = param['name']
value = param['value']
if value in [False, None]:
continue
if value is True:
value = ""
# Positional arguments
if name == 'input-skymodel':
inlsm = value
continue
elif name == 'tag':
tag = value
continue
params[name] = value
# TODO: Need fix tigger-tag, these kludges are annoying
if params.pop('transfer-tags', False) in [True, ""]:
if params.get('tolerance', None) is None:
raise RuntimeError(
"Parameter 'tolerance' is required when 'transfer-tags' is enables")
args = [
'{0}transfer-tags {1}:{2}'.format(cab['prefix'], inlsm, params.pop('tolerance'))]
inlsm = params.get('output')
else:
args = []
args += ['{0}{1} {2}'.format(cab['prefix'], name, value)
for name, value in params.iteritems()]
_runc = " ".join([cab.binary, inlsm, tag] + args)
try:
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
| 1,657 | 23.746269 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_fluxscale/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
import os
print(f"Running CASA task '{config.binary}'")
save_result = parameters_dict.pop("save_result", None)
overwrite = parameters_dict.pop("overwrite", False)
fluxtable = parameters_dict['fluxtable']
if overwrite:
os.system(f"rm -fr {fluxtable}")
task = crasa.CasaTask(config.binary, save_result=save_result, **parameters_dict)
task.run()
| 461 | 27.875 | 80 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_oldsplit/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
print(f"Running CASA task '{config.binary}'")
task = crasa.CasaTask(config.binary, **parameters_dict)
task.run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_listobs/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
print(f"Running CASA task '{config.binary}'")
task = crasa.CasaTask(config.binary, **parameters_dict)
task.run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa47_polcal/src/run.py | import os
import sys
import logging
import Crasa.Crasa as crasa
from casacore.tables import table
import numpy
import glob
import yaml
import shutil
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = {}
for param in cab['parameters']:
name = param['name']
value = param['value']
if value is None:
continue
args[name] = value
task = crasa.CasaTask(cab["binary"], **args)
try:
task.run()
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
# Leave other types
gtab = args["caltable"]
if not os.path.exists(gtab):
raise RuntimeError("The gaintable was not created. Please refer to CASA {0:s} logfile for further details".format(cab["binary"]))
tab = table(gtab)
field_ids = numpy.unique(tab.getcol("FIELD_ID"))
tab.close()
tab = table(gtab+"::FIELD")
field_names = tab.getcol("NAME")
tab.close()
field_in = args["field"].split(",")
try:
ids = map(int, field_in)
except ValueError:
ids = map(lambda a: field_names.index(a), field_in)
if not set(ids).intersection(field_ids):
raise RuntimeError("None of the fields has solutions after the calibration. Please refer to CASA the {} logfile for further details".format(cab["binary"]))
| 1,666 | 24.257576 | 159 | py |
Stimela | Stimela-master/stimela/cargo/cab/rmsynth3d/src/run.py | # -*- coding: future_fstrings -*-
import sys
from scabha import config, parse_parameters, prun
# If a list of fields is given, insert them as repeated arguments.
# Other arguments not allowed to be lists.
args = [config.binary] + parse_parameters(repeat=True,
positional=["fitsq", "fitsu", "freqs"], mandatory=["fitsq", "fitsu", "freqs"])
# run the command
if prun(args) != 0:
sys.exit(1)
| 438 | 30.357143 | 120 | py |
Stimela | Stimela-master/stimela/cargo/cab/tricolour/src/run.py | # -*- coding: future_fstrings -*-
import sys
from scabha import config, parse_parameters, prun
# If a list of fields is given, insert them as repeated arguments.
# Other arguments not allowed to be lists.
args = [config.binary] + parse_parameters(repeat=None,
positional=["ms"], mandatory=["ms"],
repeat_dict={'field-names':','})
# run the command
if prun(args) != 0:
sys.exit(1)
| 471 | 30.466667 | 78 | py |
Stimela | Stimela-master/stimela/cargo/cab/sharpener/src/run.py | import os
import sys
import yaml
import sharpener
import glob
import shlex
import subprocess
import shutil
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
MSDIR = os.environ["MSDIR"]
OUTPUT = os.environ["OUTPUT"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = []
msname = None
pkg_path = os.path.dirname(os.path.realpath(sharpener.__file__))
sharpener_file = '{:s}/sharpener_default.yml'.format(pkg_path)
with open(sharpener_file) as f:
list_doc = yaml.load(f)
for param in cab['parameters']:
name = param['name']
value = param['value']
if value is None:
continue
for key, val in list_doc.items():
if type(val) == dict:
for k1, v1 in val.items():
if 'enable' in name:
if key in name:
list_doc[key]['enable'] = value
elif k1 == name:
list_doc[key][k1] = value
else:
if key == name:
list_doc[key] = value
# Get the relative path from workdir
list_doc['general']['contname'] = os.path.relpath(
list_doc['general']['contname'], list_doc['general']['workdir'])
list_doc['general']['cubename'] = os.path.relpath(
list_doc['general']['cubename'], list_doc['general']['workdir'])
list_doc['source_catalog']['catalog_file'] = os.path.relpath(
list_doc['source_catalog']['catalog_file'], list_doc['general']['workdir'])
edited_file = 'sharpener_default.yml'
with open(edited_file, "w") as f:
yaml.dump(list_doc, f)
_runc = "run_sharpener -c %s" % edited_file
try:
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
# Leave other types
| 2,072 | 27.39726 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/tigger_restore/src/run.py | import os
import sys
import subprocess
import glob
import yaml
import shlex
import shutil
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = []
for param in cab['parameters']:
name = param['name']
value = param['value']
if value in [False, None]:
continue
if name in 'restoring-beam scale'.split() and hasattr(value, '__iter__'):
value = ','.join(value)
if value is True:
value = ""
if name == 'f':
args.append('-f')
continue
# Positional arguments
if name == 'input-image':
inim = value
continue
elif name == 'input-skymodel':
inlsm = value
continue
elif name == 'output-image':
outim = value
continue
args.append('{0}{1} {2}'.format(cab['prefix'], name, value))
_runc = " ".join([cab['binary']] + args + [inim, inlsm, outim])
try:
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
| 1,455 | 21.4 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/sunblocker/src/run.py | import sys
import os
from sunblocker.sunblocker import Sunblocker
import inspect
import yaml
import subprocess
import glob
import shlex
import shutil
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = {}
for param in cab['parameters']:
name = param['name']
value = param['value']
if name == "command":
function = value
continue
if value is None:
continue
args[name] = value
args['showdir'] = OUTPUT
run_func = getattr(Sunblocker(), function, None)
if run_func is None:
raise RuntimeError(
"Function '{}' is not part of Sunblocker()".format(function))
func_args = inspect.getargspec(run_func)[0]
for arg in args.keys():
if arg not in func_args:
args.pop(arg, None)
try:
run_func(**args)
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
| 1,284 | 22.363636 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/ddfacet/src/run.py | import sys
import os
import astropy.io.fits as pyfits
import glob
import subprocess
import shutil
import shlex
import yaml
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = []
parset = None
for param in cab['parameters']:
name = param['name']
value = param['value']
if name == 'Parset' and value is not None:
parset = value
continue
if name == 'Parset' and value is None:
continue
if name == 'Noise-Image' and value is None:
continue
if isinstance(value, list):
arg = "{0}{1} {2}".format(cab['prefix'], name, ",".join(value))
else:
arg = '{0}{1} {2}'.format(cab['prefix'], name, value)
args.append(arg)
removed = False
for item1 in args:
if 'Noise-Image' in item1:
noise_image = item1.split('{0}Noise-Image '.format(cab['prefix']))[-1]
args.remove('{0}Noise-Image {1}'.format(cab['prefix'], noise_image))
noise_hdu = pyfits.open(noise_image)
noise_data = noise_hdu[0].data
noise_std = noise_data.std()
noise_hdu.close()
for item2 in args:
if 'Noise-Sigma' in item2:
noise_sigma = item2.split(
'{0}Noise-Sigma '.format(cab['prefix']))[-1]
args.remove(
'{0}Noise-Sigma {1}'.format(cab['prefix'], noise_sigma))
removed = True
threshold = float(noise_sigma)*noise_std
for item3 in args:
if '{0}Deconv-FluxThreshold'.format(cab['prefix']) in item3:
args.remove(item3)
args.append(
'{0}Deconv-FluxThreshold {1}'.format(cab['prefix'], threshold))
if not removed:
args.remove('{0}Noise-Sigma 3.0'.format(cab['prefix']))
if parset is not None:
args.insert(0, parset)
_runc = " ".join([cab['binary']] + args)
try:
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
# Leave other types
| 2,478 | 28.164706 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/cleanmask/src/run.py | import os
import sys
import shlex
import shutil
import yaml
import glob
import subprocess
OUTPUT = os.environ["OUTPUT"]
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
params = cab["parameters"]
args = []
for param in params:
if param['value'] in [False, None]:
continue
elif param['value'] is True:
arg = "{0}{1}".format(cab["prefix"], param["name"])
else:
arg = "{0}{1} {2}".format(cab["prefix"], param["name"], param["value"])
args.append(arg)
_runc = " ".join([cab["binary"]] + args)
try:
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
| 1,101 | 22.956522 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/eidos/src/run.py | import os
import sys
import shutil
import subprocess
import shlex
import yaml
import glob
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
MSDIR = os.environ["MSDIR"]
OUTPUT = os.environ["OUTPUT"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = []
msname = None
for param in cab['parameters']:
name = param['name']
value = param['value']
if value is None:
continue
elif value is False:
continue
elif value is True:
value = ''
args += ['{0}{1} {2}'.format(cab['prefix'], name, value)]
_runc = " ".join([cab["binary"]] + args)
try:
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
# Leave other types
| 1,082 | 21.5625 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/curl/src/run.py | import os
import sys
import shutil
import shlex
import glob
import subprocess
import yaml
CONFIG = os.environ["CONFIG"]
OUTPUT = os.environ["OUTPUT"]
INPUT = os.environ["INPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = []
url = None
for param in cab['parameters']:
name = param['name']
value = param['value']
if value is None:
continue
elif value is False:
continue
elif value is True:
value = ''
args += ['{0}{1} {2}'.format(cab['prefix'], name, value)]
_runc = " ".join([cab["binary"]] + args)
try:
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
| 1,042 | 21.673913 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/ragavi/src/run.py | # -*- coding: future_fstrings -*-
import sys
from scabha import config, parse_parameters, prun
args = [config.binary] + parse_parameters(repeat=" ")
# run the command
if prun(args) != 0:
sys.exit(1)
| 207 | 16.333333 | 53 | py |
Stimela | Stimela-master/stimela/cargo/cab/chgcentre/src/run.py | import os
import sys
import glob
import yaml
import shutil
import shlex
import subprocess
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
MSDIR = os.environ["MSDIR"]
OUTPUT = os.environ["OUTPUT"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = []
for param in cab['parameters']:
name = param['name']
value = param['value']
if value is None:
continue
elif value is False:
continue
if value is bool:
args += ['{0}{1}'.format(cab['prefix'], name)]
else:
args += ['{0}'.format(value)]
_runc = " ".join([cab["binary"]] + args)
try:
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
# Leave other types
| 1,088 | 21.6875 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/wsclean/src/run.py | import os
import sys
import re
import yaml
import subprocess
import shlex
import glob
import shutil
CONFIG = os.environ['CONFIG']
INPUT = os.environ['INPUT']
OUTPUT = os.environ['OUTPUT']
MSDIR = os.environ['MSDIR']
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
params = cab['parameters']
junk = cab["junk"]
args = []
for param in params:
name = param['name']
value = param['value']
if name == 'msname':
if isinstance(value, str):
mslist = value
else:
mslist = ' '.join(value)
continue
if value in [None, False]:
continue
elif name == "datacolumn":
name = "data-column"
elif name == 'scale':
if isinstance(value, (int, float)):
value = '{0}asec'.format(value)
elif name in 'size trim nwlayers-for-size beam-shape channel-range interval restore restore-list shift'.split():
if isinstance(value, int):
value = '{0} {0}'.format(value)
elif hasattr(value, '__iter__'):
if len(value) == 1:
value.append(value[0])
value = ' '.join(map(str, value))
elif name in 'spws multiscale-scales pol'.split():
if hasattr(value, '__iter__'):
value = ','.join(map(str, value))
if value is True:
arg = '{0}{1}'.format(cab['prefix'], name)
else:
arg = '{0}{1} {2}'.format(cab['prefix'], name, value)
args.append(arg)
_runc = " ".join([cab["binary"]] + args + [mslist])
# This line must never be deleted again. Empires will rise and fall, and certain students will graduate, but this line MUST LIVE ON. When Stimela,
# Caracal, MeerKAT and SKA are long forgotten ancient history, this line MUST REMAIN. Let it be the last remaining line of Python in history, but let it remain!
print("running WSClean: "+_runc)
sys.stdout.flush()
try:
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
# Leave other types
| 2,324 | 28.0625 | 160 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_plotants/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
print(f"Running CASA task '{config.binary}'")
task = crasa.CasaTask(config.binary, **parameters_dict)
task.run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_setjy/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
print(f"Running CASA task '{config.binary}'")
task = crasa.CasaTask(config.binary, **parameters_dict)
task.run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_virtualconcat/src/run.py | import os
import sys
import logging
import Crasa.Crasa as crasa
import yaml
import glob
import shutil
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = {}
for param in cab['parameters']:
name = param['name']
value = param['value']
if value is None:
continue
args[name] = value
task = crasa.CasaTask(cab["binary"], **args)
try:
task.run()
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
# Leave other types
| 927 | 21.634146 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_clearcal/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
print(f"Running CASA task '{config.binary}'")
task = crasa.CasaTask(config.binary, **parameters_dict)
task.run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_script/src/run.py | import os
import sys
import yaml
import shlex
import shutil
import subprocess
import glob
CONFIG = os.environ["CONFIG"]
OUTPUT = os.environ["OUTPUT"]
INPUT = os.environ["INPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = []
msname = None
custom_script = "print(\"Nothing has been done\")"
for param in cab['parameters']:
name = param['name']
value = param['value']
if name == "script":
custom_script = value
continue
if value is None:
continue
elif value is False:
continue
elif value is True:
value = ''
args += ['{0}{1} {2}'.format(cab['prefix'], name, value)]
with open("casajob.py.last", "w") as f:
f.write(custom_script)
_runc = " ".join([cab['binary']] + ["-c", "casajob.py.last"] + args)
try:
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
| 1,266 | 22.036364 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/pyddi/src/run.py | import os
import sys
import subprocess
import yaml
import glob
import shutil
import shlex
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
MSDIR = os.environ["MSDIR"]
OUTPUT = os.environ["OUTPUT"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = []
for param in cab['parameters']:
name = param['name']
value = param['value']
if value is None:
continue
elif value is False:
continue
args += ['{0}{1} {2}'.format(cab['prefix'], name, value)]
_runc = " ".join([cab["binary"]] + args)
try:
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
# Leave other types
| 1,025 | 21.8 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/halo-fdca/src/run.py | import os
import sys
import shlex
import shutil
import subprocess
import yaml
import glob
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = []
for param in cab['parameters']:
name = param['name']
value = param['value']
if value is None:
continue
elif name in ['object', 'd_file']:
args += [value]
else:
args += ['{0}{1} {2}'.format(cab['prefix'], name, value)]
_runc = " ".join([cab["binary"]]+ args)
try:
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
| 1,022 | 22.25 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/fitstool/src/run.py | import os
import sys
import shutil
import shlex
import subprocess
import shutil
import glob
import yaml
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = []
inimage = None
outimage = None
stack = False
unstack = False
axis = None
chunk = 1
file_pattern = False
for param in cab['parameters']:
value = param['value']
name = param['name']
if value in [None, False]:
continue
if name == 'image':
inimage = ' '.join(value)
continue
elif name == 'output':
outimage = value
continue
elif name == 'stack':
stack = True
continue
elif name == 'unstack':
unstack = True
continue
elif name == 'unstack-chunk':
chunk = value
continue
elif name == 'fits-axis':
axis = value
continue
elif name == 'file_pattern':
value = '"%s"' % value
file_pattern = True
elif value is True:
value = ""
args.append('{0}{1} {2}'.format(cab['prefix'], name, value))
if stack and axis:
args.append('{0}stack {1}:{2}'.format(cab['prefix'], outimage, axis))
outimage = None
elif unstack and axis:
args.append('{0}unstack {1}:{2}:{3}'.format(
cab['prefix'], outimage, axis, chunk))
outimage = None
else:
outimage = '{0}output {1}'.format(cab['prefix'], outimage)
if file_pattern:
inimage = ""
_runc = " ".join([cab['binary']] + args + [inimage, outimage or ""])
try:
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
| 2,017 | 21.674157 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_imregrid/src/run.py | import os
import sys
import logging
import Crasa.Crasa as crasa
import yaml
import glob
import shutil
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = {}
for param in cab['parameters']:
name = param['name']
value = param['value']
if value is None:
continue
args[name] = value
task = crasa.CasaTask(cab["binary"], **args)
try:
task.run()
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
# Leave other types
| 927 | 21.634146 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_fixvis/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
print(f"Running CASA task '{config.binary}'")
task = crasa.CasaTask(config.binary, **parameters_dict)
task.run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/flagstats/src/run.py | import sys
import os
from MSUtils import flag_stats
import inspect
import glob
import shutil
import yaml
import codecs
import json
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = {}
for param in cab['parameters']:
name = param['name']
value = param['value']
if name in ["fields", "antennas"] and value is not None:
try:
value = list(map(int, value))
except ValueError:
pass
args[name] = value
try:
if args['plot']:
args.pop("plot")
flag_stats.plot_statistics(**args)
else:
args.pop("plot")
args.pop("htmlfile")
flag_stats.save_statistics(**args)
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
| 1,176 | 23.020408 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/pycasacore/src/run.py | import os
import sys
import tempfile
import shlex
import shutil
import yaml
import glob
import subprocess
CONFIG = os.environ["CONFIG"]
OUTPUT = os.environ["OUTPUT"]
INPUT = os.environ["INPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = []
msname = None
custom_script = "print(\"Nothing has been done\")"
for param in cab['parameters']:
name = param['name']
value = param['value']
if name == "script":
custom_script = value
continue
with tempfile.NamedTemporaryFile(suffix=".py") as tfile:
tfile.write(custom_script)
tfile.flush()
_runc = " ".join([cab["binary"], tfile.name])
try:
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
| 1,159 | 22.673469 | 95 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_flagdata/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
print(f"Running CASA task '{config.binary}'")
task = crasa.CasaTask(config.binary, **parameters_dict)
task.run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_ft/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
print(f"Running CASA task '{config.binary}'")
task = crasa.CasaTask(config.binary, **parameters_dict)
task.run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_concat/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
print(f"Running CASA task '{config.binary}'")
task = crasa.CasaTask(config.binary, **parameters_dict)
task.run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_statwt/src/run.py | import os
import sys
import logging
import Crasa.Crasa as crasa
import yaml
import glob
import shutil
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = {}
for param in cab['parameters']:
name = param['name']
value = param['value']
if value is None:
continue
args[name] = value
task = crasa.CasaTask(cab["binary"], **args)
try:
task.run()
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
# Leave other types
| 927 | 21.634146 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_polcal/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
print(f"Running CASA task '{config.binary}'")
save_result = parameters_dict.pop("save_result", None)
task = crasa.CasaTask(config.binary, save_result=save_result, **parameters_dict)
task.run()
| 307 | 27 | 80 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_polcal/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
from pyrap.tables import table
import os
import numpy
print(f"Running CASA task '{config.binary}'")
save_result = parameters_dict.pop("save_result", None)
task = crasa.CasaTask(config.binary, save_result=save_result, **parameters_dict)
task.run()
gtab = parameters_dict["caltable"]
if not os.path.exists(gtab):
raise RuntimeError(f"The gaintable was not created. Please refer to CASA {config.binary} logfile for further details")
tab = table(gtab)
field_ids = numpy.unique(tab.getcol("FIELD_ID"))
tab.close()
tab = table(gtab+"::FIELD")
field_names = tab.getcol("NAME")
tab.close()
field_in = parameters_dict["field"].split(",")
try:
ids = list(map(int, field_in))
except ValueError:
ids = list(map(lambda a: field_names.index(a), field_in))
if not set(ids).issubset(field_ids):
raise RuntimeError(f"Some field(s) do not have solutions after the calibration. Please refer to CASA {config.binary} logfile for further details")
| 1,062 | 28.527778 | 150 | py |
Stimela | Stimela-master/stimela/cargo/cab/equolver/src/run.py | #config -*- coding: future_fstrings -*-
import sys
from scabha import config, parse_parameters, prun
args = [config.binary] + parse_parameters(repeat=" ")
for i in range(len(args)):
if args[i] == '--verb':
val = args.pop(i+1)
if val == 'False':
args.pop(i)
# run the command
if prun(args) != 0:
sys.exit(1)
| 355 | 19.941176 | 53 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_rmtables/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun

# Thin wrapper: forward all cab parameters straight to the named CASA task.
print(f"Running CASA task '{config.binary}'")
task = crasa.CasaTask(config.binary, **parameters_dict)
task.run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_gaincal/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
from pyrap.tables import table
import os
import numpy
print(f"Running CASA task '{config.binary}'")
save_result = parameters_dict.pop("save_result", None)
task = crasa.CasaTask(config.binary, save_result=save_result, **parameters_dict)
task.run()
gtab = parameters_dict["caltable"]
if not os.path.exists(gtab):
raise RuntimeError(f"The gaintable was not created. Please refer to CASA {config.binary} logfile for further details")
tab = table(gtab)
field_ids = numpy.unique(tab.getcol("FIELD_ID"))
tab.close()
tab = table(gtab+"::FIELD")
field_names = tab.getcol("NAME")
tab.close()
field_in = parameters_dict["field"].split(",")
try:
ids = list(map(int, field_in))
except ValueError:
ids = list(map(lambda a: field_names.index(a), field_in))
if not set(ids).issubset(field_ids):
raise RuntimeError(f"Some field(s) do not have solutions after the calibration. Please refer to CASA {config.binary} logfile for further details")
| 1,062 | 28.527778 | 150 | py |
Stimela | Stimela-master/stimela/cargo/cab/sofia2/src/run.py | import os
import sys
import Tigger
import numpy
import tempfile
import json
import codecs
import shlex
import shutil
import glob
import subprocess
from astLib.astWCS import WCS
from astropy.io.votable import parse_single_table
from Tigger.Models import SkyModel, ModelClasses
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
MSDIR = os.environ["MSDIR"]
OUTPUT = os.environ["OUTPUT"]
with codecs.open(CONFIG, "r", "utf8") as stdr:
cab = json.load(stdr)
junk = cab["junk"]
args = []
msname = None
sofia_file = 'sofia_parameters.par'
wstd = open(sofia_file, 'w')
wstd.write('output.directory={:s}\n'.format(OUTPUT))
port2tigger = False
image = None
for param in cab['parameters']:
name = param['name']
value = param['value']
dtype = param['dtype']
# Fix the sofia issue of needing lowercase booleans.
if dtype == 'bool':
if (value == True) and (not name == 'port2tigger'):
value = 'true'
elif (not name == 'port2tigger'):
value = 'false'
if value is None:
continue
if name == "port2tigger":
port2tigger = value
continue
if name == "output.writeCatXML":
writecat = value
if name == "parameter.enable":
parameterise = value
if name == "input.data":
image = value
wstd.write('{0}={1}\n'.format(name, value))
wstd.close()
_runc = " ".join([cab['binary'], sofia_file])
try:
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
# Leave other types
if not port2tigger:
sys.exit(0)
# convert to data file to Tigger LSM
# First make dummy tigger model
tfile = tempfile.NamedTemporaryFile(suffix='.txt')
tfile.flush()
if image and writecat and parameterise:
pass
else:
sys.exit(0)
prefix = os.path.splitext(image)[0]
tname_lsm = prefix + ".lsm.html"
with open(tfile.name, "w") as stdw:
stdw.write("#format:name ra_d dec_d i emaj_s emin_s pa_d\n")
model = Tigger.load(tfile.name)
tfile.close()
def tigger_src(src, idx):
    """Build a Tigger SkyModel.Source from one SoFiA catalogue row.

    *src* is a catalogue record exposing f_sum/f_max fluxes, ra/dec (deg)
    and ellipse parameters; *idx* numbers the source ("SRC<idx>").
    """
    src_name = "SRC%d" % idx
    # Only Stokes I (integrated flux) is available; Q/U/V are zeroed.
    stokes = ModelClasses.Polarization(src["f_sum"], 0, 0, 0)
    position = ModelClasses.Position(numpy.deg2rad(src["ra"]),
                                     numpy.deg2rad(src["dec"]))
    major, minor, angle = (numpy.deg2rad(src[k])
                           for k in ("ell_maj", "ell_min", "ell_pa"))
    print(src_name)
    # Zero major/minor axes mean a point source (no Gaussian shape).
    gauss = ModelClasses.Gaussian(major, minor, angle) if (major and minor) else None
    result = SkyModel.Source(src_name, position, stokes, shape=gauss)
    # Peak-flux attribute; point sources fall back to the integrated flux
    # so the attribute is never null.
    peak = src["f_max"] if gauss else src["f_sum"]
    result.setAttribute("I_peak", float(peak))
    return result
table = parse_single_table('{0}_cat.xml'.format(prefix))
data = table.array
for i, src in enumerate(data):
model.sources.append(tigger_src(src, i))
wcs = WCS(image)
centre = wcs.getCentreWCSCoords()
model.ra0, model.dec0 = map(numpy.deg2rad, centre)
model.save(tname_lsm)
# Rename using CORPAT
_runc = "tigger-convert %s --rename -f" % tname_lsm
try:
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
| 3,932 | 24.705882 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa47_setjy/src/run.py | import os
import sys
import logging
import Crasa.Crasa as crasa
import yaml
import glob
import shutil
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = {}
for param in cab['parameters']:
name = param['name']
value = param['value']
if value is None:
continue
args[name] = value
task = crasa.CasaTask(cab["binary"], **args)
try:
task.run()
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
# Leave other types
| 927 | 21.634146 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/montage/src/run.py | import os
import sys
import subprocess
import shlex
import shutil
import glob
import yaml
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
MSDIR = os.environ["MSDIR"]
OUTPUT = os.environ["OUTPUT"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = {}
for param in cab['parameters']:
name = param['name']
value = param['value']
if value is None:
continue
args[name] = value
if os.path.exists(OUTPUT+'/mask_mosaic') == False:
os.mkdir(OUTPUT+'/mask_mosaic')
outdir = OUTPUT+'/mask_mosaic'
try:
make_table = " ".join(['mImgtbl', args['input_dir'], outdir+'/mosaic_table.tbl'])
subprocess.check_call(shlex.split(make_table))
make_header = " ".join(['mMakeHdr', outdir +
'/mosaic_table.tbl', outdir+'/mosaic_header.hdr'])
subprocess.check_call(shlex.split(make_header))
project_mosaic = " ".join(['mProjExec', '-p', args['input_dir'], outdir +
'/mosaic_table.tbl', outdir+'/mosaic_header.hdr', outdir, outdir+'/stats.tbl'])
subprocess.check_call(shlex.split(project_mosaic))
make_mosaic_table = ['mImgtbl', outdir, outdir+'/mosaic_table2.tbl']
subprocess.check_call(shlex.split(make_mosaic_table))
_runc = " ".join(['mAdd', '-p', args['input_dir'], outdir +
'/mosaic_table2.tbl', outdir+'/mosaic_header.hdr', OUTPUT+'/mosaic.fits'])
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
# Leave other types
| 1,844 | 29.75 | 97 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa47_gaincal/src/run.py | import os
import sys
import logging
import Crasa.Crasa as crasa
from casacore.tables import table
import numpy
import glob
import yaml
import shutil
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = {}
for param in cab['parameters']:
name = param['name']
value = param['value']
if value is None:
continue
args[name] = value
task = crasa.CasaTask(cab["binary"], **args)
try:
task.run()
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
# Leave other types
gtab = args["caltable"]
if not os.path.exists(gtab):
raise RuntimeError("The gaintable was not created. Please refer to CASA {0:s} logfile for further details".format(cab["binary"]))
tab = table(gtab)
field_ids = numpy.unique(tab.getcol("FIELD_ID"))
tab.close()
tab = table(gtab+"::FIELD")
field_names = tab.getcol("NAME")
tab.close()
field_in = args["field"].split(",")
try:
ids = map(int, field_in)
except ValueError:
ids = map(lambda a: field_names.index(a), field_in)
if not set(ids).intersection(field_ids):
raise RuntimeError("None of the fields has solutions after the calibration. Please refer to CASA the {} logfile for further details".format(cab["binary"]))
| 1,666 | 24.257576 | 159 | py |
Stimela | Stimela-master/stimela/cargo/cab/cubical_pgs/src/run.py | # -*- coding: future_fstrings -*-
import sys
from scabha import config, parameters_dict, prun, parse_parameters
"""
config:
contains the sections before parameters in params.json
.binary has the name of the binary to be executed
parameters_dict: dict
contains all the provided parameters, even the positional ones
parse_parameters: function
Forms a list containing all the provided arguments for execution.
This is a helper function that formats stuff so that you don't have to
prun: function
Execute your binary with the provided arguments
"""
args = [config.binary] + parse_parameters(parameters_dict)
# run the command
if prun(args) != 0:
sys.exit(1)
| 688 | 26.56 | 74 | py |
Stimela | Stimela-master/stimela/cargo/cab/pybdsm/src/run.py | import os
import sys
import re
import bdsf as bdsm # bdsm it is and bdsm it shall remain
import numpy
import Tigger
import tempfile
import astropy.io.fits as pyfits
import yaml
import shlex
import shutil
import glob
import subprocess
from astLib.astWCS import WCS
from Tigger.Models import SkyModel, ModelClasses
CONFIG = os.environ['CONFIG']
INPUT = os.environ['INPUT']
OUTPUT = os.environ['OUTPUT']
MSDIR = os.environ['MSDIR']
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
write_catalog = ['bbs_patches', 'bbs_patches_mask',
'catalog_type', 'clobber', 'correct_proj', 'format',
'incl_chan', 'incl_empty', 'srcroot', 'port2tigger', 'outfile']
img_opts = {}
write_opts = {}
# Spectral fitting parameters
freq0 = None
spi_do = False
for param in cab['parameters']:
name = param['name']
value = param['value']
if value is None:
continue
if name in ['multi_chan_beam']:
multi_chan_beam = value
continue
if name in write_catalog:
write_opts[name] = value
elif name in ['freq0', 'frequency']:
freq0 = value
else:
img_opts[name] = value
if name == 'spectralindex_do':
spi_do = value
img_opts.pop('freq0', None)
if freq0 is None:
with pyfits.open(img_opts['filename']) as hdu:
hdr = hdu[0].header
for i in range(1, hdr['NAXIS']+1):
if hdr['CTYPE{0:d}'.format(i)].startswith('FREQ'):
freq0 = hdr['CRVAL{0:d}'.format(i)]
if spi_do and multi_chan_beam:
with pyfits.open(img_opts['filename']) as hdu:
hdr = hdu[0].header
beams = []
# Get a sequence of BMAJ with digit suffix from the image header keys
bmaj_ind = filter(lambda a: a.startswith('BMAJ')
and a[-1].isdigit(), hdr.keys())
for bmaj in bmaj_ind:
ind = bmaj.split('BMAJ')[-1]
beam = [hdr['{0:s}{1:s}'.format(b, ind)]
for b in 'BMAJ BMIN BPA'.split()]
beams.append(tuple(beam))
# parse beam info to pybdsm
img_opts['beam_spectrum'] = beams
image = img_opts.pop('filename')
filename = os.path.basename(image)
outfile = write_opts.pop('outfile')
for key, value in sorted(img_opts.items()):
sys.stderr.write("{:20}: {}\n".format(key, value))
sys.stderr.flush()
try:
img = bdsm.process_image(image, **img_opts)
port2tigger = write_opts.pop('port2tigger', True)
if port2tigger:
write_opts['format'] = 'fits'
img.write_catalog(outfile=outfile, **write_opts)
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
if not port2tigger:
sys.exit(0)
# convert to Gaul file to Tigger LSM
# First make dummy tigger model
tfile = tempfile.NamedTemporaryFile(suffix='.txt')
tfile.flush()
prefix = os.path.splitext(outfile)[0]
tname_lsm = prefix + ".lsm.html"
with open(tfile.name, "w") as stdw:
stdw.write("#format:name ra_d dec_d i q u v emaj_s emin_s pa_d\n")
model = Tigger.load(tfile.name)
tfile.close()
def tigger_src(src, idx):
    """Convert one PyBDSF catalogue row into a Tigger SkyModel.Source.

    Reads position, flux and deconvolved-shape columns (plus their E_*
    error columns) from *src*; *idx* numbers the source ("SRC<idx>").
    Relies on the module-level globals ``spi_do`` and ``freq0`` for the
    optional spectral-index handling.
    """
    name = "SRC%d" % idx
    # Full polarization when Q/U/V columns exist; otherwise Stokes I only.
    try:
        flux = ModelClasses.Polarization(src["Total_flux"], src["Total_Q"],
                                         src["Total_U"], src["Total_V"],
                                         I_err=src["E_Total_flux"],
                                         Q_err=src["E_Total_Q"],
                                         U_err=src["E_Total_U"],
                                         V_err=src["E_Total_V"])
    except KeyError:
        flux = ModelClasses.Polarization(src["Total_flux"], 0, 0, 0,
                                         I_err=src["E_Total_flux"])
    # Catalogue angles are in degrees; Tigger wants radians.
    ra, ra_err = map(numpy.deg2rad, (src["RA"], src["E_RA"]))
    dec, dec_err = map(numpy.deg2rad, (src["DEC"], src["E_DEC"]))
    pos = ModelClasses.Position(ra, dec, ra_err=ra_err, dec_err=dec_err)
    ex, ex_err = map(numpy.deg2rad, (src["DC_Maj"], src["E_DC_Maj"]))
    ey, ey_err = map(numpy.deg2rad, (src["DC_Min"], src["E_DC_Min"]))
    pa, pa_err = map(numpy.deg2rad, (src["PA"], src["E_PA"]))
    # Zero deconvolved axes mean a point source — no Gaussian shape.
    if ex and ey:
        shape = ModelClasses.Gaussian(
            ex, ey, pa, ex_err=ex_err, ey_err=ey_err, pa_err=pa_err)
    else:
        shape = None
    source = SkyModel.Source(name, pos, flux, shape=shape)
    # Adding source peak flux (error) as extra flux attributes for sources,
    # and to avoid null values for point sources I_peak = src["Total_flux"]
    if shape:
        source.setAttribute("I_peak", src["Peak_flux"])
        source.setAttribute("I_peak_err", src["E_peak_flux"])
    else:
        source.setAttribute("I_peak", src["Total_flux"])
        source.setAttribute("I_peak_err", src["E_Total_flux"])
    if spi_do:
        # Check if start frequency is provided if not provided raise error.
        # It is used to define tigger source spectrum index frequency
        if freq0:
            spi, spi_err = (src['Spec_Indx'], src['E_Spec_Indx'])
            source.spectrum = ModelClasses.SpectralIndex(spi, freq0)
            source.setAttribute('spi_error', spi_err)
        else:
            raise RuntimeError("No start frequency (freq0) provided.")
    return source
with pyfits.open(outfile) as hdu:
data = hdu[1].data
for i, src in enumerate(data):
model.sources.append(tigger_src(src, i))
wcs = WCS(image)
centre = wcs.getCentreWCSCoords()
model.ra0, model.dec0 = map(numpy.deg2rad, centre)
model.save(tname_lsm)
# Rename using CORPAT
_runc = "tigger-convert %s --rename -f" % tname_lsm
try:
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
| 6,220 | 30.419192 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_fringefit/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
print(f"Running CASA task '{config.binary}'")
task = crasa.CasaTask(config.binary, **parameters_dict)
task.run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/breizorro/src/run.py | import os
import sys
import shlex
import shutil
import subprocess
import yaml
import glob
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = []
for param in cab['parameters']:
name = param['name']
value = param['value']
if value is None:
continue
if param['dtype'] in ['list:str', 'list:file', 'list:int', 'list:float']:
delimiter = param['delimiter']
args += ['{0}{1} {2}'.format(cab['prefix'], name, delimiter.join(value))]
elif param['dtype'] in ['bool']:
args += ['{0}{1}'.format(cab['prefix'], name)]
else:
args += ['{0}{1} {2}'.format(cab['prefix'], name, value)]
_runc = " ".join([cab["binary"]]+ args)
try:
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
| 1,252 | 24.571429 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/politsiyakat_autocorr_amp/src/run.py | import sys
import os
import json
import yaml
import subprocess
import shlex
import shutil
import glob
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = {}
tasksuite = None
for param in cab['parameters']:
name = param['name']
value = param['value']
if value is None:
continue
args[name] = value
kwargs = "'{}'".format(json.dumps(args))
ARGS = ["flag_autocorr_drifts",
"-s antenna_mod",
kwargs]
_runc = " ".join([cab['binary']] + ARGS)
try:
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
| 1,053 | 20.510204 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_clean/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
from pyrap.tables import table
import os
import sys
import numpy
import astropy.io.fits as pyfits
args = parameters_dict
print(f"Running CASA task '{config.binary}'")
noise_image = args.pop('noise_image', False)
if noise_image:
noise_sigma = args.pop('noise_sigma')
noise_hdu = pyfits.open(noise_image)
noise_data = noise_hdu[0].data
noise_std = noise_data.std()
threshold = noise_sigma*noise_std
args['threshold'] = '{}Jy'.format(threshold)
else:
args.pop('noise_sigma')
prefix = args['imagename']
port2fits = args.pop('port2fits', True)
keep_casa_images = args.pop("keep_casa_images", False)
task = crasa.CasaTask(config.binary, **args)
task.run()
nterms = args.get("nterms", 1)
images = ["flux", "model", "residual", "psf", "image"]
STD_IMAGES = images[:4]
convert = []
if port2fits:
for image in images:
img = "{:s}.{:s}".format(prefix, image)
if image == 'flux':
_images = [img]
elif nterms > 1:
_images = ["%s.tt%d" % (img, d) for d in range(nterms)]
if image == "image":
if nterms == 2:
alpha = img+".alpha"
alpha_err = img+".alpha.error"
_images += [alpha, alpha_err]
if nterms == 3:
beta = img+".beta"
beta_err = img+".beta.error"
_images += [beta, beta_err]
else:
_images = [img]
convert += _images
for _image in convert:
sys.stdout.write(_image)
if _image in STD_IMAGES and (not os.path.exists(_image)):
raise RuntimeError(
"Standard output from CASA clean task not found. Something went wrong durring cleaning, take a look at the logs and such")
elif os.path.exists(_image):
task = crasa.CasaTask(
"exportfits", **dict(imagename=_image, fitsimage=_image+".fits", overwrite=True))
task.run()
if not keep_casa_images:
for _image in convert:
os.system("rm -rf {}".format(_image))
| 2,154 | 29.352113 | 134 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_uvsub/src/run.py | import os
import sys
import logging
import Crasa.Crasa as crasa
import yaml
import glob
import shutil
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = {}
for param in cab['parameters']:
name = param['name']
value = param['value']
if value is None:
continue
args[name] = value
task = crasa.CasaTask(cab["binary"], **args)
try:
task.run()
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
# Leave other types
| 927 | 21.634146 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa47_applycal/src/run.py | import os
import sys
import logging
import Crasa.Crasa as crasa
import yaml
import glob
import shutil
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = {}
for param in cab['parameters']:
name = param['name']
value = param['value']
if value is None:
continue
args[name] = value
task = crasa.CasaTask(cab["binary"], **args)
try:
task.run()
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
# Leave other types
| 927 | 21.634146 | 91 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_split/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
print(f"Running CASA task '{config.binary}'")
task = crasa.CasaTask(config.binary, **parameters_dict)
task.run()
| 226 | 24.222222 | 55 | py |
Stimela | Stimela-master/stimela/cargo/cab/simms/src/run.py | # -*- coding: future_fstrings -*-
import sys
from scabha import config, parse_parameters, prun

# If a list of fields is given, insert them as repeated arguments.
# Other arguments not allowed to be lists.
args = [config.binary] + parse_parameters(repeat=True,
                                          positional=["antenna-file"], mandatory=["antenna-file"])
# run the command; propagate a non-zero exit status to the caller
if prun(args) != 0:
    sys.exit(1)
| 416 | 28.785714 | 98 | py |
Stimela | Stimela-master/stimela/cargo/cab/shadems_direct/src/run.py | # -*- coding: future_fstrings -*-
import sys, os, os.path
from scabha import log, config, parameters, prun_multi, OUTPUT

# Resolve the MS path to absolute *before* chdir'ing into the output
# directory, then run one invocation per argument string, collecting
# failures rather than stopping at the first one.
ms = os.path.abspath(parameters.ms)
os.chdir(OUTPUT)
errors = prun_multi([f"{config.binary} {ms} {args}" for args in parameters.args])
for cmd, exc in errors:
    log.error(f"{cmd}: failed with return code {exc.returncode}")
# Abort only when failures occurred and the cab was not told to ignore them.
if errors and not parameters.get('ignore_errors'):
    sys.exit(1)
| 418 | 25.1875 | 81 | py |
Stimela | Stimela-master/stimela/cargo/cab/casa_bandpass/src/run.py | # -*- coding: future_fstrings -*-
import Crasa.Crasa as crasa
from scabha import config, parameters_dict, prun
from pyrap.tables import table
import os
import numpy
print(f"Running CASA task '{config.binary}'")
save_result = parameters_dict.pop("save_result", None)
task = crasa.CasaTask(config.binary, save_result=save_result, **parameters_dict)
task.run()
gtab = parameters_dict["caltable"]
if not os.path.exists(gtab):
raise RuntimeError(f"The gaintable was not created. Please refer to CASA {config.binary} logfile for further details")
tab = table(gtab)
field_ids = numpy.unique(tab.getcol("FIELD_ID"))
tab.close()
field_in = parameters_dict["field"].split(",")
try:
tab = table(gtab+"::FIELD")
field_names = tab.getcol("NAME")
tab.close()
except RuntimeError:
# possible new table format
# sadly Field name and Source name columns are empty
# will need to figure this out, but ignoring the tests for now
tab = table(gtab)
field_names = numpy.unique(tab.getcol("FIELD_NAME"))
tab.close()
pass
if field_names:
try:
ids = list(map(int, field_in))
except ValueError:
ids = list(map(lambda a: field_names.index(a), field_in))
if not set(ids).issubset(field_ids):
raise RuntimeError(f"Some field(s) do not have solutions after the calibration. Please refer to CASA {config.binary} logfile for further details")
| 1,399 | 30.111111 | 154 | py |
Stimela | Stimela-master/stimela/cargo/cab/mvftoms/src/run.py | import os
import sys
import glob
import subprocess
import shutil
import shlex
import yaml
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
MSDIR = os.environ["MSDIR"]
OUTDIR = os.environ["OUTPUT"]
HOME = os.environ["HOME"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = []
overwrite = False
for param in cab['parameters']:
value = param['value']
name = param['name']
if value in [None, False]:
continue
elif name == "overwrite":
overwrite = value
continue
elif value is True:
value = ""
elif name == 'mvffiles':
files = value
continue
elif name == "output-ms" and value:
ms = value
elif name == "credentials_dir" and value:
os.system("cp -rf {0:s} {1:s}/.aws".format(value, HOME))
continue
elif name == "archive-url":
files = value
continue
args += ['{0}{1} {2}'.format(cab['prefix'], name, value)]
if overwrite:
os.system("rm -fr {0:s}".format(ms))
_runc = " ".join([cab["binary"]] + args + files)
try:
subprocess.check_call(shlex.split(_runc))
finally:
for item in junk:
for dest in [OUTDIR, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
| 1,506 | 22.920635 | 91 | py |
SeeChart | SeeChart-main/gold_summary_update.py | import json
# Post-process previously generated chart JSON files: normalise the "gold"
# reference summary from one long string into a list of sentences.
# IDs 611/818/795/791 are skipped — presumably missing/broken files; confirm.
for i in range(87, 1062):
    if i != 611 and i != 818 and i != 795 and i != 791:
        print(str(i))
        fileName = str(i)
        # NOTE(review): this read handle is never closed explicitly; `f` is
        # rebound by the `with` below and the old handle is left to the GC.
        f = open('static/generated/' + fileName + '.json')
        found_data = json.load(f)
        if "gold" in found_data:
            found_data['gold'] = found_data['gold'].replace("\n", "")
            # Re-insert sentence boundaries: " . " becomes ". + ", then split
            # on " + " to get one list entry per sentence.
            gold_list = found_data['gold'].replace(" . ", ". + ")
            gold_list = gold_list.split(" + ")
            found_data['gold'] = gold_list
            # print(gold_list)
            print(found_data)
            with open('static/generated/' + fileName + '.json', 'w') as f:
                json.dump(found_data, f, indent=4)
| 693 | 23.785714 | 74 | py |
SeeChart | SeeChart-main/test.py | import csv
csv_file = csv.reader(open('recorded_data.csv', 'r'), delimiter=',')
url = "https://www.statista.com/statistics/755069/pubg-player-share/"
for row in csv_file:
if url == row[3]:
print(row[4])
| 218 | 20.9 | 69 | py |
SeeChart | SeeChart-main/utility.py | import csv
import os
import shutil
from datetime import datetime
import random
import string
import base64
# from PIL import Image
from io import BytesIO
import json
import requests
import io
from BaselineSummarizer import summarize
url_name = ""
def make_directory(name):
    """Create directory *name* if it does not already exist.

    Failures are reported on stdout rather than raised; an existing
    directory is left untouched.
    """
    if not os.path.exists(name):
        try:
            os.mkdir(name)
        except OSError:
            print("Creation of the directory %s failed" % name)
        else:
            print("Successfully created the directory %s " % name)
    else:
        print("Directory exists already")
def write_on_csv(name, data):
    """Append *data* as one row to ``<name>.csv`` (created if absent)."""
    target = name + ".csv"
    with open(target, 'a', newline='') as sink:
        csv.writer(sink).writerow(data)
def save_image_from_url(label, url, path):
    """Download *url* and save its body as ``<path><label>.png``.

    The response body is written verbatim; no HTTP status check is
    performed (matching the original behaviour).
    """
    response = requests.get(url)
    # Context manager guarantees the file handle is closed even if
    # write() raises — the previous open/close pair leaked it on error.
    with open(path + label + ".png", "wb") as image_file:
        image_file.write(response.content)
def check_in_csv(path, value, column):
    """Return True if any row of ``<path>.csv`` holds *value* at index *column*."""
    with open(path + ".csv", "r") as source:
        for record in csv.reader(source):
            if record[column] == value:
                return True
    return False
def write_as_JSON(name, data):
    """Serialize *data* to ``<name>.json``, pretty-printed with sorted keys."""
    serialized = json.dumps(data, indent=4, sort_keys=True)
    with open(name + '.json', 'w') as handle:
        handle.write(serialized)
def get_random_label(n_letters=5, n_digits=5):
    """Return a random label: *n_letters* ASCII letters then *n_digits* digits.

    Defaults preserve the original fixed 5-letter + 5-digit format, so
    existing callers are unaffected.
    """
    letters = ''.join(random.choice(string.ascii_letters) for _ in range(n_letters))
    digits = ''.join(random.choice(string.digits) for _ in range(n_digits))
    return letters + digits
def write_image(name, imgBase64):
    # Decode a base64-encoded image and save it as ``<name>.png``.
    # NOTE(review): ``Image`` comes from the ``from PIL import Image`` line
    # that is commented out at the top of this module, so calling this
    # function currently raises NameError — re-enable the import to use it.
    im = Image.open(BytesIO(base64.b64decode(imgBase64)))
    im.save(name + '.png', 'PNG')
def make_JSON(data):
    """Persist a scraped D3 payload and kick off summarization.

    Records the page in the deconstructed-page index, dumps the raw payload
    under a random label, then calls reshape_JSON to generate summaries.
    Returns "Success" or "Error".

    NOTE(review): the hard-coded "\\" path separators assume a Windows
    host — confirm before running elsewhere.
    """
    name = get_random_label()
    # Remember the source URL at module level for later labelling.
    global url_name
    url_name = data['url']
    make_directory(os.getcwd() + "\\Data\\D3JSONData")
    write_on_csv(os.getcwd() + "\\Data\\D3JSONData\\deconstructedPageList", [name, data['url'], data['scrap_date']])
    write_as_JSON(os.getcwd() + "\\Data\\D3JSONData\\" + name + "_RAW", data)
    reshaped_data = reshape_JSON(data)
    if reshaped_data == "Error":
        return "Error"
    else:
        return "Success"
def reshape_JSON(data):
    """Derive chart type and axes from a deconstructed D3 page and feed each
    chart to the baseline summarizer.

    ``data['d3data']`` is a list of chart records, each carrying a
    ``schematized`` list of mark groups.  Returns "Error" on failure; on
    success falls through (returns None) — summaries are written as a side
    effect by ``summarize``.  (Large blocks of dead, commented-out
    experimental code were removed from this body; see version control.)
    """
    lenCheck = (data['d3data'])
    chart_type = ""
    x_axis_label = ""
    y_axis_label = ""
    node_id = 0
    if len(lenCheck) == 0:
        # Nothing was deconstructed: clear stale artifacts and report failure.
        if os.path.exists(os.getcwd() + "\\Data\\test\\" + "testData.txt"):
            os.remove(os.getcwd() + "\\Data\\test\\" + "testData.txt")
            print("testData.txt deleted")
        if os.path.exists(os.getcwd() + "\\static\\generated\\" + "0_SHAPED.json"):
            folder = os.getcwd() + "\\static\\generated\\"
            # Wipe every previously generated file/directory under static/generated.
            for filename in os.listdir(folder):
                file_path = os.path.join(folder, filename)
                try:
                    if os.path.isfile(file_path) or os.path.islink(file_path):
                        os.unlink(file_path)
                    elif os.path.isdir(file_path):
                        shutil.rmtree(file_path)
                except Exception as e:
                    print('Failed to delete %s. Reason: %s' % (file_path, e))
        print("Error occurred during deletion.")
        return "Error"
    else:
        print("C H E C K L E N G T H --> " + str(len(lenCheck)))
        for key in range(len(lenCheck)):
            print("K E Y -> " + str(key))
            temp = (data['d3data'][key]['schematized'])
            axes = []
            chart_found = False
            # Identify the chart type from the SVG mark type of each group:
            # rect -> bar, circle -> line, unnamed path -> pie.
            for i in range(len(temp)):
                if str(temp[i]["markType"]) == "rect":
                    chart_type = "bar"
                    chart_found = True
                    node_id = i
                    temp2 = data['d3data'][key]['schematized'][node_id]['data']
                    # Axis labels are the data keys minus derived "_deriv_*" columns.
                    for j in temp2:
                        if not j.startswith('_deriv_'):
                            axes.append(j)
                elif str(temp[i]["markType"]) == "circle":
                    chart_type = "line"
                    chart_found = True
                    node_id = i
                    temp2 = data['d3data'][key]['schematized'][node_id]['data']
                    for j in temp2:
                        if not j.startswith('_deriv_'):
                            axes.append(j)
                elif str(temp[i]["markType"]) == "path" and "name" not in temp[i]:
                    chart_type = "pie"
                    chart_found = True
                    print("Chart type : " + chart_type)
                    node_id = i
                    temp2 = data['d3data'][key]['schematized'][node_id]['data']
                    for j in temp2:
                        if not j.startswith('_deriv_'):
                            axes.append(j)
            if chart_found is False:
                print("Could not identify the chart type")
                return "Error"
            if len(axes) != 0:
                if chart_type == "line" or chart_type == "bar":
                    # A "Value" first axis means the y column came first; swap.
                    if axes[0] == "Value":
                        x_axis_label = axes[1]
                        y_axis_label = axes[0]
                    else:
                        x_axis_label = axes[0]
                        y_axis_label = axes[1]
                    temp1 = (data['d3data'][key]['schematized'][node_id]['data'][y_axis_label])
                    temp2 = (data['d3data'][key]['schematized'][node_id]['data'][x_axis_label])
                    dataStr = ""
                    # Encode "label|value|axis|<type>_chart " tokens (spaces -> underscores).
                    for i in range(len(temp1)):
                        dataStr += x_axis_label.replace(" ", "_") + "|" + str(temp2[i]).replace(" ",
                                                                                               "_") + "|x|" + chart_type + "_chart "
                        dataStr += y_axis_label.replace(" ", "_") + "|" + str(temp1[i]).replace(" ",
                                                                                               "_") + "|y|" + chart_type + "_chart "
                    print(dataStr)
                    name = key + 1
                    summarize(data=dataStr, title="This is a " + chart_type + " chart", name=str(name))
                elif chart_type == "pie":
                    # Pie data lives one level deeper, as a list of
                    # {category: ..., amount: ...} records.
                    pie_temp = (data['d3data'][key]['schematized'][node_id]['data']['data'])
                    # NOTE(review): `x` is never used.
                    x = pie_temp[0].values()
                    keys = list(pie_temp[0].keys())
                    category = keys[0]
                    amount = keys[1]
                    pie_str = ""
                    for a in range(len(pie_temp)):
                        pie_str += category + "|" + str(pie_temp[a][category]) + "|x|" + chart_type + "_chart "
                        pie_str += amount + "|" + str(pie_temp[a][amount]) + "|y|" + chart_type + "_chart "
                    name = key + 1
                    summarize(data=pie_str, title="This is a " + chart_type + " chart", name=str(name))
            else:
                print("This happened")
                return "Error"
def single_line_input(xLabel, xValsAr, yLabel, yValsAr):
    """Serialize paired x/y values into the summarizer token format for a
    single-series line chart: one "label|value|axis|line_chart " token per
    field, x and y alternating."""
    tokens = []
    for idx, x_val in enumerate(xValsAr):
        tokens.append(xLabel + "|" + x_val + "|x|line_chart ")
        tokens.append(yLabel + "|" + yValsAr[idx] + "|y|line_chart ")
    return "".join(tokens)
def single_bar_input(xLabel, xValsAr, yLabel, yValsAr):
    """Serialize paired x/y values for a single-series bar chart.

    Spaces inside x/y values and inside the y label are replaced with
    underscores; the x label is passed through untouched (matches the
    summarizer's expected token format).
    """
    safe_y_label = yLabel.replace(' ', '_')  # hoisted: same every iteration
    tokens = []
    for idx, x_val in enumerate(xValsAr):
        tokens.append(xLabel + "|" + x_val.replace(' ', '_') + "|x|bar_chart ")
        tokens.append(safe_y_label + "|" + yValsAr[idx].replace(' ', '_') + "|y|bar_chart ")
    return "".join(tokens)
def single_bar_input_from_mutli_bar_data(xLabel, xValsAr, yLabel, yValsAr, barValsAr):
    """Build single-bar summarizer input from multi-bar data, pairing each
    unique bar/group name with the corresponding y value.

    Expected output shape (spaces become underscores), e.g.:
        Group|Kerala|x|bar_chart Old-age_dependency_ratio|19.6|y|bar_chart ...

    Bug fix: the loop previously ran ``range(len(xValsAr))`` while indexing
    the *de-duplicated* ``barUniqueValsAr`` — an IndexError whenever
    ``barValsAr`` contained duplicates.  The loop is now bounded by the
    shortest of the three sequences, which reproduces the old output exactly
    in every case that did not crash.

    NOTE(review): ``xLabel`` is unused by design here — the group name takes
    the x slot.  ``xValsAr`` only contributes its length, as before.
    """
    barUniqueValsAr = list(dict.fromkeys(barValsAr))  # dedupe, keep order
    safe_y_label = yLabel.replace(' ', '_')
    input_data = ""
    for i in range(min(len(xValsAr), len(barUniqueValsAr), len(yValsAr))):
        input_data += "Group" + "|" + barUniqueValsAr[i].replace(' ', '_') + "|x|bar_chart " + \
                      safe_y_label + "|" + yValsAr[i].replace(' ', '_') + "|y|bar_chart "
    return input_data
def multi_line_input(chartNumber, xLabel, xValsAr, lineValsAr):
    """Build summarizer input for a lasso selection on a multi-series line
    chart, reading full rows from the chart's generated JSON file.

    Each matching row contributes an x token at slot 0 followed by one token
    per selected series at slots 1..n.
    """
    json_path = "/static/generated_new_summary_baseline/" + str(chartNumber) + ".json"
    with open(os.getcwd() + json_path) as json_file:
        data = json.load(json_file)
    print(str(len(data["data"])))
    # dict.fromkeys de-duplicates while preserving first-seen order.
    xUniqueValsAr = list(dict.fromkeys(xValsAr))
    lineUniqueValsAr = list(dict.fromkeys(lineValsAr))
    print(xUniqueValsAr)
    print(lineUniqueValsAr)
    pieces = []
    for row in data["data"]:
        if row[xLabel] not in xUniqueValsAr:
            continue
        pieces.append(xLabel + "|" + str(row[xLabel].replace(' ', '_')) + "|0|line_chart ")
        for slot, series in enumerate(lineUniqueValsAr, start=1):
            pieces.append(str(series.replace(' ', '_')) + "|" +
                          str(row[series].replace(' ', '_')) + "|" + str(slot) + "|line_chart ")
    input_data = "".join(pieces)
    print(input_data)
    return input_data
def multi_bar_input(chartNumber, xLabel, xValsAr, barValsAr):
    """Build summarizer input for a lasso selection on a multi-series bar
    chart, reading the full rows from the chart's generated JSON file.

    Rows are matched against the selected x values both as-is and with
    underscores turned back into spaces, since the front-end underscores them.
    """
    json_path = "/static/generated_new_summary_baseline/" + str(chartNumber) + ".json"
    with open(os.getcwd() + json_path) as json_file:
        data = json.load(json_file)
    # dict.fromkeys de-duplicates while preserving first-seen order.
    xUniqueValsAr = list(dict.fromkeys(xValsAr))
    xUniqueValsArWithOutUnderscore = [v.replace("_", " ") for v in xUniqueValsAr]
    barUniqueValsAr = list(dict.fromkeys(barValsAr))
    pieces = []
    for row in data["data"]:
        if row[xLabel] in xUniqueValsAr or row[xLabel] in xUniqueValsArWithOutUnderscore:
            pieces.append(xLabel + "|" + str(row[xLabel].replace(' ', '_')) + "|0|bar_chart ")
            for slot, group in enumerate(barUniqueValsAr, start=1):
                pieces.append(str(group.replace(' ', '_')) + "|" +
                              str(row[group].replace(' ', '_')) + "|" + str(slot) + "|bar_chart ")
    input_data = "".join(pieces)
    print(input_data)
    return input_data
def single_bar_input_brush(chartNumber, xLabel, yLabel, barValsAr):
    """Build summarizer input for a brush selection on a single-series bar
    chart: for every row whose x value is among the selected bars (matched
    with or without underscores), emit an x token and its y token."""
    json_path = "/static/generated_new_summary_baseline/" + str(chartNumber) + ".json"
    with open(os.getcwd() + json_path) as json_file:
        data = json.load(json_file)
    underscored = [v.replace(" ", "_") for v in barValsAr]
    pieces = []
    for row in data["data"]:
        if row[xLabel] in barValsAr or row[xLabel] in underscored:
            pieces.append(
                xLabel.replace(" ", "_") + "|" + str(row[xLabel].replace(' ', '_')) +
                "|x|bar_chart " + yLabel.replace(' ', '_') + "|" +
                str(row[yLabel]) + "|y|bar_chart ")
    input_data = "".join(pieces)
    print(input_data)
    return input_data
def single_line_input_brush(chartNumber, xLabel, yLabel, barValsAr):
    """Build summarizer input for a brush selection on a single-series line
    chart: for every row whose x value is among the selected points (matched
    with or without underscores), emit an x token and its y token."""
    json_path = "/static/generated_new_summary_baseline/" + str(chartNumber) + ".json"
    with open(os.getcwd() + json_path) as json_file:
        data = json.load(json_file)
    underscored = [v.replace(" ", "_") for v in barValsAr]
    pieces = []
    for row in data["data"]:
        if row[xLabel] in barValsAr or row[xLabel] in underscored:
            pieces.append(
                xLabel.replace(" ", "_") + "|" + str(row[xLabel].replace(' ', '_')) +
                "|x|line_chart " + yLabel.replace(' ', '_') + "|" +
                str(row[yLabel]) + "|y|line_chart ")
    input_data = "".join(pieces)
    print(input_data)
    return input_data
def multi_bar_input_brush(chartNumber, xLabel, groupNamesAr, barValsAr):
    """Build summarizer input for a brush selection on a multi-series bar
    chart, one x token plus one token per group column for each matching row.

    Bug fix: the per-group token previously had ``str(k + 1)`` concatenated
    onto the *front of the value* as well (e.g. "Male|11302611|1|bar_chart"),
    corrupting every y value.  The slot index now appears only in its own
    field, matching the sibling multi_line_input_brush format.
    """
    json_path = "/static/generated_new_summary_baseline/" + str(chartNumber) + ".json"
    with open(os.getcwd() + json_path) as json_file:
        data = json.load(json_file)
    barValsArWithUnderscore = []
    for a in range(len(barValsAr)):
        barValsArWithUnderscore.append(barValsAr[a].replace(" ", "_"))
    input_data = ""
    for i in range(len(data["data"])):
        if data["data"][i][xLabel] in barValsAr or data["data"][i][xLabel] in barValsArWithUnderscore:
            input_data += xLabel + "|" + str(data["data"][i][xLabel].replace(' ', '_')) + "|0|bar_chart "
            # each row carries len(row)-1 group columns besides the x column
            for k in range(len(data["data"][i]) - 1):
                print(groupNamesAr[k])
                print(data["data"][i][groupNamesAr[k]])
                input_data += groupNamesAr[k].replace(" ", "_") + "|" + data["data"][i][
                    groupNamesAr[k]].replace(" ", "_") + "|" + str(k + 1) + "|bar_chart "
    print(input_data)
    return input_data
def multi_bar_input_for_single_brush(chartNumber, xLabel, yLabel, groupNamesAr, barValsAr):
    """Build a single-bar-style summarizer input from the selected row(s) of a
    multi-bar chart's JSON, plus a lead-in sentence naming the last matched
    row.

    Returns ``[input_data, added_text]``.

    Bug fix: ``added_text`` was only assigned inside the match branch, so the
    final ``return`` raised UnboundLocalError whenever no row matched the
    selection.  It is now initialised to "" up front.
    """
    json_path = "/static/generated_new_summary_baseline/" + str(chartNumber) + ".json"
    with open(os.getcwd() + json_path) as json_file:
        data = json.load(json_file)
    barValsArWithUnderscore = []
    for a in range(len(barValsAr)):
        barValsArWithUnderscore.append(barValsAr[a].replace(" ", "_"))
    input_data = ""
    added_text = ""  # fix: defined even when no row matches the selection
    for i in range(len(data["data"])):
        if data["data"][i][xLabel] in barValsAr or data["data"][i][xLabel] in barValsArWithUnderscore:
            added_text = "In case of " + xLabel + " " + str(data["data"][i][xLabel]) + ", "
            for k in range(len(data["data"][i]) - 1):
                input_data += "Group|" + groupNamesAr[k].replace(" ", "_") + "|x|bar_chart "
                input_data += yLabel + "|" + str(data["data"][i][groupNamesAr[k]]).replace(" ",
                                                                                           "_") + "|y|bar_chart "
    print(input_data)
    return [input_data, added_text]
def multi_line_input_brush(chartNumber, xLabel, groupNamesAr, barValsAr):
    """Build summarizer input for a brush selection on a multi-series line
    chart: one x token plus one token per series column for each matching
    row (rows matched with or without underscores in the x value)."""
    json_path = "/static/generated_new_summary_baseline/" + str(chartNumber) + ".json"
    print("json_path")
    print(json_path)
    with open(os.getcwd() + json_path) as json_file:
        data = json.load(json_file)
    underscored = [v.replace(" ", "_") for v in barValsAr]
    pieces = []
    for row in data["data"]:
        if row[xLabel] in barValsAr or row[xLabel] in underscored:
            pieces.append(xLabel + "|" + str(row[xLabel].replace(' ', '_')) + "|0|line_chart ")
            # each row carries len(row)-1 series columns besides the x column
            for k in range(len(row) - 1):
                pieces.append(groupNamesAr[k].replace(" ", "_") + "|" +
                              row[groupNamesAr[k]].replace(" ", "_") + "|" + str(k + 1) + "|line_chart ")
    input_data = "".join(pieces)
    print(input_data)
    return input_data
# multi_bar_input_for_single_brush(chartNumber=140, xLabel="Community", yLabel="Population", groupNamesAr=[
# "Male",
# "Female"
# ], barValsAr="Galicia")
# "Community": "Galicia",
# "Male": "1302611",
# "Female": "1396153"
# Group|Male|x|bar_chart Population|1302611|y|bar_chart Group|Female|x|bar_chart Population|1396153|y|bar_chart
# strtext = multi_bar_input(13, "Actor", [
# "Roger Moore",
# "Roger Moore",
# "Roger Moore",
# "Daniel Craig",
# "Daniel Craig"
# ], [
# "Very favorable",
# "Somewhat favorable",
# "Don't know/no opinion",
# "Very favorable",
# "Somewhat favorable"
# ])
# #
# output_data = summarize(data=strtext, all_y_label="yLabel", name="Partial", title="Partial", partial=True)
# print("output_data")
# print(output_data)
def try_me():
    """Smoke test: load a saved D3 JSON capture (Windows-style path) and
    report whether reshape_JSON can process it."""
    # Sample capture ids kept for convenience when switching test inputs.
    pie = "KBsvb60656_RAW"
    bar = "iBkjI44058_RAW"
    line = "UCccq81803_RAW"
    multi = "zNbXO23077"
    test = "mixed"
    with open(os.getcwd() + "\\Data\\D3JSONData\\" + test + '.json') as json_file:
        data = json.load(json_file)
    return "Error" if reshape_JSON(data) == "Error" else "Success"
# mod = reshape_JSON(data)
# print(json.dumps(data, indent=4, sort_keys=True))
# try_me()
| 20,592 | 36.306159 | 146 | py |
SeeChart | SeeChart-main/app.py | from datetime import datetime
import json
import csv
import os
from os import listdir
from os.path import isfile, join
import ssl
from flask_jsglue import JSGlue # pip install Flask-JSGlue -> http://stewartpark.github.io/Flask-JSGlue/
import math
from qna import askMe
from flask import (
Flask,
g,
redirect,
render_template,
request,
session,
url_for,
send_from_directory,
flash
)
from flask_cors import CORS
from flask_restful import Api, Resource, reqparse, fields, marshal_with
from BaselineSummarizer import summarize
from users import users
from utility import make_directory, check_in_csv, save_image_from_url, write_on_csv, get_random_label, make_JSON, \
write_image, single_line_input, multi_line_input, single_bar_input, multi_bar_input, \
single_bar_input_from_mutli_bar_data, single_bar_input_brush, multi_bar_input_brush, single_line_input_brush, \
multi_line_input_brush, multi_bar_input_for_single_brush
import tasks
# UNCOMMENT THE FOLLOWING TWO LINES TO RUN LOCALLY
# TLS context for serving the dev server over HTTPS with a local certificate.
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain('certificate/server.crt', 'certificate/server.key')
# Flask application setup.
# SECURITY NOTE(review): the session secret key is hard-coded in source;
# move it to an environment variable / config file before deploying.
app = Flask(__name__, template_folder='templates')
app.secret_key = 'somesecretkeythatonlyshovanshouldknow'
api = Api(app)  # flask_restful API wrapper used for the POST resources
CORS(app)  # allow cross-origin requests from the chart front-end
jsglue = JSGlue(app)  # exposes Flask's url_for to client-side JavaScript
# ---------------------------------------------------------------------------
# Request parsers: one reqparse.RequestParser per POST endpoint, declaring
# the form fields that endpoint accepts.  All fields are strings unless a
# type is given explicitly.
# ---------------------------------------------------------------------------
screenshot_post_args = reqparse.RequestParser()
screenshot_post_args.add_argument("vis_id", type=str, help="ID of the visual")
screenshot_post_args.add_argument("url", type=str, help="URL")
screenshot_post_args.add_argument("date", type=str, help="Date")
screenshot_post_args.add_argument("imgBase64", type=str, help="imgBase64")
url_post_args = reqparse.RequestParser()
url_post_args.add_argument("iframe_url", type=str, help="ID of the visual")
url_post_args.add_argument("original_url", type=str, help="URL")
decon_post_args = reqparse.RequestParser()
decon_post_args.add_argument("decon", type=str, help="Decon data string")
crawl_image_post_args = reqparse.RequestParser()
crawl_image_post_args.add_argument("img_url", type=str, help="Image URL string")
# Lasso selections on multi-series line charts.
multi_line_lasso_post_args = reqparse.RequestParser()
multi_line_lasso_post_args.add_argument("xValues", type=str, help="xVals")
multi_line_lasso_post_args.add_argument("yValues", type=str, help="yValues")
multi_line_lasso_post_args.add_argument("lineValues", type=str, help="lineValues")
multi_line_lasso_post_args.add_argument("xLabel", type=str, help="xLabel")
multi_line_lasso_post_args.add_argument("yLabel", type=str, help="yLabel")
multi_line_lasso_post_args.add_argument("chartNumber", type=int, help="chartNumber")
multi_line_lasso_post_args.add_argument("summary", type=str, help="summary")
# Lasso selections on multi-series bar charts.
multi_bar_lasso_post_args = reqparse.RequestParser()
multi_bar_lasso_post_args.add_argument("xValues", type=str, help="xVals")
multi_bar_lasso_post_args.add_argument("yValues", type=str, help="yValues")
multi_bar_lasso_post_args.add_argument("barValues", type=str, help="barValues")
multi_bar_lasso_post_args.add_argument("xLabel", type=str, help="xLabel")
multi_bar_lasso_post_args.add_argument("yLabel", type=str, help="yLabel")
multi_bar_lasso_post_args.add_argument("chartNumber", type=int, help="chartNumber")
multi_bar_lasso_post_args.add_argument("summary", type=str, help="summary")
# Brush selections (works for both bar and line charts; see "chart" field).
bar_brush_post_args = reqparse.RequestParser()
bar_brush_post_args.add_argument("chart", type=str, help="chart (line/bar)")
bar_brush_post_args.add_argument("chartType", type=str, help="chartType (single/multi)")
bar_brush_post_args.add_argument("barValues", type=str, help="barValues")
bar_brush_post_args.add_argument("groupNames", type=str, help="groupNames")
bar_brush_post_args.add_argument("xLabel", type=str, help="xLabel")
bar_brush_post_args.add_argument("yLabel", type=str, help="yLabel")
bar_brush_post_args.add_argument("chartNumber", type=int, help="chartNumber")
bar_brush_post_args.add_argument("summary", type=str, help="summary")
# Study-administration endpoints (login, task reset, responses, timing, keys).
login_post_args = reqparse.RequestParser()
login_post_args.add_argument("pid", type=str, help="Participant ID")
login_post_args.add_argument("pwd", type=str, help="Password")
login_post_args.add_argument("status", type=str, help="Status")
task_reset_post_args = reqparse.RequestParser()
task_reset_post_args.add_argument("pid", type=str, help="pid")
task_reset_post_args.add_argument("task_name", type=str, help="task_name")
task_reset_post_args.add_argument("status", type=str, help="status")
question_response_post_args = reqparse.RequestParser()
question_response_post_args.add_argument("pid", type=str, help="pid")
question_response_post_args.add_argument("task", type=str, help="task_name")
question_response_post_args.add_argument("question", type=str, help="question")
question_response_post_args.add_argument("answer", type=str, help="answer")
question_response_post_args.add_argument("time", type=str, help="time taken")
question_response_post_args.add_argument("status", type=str, help="status")
timer_post_args = reqparse.RequestParser()
timer_post_args.add_argument("pid", type=str, help="pid")
timer_post_args.add_argument("question_no", type=str, help="question_no")
timer_post_args.add_argument("answer", type=str, help="answer")
timer_post_args.add_argument("result", type=str, help="result")
timer_post_args.add_argument("taken_time", type=str, help="taken_time")
timer_post_args.add_argument("status", type=str, help="status")
key_post_args = reqparse.RequestParser()
key_post_args.add_argument("pid", type=str, help="pid")
key_post_args.add_argument("chart_no", type=str, help="chart_no")
key_post_args.add_argument("key_presses", type=str, help="key_presses")
key_post_args.add_argument("status", type=str, help="status")
search_post_args = reqparse.RequestParser()
search_post_args.add_argument("chart", type=str, help="chart")
search_post_args.add_argument("x_axis", type=str, help="x_axis")
search_post_args.add_argument("y_axis", type=str, help="y_axis")
search_post_args.add_argument("graphType", type=str, help="graphType")
search_post_args.add_argument("columnType", type=str, help="columnType")
search_post_args.add_argument("search_val", type=str, help="search_val")
search_post_args.add_argument("summary", type=str, help="summary")
qna_post_args = reqparse.RequestParser()
qna_post_args.add_argument("chart", type=str, help="chart")
qna_post_args.add_argument("question", type=str, help="question")
qna_post_args.add_argument("summary", type=str, help="summary")
search_highchart_post_args = reqparse.RequestParser()
search_highchart_post_args.add_argument("url", type=str, help="URL")
search_highchart_post_args.add_argument("json_no", type=str, help="URL")
# ---------------------------------------------------------------------------
# flask_restful marshalling field maps (for @marshal_with), mirroring the
# parsers above.
# ---------------------------------------------------------------------------
search_highchart_resource_fields = {
    'url': fields.String,
    'json_no': fields.String
}
qna_resource_fields = {
    'chart': fields.String,
    'question': fields.String,
    'summary': fields.String
}
search_resource_fields = {
    'chart': fields.String,
    'x_axis': fields.String,
    'y_axis': fields.String,
    'graphType': fields.String,
    'columnType': fields.String,
    'search_val': fields.String,
    'summary': fields.String
}
key_resource_fields = {
    'pid': fields.String,
    'chart_no': fields.String,
    'key_presses': fields.String,
    'status': fields.String
}
timer_resource_fields = {
    'pid': fields.String,
    'question_no': fields.String,
    'answer': fields.String,
    'result': fields.String,
    'taken_time': fields.String,
    'status': fields.String
}
question_response_resource_fields = {
    'pid': fields.String,
    'task': fields.String,
    'question': fields.String,
    'answer': fields.String,
    'time': fields.String,
    'status': fields.String
}
screenshot_resource_fields = {
    'vis_id': fields.String,
    'url': fields.String,
    'date': fields.String,
    'imgBase64': fields.String
}
url_resource_fields = {
    'iframe_url': fields.String,
    'original_url': fields.String
}
decon_resource_fields = {
    'decon': fields.String
}
crawl_image_resource_fields = {
    'img_url': fields.String
}
multi_line_lasso = {
    'xValues': fields.String,
    'yValues': fields.String,
    'lineValues': fields.String,
    'xLabel': fields.String,
    'yLabel': fields.String,
    'chartNumber': fields.Integer,
    'summary': fields.String
}
multi_bar_lasso = {
    'xValues': fields.String,
    'yValues': fields.String,
    'barValues': fields.String,
    'xLabel': fields.String,
    'yLabel': fields.String,
    'chartNumber': fields.Integer,
    'summary': fields.String
}
bar_brush = {
    'chart': fields.String,
    'chartType': fields.String,
    'barValues': fields.String,
    'groupNames': fields.String,
    'xLabel': fields.String,
    'yLabel': fields.String,
    'chartNumber': fields.Integer,
    'summary': fields.String
}
# NOTE(review): this `login` dict is shadowed later in the file by the
# `login()` view function, so it is unreachable by name after that point.
login = {
    'pid': fields.String,
    'pwd': fields.String,
    'status': fields.String
}
task_reset = {
    'pid': fields.String,
    'task_name': fields.String,
    'status': fields.String
}
# NOTE(review): a module-level `global` statement is a no-op; it only
# documents that the view functions below rebind task_obj.
global task_obj
@app.before_request
def before_request():
    """Populate flask.g.user from the session before every request.

    Robustness fix: the previous ``[i for i in users ...][0]`` raised
    IndexError whenever the session held a stale user_id that no longer
    matches any configured user; ``next(..., None)`` leaves g.user as None
    in that case instead of crashing every request.
    """
    g.user = None
    if 'user_id' in session:
        g.user = next((u for u in users if u.id == session['user_id']), None)
# @app.route("/", methods=['GET', 'POST'])
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Login page: GET renders the form, POST authenticates a participant.

    On success the user id is stored in the session, a per-user Tasks
    tracker is created (rebinding the module-global ``task_obj``), and the
    user is redirected to the admin console (username "1000") or the
    participant home page.  Failures flash a message and re-render /login.
    """
    # Reset any per-request chart selections left over on flask.g.
    g.task_file = None
    g.bar_chart = None
    g.bar_chart2 = None
    g.bar_chart3 = None
    g.multi_bar_chart = None
    g.line_chart = None
    g.multi_line_chart = None
    if request.method == 'POST':
        session.pop('user_id', None)  # Going to remove user ID if there is already a logged in one
        session.pop('task_file_name', None)
        username = request.form['username']
        password = request.form['password']
        if len([i for i in users if i.username == username]) > 0:
            user = [i for i in users if i.username == username][0]
        else:
            flash("Please provide a valid Participant ID.", 'error')
            # flash(u'Invalid password provided', 'error')
            return redirect(url_for('login'))
        # `user` is always truthy here (the unknown-username case returned
        # above), so only the password comparison can fail.
        if user and user.password == password:
            session['user_id'] = user.id
            global task_obj
            task_obj = tasks.Tasks(str(user.id))
            task_obj.set_logged_in_true()
            d = task_obj.get_all_task_status_info()
            print(json.dumps(d, indent=4))
            session['task_file_name'] = 'data_' + str(user.id)
            if username == "1000":
                return redirect(url_for('admin'))
            else:
                return redirect(url_for('home'))
        flash("Please provide valid credentials.", 'error')
        # flash(u'Invalid password provided', 'error')
        return redirect(url_for('login'))
    return render_template('login.html')
@app.route('/logout')
def logout():
    """Mark the current participant logged out and return to the login page.

    NOTE(review): session['user_id'] is read directly, so hitting /logout
    with no active session raises KeyError — confirm all callers arrive
    here logged in.
    """
    global task_obj
    task_obj = tasks.Tasks(session['user_id'])
    task_obj.set_logged_in_false()
    d = task_obj.get_all_task_status_info()
    print(json.dumps(d, indent=4))
    session.pop('user_id', None)  # Going to remove user ID if there is already a logged in one
    session.pop('task_file_name', None)
    return redirect(url_for('login'))
@app.route('/home')
def home():
    """Participant landing page.

    Unauthenticated visitors are sent to /login; the admin account
    (id "1000") is bounced to the admin console.
    """
    if not g.user:
        return redirect(url_for('login'))
    if g.user.id == "1000":
        return redirect(url_for('admin'))
    return render_template('home.html')
@app.route('/admin')
def admin():
    """Serve the admin configuration page.

    NOTE: the login/role guard that used to protect this route is currently
    disabled, so the page is reachable without authentication.
    """
    return render_template('admin_config.html')
@app.route('/config')
def config():
    """Serve the admin configuration page under its /config alias.

    NOTE: the login/role guard that used to protect this route is currently
    disabled, so the page is reachable without authentication.
    """
    return render_template('admin_config.html')
@app.route('/timer')
def timer():
    """Serve the admin timing-configuration page.

    NOTE: the login/role guard that used to protect this route is currently
    disabled, so the page is reachable without authentication.
    """
    return render_template('admin_time_config.html')
@app.route('/allcharts')
def all_charts():
    """Serve the page listing all original charts.

    NOTE: the login/role guard that used to protect this route is currently
    disabled, so the page is reachable without authentication.
    """
    return render_template('original_all_charts.html')
@app.route('/consent')
def consent():
    """Render the study consent form."""
    return render_template('consent.html')
def clear():
    """Reset every per-request chart selection stored on flask.g to None."""
    for attr in ('task_file', 'bar_chart', 'bar_chart2', 'bar_chart3',
                 'multi_bar_chart', 'line_chart', 'multi_line_chart'):
        setattr(g, attr, None)
def _start_consent_session(username):
    """Shared body for the /pid10XX direct-entry routes.

    Clears any chart state and existing session, logs the given participant
    in (mirroring login()), and renders the consent form.

    NOTE(review): the original routes also looked up the matching User
    object here but never used it, so that dead lookup has been removed.
    The behaviour is otherwise identical: session/task setup happens
    unconditionally for the hard-coded usernames.
    """
    clear()
    session.pop('user_id', None)
    session.pop('task_file_name', None)
    session['user_id'] = username
    # NOTE: g.user is a plain string here, unlike login() which stores a
    # User object — preserved from the original implementation.
    g.user = username
    global task_obj
    task_obj = tasks.Tasks(username)
    task_obj.set_logged_in_true()
    d = task_obj.get_all_task_status_info()
    print(json.dumps(d, indent=4))
    session['task_file_name'] = 'data_' + str(username)
    return render_template('consent.html')


# The eight direct-entry routes below were byte-identical apart from the
# participant id; they now delegate to the shared helper.  The view-function
# names are unchanged so url_for() endpoints keep working.
@app.route('/pid1001')
def consent_pid1001():
    return _start_consent_session('1001')


@app.route('/pid1002')
def consent_pid1002():
    return _start_consent_session('1002')


@app.route('/pid1003')
def consent_pid1003():
    return _start_consent_session('1003')


@app.route('/pid1004')
def consent_pid1004():
    return _start_consent_session('1004')


@app.route('/pid1005')
def consent_pid1005():
    return _start_consent_session('1005')


@app.route('/pid1006')
def consent_pid1006():
    return _start_consent_session('1006')


@app.route('/pid1007')
def consent_pid1007():
    return _start_consent_session('1007')


@app.route('/pid1008')
def consent_pid1008():
    return _start_consent_session('1008')
@app.route('/new_pre')
def new_pre():
    """Render the pre-study questionnaire page."""
    return render_template('new_pre.html')
@app.route('/new_post')
def new_post():
    """Render the post-study questionnaire page."""
    return render_template('new_post.html')
# @app.route('/original')
@app.route("/")
def original():
    """Site root: show the selected-chart page."""
    # return render_template('original.html')
    return render_template('selectedChart.html')
@app.route('/question')
def question():
    """Dispatch the ?number=... query parameter to the matching task
    question template.

    Unauthenticated users are logged out of their Tasks tracker and sent to
    /login.  Requests with no query string redirect home.

    NOTE(review): a request with query args but no recognised "number"
    value falls off the end of the function and returns None (a 500 in
    Flask) — confirm the front-end can never send one.  A dict-based
    dispatch table would shrink this chain considerably.
    """
    print("QUESTION WAS CALLED")
    if not g.user:
        # abort(403)
        global task_obj
        task_obj = tasks.Tasks(session['user_id'])
        task_obj.set_logged_in_false()
        return redirect(url_for('login'))
    if request.args:
        args = request.args
        if "number" in args:
            if args.get("number") == "a2":
                return render_template('taskA2_questions.html')
            elif args.get("number") == "a3":
                return render_template('taskA3_questions.html')
            elif args.get("number") == "a4":
                return render_template('taskA4_questions.html')
            elif args.get("number") == "bar2_a1":
                return render_template('bar2_taskA1_questions.html')
            elif args.get("number") == "bar2_a2":
                return render_template('bar2_taskA2_questions.html')
            elif args.get("number") == "bar2_a3":
                return render_template('bar2_taskA3_questions.html')
            elif args.get("number") == "bar2_a4":
                return render_template('bar2_taskA4_questions.html')
            elif args.get("number") == "bar3_a1":
                return render_template('bar3_taskA1_questions.html')
            elif args.get("number") == "bar3_a2":
                return render_template('bar3_taskA2_questions.html')
            elif args.get("number") == "bar3_a3":
                return render_template('bar3_taskA3_questions.html')
            elif args.get("number") == "bar3_a4":
                return render_template('bar3_taskA4_questions.html')
            elif args.get("number") == "b2":
                return render_template('taskB2_questions.html')
            elif args.get("number") == "b3":
                return render_template('taskB3_questions.html')
            elif args.get("number") == "b4":
                return render_template('taskB4_questions.html')
            elif args.get("number") == "c2":
                return render_template('taskC2_questions.html')
            elif args.get("number") == "c3":
                return render_template('taskC3_questions.html')
            elif args.get("number") == "c4":
                return render_template('taskC4_questions.html')
            elif args.get("number") == "d2":
                return render_template('taskD2_questions.html')
            elif args.get("number") == "d3":
                return render_template('taskD3_questions.html')
            elif args.get("number") == "d4":
                return render_template('taskD4_questions.html')
            elif args.get("number") == "multi_bar2_a1":
                return render_template('multi_bar2_taskA1_questions.html')
            elif args.get("number") == "multi_bar2_a2":
                return render_template('multi_bar2_taskA2_questions.html')
            elif args.get("number") == "multi_bar2_a3":
                return render_template('multi_bar2_taskA3_questions.html')
            elif args.get("number") == "multi_bar2_a4":
                return render_template('multi_bar2_taskA4_questions.html')
            elif args.get("number") == "multi_bar3_a1":
                return render_template('multi_bar3_taskA1_questions.html')
            elif args.get("number") == "multi_bar3_a2":
                return render_template('multi_bar3_taskA2_questions.html')
            elif args.get("number") == "multi_bar3_a3":
                return render_template('multi_bar3_taskA3_questions.html')
            elif args.get("number") == "multi_bar3_a4":
                return render_template('multi_bar3_taskA4_questions.html')
            elif args.get("number") == "line2_a1":
                return render_template('line2_taskA1_questions.html')
            elif args.get("number") == "line2_a2":
                return render_template('line2_taskA2_questions.html')
            elif args.get("number") == "line2_a3":
                return render_template('line2_taskA3_questions.html')
            elif args.get("number") == "line2_a4":
                return render_template('line2_taskA4_questions.html')
            elif args.get("number") == "line3_a1":
                return render_template('line3_taskA1_questions.html')
            elif args.get("number") == "line3_a2":
                return render_template('line3_taskA2_questions.html')
            elif args.get("number") == "line3_a3":
                return render_template('line3_taskA3_questions.html')
            elif args.get("number") == "line3_a4":
                return render_template('line3_taskA4_questions.html')
            elif args.get("number") == "multi_line2_a1":
                return render_template('multi_line2_taskA1_questions.html')
            elif args.get("number") == "multi_line2_a2":
                return render_template('multi_line2_taskA2_questions.html')
            elif args.get("number") == "multi_line2_a3":
                return render_template('multi_line2_taskA3_questions.html')
            elif args.get("number") == "multi_line2_a4":
                return render_template('multi_line2_taskA4_questions.html')
            elif args.get("number") == "multi_line3_a1":
                return render_template('multi_line3_taskA1_questions.html')
            elif args.get("number") == "multi_line3_a2":
                return render_template('multi_line3_taskA2_questions.html')
            elif args.get("number") == "multi_line3_a3":
                return render_template('multi_line3_taskA3_questions.html')
            elif args.get("number") == "multi_line3_a4":
                return render_template('multi_line3_taskA4_questions.html')
    else:
        return redirect(url_for('home'))
# Maps the ?chart= query value to (attribute stored on flask.g, key in
# selected_chart_ids.json).  Several chart names are aliases ("bar"/"bar1").
_CHART_LOOKUP = {
    "bar": ("bar_chart", "bar"),
    "bar1": ("bar_chart", "bar"),
    "bar2": ("bar_chart2", "bar2"),
    "bar3": ("bar_chart3", "bar3"),
    "multi_bar": ("multi_bar_chart", "multi_bar"),
    "multi_bar1": ("multi_bar_chart", "multi_bar"),
    "multi_bar2": ("multi_bar_chart2", "multi_bar2"),
    "multi_bar3": ("multi_bar_chart3", "multi_bar3"),
    "multi_line": ("multi_line_chart", "multi_line"),
    "multi_line1": ("multi_line_chart", "multi_line"),
    "multi_line2": ("multi_line_chart2", "multi_line2"),
    "multi_line3": ("multi_line_chart3", "multi_line3"),
    "line": ("line_chart", "line"),
    "line1": ("line_chart", "line"),
    "line2": ("line_chart2", "line2"),
    "line3": ("line_chart3", "line3"),
    "test1": ("test_chart1", "test_chart1"),
    "test2": ("test_chart2", "test_chart2"),
}
# For the individually numbered charts the query value, the attribute on g
# and the JSON key are all identical.
for _cid in (
    "bar_158", "bar_180", "bar_186", "bar_206", "bar_775", "bar_1092",
    "bar_308", "bar_309", "bar_377", "bar_45", "bar_669", "bar_694",
    "multi_line_170", "multi_line_176", "multi_line_189", "multi_line_197",
    "multi_line_205", "multi_line_220", "multi_line_711", "multi_line_752",
    "multi_line_245", "multi_line_457", "multi_line_545", "multi_line_524",
):
    _CHART_LOOKUP[_cid] = (_cid, _cid)
@app.route('/task')
def task():
    """Show the chart selected via the ?chart= query parameter.

    Looks the chart name up in static/task/selected_chart_ids.json, exposes
    the chart id on flask.g for the template, and renders selectedChart.html.
    An unrecognised chart name redirects home; a query string without a
    "chart" key still renders selectedChart.html (matching the original
    control flow); no query string at all redirects home.
    """
    # NOTE(review): the login/admin guard was commented out in the original;
    # it is intentionally left out here as well.
    # Use a context manager -- the original leaked this file handle.
    with open('static/task/selected_chart_ids.json') as f:
        selected_chart_ids = json.load(f)
    if request.args:
        args = request.args
        if "chart" in args:
            lookup = _CHART_LOOKUP.get(args.get("chart"))
            if lookup is None:
                # Unknown chart name.
                return redirect(url_for('home'))
            attr_name, json_key = lookup
            setattr(g, attr_name, selected_chart_ids[json_key])
        return render_template('selectedChart.html')
    return redirect(url_for('home'))
@app.route('/questionnaire')
def questionnaire():
    """Render the mid-study questionnaire page for a logged-in participant."""
    global task_obj
    if not g.user:
        # Record the logout before bouncing the visitor to the login page.
        task_obj = tasks.Tasks(session['user_id'])
        task_obj.set_logged_in_false()
        return redirect(url_for('login'))
    if g.user.id == "1000":
        # User 1000 is the administrator account.
        return redirect(url_for('admin'))
    return render_template('questionnaire.html')
@app.route('/post_questionnaire')
def post_questionnaire():
    """Render the post-study questionnaire page for a logged-in participant."""
    global task_obj
    if not g.user:
        # Record the logout before bouncing the visitor to the login page.
        task_obj = tasks.Tasks(session['user_id'])
        task_obj.set_logged_in_false()
        return redirect(url_for('login'))
    if g.user.id == "1000":
        # User 1000 is the administrator account.
        return redirect(url_for('admin'))
    return render_template('post_questionnaire.html')
@app.route('/taskA_questions')
def taskA_questions():
    """Render the Task A question sheet."""
    template = 'taskA_questions.html'
    return render_template(template)
@app.route('/taskA2_questions')
def taskA2_questions():
    """Render the Task A2 question sheet."""
    template = 'taskA2_questions.html'
    return render_template(template)
@app.route('/taskA3_questions')
def taskA3_questions():
    """Render the Task A3 question sheet (bar-chart-2 variant)."""
    template = 'bar2_taskA3_questions.html'
    return render_template(template)
@app.route('/taskA4_questions')
def taskA4_questions():
    """Render the Task A4 question sheet (summary-rating variant)."""
    template = 'task_questions_temp_sum_rating.html'
    return render_template(template)
@app.route('/taskB_questions')
def taskB_questions():
    """Render the Task B question sheet."""
    template = 'taskB_questions.html'
    return render_template(template)
@app.route('/taskC_questions')
def taskC_questions():
    """Render the Task C question sheet."""
    template = 'taskC_questions.html'
    return render_template(template)
@app.route('/taskD_questions')
def taskD_questions():
    """Render the Task D question sheet."""
    template = 'taskD_questions.html'
    return render_template(template)
@app.route('/download/<path:filename>', methods=['GET', 'POST'])
def download(filename):
    """Serve one file from static/task/responses/ as a browser download."""
    responses_dir = 'static/task/responses/'
    return send_from_directory(responses_dir, filename, as_attachment=True)
@app.route('/downloadAll', methods=['GET', 'POST'])
def downloadAll():
    """Bundle every file in static/task/responses/ into one zip and send it.

    Requires a logged-in user; logged-out visitors are redirected to login.
    """
    if not g.user:
        global task_obj
        task_obj = tasks.Tasks(session['user_id'])
        task_obj.set_logged_in_false()
        return redirect(url_for('login'))
    import zipfile  # stdlib; imported locally to keep this fix self-contained
    responses_dir = os.getcwd() + '/static/task/responses/'
    file_names = [f for f in listdir(responses_dir) if isfile(join(responses_dir, f))]
    print(file_names)
    # BUG FIX: send_from_directory() can only serve a single file; the
    # original passed the whole list and raised at runtime.  Pack the folder
    # into a zip next to it and serve that instead.
    archive_dir = os.getcwd() + '/static/task'
    archive_name = 'responses.zip'
    with zipfile.ZipFile(join(archive_dir, archive_name), 'w') as archive:
        for name in file_names:
            archive.write(join(responses_dir, name), arcname=name)
    return send_from_directory(archive_dir, archive_name, as_attachment=True)
@app.route('/summary_set/<s_type>/<stat>', methods=['GET', 'POST'])
def summary_set(s_type, stat):
    """Toggle which summary types are active in active_summary_types.json.

    When *s_type* names one of the mutually exclusive groups (grp_a..grp_d)
    that group is enabled and the other three are disabled; otherwise the
    individual flag *s_type* is set according to *stat* ("true"/"false").
    Any other *stat* value leaves the flag unchanged.  The file is always
    rewritten and a 200 returned, matching the original behavior.
    """
    config_path = 'static/task/active_summary_types.json'
    # Use a context manager -- the original leaked this file handle.
    with open(config_path) as f:
        summary_types = json.load(f)
    groups = ("grp_a", "grp_b", "grp_c", "grp_d")
    if s_type in groups:
        # The group flags are mutually exclusive: enable one, clear the rest.
        for grp in groups:
            summary_types[grp] = (grp == s_type)
    elif stat == "true":
        summary_types[s_type] = True
    elif stat == "false":
        summary_types[s_type] = False
    with open(config_path, 'w') as f:
        json.dump(summary_types, f, indent=4)
    return {'summary': 'Status changed.'}, 200
class CrawlImage(Resource):
    """POST endpoint that downloads a chart image from a URL exactly once.

    Crawled URLs are tracked in Data/Images/CrawledImageList.csv so the same
    image is never fetched twice; a repeat request answers 409.
    """

    @marshal_with(crawl_image_resource_fields)
    def post(self):
        args = crawl_image_post_args.parse_args()
        print('POST: CrawlImage Called')
        url = args['img_url']
        label = get_random_label()
        # os.path.join keeps this working on non-Windows hosts (the original
        # hard-coded "\\" separators, producing bogus paths on POSIX).
        images_dir = os.path.join(os.getcwd(), "Data", "Images")
        csv_base = os.path.join(images_dir, "CrawledImageList")
        make_directory(images_dir)
        # check_in_csv takes the path without the ".csv" suffix, as before.
        if os.path.exists(csv_base + ".csv") and check_in_csv(csv_base, url, 1):
            print("Already crawled")
            return {'img_url': url}, 409
        write_on_csv(csv_base, [label, url])
        # save_image_from_url() appears to expect a directory prefix ending in
        # a separator -- TODO confirm against its implementation.
        save_image_from_url(label, url, images_dir + os.sep)
        return {'img_url': url}, 200
class Screenshot(Resource):
    """POST endpoint that stores a base64 screenshot and logs it in image_list.csv."""

    @marshal_with(screenshot_resource_fields)
    def post(self):
        args = screenshot_post_args.parse_args()
        print('POST: Screenshot Called')
        # os.path.join keeps this portable (the original hard-coded "\\").
        shots_dir = os.path.join(os.getcwd(), "Data", "Screenshots")
        make_directory(shots_dir)
        image_name = get_random_label()
        # Drop the leading "data:image/png;base64," prefix (22 characters) so
        # only the raw base64 payload is written to disk.
        image_data = args['imgBase64'][22:]
        write_image(os.path.join(shots_dir, image_name), image_data)
        write_on_csv(os.path.join(os.getcwd(), "Data", "image_list"),
                     [image_name, args['vis_id'], args['url'], args['date']])
        return {'vis_id': args['vis_id'],
                'url': args['url'],
                'date': args['date'],
                'imgBase64': args['imgBase64']
                }
class AddURL(Resource):
    """POST endpoint that records an iframe URL / original URL pair to Data/iframe_url.csv."""

    @marshal_with(url_resource_fields)
    def post(self):
        args = url_post_args.parse_args()
        print('POST: AddURL Called')
        # os.path.join keeps this portable (the original hard-coded "\\").
        data_dir = os.path.join(os.getcwd(), "Data")
        make_directory(data_dir)
        write_on_csv(os.path.join(data_dir, "iframe_url"),
                     [args['iframe_url'], args['original_url']])
        return {'iframe_url': args['iframe_url'],
                'original_url': args['original_url']
                }
class Deconstruct(Resource):
    """POST endpoint that converts a deconstruction payload into a chart JSON file."""

    @marshal_with(decon_resource_fields)
    def post(self):
        print('POST: Deconstruct Called')
        args = decon_post_args.parse_args()
        payload = json.loads(args['decon'])
        # make_JSON() reports "Success" or "Error"; map that onto HTTP codes.
        status = make_JSON(payload)
        if status == "Success":
            return {'decon': args['decon']}, 200
        elif status == "Error":
            return {'decon': args['decon']}, 403
def find_json(url):
    """Return the recorded JSON id (5th column) for *url* from recorded_data.csv.

    Scans recorded_data.csv in the working directory; the URL lives in the 4th
    column.  Returns False when the URL is not present.  The file handle is
    closed via a context manager (the original leaked it).
    """
    with open('recorded_data.csv', 'r') as fh:
        for row in csv.reader(fh, delimiter=','):
            if url == row[3]:
                print(row[4])
                return row[4]
    return False
class SearchHighchart(Resource):
    """POST endpoint that looks up a recorded Highcharts JSON id by page URL."""

    @marshal_with(search_highchart_resource_fields)
    def post(self):
        print('POST: SearchHighchart Called')
        args = search_highchart_post_args.parse_args()
        json_no = find_json(args['url'])
        # find_json() returns False when the URL has not been recorded.
        status = 200 if json_no is not False else 403
        return {'json_no': json_no}, status
class MultiLineLasso(Resource):
    # POST endpoint behind the lasso-selection tool on line charts: receives
    # the selected x/y/line values and returns a natural-language partial
    # summary covering just the selected points.
    @marshal_with(multi_line_lasso)
    def post(self):
        print('POST: MultiLineLasso Called')
        args = multi_line_lasso_post_args.parse_args()
        # Spaces become underscores to match the token format the summarizer
        # pipeline expects; underscores are turned back into spaces for output.
        xVals = args['xValues'].replace(' ', '_')
        yVals = args['yValues'].replace(' ', '_')
        lineVals = args['lineValues'].replace(' ', '_')
        xLabel = args['xLabel'].replace(' ', '_')
        yLabel = args['yLabel'].replace(' ', '_')
        chartNumber = args['chartNumber']
        print("chartNumber")
        print(str(chartNumber))
        # The selected values arrive as comma-separated strings.
        xValsAr = xVals.split(",")
        yValsAr = yVals.split(",")
        lineValsAr = lineVals.split(",")
        # One distinct line value means the selection covers a single series.
        number_of_group = len(set(lineValsAr))
        output_data = []
        # print("str(len(xValsAr))")
        # print(str(len(xValsAr)))
        if len(xValsAr) == 1:
            # A single selected point gets a fixed one-sentence description.
            output_data = "You have selected only 1 point at " + xLabel + " " + str(
                xValsAr[0]) + " where the " + yLabel.replace("_", " ") + " is " + str(yValsAr[0]) + ". "
        else:
            if number_of_group == 1:
                print("SINGLE LINE CHART")
                input_data = single_line_input(xLabel, xValsAr, yLabel, yValsAr)
                output_data = summarize(data=input_data, all_y_label=yLabel, name="Partial", title="Partial",
                                        partial=True)
                # NOTE(review): this emptiness check runs AFTER summarize() has
                # already been called with input_data -- confirm the ordering
                # is intentional.
                if len(input_data) == 0:
                    return {'summary': "Input data could not be prepared"}, 400
                # "+" markers presumably let the front end split sentences --
                # TODO confirm against the client code.
                output_data = output_data.replace(". ", ". +")
                print("output_data")
                print(output_data)
            else:
                # IT IS A MULTI LINE CHART
                print("MULTI LINE CHART")
                input_data = multi_line_input(chartNumber, xLabel, xValsAr, lineValsAr)
                # print("input_data")
                # print(input_data)
                if len(input_data) == 0:
                    return {'summary': "Input data could not be prepared"}, 400
                output_data = summarize(data=input_data, all_y_label=yLabel, name="Partial", title="Partial",
                                        partial=True)
                print("output_data")
                print(output_data)
        return {'summary': output_data}, 200
class MultiBarLasso(Resource):
    # POST endpoint behind the lasso-selection tool on bar charts: receives
    # the selected x/y/bar values and returns a natural-language partial
    # summary of just the selected bars.
    @marshal_with(multi_bar_lasso)
    def post(self):
        # barVals is module-level state: when the request omits barValues the
        # value from a previous request is reused.
        global barVals
        print('POST: MultiBarLasso Called')
        args = multi_bar_lasso_post_args.parse_args()
        xVals = args['xValues']
        yVals = args['yValues']
        if args['barValues'] is not None:
            barVals = args['barValues']
        xLabel = args['xLabel']
        yLabel = args['yLabel']
        chartNumber = args['chartNumber']
        print("chartNumber")
        print(str(chartNumber))
        # The selected values arrive as comma-separated strings.
        xValsAr = xVals.split(",")
        yValsAr = yVals.split(",")
        if args['barValues'] is not None:
            barValsAr = barVals.split(",")
            print("barValsAr")
            print(barValsAr)
            if barValsAr[0] == "":
                print("TRUE")
        # NOTE(review): when args['barValues'] is None, barValsAr is never
        # assigned and the next line raises NameError -- confirm barValues is
        # always sent by the client.
        number_of_group = len(set(barValsAr))
        # print("number_of_group")
        # print(number_of_group)
        print("str(len(xValsAr))")
        print(str(len(xValsAr)))
        output_data = ""
        if len(xValsAr) == 1:
            # A single selected point gets a fixed one-sentence description.
            output_data = "You have selected only 1 point at " + xLabel + " " + str(
                xValsAr[0]) + " where the " + yLabel.replace("_", " ") + " is " + str(yValsAr[0]) + ". "
        else:
            # An empty first bar value signals a plain (non-grouped) bar chart.
            if barValsAr[0] == "":
                print("SINGLE BAR CHART")
                input_data = single_bar_input(xLabel, xValsAr, yLabel, yValsAr)
                print("input_data")
                print(input_data)
                if len(input_data) == 0:
                    return {'summary': "Input data could not be prepared"}, 400
                output_data = summarize(data=input_data, all_y_label=yLabel.replace("_", " "), name="Partial",
                                        title="Partial", partial=True)
                # "+" markers presumably let the front end split sentences --
                # TODO confirm against the client code.
                output_data = output_data.replace(". ", ". +")
                print("output_data")
                print(output_data)
            else:
                # IT IS A MULTI LINE CHART
                print("MULTI BAR CHART")
                # All selected bars sharing one x value behave like a single
                # bar chart over the group labels.
                if len(set(xValsAr)) == 1:
                    print("REPRESENTING A SINGLE BAR CHART")
                    input_data = single_bar_input_from_mutli_bar_data(xLabel, xValsAr, yLabel, yValsAr, barValsAr)
                    print("input_data")
                    print(input_data)
                    if len(input_data) == 0:
                        return {'summary': "Input data could not be prepared"}, 400
                    output_data = "You have selected data points for " + xLabel + " " + xValsAr[0] + ". "
                    output_data += summarize(data=input_data, all_y_label=yLabel.replace("_", " "), name="Partial",
                                             title="Partial", partial=True)
                    output_data = output_data.replace(". ", ". +")
                    # output_data = "You have selected bars from 1 group at " + xLabel + " " + str(
                    #     xValsAr[0]) + ". Please select at least two groups' data points for the partial summary. "
                    print("output_data")
                    print(output_data)
                else:
                    input_data = multi_bar_input(chartNumber, xLabel, xValsAr, barValsAr)
                    print("input_data")
                    print(input_data)
                    if len(input_data) == 0:
                        return {'summary': "Input data could not be prepared"}, 400
                    output_data = summarize(data=input_data, all_y_label=yLabel.replace("_", " "), name="Partial",
                                            title="Partial", partial=True)
                    print("output_data")
                    print(output_data)
        if output_data is not None:
            return {'summary': output_data}, 200
        else:
            return {'summary': "Partial Summary could not be generated"}, 400
class BarBrush(Resource):
    # POST endpoint behind the brush-selection tool: generates a partial
    # summary for a brushed range on single or grouped ("multi") bar and line
    # charts, dispatching on chartType x chart.
    @marshal_with(bar_brush)
    def post(self):
        # output_data is module-level state; NOTE(review): if chartType is
        # neither "single" nor "multi", the value from a previous request is
        # returned -- confirm this is intentional.
        global output_data
        print('POST: BarBrush Called')
        args = bar_brush_post_args.parse_args()
        chart = args['chart']
        chartType = args['chartType']
        barValues = args['barValues']
        # The brushed x values arrive as a comma-separated string.
        barValuesAr = barValues.split(",")
        xLabel = args['xLabel']
        yLabel = args['yLabel']
        chartNumber = args['chartNumber']
        if chartType == "single":
            if chart == "bar":
                if len(barValuesAr) == 1:
                    # A one-point brush cannot be summarized.
                    output_data = "You have selected only 1 point at " + xLabel + " " + str(
                        barValuesAr[0]) + ". Please select more points for a summary. "
                else:
                    input_data = single_bar_input_brush(chartNumber, xLabel, yLabel, barValuesAr)
                    print("input_data")
                    print(input_data)
                    if len(input_data) == 0:
                        return {'summary': "Input data could not be prepared"}, 400
                    output_data = summarize(data=input_data, all_y_label=yLabel.replace("_", " "), name="Partial",
                                            title="Partial", partial=True)
                    print("output_data")
                    print(output_data)
            elif chart == "line":
                if len(barValuesAr) == 1:
                    output_data = "You have selected only 1 point at " + xLabel + " " + str(
                        barValuesAr[0]) + ". Please select more points for a summary. "
                else:
                    input_data = single_line_input_brush(chartNumber, xLabel, yLabel, barValuesAr)
                    print("input_data")
                    print(input_data)
                    if len(input_data) == 0:
                        return {'summary': "Input data could not be prepared"}, 400
                    output_data = summarize(data=input_data, all_y_label=yLabel.replace("_", " "), name="Partial",
                                            title="Partial", partial=True)
                    print("output_data")
                    print(output_data)
        elif chartType == "multi":
            if chart == "bar":
                groupNames = args['groupNames']
                groupNamesAr = groupNames.split(",")
                added_text = ""
                if len(barValuesAr) == 1:
                    # A single brushed x value: summarize across the groups at
                    # that x, plus an introductory sentence (added_text).
                    [input_data, added_text] = multi_bar_input_for_single_brush(chartNumber, xLabel, yLabel,
                                                                                groupNamesAr, barValuesAr[0])
                else:
                    input_data = multi_bar_input_brush(chartNumber, xLabel, groupNamesAr, barValuesAr)
                if len(input_data) == 0:
                    return {'summary': "Input data could not be prepared"}, 400
                output_data = summarize(data=input_data, all_y_label=yLabel.replace("_", " "), name="Partial",
                                        title="Partial", partial=True)
                print("output_data")
                print(output_data)
                if added_text is not None:
                    output_data = added_text + output_data
            elif chart == "line":
                if len(barValuesAr) == 1:
                    output_data = "You have selected only 1 point at " + xLabel + " " + str(
                        barValuesAr[0]) + ". Please select more points for a summary. "
                else:
                    groupNames = args['groupNames']
                    groupNamesAr = groupNames.split(",")
                    print("groupNamesAr")
                    print(groupNamesAr)
                    print("barValuesAr")
                    print(barValuesAr)
                    input_data = multi_line_input_brush(chartNumber, xLabel, groupNamesAr, barValuesAr)
                    print("input_data multi line")
                    print(input_data)
                    if len(input_data) == 0:
                        return {'summary': "Input data could not be prepared"}, 400
                    output_data = summarize(data=input_data, all_y_label=yLabel.replace("_", " "), name="Partial",
                                            title="Partial", partial=True)
                    print("output_data")
                    print(output_data)
        if output_data is not None:
            return {'summary': output_data}, 200
        else:
            return {'summary': "Partial Summary could not be generated"}, 400
class question_response(Resource):
    """POST endpoint that records a participant's answer and marks the task done."""

    @marshal_with(question_response_resource_fields)
    def post(self):
        global task_obj
        args = question_response_post_args.parse_args()
        recorded_at = str(datetime.now())
        row = [args['pid'], args['task'], args['question'], args['answer'],
               args['time'], recorded_at]
        # Append the answer to the participant's personal response log.
        with open('static/task/responses/question_response' + args['pid'] + '.csv',
                  'a', encoding='UTF8') as f:
            csv.writer(f).writerow(row)
        # Flag the task as completed in the participant's task record.
        task_obj = tasks.Tasks(args['pid'])
        task_obj.set_logged_in_true()
        task_obj.set_task_status(args['task'], "DONE")
        task_obj.update_json()
        return {'summary': args['task'] + ' done.'}, 200
class TaskReset(Resource):
    """POST endpoint that flips a single task back to NOT DONE for a participant."""

    @marshal_with(task_reset)
    def post(self):
        global task_obj
        args = task_reset_post_args.parse_args()
        pid = args['pid']
        task_name = args['task_name']
        # Reload the participant's record and clear the task's DONE flag.
        task_obj = tasks.Tasks(pid)
        task_obj.set_logged_in_true()
        task_obj.set_task_status(task_name, "NOT DONE")
        task_obj.update_json()
        message = task_name + " has been reset for user# " + pid
        print(message)
        return {'summary': message}, 200
class task_timer(Resource):
    """POST endpoint that appends one timed-answer row to Timer_<pid>.csv."""

    @marshal_with(timer_resource_fields)
    def post(self):
        args = timer_post_args.parse_args()
        # Timestamp in dd/mm/YYYY H:M:S, matching the existing log format.
        stamp = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
        # Columns: PID, Question#, Answer, Result, Time, Timestamp.
        row = [args['pid'], args['question_no'], args['answer'], args['result'],
               args['taken_time'], stamp]
        with open('static/task/responses/Timer_' + args['pid'] + '.csv',
                  'a', encoding='UTF8', newline='') as f:
            csv.writer(f).writerow(row)
        message = args['pid'] + "'s time has been updated with " + args['taken_time']
        print(message)
        return {'summary': message}, 200
class key_counter(Resource):
    """POST endpoint that logs a participant's key-press count for a chart."""

    @marshal_with(key_resource_fields)
    def post(self):
        args = key_post_args.parse_args()
        # Timestamp in dd/mm/YYYY H:M:S, matching the existing log format.
        stamp = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
        # Columns: PID, Chart#, KeyPresses, Timestamp (shared key.csv log).
        row = [args['pid'], args['chart_no'], args['key_presses'], stamp]
        with open('static/task/responses/key.csv', 'a', encoding='UTF8',
                  newline='') as f:
            csv.writer(f).writerow(row)
        print(stamp + " : Updated Key Press.")
        return {'summary': "Updated"}, 200
class qna(Resource):
    """POST endpoint that answers a free-form question about a chart via askMe()."""

    @marshal_with(qna_resource_fields)
    def post(self):
        args = qna_post_args.parse_args()
        answer = askMe(args['chart'], args['question'])
        # askMe() yields None (or an empty string) when no answer was produced.
        if answer is None or answer == "":
            answer = "Sorry! SeeChart could not answer!"
        return {'summary': answer}, 200
class search(Resource):
    """POST endpoint that looks up a chart value by x-axis label.

    Loads static/generated_new_summary_baseline/<chart>.json and searches its
    "data" rows for a case-insensitive match on the x-axis column.  Two-column
    charts answer with the single y value; multi-series charts list every
    series value for the matched label.
    """

    @marshal_with(search_resource_fields)
    def post(self):
        args = search_post_args.parse_args()
        chart_id = args['chart']
        search_val = args['search_val']
        x_axis = args['x_axis']
        y_axis = args['y_axis']
        graphType = args['graphType']
        columnType = args['columnType']
        # Close the chart JSON promptly -- the original leaked this handle.
        with open('static/generated_new_summary_baseline/' + chart_id + '.json') as f:
            target_json = json.load(f)
        result_str = ""
        found = False
        for row in target_json["data"]:
            if row[x_axis].lower() != search_val.lower():
                continue
            found = True
            if columnType == "two":
                search_res = row[y_axis]
                if graphType == "bar":
                    # Bar values are reported as whole numbers.
                    search_res = str(math.floor(int(search_res)))
                result_str = "Value of " + x_axis + " " + search_val + " is, " + str(search_res) + ". "
            else:
                # Multi-series chart: report every series value for this label.
                result_str = "We have multiple values for " + x_axis + " " + search_val + ". These are, "
                for series in row:
                    if series != x_axis:
                        result_str += series + " is "
                        result_str += str(row[series]) + ", "
            break
        if not found:
            result_str = "Provided text " + search_val + " is not a valid x axis label. "
        return {'summary': result_str}, 200
# Register the REST endpoints with Flask-RESTful.
api.add_resource(Screenshot, "/getScreenshot")
api.add_resource(AddURL, "/addURL")
api.add_resource(Deconstruct, "/decon")
api.add_resource(SearchHighchart, "/high")
api.add_resource(CrawlImage, "/crawlImage")
api.add_resource(MultiLineLasso, "/multiLineLasso")
api.add_resource(MultiBarLasso, "/multiBarLasso")
api.add_resource(BarBrush, "/multiBarBrush")
api.add_resource(TaskReset, "/reset")
api.add_resource(question_response, "/response")
api.add_resource(task_timer, "/report")
api.add_resource(key_counter, "/key")
api.add_resource(search, "/search")
api.add_resource(qna, "/qna")
# UNCOMMENT THESE TWO LINES TO RUN LOCALLY
if __name__ == "__main__":
    # NOTE(review): the host is a hard-coded LAN address; change it (or bind
    # 0.0.0.0) when running on a different machine.
    app.run(host='192.168.0.106', port='8080', debug=True, ssl_context=context, threaded=True)
    # app.run(host='127.0.0.1', port='8080', debug=True, ssl_context=context, threaded=True)
# if __name__ == '__main__':
#     # Threaded option to enable multiple instances for multiple user access support
#     app.run(threaded=True, port=5000)
| 54,982 | 34.726446 | 116 | py |
SeeChart | SeeChart-main/qna.py | # !pip install transformers
# !pip install datasets
# !pip install nltk
import json
import math
import os
import sys
import nltk # Here to have a nice missing dependency error message early on
import transformers
from filelock import FileLock
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
)
from transformers.file_utils import is_offline_mode
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# These run at import time and raise immediately when the installed
# transformers/datasets versions are older than the script requires.
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.11.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")
def postprocess_text(preds):
    """Strip each prediction and join its sentences with newlines.

    ROUGE-LSum scoring expects a newline after every sentence, so each cleaned
    prediction is re-segmented with nltk's sentence tokenizer.
    """
    cleaned = []
    for pred in preds:
        sentences = nltk.sent_tokenize(pred.strip())
        cleaned.append("\n".join(sentences))
    return cleaned
# Ensure nltk's "punkt" sentence tokenizer is available locally; download it
# on first use unless transformers is running in offline mode.
try:
    nltk.data.find("tokenizers/punkt")
except (LookupError, OSError):
    if is_offline_mode():
        raise LookupError(
            "Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files"
        )
    # The FileLock keeps concurrent processes from downloading simultaneously.
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
# NOTE: everything below runs at import time and loads the full fine-tuned T5
# checkpoint into memory, so importing this module is slow and memory-heavy.
model_name = "t5-base"
# checkpoint_path = "/content/t5_best_checkpoint_plotqa" # OLD
checkpoint_path = "t5_best_checkpoint_plotqa/checkpoint-560000/"
config = AutoConfig.from_pretrained(
    checkpoint_path,
    cache_dir="cache",
    revision="main",
    use_auth_token=None,
)
tokenizer = AutoTokenizer.from_pretrained(
    checkpoint_path,
    cache_dir="cache",
    use_fast=True,
    revision="main",
    use_auth_token=None,
)
model = AutoModelForSeq2SeqLM.from_pretrained(
    checkpoint_path,
    config=config,
    cache_dir="cache",
    revision="main",
    use_auth_token=None,
)
# Keep the embedding matrix in sync with the tokenizer's vocabulary size.
model.resize_token_embeddings(len(tokenizer))
# input_text = "Question: What does the 2nd bar from the top in Primary schools represents ? Table: Schools | Pre-primary schools | Primary schools | Secondary schools | Tertiary schools & Egypt Gross enrolment ratio (%) | 100.05 | 99.54 | 84.65 | 23.86 & Luxembourg Gross enrolment ratio (%) | 92.75 | 88.51 | 71.8 | 2.05"
# input_text = "Question: How many bars are there ? Table: Country | Lebanon | Mali | Nepal | Peru & Female % of children under 5 | 1.3 | 11.8 | 3.7 | 0.7 & Male % of children under 5 | 1.8 | 13.9 | 4.5 | 0.8 Chart Type: hbar_categorical Title: Prevalence of severe wasting in children of different countries with age under 5 years x_axis_title: % of children under 5 y_axis_title: Country"
def predict_answer(tokenizer, model, input_text):
    """Run seq2seq generation on *input_text* and return the first cleaned prediction."""
    encoded = tokenizer(input_text, return_tensors="pt")
    generated = model.generate(**encoded)
    decoded = tokenizer.batch_decode(generated, skip_special_tokens=True)
    # postprocess_text() strips and re-segments each prediction.
    return postprocess_text(decoded)[0]
def askMe(chart_id, question):
    """Answer *question* about chart *chart_id* with the fine-tuned T5 QA model.

    Loads static/generated_new_summary_baseline/<chart_id>.json, linearises the
    chart's data table into the model's "Question: ... Table: ..." prompt
    format, and returns the predicted answer string.  Returns None for
    chart/column-type combinations that are not handled.
    """
    # Close the chart JSON promptly -- the original leaked this handle.
    with open('static/generated_new_summary_baseline/' + chart_id + '.json') as f:
        target_json = json.load(f)
    title = target_json['title']
    xAxis = target_json['xAxis']
    yAxis = target_json['yAxis']
    column_type = target_json['columnType']
    graphType = target_json['graphType']
    if column_type == "two" and graphType in ['bar', 'line']:
        # Simple two-column chart: one row of x labels and one row of y values.
        str1 = xAxis.strip()
        str2 = yAxis.strip()
        for row in target_json['data']:
            str1 += " | " + str(row[xAxis]).strip()
            str2 += " | " + str(row[yAxis]).strip()
        input_text = ("Question: " + question + "? Table: " + str1 + " & " + str2 +
                      " Title: " + title + " x_axis_title: " + xAxis +
                      " y_axis_title: " + yAxis)
        print(input_text)
        answer = predict_answer(tokenizer, model, input_text)
        print(answer)
        return answer
    elif column_type == "multi":
        # Multi-series chart: the x-label row, then one " & <label> | v | v"
        # section per series (labels[0] is the x-axis column itself).
        str1 = xAxis.strip()
        for row in target_json['data']:
            str1 += " | " + str(row[xAxis]).strip()
        group_str = ""
        for label in target_json['labels'][1:]:
            group_str += " & " + label
            for row in target_json['data']:
                group_str += " | " + row[label]
        input_text = ("Question: " + question + "? Table: " + str1 + group_str +
                      " Title: " + title + " x_axis_title: " + xAxis +
                      " y_axis_title: " + yAxis)
        print(input_text)
        print(question)
        answer = predict_answer(tokenizer, model, input_text)
        print(answer)
        return answer
    # Unhandled chart/column type: the caller (the /qna endpoint) treats None
    # as "could not answer".
    return None
# --- Manual smoke-test inputs ------------------------------------------------
# These module-level values are only used by the commented-out askMe() call
# below; the Flask app passes its own chart_id/question per request.
# QUESTION EXAMPLE : https://arxiv.org/pdf/1909.00997.pdf
question = "Does the Time in minutes increase over the years for Desktop"
# question = "Across all Years, what is the maximum value"
# question = "Across all years, what is the minimum value"
# question = "What is the difference between 2006 and 2007"
# question = "Does the graph contain any zero values"
# question = "Does the graph contain grids"
# question = "How many legend labels are there"
# question = "How many years are there" # WRONG
# question = "How many lines intersect with each other?"
# question = "How many lines are there"
# question = "What is the maximum value for desktop"
# chart_id = "1092"
# chart_id = "795"
# chart_id = "818"
chart_id = "545"
# askMe(chart_id=chart_id, question=question)
| 5,738 | 30.707182 | 393 | py |
SeeChart | SeeChart-main/tasks.py | import json
import datetime
import os.path
class Tasks(object):
    """Per-participant task-completion record.

    State is persisted to static/task/data_<pid>.json and reconciled against
    the master task list in static/task/checklist.json.
    """

    fileName: str

    def __init__(self, pid):
        """Load the existing record for *pid*, or create one from checklist.json."""
        # Per-instance state.  (BUG FIX: the original stored this dict on the
        # class, so every Tasks instance shared -- and clobbered -- the same
        # data.)
        self.data = {
            'last_updated': None,
            'pid': None,
            'logged_in': None
        }
        self.pid = pid
        self.fileName = 'data_' + pid
        record_path = 'static/task/' + self.fileName + '.json'
        if os.path.isfile(record_path):
            print(self.fileName + '.json already exists!')
            with open(record_path) as f:
                self.data = json.load(f)
            # Pick up checklist changes made since the record was written.
            self.update_task_list()
        else:
            with open('static/task/checklist.json') as f:
                checklist = json.load(f)
            for key in checklist:
                self.data[checklist[key]] = "NOT DONE"
            self.data['pid'] = str(pid)
            self.data['last_updated'] = str(datetime.datetime.now())
            self.set_logged_in_true()
            self.update_json()

    def set_task_status(self, task_name, status):
        """Set *task_name* to *status* (e.g. "DONE") and persist the record."""
        if task_name not in self.data:
            raise Exception("TASK NAME WAS NOT FOUND IN " + self.fileName + ".json!")
        self.data[task_name] = status
        self.data['last_updated'] = str(datetime.datetime.now())
        self.update_json()

    def set_logged_in_true(self):
        """Mark the participant as logged in and persist the record."""
        self._set_logged_in('true')

    def set_logged_in_false(self):
        """Mark the participant as logged out and persist the record."""
        self._set_logged_in('false')

    def _set_logged_in(self, value):
        # Shared implementation for the two public login setters above.
        if 'logged_in' not in self.data:
            # BUG FIX: the original raised a misleading "TASK NAME WAS NOT
            # FOUND" message here.
            raise Exception("'logged_in' KEY WAS NOT FOUND IN " + self.fileName + ".json!")
        self.data['logged_in'] = value
        self.update_json()

    def get_logged_in_status(self):
        """Return the stored login flag ('true'/'false')."""
        if 'logged_in' not in self.data:
            raise Exception("'logged_in' KEY WAS NOT FOUND IN " + self.fileName + ".json!")
        return self.data['logged_in']

    def get_task_status(self, task_name):
        """Return the stored status string for *task_name*."""
        if task_name not in self.data:
            raise Exception("TASK NAME WAS NOT FOUND IN " + self.fileName + ".json!")
        return self.data[task_name]

    def get_all_task_status_info(self):
        """Return the full record dict (task statuses plus bookkeeping keys)."""
        return self.data

    def update_json(self):
        """Write the record to static/task/data_<pid>.json."""
        with open('static/task/' + self.fileName + '.json', 'w') as f:
            json.dump(self.data, f, indent=4)
        print(self.fileName + ".json updated")

    def update_task_list(self):
        """Reconcile this record with checklist.json.

        Adds newly listed tasks as NOT DONE and drops tasks removed from the
        checklist (bookkeeping keys are kept).  Returns True when the record
        changed and was rewritten, False otherwise.
        """
        with open('static/task/checklist.json') as f:
            checklist = json.load(f)
        changed = False
        for key in checklist:
            if checklist[key] not in self.data:
                changed = True
                self.data[checklist[key]] = "NOT DONE"
        bookkeeping = ("last_updated", "pid", "logged_in")
        stale = [name for name in self.data
                 if name not in bookkeeping and name not in checklist.values()]
        for name in stale:
            print('THIS DID NOT MATCH: ')
            print(name)
            del self.data[name]
            changed = True
        if changed:
            self.update_json()
            print("Updated!")
            return True
        print("No changes found in checklist.json!")
        return False

    def reset_data(self):
        """Reset every task to NOT DONE, keeping pid/login/timestamp intact."""
        # BUG FIX: the original also overwrote 'logged_in' with "NOT DONE".
        for name in self.data:
            if name not in ("last_updated", "pid", "logged_in"):
                self.data[name] = "NOT DONE"
        self.update_json()
# t = tasks('123')
# d = t.get_all_task_status_info()
#
# print(json.dumps(d, indent=4))
#
# t.set_task_status('TASK C', 'DONE')
#
# if t.update_task_list():
# d = t.get_all_task_status_info()
# print(json.dumps(d, indent=4))
#
# t.reset_data() | 3,849 | 27.947368 | 85 | py |
SeeChart | SeeChart-main/BaselineSummarizer.py | import \
json # Serialization: process of encoding data into JSON format (like converting a Python list to JSON). Deserialization: process of decoding JSON data back into native objects you can work with (like reading JSON data into a Python list)
import math # To use mathematical functions
import \
re # Regular Expression, The functions in this module let you check if a particular string matches a given regular expression
import random # random number generation. random() function, generates random numbers between 0 and 1.
from random import randint # randint() is an inbuilt function of the random module in Python3
from statistics import mean, median, \
stdev # mean() function can be used to calculate mean/average of a given list of numbers.
from operator import \
itemgetter # operator is a built-in module providing a set of convenient operators #operator. itemgetter(n) assumes an iterable object (e.g. list, tuple, set) as input, and fetches the n-th element out of it. If multiple items are specified, returns a tuple of lookup values.
from scipy.stats import \
linregress # Calculate a linear least-squares regression for two sets of measurements. Parameters x, yarray_like.
from sklearn import \
preprocessing # The sklearn. preprocessing package provides several functions that transform your data before feeding it to the algorithm.
import \
pandas as pd # presents a diverse range of utilities, ranging from parsing multiple file formats to converting an entire data table into a NumPy matrix array.
import \
numpy as np # NumPy is a general-purpose array-processing package. It provides a high-performance multidimensional array object, and tools for working with these arrays.
# Input locations read by the commented-out batch loop further down this file.
dataPath = 'Data/test/testData.txt'
titlePath = 'Data/test/testTitle.txt'
yLabelPath = 'Data/test/all_Y_labels.txt'
# websitePath = 'results/generated_baseline'
websitePath = 'static/generated' # Folder where the json file is created as the final output
# websitePath = '../TourDeChart/generated'
summaryList = []  # NOTE(review): never appended to in this chunk — confirm it is still needed
def globalTrendBarChart(yValueArr):
    """Return a randomly chosen verb phrase describing the overall trend.

    *yValueArr* is assumed newest-first, so it is reversed before the first
    and last values are compared.  A zero baseline is replaced by 1 to
    avoid division by zero.  Only the sign of the change selects the word
    family; the magnitude is irrelevant beyond the zero check.
    """
    chronological = yValueArr[::-1]
    first, last = chronological[0], chronological[-1]
    overall_change = float(first) - float(last)
    baseline = 1 if last == 0 else last
    percent_change = (overall_change / float(baseline)) * 100
    if percent_change > 0:
        return random.choice(["increased", "grew", "climbed", "risen"])
    if percent_change < 0:
        return random.choice(["decreased", "declined", "reduced", "lowered"])
    return "remained " + random.choice(["stable", "constant", "unchanged", "unvaried"])
def match_trend(trend1, trend2):
    """Return 1 when both trend words belong to the same word family, else 0.

    Families are the up/down/constant synonym sets used by
    globalTrendBarChart.
    """
    families = (
        {"increased", "grew", "climbed", "risen"},
        {"decreased", "declined", "reduced", "lowered"},
        {"stable", "constant", "unchanged", "unvaried"},
    )
    for family in families:
        if trend1 in family and trend2 in family:
            return 1
    return 0
def checkIfDuplicates(listOfElems):
    """Return True when *listOfElems* contains at least one repeated element.

    Elements must be hashable.  The list is materialised into a set in one
    C-level pass, trading the original's early exit for simplicity.
    """
    return len(set(listOfElems)) != len(listOfElems)
def most_frequent(List):
    """Return the most frequent element of *List*.

    Ties are broken in favour of the element appearing first in the list,
    matching the original linear scan.  Raises IndexError on an empty
    list, exactly as the original's List[0] access did.

    Fix: the original called List.count() for every element (O(n^2));
    a precomputed Counter makes this a single O(n) pass.
    """
    from collections import Counter  # local import: module does not import collections
    if not List:
        _ = List[0]  # reproduce the original IndexError on empty input
    counts = Counter(List)
    # max() returns the first element (in list order) whose count is maximal,
    # which is the same tie-break the original scan produced.
    return max(List, key=counts.__getitem__)
def getChartType(x):
    """Map an x-axis label to a chart type: 'year' means line, anything else bar."""
    return 'line_chart' if x.lower() == 'year' else 'bar_chart'
def openCaption(captionPath):
    """Read and return the whole caption file as one UTF-8 string."""
    with open(captionPath, encoding='utf-8') as captionFile:
        return captionFile.read()
def openData(dataPath):
    """Load a CSV of chart data and derive basic chart metadata.

    Returns (dataframe, columns, row_count, x_label, y_label, chart_type),
    where the chart type is inferred from the first column's name via
    getChartType().
    """
    frame = pd.read_csv(dataPath)
    columns = frame.columns
    row_count = frame.shape[0]
    x_axis = columns[0]
    y_axis = columns[1]
    return frame, columns, row_count, x_axis, y_axis, getChartType(x_axis)
def cleanAxisLabel(label):
    """Replace whitespace with underscores and drop '%' and '*' from a label."""
    underscored = re.sub(r'\s', '_', label)
    for ch in ('%', '*'):
        underscored = underscored.replace(ch, '')
    return underscored
def cleanAxisValue(value):
    """Normalise a raw axis value string.

    The placeholders '-' and 'nan' become '0'; whitespace becomes '_';
    '|', ',', '%' and '*' are removed.
    """
    if value in ('-', 'nan'):
        return '0'
    cleaned = re.sub(r'\s', '_', value)
    for ch in ('|', ',', '%', '*'):
        cleaned = cleaned.replace(ch, '')
    return cleaned
def getMagnitude(normalizedSlope):
    """Classify the absolute size of a normalized slope.

    |slope| > 0.75        -> "extremely"
    0.25 < |slope| <= 0.75 -> "moderately"
    otherwise             -> "slightly"

    Fix: the original's else branch assigned to a misspelled dead name
    ("mangitude"); the default initialisation masked the typo.  The
    redundant range check on the middle branch is also simplified.
    """
    slope = abs(normalizedSlope)
    if slope > 0.75:
        return "extremely"
    if slope > 0.25:
        return "moderately"
    return "slightly"
## shehnaz-- The functions created by me
# Initilizing constant values for the fucntions below
# mean_percentArray= 0
# sd_percentArray= 0
# constant_rate = 3.45# avg(% chnage)*0.1 # Meaning any chnage less than 5% is considered roughly constant slope # Determines if a trend is increasing, decreasing or constant
# significant_rate = 6.906 # avg(% chnage)*0.1 # Meaning any chnage >constant rate and less than this rate is considered not significant and so it's trend direction is chnaged to the trend of the succesive interval # Determines the start and end of the trend
# rapidly_rate= 57.55
# gradually_rate= 28.77
# constant_rate = mean_percentArray- 1*(sd_percentArray) # avg(% chnage)*0.1 # Meaning any chnage less than 5% is considered roughly constant slope # Determines if a trend is increasing, decreasing or constant
# significant_rate = mean_percentArray# avg(% chnage)*0.1 # Meaning any chnage >constant rate and less than this rate is considered not significant and so it's trend direction is chnaged to the trend of the succesive interval # Determines the start and end of the trend
# gradually_rate= mean_percentArray+ 1*(sd_percentArray)
# rapidly_rate= mean_percentArray+ 2*(sd_percentArray)
# meanRefinedSlope= 0
# sdRefinedSlope= 0
# constant_rate = 20# avg(% chnage)*0.1 # Meaning any chnage less than 5% is considered roughly constant slope # Determines if a trend is increasing, decreasing or constant
# significant_rate= 40 # avg(% chnage)*0.1 # Meaning any chnage >constant rate and less than this rate is considered not significant and so it's trend direction is chnaged to the trend of the succesive interval # Determines the start and end of the trend
# gradually_rate= 50
# rapidly_rate= 70
# Fixed percent-change thresholds (never modified at runtime).
constant = 5
sig = 10
gradual = 20
rapid = 70
# Working thresholds, seeded from the fixed values above.
# NOTE(review): the original comments say these are rescaled dynamically
# using the multipliers below and the data's mean percent change — confirm
# where that rescaling actually happens.
constant_rate = constant
significant_rate = 0
gradually_rate = gradual
rapidly_rate = rapid
c_rate = 0.6  # multiplier for the "roughly constant" cut-off
s_rate = 1.2  # multiplier for the "significant" cut-off
g_rate = 2  # multiplier for the "gradual" cut-off
r_rate = 3  # multiplier for the "rapid" cut-off
zigZagNum = 30  # The number of y values there needs for chart to be considered zig zag
def directionTrend(new, old, constant_rate):
    """Classify the step from *old* to *new* as increasing/decreasing/constant.

    A move whose percent change (relative to *old*) is at most
    *constant_rate* counts as constant.  A zero baseline is replaced by a
    tiny epsilon so the division cannot fail.
    """
    delta = new - old
    baseline = old if old != 0 else 0.00000000001
    percent_magnitude = abs(((new - baseline) / baseline) * 100)
    if delta > 0 and percent_magnitude > constant_rate:
        return "increasing"
    if delta < 0 and percent_magnitude > constant_rate:
        return "decreasing"
    return "constant"
def rateOfChnage(refinedPercentChnageArr, direction, c, g, r):
    """Map an absolute percent change onto an adverb for the given direction.

    A "constant" direction is always "roughly"; otherwise the thresholds
    r > g > c select "rapidly", "gradually", "slightly", or "roughly".
    (Function name typo is part of the public interface and kept as-is.)
    """
    if direction == "constant":
        return "roughly"
    magnitude = abs(refinedPercentChnageArr)
    for threshold, adverb in ((r, "rapidly"), (g, "gradually"), (c, "slightly")):
        if magnitude > threshold:
            return adverb
    return "roughly"
def globalDirectionTrend(percent, constant_rate):
    """Classify an overall percent change as increasing/decreasing/constant.

    Changes whose magnitude is at most *constant_rate* count as constant.
    """
    magnitude = abs(percent)
    if percent > 0 and magnitude > constant_rate:
        return "increasing"
    if percent < 0 and magnitude > constant_rate:
        return "decreasing"
    return "constant"
def globalRateOfChange(percentChange, c, g, r):
    """Return an adverb for the size of an overall percent change.

    Thresholds r > g > c select "rapidly", "gradually" or "slightly";
    a change at or below *c* yields None (the original fell off the end
    of the function, and that implicit None is preserved explicitly).
    """
    magnitude = abs(percentChange)
    if magnitude > r:
        return "rapidly"
    if magnitude > g:
        return "gradually"
    if magnitude > c:
        return "slightly"
    return None
def percentChnageFunc(new, old):
    """Return the percent change from *old* to *new*.

    A zero baseline is swapped for a tiny epsilon, producing a very large
    result instead of raising ZeroDivisionError.
    """
    baseline = old if old != 0 else 0.00000000001
    return ((new - baseline) / baseline) * 100
def percentChnageRangeFunc(new, old, maximum):
    """Return the change from *old* to *new* as a percent of *maximum*.

    A zero *old* is replaced by a tiny epsilon before the subtraction,
    mirroring percentChnageFunc.  Note that a zero *maximum* still raises
    ZeroDivisionError, exactly as the original did.
    """
    baseline = old if old != 0 else 0.00000000001
    return ((new - baseline) / maximum) * 100
def increaseDecrease(x):
    """Map a trend word to its noun form; anything else "stays the same"."""
    nouns = {"increasing": "increase", "decreasing": "decrease"}
    return nouns.get(x, "stays the same")
def increasedDecreased(x):
    """Map a trend word to its past-tense form; anything else "remained stable"."""
    past_tense = {"increasing": "increased", "decreasing": "decreased"}
    return past_tense.get(x, "remained stable")
def get_indexes(l, val):
    """Return the first index of *val* in array-like *l*.

    *l* must expose tolist() (e.g. a numpy array); ValueError is raised
    when *val* is absent, as with list.index().
    """
    as_list = l.tolist()
    return as_list.index(val)
def get_indexes_max_value(l):
    """Return every index at which *l* attains its maximum value.

    Fix: the original recomputed max(l) once per element (O(n^2)) and
    bound an unused local; the maximum is now computed a single time.
    """
    max_value = max(l)
    return [i for i, x in enumerate(l) if x == max_value]
def get_indexes_min_value(l):
    """Return every index at which *l* attains its minimum value.

    Fix: the original recomputed min(l) once per element (O(n^2)) and
    bound an unused local; the minimum is now computed a single time.
    """
    min_value = min(l)
    return [i for i, x in enumerate(l) if x == min_value]
def stringToFloat(str):
    """Extract the first numeric token from each string in *str*.

    Despite the name, the tokens are returned as strings, exactly as the
    regex matched them (optionally signed decimals or plain integers).
    Raises IndexError when an item contains no number, as the original did.
    (The parameter shadows the builtin 'str'; kept for interface stability.)
    """
    pattern = r"[-+]?\d*\.\d+|\d+"
    return [re.findall(pattern, item)[0] for item in str]
def floatToStr(x):
    """Convert every element of list *x* to str, in place, and return *x*."""
    x[:] = [str(item) for item in x]
    return x
def commaAnd(arr):
    """Join a list of strings into an English enumeration.

    A single item is returned unchanged; two or more items are comma-joined
    with ", and " before the last (including the two-item case, matching
    the original behaviour).
    """
    if len(arr) < 2:
        return arr[0]
    return ", ".join(arr[:-1]) + ", and " + arr[-1]
# scaler = preprocessing.MinMaxScaler()
count = 0
# with open(dataPath, 'r', encoding='utf-8') as dataFile, \
# open(titlePath, 'r', encoding='utf-8') as titleFile:
#
# fileIterators = zip(dataFile.readlines(), titleFile.readlines())
# for data, title in fileIterators:
def summarize(data, all_y_label, name, title, partial=None):
# scaler = preprocessing.MinMaxScaler()
# count += 1
datum = data.split() # Splits data where space is found. So datum[0] is groups of data with no space. e.g. Country|Singapore|x|bar_chart `
# check if data is multi column
columnType = datum[0].split('|')[
2].isnumeric() # e.g. Country|Singapore|x|bar_chart, ... x means single, numeric means multiline
# print("Column Type -> " + str(columnType) + " this is -> " + str(datum[0].split('|')[2]))
# fp = open("all_Y_labels.txt", "a")
if columnType: # If MULTI
# fp.write(str(name) + "\t\n")
# fp.close()
y_label = all_y_label
labelArr = []
chartType = datum[0].split('|')[3].split('_')[0]
values = [value.split('|')[1] for value in datum] # for every datum take the 2nd element
# find number of columns:
columnCount = max([int(data.split('|')[2]) for data in
datum]) + 1 # The number of categories #for every datum take the 3rd element
# Get labels
for i in range(columnCount):
label = datum[i].split('|')[0].split('_')
labelArr.append(
label) # e.g. "Year|2018|0|line_chart Export|55968.7|1|line_chart Import|108775.3|2|line_chart Year|2017|0|line_chart ==> [['Year'], ['Export'], ['Import']]
# print(labelArr)
stringLabels = [' '.join(label) for label in labelArr] # e.g. stringLabels = ['Year', 'Export', 'Import']
# Get values
valueArr = [[] for i in range(columnCount)]
cleanValArr = [[] for i in range(columnCount)]
# print("columnCount -> " + str(columnCount))
# columnCount : how many grouped bars
# stringLabels : label of X-axis and the individual groups
groupedLabels = []
for i in range(len(stringLabels)):
groupedLabels.append(str(stringLabels[i]).replace('_', ' '))
# print("groupedLabels")
# for a in groupedLabels:
# print(a)
a = 0
b = 0
groupedCol = int(len(values) / len(stringLabels))
row = groupedCol
col = columnCount
arr = np.empty((row, col),
dtype=object) # creates a martic with rows representing each distinct x value and cols representing y values for different categories/lines (2 in this case)
# arr[0, 0] = stringLabels[0]
m = 0
n = 0
for b in range(len(values)):
if n == col:
m += 1
n = 0
if a == len(stringLabels):
a = 0
if (b % columnCount) == 0:
arr[m][b % columnCount] = str(values[b]).replace('_', ' ')
else:
num = ""
for c in values[b]: # Done for error: could not convert string to float: '290$'
if c.isdigit():
num = num + c
arr[m][b % columnCount] = float(num)
n += 1
a += 1
max_row = []
max_row_val = []
min_row = []
min_row_val = []
number_of_group = len(groupedLabels) - 1
for i in range(len(groupedLabels) - 1):
arr1 = arr[arr[:, (i + 1)].argsort()]
min_row.append(arr1[0][0])
min_row_val.append(arr1[0][i + 1])
arr2 = arr[arr[:, (i + 1)].argsort()[::-1]]
max_row.append(arr2[0][0])
max_row_val.append(arr2[0][i + 1])
# print(max_row) # x values at which max occured for each category (e.g. ['2013', '2018'] ==> Export max occured at 2013 and Import at 2018)
# print(max_row_val) # y values at which max occured for each category (e.g. [91886.1, 108775.3] ==> Export max occured at 91886.1 and Import at 108775.3)
# print(min_row)
# print(min_row_val)
global_max = max(max_row_val)
global_max_index = get_indexes_max_value(max_row_val)
global_max_category_label = groupedLabels[global_max_index[0] + 1]
global_max_category_xlabel = str(max_row[global_max_index[0]])
if len(groupedLabels) > 3:
global_2nd_max = sorted(max_row_val)[1]
global_2nd_max_index = get_indexes_max_value(max_row_val)
global_2nd_max_category_label = groupedLabels[global_2nd_max_index[0] + 1]
global_2nd_max_category_xlabel = str(max_row[global_2nd_max_index[0]])
global_min = min(min_row_val)
global_min_index = get_indexes_min_value(min_row_val)
global_min_category_label = groupedLabels[global_min_index[0] + 1]
global_min_category_xlabel = str(min_row[global_min_index[0]])
rowCount = round(
len(datum) / columnCount) # same as groupedCols or row, with rows representing each distinct x value
categoricalValueArr = [[] for i in range(rowCount)]
i = 0
for n in range(rowCount):
for m in range(columnCount):
value = values[i]
cleanVal = datum[i].split('|')[1].replace('_', ' ')
valueArr[m].append(value)
cleanValArr[m].append(cleanVal)
if m == 0:
categoricalValueArr[n].append(cleanVal)
else:
categoricalValueArr[n].append(float(re.sub("[^\d\.]", "", cleanVal)))
i += 1
titleArr = title.split()
# calculate top two largest categories
summaryArray = []
dataJson = []
# iterate over index of a value
for i in range(len(cleanValArr[0])):
# iterate over each value
dico = {}
for value, label in zip(cleanValArr, labelArr):
cleanLabel = ' '.join(label)
dico[cleanLabel] = value[i]
dataJson.append(dico)
# HERE
# print(json.dumps(dataJson, indent=4, sort_keys=True))
if (chartType == "bar"):
meanCategoricalDict = {}
stringLabels.insert(len(stringLabels) - 1, 'and')
categories = ', '.join(stringLabels[1:-1]) + f' {stringLabels[-1]}'
# if rowCount > 2:
for category in categoricalValueArr:
meanCategoricalDict[category[0]] = mean(category[1:])
sortedCategories = sorted(meanCategoricalDict.items(), key=lambda x: x[1])
# print("sortedCategories")
# print(sortedCategories)
numerator = abs(sortedCategories[-1][1] - sortedCategories[-2][1])
denominator = (sortedCategories[-1][1] + sortedCategories[-2][1]) / 2
topTwoDelta = round((numerator / denominator) * 100, 1)
numerator1 = abs(sortedCategories[-1][1] - sortedCategories[0][1])
denominator1 = (sortedCategories[-1][1] + sortedCategories[0][1]) / 2
minMaxDelta = round((numerator1 / denominator1) * 100, 1)
group_names = groupedLabels[1:]
group_names_text = ""
for a in range(len(group_names)):
if a == len(group_names) - 1:
group_names_text += "and " + group_names[a]
else:
group_names_text += group_names[a] + ", "
rand_category_index = random.randint(0, number_of_group - 1)
global_max_min_categorical = []
global_max_min_categorical.append(
" For " + str(groupedLabels[0]) + " " + str(max_row[rand_category_index]) + ", " + str(
groupedLabels[rand_category_index + 1]) + " had the highest " + y_label + " among all " + str(
rowCount) + " " + str(groupedLabels[0]) + "s and it has the lowest " + y_label + " in " + str(
groupedLabels[0]) + " " + str(min_row[rand_category_index]) + ". ")
global_max_min_categorical.append(
" For " + str(groupedLabels[0]) + " " + str(max_row[rand_category_index]) + ", " + str(groupedLabels[
rand_category_index + 1]) + " had the maximum " + y_label + " and it saw the lowest in " + str(
groupedLabels[0]) + " " + str(min_row[rand_category_index]) + " out of all " + str(
rowCount) + " " + str(groupedLabels[0]) + "s. ")
global_max_min_categorical.append(" Among all the " + str(groupedLabels[0]) + "s, " + str(
groupedLabels[rand_category_index + 1]) + " had the highest " + y_label + " in " + str(
groupedLabels[0]) + " " + str(max_row[rand_category_index]) + " and lowest " + y_label + " in " + str(
groupedLabels[0]) + " " + str(min_row[rand_category_index]) + ". ")
global_max_min_categorical.append(" Among all the " + str(groupedLabels[0]) + "s, " + str(
groupedLabels[rand_category_index + 1]) + " had the highest " + y_label + " " + str(
max_row_val[rand_category_index]) + " in " + str(groupedLabels[0]) + " " + str(
max_row[rand_category_index]) + " and lowest value " + str(
min_row_val[rand_category_index]) + " in " + str(groupedLabels[0]) + " " + str(
min_row[rand_category_index]) + ". ")
extrema_categorical = global_max_min_categorical[random.randint(0, len(global_max_min_categorical) - 1)]
print("Extrema [min/max][categorical] : " + global_max_min_categorical[
random.randint(0, len(global_max_min_categorical) - 1)])
trend_global = None
if groupedLabels[0].lower() == "year" or groupedLabels[0].lower() == "years" or groupedLabels[
0].lower() == "month" or groupedLabels[0].lower() == "months" or groupedLabels[
0].lower() == "quarter" or groupedLabels[0].lower() == "quarters":
category_trend = []
for a in range(1, len(arr[0])):
# print(arr[:, a])
category_trend.append(globalTrendBarChart(arr[:, a]))
# print(category_trend)
categorical_global_trend = []
if match_trend(category_trend[rand_category_index], category_trend[rand_category_index - 1]):
categorical_global_trend.append(" Over the " + str(rowCount) + " " + groupedLabels[
0] + "s, the " + y_label + " for both " + str(
groupedLabels[rand_category_index + 1]) + " and " + str(
groupedLabels[rand_category_index]) + " have " + category_trend[rand_category_index] + ". ")
categorical_global_trend.append(
" All through the " + groupedLabels[0] + "s, similar trend was observed for " + str(
groupedLabels[rand_category_index + 1]) + " and " + str(
groupedLabels[rand_category_index]) + ". In both cases, the " + y_label + " have " +
category_trend[rand_category_index] + ". ")
else:
categorical_global_trend.append(
" Over the " + str(rowCount) + " " + groupedLabels[0] + "s, the " + y_label + " for " + str(
groupedLabels[rand_category_index + 1]) + " have been " + category_trend[
rand_category_index] + " whereas " + category_trend[
rand_category_index - 1] + " for " + str(groupedLabels[rand_category_index]) + ". ")
categorical_global_trend.append(
" All through the " + groupedLabels[0] + "s, the " + y_label + " for " + str(
groupedLabels[rand_category_index + 1]) + " have " + category_trend[
rand_category_index] + ". On the other hand, for " + str(
groupedLabels[rand_category_index]) + " the " + y_label + " have " + category_trend[
rand_category_index - 1] + ". ")
trend_global = categorical_global_trend[random.randint(0, len(categorical_global_trend) - 1)]
print("Trend [global] : " + categorical_global_trend[
random.randint(0, len(categorical_global_trend) - 1)])
# sorted_max_row = sorted(max_row_val)
# print("sorted_max_row")
# print(sorted_max_row)
max_gap_abs = 0
max_gap_rel = 0
max_gap_index = 0
for i in range(number_of_group - 1):
if max_row_val[i] - min_row_val[i] > max_gap_abs:
max_gap_abs = max_row_val[i] - min_row_val[i]
if min_row_val[i] == 0:
min_row_val[i] = 1
max_gap_rel = round((max_row_val[i] / min_row_val[i]), 2)
max_gap_index = i
max_diff_all_cat = []
max_diff_all_cat.append(" Out of all " + str(
number_of_group) + " groups, the highest gap between the maximum and minimum " + y_label + " was found in case of " + str(
groupedLabels[max_gap_index + 1]) + ". ")
max_diff_all_cat.append(" Among the groups, " + str(groupedLabels[
max_gap_index + 1]) + " had the biggest difference in " + y_label + ". Where the maximum " + y_label + " was " + str(
max_gap_rel) + " times larger than the minimum " + y_label + ". ")
max_diff_all_cat.append(" Among all " + str(number_of_group) + " groups, " + str(
groupedLabels[max_gap_index + 1]) + " had the gap of " + str(
max_gap_abs) + " between the maximum and minimum " + y_label + " observed in " + str(
groupedLabels[0]) + " " + max_row[max_gap_index] + " and " + min_row[max_gap_index] + " respectively. ")
extrema_max_diff_in_cat = max_diff_all_cat[random.randint(0, len(max_diff_all_cat) - 1)]
print("Extrema [difference in a category] : " + max_diff_all_cat[
random.randint(0, len(max_diff_all_cat) - 1)])
max_min_difference_abs = max_row_val[rand_category_index] - min_row_val[rand_category_index]
if min_row_val[rand_category_index] != 0:
max_min_difference_rel = round((max_row_val[rand_category_index] / min_row_val[rand_category_index]), 2)
else:
max_min_difference_rel = 0
diff_in_category = []
diff_in_category.append(" The maximum " + y_label + " for " + str(
groupedLabels[rand_category_index + 1]) + " that was found in " + str(groupedLabels[0]) + " " + str(
max_row[rand_category_index]) + " was " + str(
max_min_difference_rel) + " times larger than the minimum " + y_label + " observed in " + str(
groupedLabels[0]) + " " + str(min_row[rand_category_index]) + ". ")
diff_in_category.append(" There is a gap of " + str(
max_min_difference_abs) + " between the highest and lowest " + y_label + " found for " + str(
groupedLabels[rand_category_index + 1]) + " in " + str(groupedLabels[0]) + " " + str(
max_row[rand_category_index]) + " and " + str(min_row[rand_category_index]) + ". ")
diff_in_category.append(str(groupedLabels[0]) + " " + str(max_row[rand_category_index]) + " and " + str(
min_row[rand_category_index]) + " had the biggest gap of " + str(
max_min_difference_abs) + " between the highest and lowest " + y_label + " found for " + str(
groupedLabels[rand_category_index + 1]) + ". ")
comparison_categorical = diff_in_category[random.randint(0, len(diff_in_category) - 1)]
print("Comparison [categorical] : " + diff_in_category[random.randint(0, len(diff_in_category) - 1)])
average_stat = []
average_stat.append("On average, the " + str(groupedLabels[0]) + " " + sortedCategories[-1][
0] + " had the highest " + y_label + " for all " + str(
number_of_group) + " groups " + group_names_text + ". Whereas " + sortedCategories[0][
0] + " had the lowest average " + y_label + ". ")
average_stat.append(
"Averaging all " + str(number_of_group) + " groups " + group_names_text + ", the " + str(
groupedLabels[0]) + " " + sortedCategories[-1][0] + " is the maximum " + y_label + " and " +
sortedCategories[0][0] + " is the minimum " + y_label + ". ")
compute_der_val_avg = average_stat[random.randint(0, len(average_stat) - 1)]
print("Compute derived val [avg] : " + average_stat[random.randint(0, len(average_stat) - 1)])
global_extrema = []
global_extrema.append(
" For " + str(groupedLabels[0]) + " " + str(global_max_category_xlabel) + ", " + str(
global_max_category_label) + " had the highest " + y_label + " " + str(
global_max) + " among the " + str(
number_of_group) + " groups and in " + str(global_min_category_xlabel) + ", " + str(
global_min_category_label) + " had the lowest " + y_label + " " + str(global_min) + ". ")
global_extrema.append(" Out of all " + str(number_of_group) + " groups, " + str(
global_max_category_label) + " had the highest " + y_label + " for " + str(
groupedLabels[0]) + " " + str(
global_max_category_xlabel) + " and " + str(
global_min_category_label) + " had the lowest " + y_label + " for " + str(groupedLabels[0]) + " " + str(
global_min_category_xlabel) + ". ")
global_extrema.append(" " + str(groupedLabels[0]) + " " + str(
global_max_category_xlabel) + " had the maximum " + y_label + " among all " + str(
number_of_group) + " groups, and it was for " + str(
global_max_category_label) + ". The minimum " + y_label + " was observed in " + str(
groupedLabels[0]) + " " + str(global_min_category_xlabel) + " for " + str(
global_min_category_label) + ". ")
extrema_global = global_extrema[random.randint(0, len(global_extrema) - 1)]
print("Extrema [global] : " + global_extrema[random.randint(0, len(global_extrema) - 1)])
order_global = []
if len(groupedLabels) > 3:
order_global.append(
" In case of " + str(groupedLabels[0]) + " " + str(global_max_category_xlabel) + ", " + str(
global_max_category_label) + " had the highest " + y_label + " " + str(
global_max) + " among the " + str(number_of_group) + " groups and in " + str(
global_min_category_xlabel) + ", " + str(
global_min_category_label) + " had the lowest " + y_label + " " + str(
global_min) + ". The second highest " + y_label + " " + str(
global_2nd_max) + " was observed for " + str(global_2nd_max_category_label) + " in " + str(
groupedLabels[0]) + " " + str(global_2nd_max_category_xlabel) + ". ")
order_global.append(
" " + str(global_max_category_label) + " had the maximum " + y_label + " out of all " + str(
number_of_group) + " groups in " + str(groupedLabels[0]) + " " + str(
global_max_category_xlabel) + " followed by " + str(
global_2nd_max_category_label) + " in " + str(
global_2nd_max_category_xlabel) + ", and the minimum " + y_label + " is found for " + str(
global_min_category_label) + " in " + str(global_min_category_xlabel) + ". ")
order_extrema = order_global[random.randint(0, len(order_global) - 1)]
print("Order [Extrema(max/min)] : " + order_global[random.randint(0, len(order_global) - 1)])
x_label = str(stringLabels[0])
intro = []
if x_label.lower() == "month" or x_label.lower() == "year" or x_label.lower() == "months" or x_label.lower() == "years":
intro.append("This is a grouped bar chart showing " + y_label + " on the Y-axis throughout " + str(
rowCount) + " " + x_label + "s for " + categories + " on the X-axis. ")
intro.append(
"This grouped bar chart represents " + y_label + " on the Y-axis. And, its value throughout " + str(
rowCount) + " " + x_label + "s for " + categories + ". ")
intro.append(
"This grouped bar chart represents " + y_label + " on the Y-axis. And, how the value changed throughout " + str(
rowCount) + " " + x_label + "s for " + categories + ". ")
else:
intro.append("This grouped bar chart represents " + str(
rowCount) + " different " + x_label + "s on X-axis for " + str(
number_of_group) + " groups " + categories + ". On the Y-axis it shows their corresponding " + y_label + ". ")
intro.append("This grouped bar chart shows " + y_label + " on the Y-axis for " + str(
rowCount) + " different " + x_label + "s for " + str(
number_of_group) + " groups " + categories + " that are presented on the X-axis. ")
intro_summary = intro[random.randint(0, len(intro) - 1)]
summary1 = f"This grouped bar chart has {rowCount} categories of {stringLabels[0]} on the x axis representing {str(number_of_group)} groups: {categories}."
min_summary = []
mid_summary = []
max_summary = []
min_summary.append(random.choice(intro))
if trend_global is not None:
min_summary.append(random.choice(categorical_global_trend))
else:
min_summary.append(random.choice(global_extrema))
mid_summary.append(random.choice(intro))
if trend_global is not None:
mid_summary.append(random.choice(categorical_global_trend))
mid_summary.append(random.choice(global_extrema))
mid_summary.append(random.choice(diff_in_category))
else:
mid_summary.append(random.choice(global_extrema))
mid_summary.append(random.choice(diff_in_category))
mid_summary.append(random.choice(average_stat))
max_summary.append(random.choice(intro))
if trend_global is not None:
max_summary.append(random.choice(categorical_global_trend))
max_summary.append(random.choice(global_extrema))
max_summary.append(random.choice(global_max_min_categorical))
max_summary.append(random.choice(diff_in_category))
max_summary.append(random.choice(max_diff_all_cat))
if len(order_global) != 0:
max_summary.append(random.choice(order_global))
max_summary.append(random.choice(average_stat))
else:
max_summary.append(random.choice(global_extrema))
max_summary.append(random.choice(global_max_min_categorical))
max_summary.append(random.choice(diff_in_category))
max_summary.append(random.choice(max_diff_all_cat))
if len(order_global) != 0:
max_summary.append(random.choice(order_global))
max_summary.append(random.choice(average_stat))
summary2 = f" Averaging these {str(number_of_group)} groups, the highest category is found for {str(groupedLabels[0])} {sortedCategories[-1][0]} with a mean value of {round(sortedCategories[-1][1], 2)}."
summaryArray = mid_summary
maxValueIndex = cleanValArr[0].index(sortedCategories[-1][0])
secondValueIndex = cleanValArr[0].index(sortedCategories[-2][0])
trendsArray = [
{}, {"2": ["0", str(maxValueIndex)], "13": [str(columnCount - 1), str(maxValueIndex)]},
{"2": ["0", str(secondValueIndex)], "14": [str(columnCount - 1), str(secondValueIndex)]}, {}
]
# elif rowCount == 2:
# for category in categoricalValueArr:
# meanCategoricalDict[category[0]] = mean(category[1:])
# sortedCategories = sorted(meanCategoricalDict.items(), key=lambda x: x[1])
# numerator = abs(sortedCategories[-1][1] - sortedCategories[-2][1])
# denominator = (sortedCategories[-1][1] + sortedCategories[-2][1]) / 2
# topTwoDelta = round((numerator / denominator) * 100, 1)
#
# summary1 = f"This grouped bar chart has {rowCount} categories of {stringLabels[0]} on the x axis representing {str(number_of_group)} groups: {categories}."
# summary2 = f" Averaging the {str(number_of_group)} groups, the highest category is found for {str(groupedLabels[0])} {sortedCategories[-1][0]} with a mean value of {round(sortedCategories[-1][1], 2)}."
# summaryArray.append(summary1)
# summaryArray.append(summary2)
# maxValueIndex = cleanValArr[0].index(sortedCategories[-1][0])
# secondValueIndex = cleanValArr[0].index(sortedCategories[-2][0])
# summary3 = f" The minimum category is found at {sortedCategories[-2][0]} with a mean value of {round(sortedCategories[-2][1], 2)}."
# summaryArray.append(summary3)
#
# if topTwoDelta >= 5:
# summary4 = f" This represents a difference of {topTwoDelta}%."
# summaryArray.append(summary4)
#
# summaryArray.append(chosen_summary)
# trendsArray = [
# {}, {"2": ["0", str(maxValueIndex)], "13": [str(columnCount - 1), str(maxValueIndex)]},
# {"2": ["0", str(secondValueIndex)], "14": [str(columnCount - 1), str(secondValueIndex)]}, {}
# ]
# else:
# summary1 = f"This grouped bar chart has 1 category for the x axis of {stringLabels[0]}."
# summary2 = f" This category is {stringLabels[1]}, with a mean value of {round(mean(categoricalValueArr[1]), 2)}."
# summaryArray.append(summary1)
# summaryArray.append(summary2)
# summaryArray.append(chosen_summary)
# trendsArray = [{}, {"3": ["0", "0"], "9": ["0", "0"]}]
websiteInput = {"title": title.strip(),
"labels": [' '.join(label) for label in labelArr],
"columnType": "multi",
"graphType": chartType, "summaryType": "baseline", "summary": summaryArray,
"xAxis": x_label,
"yAxis": y_label,
"min_summary": min_summary,
"mid_summary": mid_summary,
"max_summary": max_summary,
"trends": trendsArray,
"data": dataJson}
with open(f'{websitePath}/{name}.json', 'w', encoding='utf-8') as websiteFile:
json.dump(websiteInput, websiteFile, indent=3)
# oneFile.writelines(''.join(summaryArray)+'\n')
# run scatter
        # --- Scatter charts: emit the data points plus a placeholder summary. ---
        # Layout assumption (TODO confirm): labelArr[0] is the class/series column,
        # labelArr[1] the x axis, labelArr[2] the y axis.
        if (chartType == "scatter"):
            stringLabels = [' '.join(label) for label in labelArr]
            print("stringLabels")
            print(stringLabels)
            # Placeholder text — scatter summarisation is not implemented yet.
            summaryArray.append("TEST TEST")
            className = str(stringLabels[0])
            x_label = str(stringLabels[1])
            y_label = str(stringLabels[2])
            dataJson = []
            # Build one record per row index; keys are the cleaned column labels.
            for i in range(len(cleanValArr[0])):
                dico = {}
                for value, label in zip(cleanValArr, labelArr):
                    cleanLabel = ' '.join(label)
                    dico[cleanLabel] = value[i]
                dataJson.append(dico)
            trendsArray = [{}]
            websiteInput = {"title": title,
                            "xAxis": x_label,
                            "yAxis": y_label,
                            "columnType": "two",
                            "graphType": chartType,
                            "class": className,
                            "summaryType": "baseline",
                            "summary": summaryArray,
                            "trends": trendsArray,
                            "data": dataJson}
            with open(f'{websitePath}/{name}.json', 'w', encoding='utf-8') as websiteFile:
                json.dump(websiteInput, websiteFile, indent=3)
        ## for Multi Line charts
        elif (chartType == "line"):
            # Parse every y-series (valueArr[1:]) to floats; valueArr[0] is the x axis.
            intData = []
            for line in valueArr[1:]:  # take 2nd to end elements in valueArr array
                cleanLine = []
                for data in line:
                    if data.isnumeric():
                        cleanLine.append(float(data))
                    else:
                        cleanData = re.sub("[^\d\.]", "",
                                           data)  # strip everything except digits and '.'
                        if len(cleanData) > 0:
                            cleanLine.append(
                                float(cleanData[:4]))  # keep at most the first 4 characters
                        else:
                            cleanLine.append(float(cleanData))
                intData.append(cleanLine)
            # Rank the lines by their mean y value (ascending).
            meanLineVals = []
            # NOTE(review): stringLabels is expected to be set earlier (outside this
            # view) for the "line" chart type — confirm against the enclosing code.
            x_label = str(stringLabels[0])
            assert len(stringLabels[1:]) == len(
                intData)  # one parsed data row per line label
            for label, data in zip(stringLabels[1:],
                                   intData):  # pair each line label with its values
                x = (label, round(mean(data), 1))  # round to 1 d.p
                meanLineVals.append(x)
            sortedLines = sorted(meanLineVals, key=itemgetter(1))
            # sortedLines ranks all the lines from bottommost to topmost by mean value.
            lineCount = len(labelArr) - 1  # no of lines (first column is the x axis)
            # The line with highest overall mean
            maxLine = sortedLines[-1]  # the line with highest overall mean
            index1 = stringLabels.index(maxLine[0]) - 1  # row index of the max-mean line
            maxLineData = round(max(intData[index1]), 2)  # its peak y value
            maxXValue = valueArr[0][
                intData[index1].index(maxLineData)]  # x value where that peak occurs
            # The line with second highest overall mean
            secondLine = sortedLines[-2]
            rowIndex1 = intData[index1].index(
                maxLineData)  # column index of the max-mean line's peak
            index2 = stringLabels.index(secondLine[0]) - 1  # row index of the 2nd line
            secondLineData = round(max(intData[index2]),
                                   2)  # peak y value of the 2nd line
            secondXValue = valueArr[0][
                intData[index2].index(secondLineData)]  ## x value of that peak
            rowIndex2 = intData[index2].index(
                secondLineData)  # column index of the 2nd line's peak
            # The line with the smallest overall mean
            minLine = sortedLines[0]
            index_min = stringLabels.index(minLine[0]) - 1
            minLineData = round(max(intData[index_min]), 2)
            minXValue = valueArr[0][intData[index_min].index(minLineData)]
            # Human-readable "A, B, and C" list of all line names.
            line_names = ""
            for i in range(len(stringLabels) - 1):
                if i < len(stringLabels) - 2:
                    line_names += stringLabels[i + 1] + ", "
                else:
                    line_names += "and " + stringLabels[i + 1]
            print(line_names)
## New Summary Template-shehnaz
valueArrMatrix = np.array(valueArr)
# print(valueArrMatrix)
# valueArr_CorrectOrder= np.flip(valueArrMatrix, axis=1)
xVal = valueArrMatrix[0, :]
# print(xVal)
yVals = valueArrMatrix[1:, :]
# print(yVals)
yVals_float = yVals
# print(len(yVals))
for i in range(0, len(yVals)):
yVals_float[i] = stringToFloat(yVals[i])
# print(yVals_float)
yVals = np.array(yVals_float).astype(np.float) # yVal is now in float type
# print(yVals)
coordinates = dict(zip(xVal, zip(*yVals)))
# print(coordinates)
            # Sort the (x -> column-of-y-values) mapping by x so that "first"/"last"
            # comparisons below follow x-axis order even if the table was reversed.
            sorted_coordinates = dict(sorted(coordinates.items()))
            keys, values = zip(*sorted_coordinates.items())
            # Transpose back: values is per-x tuples, arr becomes per-line rows.
            arr = []
            for j in range(0, len(values[0])):
                array = []
                for i in range(0, len(values)):
                    array.append(values[i][j])
                arr.append(array)
            xVal_sorted = np.array(keys)
            yVals_sorted = np.array(arr)
            print("Sorted X vals")
            print(xVal_sorted)
            print("Sorted Y vals")
            print(yVals_sorted)
            ###### Order/Rank of all lines
            # Reversed copy: highest-mean line first.
            sortedLines_descending = sortedLines[len(sortedLines)::-1]
            ###### Topmost Line
            topmostLineIndex = stringLabels.index(maxLine[0]) - 1
            max_yVal_ofTopmost = max(yVals_sorted[topmostLineIndex])
            max_yValIndx_ofTopmost = get_indexes_max_value(yVals_sorted[topmostLineIndex])
            max_xVal_ofTopmost = xVal_sorted[max_yValIndx_ofTopmost]  # Is an array of xVals
            ## Join multiple peak x values as "a, b, and c"
            if (len(max_xVal_ofTopmost) < 2):
                max_xVal_ofTopmost = max_xVal_ofTopmost[0]
            else:
                slice1 = max_xVal_ofTopmost[:len(max_xVal_ofTopmost) - 1]
                slice2 = ", ".join(slice1)
                slice2 += ", and " + max_xVal_ofTopmost[-1]
                max_xVal_ofTopmost = slice2
            meanOfTopmost = mean(yVals_sorted[topmostLineIndex]).round(2)
            ###### Bottommost Line
            bottomostLineIndex = stringLabels.index(minLine[0]) - 1
            max_yVal_ofBotommost = max(yVals_sorted[bottomostLineIndex])
            max_yValIndx_ofBotommost = get_indexes_max_value(yVals_sorted[bottomostLineIndex])
            max_xVal_ofBotommost = xVal_sorted[max_yValIndx_ofBotommost]  # Is an array of xVals
            ## Join multiple peak x values as "a, b, and c"
            if (len(max_xVal_ofBotommost) < 2):
                max_xVal_ofBotommost = max_xVal_ofBotommost[0]
            else:
                slice1 = max_xVal_ofBotommost[:len(max_xVal_ofBotommost) - 1]
                slice2 = ", ".join(slice1)
                slice2 += ", and " + max_xVal_ofBotommost[-1]
                max_xVal_ofBotommost = slice2
            # NOTE(review): uses yVals here vs yVals_sorted above — equivalent for a
            # mean (column order doesn't change a row mean), but inconsistent style.
            meanOfBotommost = mean(yVals[bottomostLineIndex]).round(2)
            # Extrema [max, absolute, allLines]
            ## Global maximum over all lines
            maxLocal_array = []
            maxLineNames = []
            maxLine_xVals = []
            # num_of_xVals_max[i] = how many x positions line i reaches the max at
            num_of_xVals_max = []
            for i in range(0, len(yVals_sorted)):
                max_local = max(yVals_sorted[i])
                maxLocal_array.append(max_local)
            maxLineIndex = get_indexes_max_value(maxLocal_array)  # line(s) holding the global max
            for i in range(0, len(maxLineIndex)):
                maxLineName = stringLabels[maxLineIndex[i] + 1]
                maxLineNames.append(maxLineName)
                maxValIndex = get_indexes_max_value(
                    yVals_sorted[maxLineIndex[i]])  # positions where this line hits the max
                n = 0
                for j in range(0, len(maxValIndex)):
                    maxLine_xVal = xVal_sorted[maxValIndex[j]]
                    maxLine_xVals.append(maxLine_xVal)
                    n = n + 1
                num_of_xVals_max.append(n)
            # Collapse name/x-value lists to "a, b, and c" strings.
            maxLineNames = commaAnd(maxLineNames)
            maxLine_xVals = commaAnd(maxLine_xVals)
            ## Global minimum over all lines (mirror of the max block above)
            minLocal_array = []
            minLineNames = []
            minLine_xVals = []
            # num_of_xVals_min[i] = how many x positions line i reaches the min at
            num_of_xVals_min = []
            for i in range(0, len(yVals_sorted)):
                min_local = min(yVals_sorted[i])
                minLocal_array.append(min_local)
            minLineIndex = get_indexes_min_value(minLocal_array)  # line(s) holding the global min
            for i in range(0, len(minLineIndex)):
                minLineName = stringLabels[minLineIndex[i] + 1]
                minLineNames.append(minLineName)
                minValIndex = get_indexes_min_value(
                    yVals_sorted[minLineIndex[i]])  # positions where this line hits the min
                n = 0
                for j in range(0, len(minValIndex)):
                    minLine_xVal = xVal_sorted[minValIndex[j]]
                    minLine_xVals.append(minLine_xVal)
                    n = n + 1
                num_of_xVals_min.append(n)
            # Collapse name/x-value lists to "a, b, and c" strings.
            minLineNames = commaAnd(minLineNames)
            minLine_xVals = commaAnd(minLine_xVals)
            ############# GlobalTrend ##############
            # Classify each line's overall trend from its first and last values,
            # normalised by the global maximum.
            direction = []
            rate = []
            for i in range(0, len(yVals_sorted)):
                n = float(yVals_sorted[i][len(yVals_sorted[i]) - 1])
                o = float(yVals_sorted[i][0])
                m = max(maxLocal_array)
                globalPercentChange = percentChnageRangeFunc(n, o, m)
                rate.append(globalPercentChange)
                d = globalDirectionTrend(globalPercentChange, constant)
                direction.append(d)
            lineNames = stringLabels[1:]
            # NOTE(review): this loop tests "increased"/"decreased" while the loop
            # further below tests "increasing"/"decreasing" — confirm which strings
            # globalDirectionTrend actually returns; one of the two may never match.
            lineNames_increasing = []
            lineNames_decreasing = []
            lineNames_constant = []
            for i in range(0, len(direction)):
                if (direction[i] == "increased"):
                    lineNames_increasing.append(lineNames[i])
                elif (direction[i] == "decreased"):
                    lineNames_decreasing.append(lineNames[i])
                else:
                    lineNames_constant.append(lineNames[i])
            # Two-line charts only: track the gap between the lines over x.
            if (len(lineNames) == 2):
                difference_arr = []
                if (len(yVals_sorted) == 2):
                    for i in range(0, len(xVal_sorted)):
                        diff = yVals_sorted[0][i] - yVals_sorted[1][i]
                        difference_arr.append(diff)
                abs_difference_arr = []
                for i in range(0, len(difference_arr)):
                    abs_difference_arr.append(abs(difference_arr[i]))
                constant_rate = 5
                diffPercentChange = percentChnageFunc(abs_difference_arr[-1], abs_difference_arr[0])
                diff_direction = directionTrend(abs_difference_arr[-1], abs_difference_arr[0], constant_rate)
                if (diff_direction == "increasing"):
                    diff_direction = "greater"
                elif (diff_direction == "decreasing"):
                    diff_direction = "smaller"
                else:
                    diff_direction = "roughly same"
                # Find and report the max and the min gap between two Lines
                max_diff = max(abs_difference_arr)
                max_diff_indx = get_indexes_max_value(abs_difference_arr)
                min_diff = min(abs_difference_arr)
                min_diff_indx = get_indexes_min_value(abs_difference_arr)
            # Global Trends with rate of change
            globalTrends = []
            # NOTE(review): `rate` is rebound to a string inside its own iteration —
            # works because the loop iterates the original list object, but fragile.
            for i in rate:
                rate = globalRateOfChange(i, constant, gradual, rapid)
                globalTrends.append(rate)
            lineNames = stringLabels[1:]
            # Bucket lines by direction and speed: _r = rapidly, _g = gradually,
            # _c = roughly constant.
            lineNames_increasing_r = []
            lineNames_increasing_g = []
            lineNames_decreasing_r = []
            lineNames_decreasing_g = []
            lineNames_constant_c = []
            for i in range(0, len(direction)):
                if (direction[i] == "increasing"):
                    if (globalTrends[i] == "rapidly"):
                        lineNames_increasing_r.append(lineNames[i])
                    else:
                        lineNames_increasing_g.append(lineNames[i])
                elif (direction[i] == "decreasing"):
                    if (globalTrends[i] == "rapidly"):
                        lineNames_decreasing_r.append(lineNames[i])
                    else:
                        lineNames_decreasing_g.append(lineNames[i])
                else:
                    lineNames_constant_c.append(lineNames[i])
            # Zig zag: any line that is not roughly constant counts as fluctuating.
            zig_zagLines = []
            if (len(lineNames_increasing_r) != 0):
                zig_zagLines.append(lineNames_increasing_r)
            if (len(lineNames_increasing_g) != 0):
                zig_zagLines.append(lineNames_increasing_g)
            if (len(lineNames_decreasing_r) != 0):
                zig_zagLines.append(lineNames_decreasing_r)
            if (len(lineNames_decreasing_g) != 0):
                zig_zagLines.append(lineNames_decreasing_g)
            # Flatten the buckets into one name list.
            zig_zagLineNames = []
            for i in range(0, len(zig_zagLines)):
                for j in range(0, len(zig_zagLines[i])):
                    zig_zagLineNames.append(zig_zagLines[i][j])
            # For rapidly increasing lines report percentage increase or factor of increase
            percentChng_in = []
            factorChng_in = []
            if (len(lineNames_increasing_r) != 0):
                for i in range(0, len(lineNames_increasing_r)):
                    indx = lineNames.index(lineNames_increasing_r[i])
                    n = float(yVals_sorted[indx][len(yVals_sorted[indx]) - 1])
                    o = float(yVals_sorted[indx][0])
                    # Epsilon guards against division by zero below.
                    if (o == 0):
                        o = 0.00000000001
                    if (n == 0):
                        n = 0.00000000001
                    p = abs(percentChnageFunc(n, o))
                    # Factor of change, only when neither endpoint was zero.
                    if (n != 0.00000000001 and o != 0.00000000001):
                        if (n > o):
                            f = round(n / o, 1)
                        else:
                            f = round(o / n, 1)
                        factorChng_in.append(f)
                    percentChng_in.append(p)
# For rapidly decreasing lines report percentage decrease or factor of decrease
percentChng_de = []
factorChng_de = []
if (len(lineNames_decreasing_r) != 0):
for i in range(0, len(lineNames_decreasing_r)):
indx = lineNames.index(lineNames_decreasing_r[i])
n = float(yVals_sorted[indx][len(yVals_sorted[indx]) - 1])
o = float(yVals_sorted[indx][0])
if (o == 0):
o = 0.00000000001
if (n == 0):
n = 0.00000000001
p = abs(percentChnageFunc(n, o))
# Factor
if (n != 0.00000000001 and o != 0.00000000001):
if (n > o):
f = round(n / o, 1)
else:
f = round(o / n, 1)
factorChng_in.append(f)
percentChng_de.append(p)
# print(percentChng_de)
# print(factorChng_de)
            # Render the percent-change and factor-change sentences for the rapidly
            # changing lines; one of the two renderings is picked at random below.
            percentChngSumm = ""
            factorChngSumm = ""
            print(percentChng_in)
            print(factorChng_in)
            # Convert numeric lists to strings for commaAnd().
            percentChng_in = floatToStr(percentChng_in)
            if (bool(factorChng_in)):
                factorChng_in = floatToStr(factorChng_in)
            percentChng_de = floatToStr(percentChng_de)
            if (bool(factorChng_de)):
                factorChng_de = floatToStr(factorChng_de)
            print(percentChng_in)
            print(factorChng_in)
            # Line that are rapidly increasing
            if (len(lineNames_increasing_r) > 1):
                percentChngSumm += commaAnd(lineNames_increasing_r) + " has increased by " + commaAnd(
                    percentChng_in) + " percent respectively. "
                if (len(factorChng_in) != 0):
                    factorChngSumm += commaAnd(lineNames_increasing_r) + " has increased by " + commaAnd(
                        factorChng_in) + " times respectively. "
            elif (len(lineNames_increasing_r) == 1):
                percentChngSumm += commaAnd(lineNames_increasing_r) + " has increased by " + commaAnd(
                    percentChng_in) + " percent. "
                if (len(factorChng_in) != 0):
                    factorChngSumm += commaAnd(lineNames_increasing_r) + " has increased by " + commaAnd(
                        factorChng_in) + " times. "
            # Line that are rapidly decreasing
            if (len(lineNames_decreasing_r) > 1):
                percentChngSumm += commaAnd(lineNames_decreasing_r) + " has decreased by " + commaAnd(
                    percentChng_de) + " percent respectively. "
                if (len(factorChng_de) != 0):
                    factorChngSumm += commaAnd(lineNames_decreasing_r) + " has decreased by " + commaAnd(
                        factorChng_de) + " times respectively. "
            elif (len(lineNames_decreasing_r) == 1):
                percentChngSumm += commaAnd(lineNames_decreasing_r) + " has decreased by " + commaAnd(
                    percentChng_de) + " percent. "
                if (len(factorChng_de) != 0):
                    factorChngSumm += commaAnd(lineNames_decreasing_r) + " has decreased by " + commaAnd(
                        factorChng_de) + " times. "
            # Prefer the percent wording when no factor wording exists; otherwise
            # pick one of the two at random.
            if (len(factorChngSumm) == 0):
                selectedChange = percentChngSumm
            else:
                chnageFactor = [percentChngSumm, factorChngSumm]
                selectedChange = random.choice(chnageFactor)
            # PRINT SUMMARY
            # Done by Shehnaz
            summaryArr = []
            # Intro sentence: three equivalent phrasings, one picked at random.
            summary1 = []
            summary1.append("This is a multi-line chart with " + str(
                lineCount) + " lines representing " + line_names + ". " + "The y axis denotes " + y_label + " and the x axis denotes " + x_label + ". ")
            summary1.append("The given chart is of multi-line type with " + str(
                lineCount) + " lines namely " + line_names + ". " + "The y axis represents " + y_label + " and the x axis represents " + x_label + ". ")
            summary1.append("You are viewing a chart of multi-line type with " + str(
                lineCount) + " lines denoting " + line_names + ". " + "The y axis indicates the " + y_label + " and the x axis indicates " + x_label + ". ")
            summaryArr.append(random.choice(summary1))
            ###### Global Trends with Rate of chnage
            # One clause per non-empty direction/speed bucket; singular/plural verb
            # chosen by bucket size.
            globalTrendRate_summary = "Overall "
            # Lines that rapidly increase
            if (len(lineNames_increasing_r) > 1):
                globalTrendRate_summary += commaAnd(lineNames_increasing_r) + " are rapidly increasing, "
            elif (len(lineNames_increasing_r) == 1):
                globalTrendRate_summary += commaAnd(lineNames_increasing_r) + " is rapidly increasing, "
            # Lines that gradually increase
            if (len(lineNames_increasing_g) > 1):
                globalTrendRate_summary += commaAnd(lineNames_increasing_g) + " are gradually increasing, "
            elif (len(lineNames_increasing_g) == 1):
                globalTrendRate_summary += commaAnd(lineNames_increasing_g) + " is gradually increasing, "
            # Lines that rapidly decrease
            if (len(lineNames_decreasing_r) > 1):
                globalTrendRate_summary += commaAnd(lineNames_decreasing_r) + " are rapidly decreasing, "
            elif (len(lineNames_decreasing_r) == 1):
                globalTrendRate_summary += commaAnd(lineNames_decreasing_r) + " is rapidly decreasing, "
            # Lines that gradually decrease
            if (len(lineNames_decreasing_g) > 1):
                globalTrendRate_summary += commaAnd(lineNames_decreasing_g) + " are gradually decreasing, "
            elif (len(lineNames_decreasing_g) == 1):
                globalTrendRate_summary += commaAnd(lineNames_decreasing_g) + " is gradually decreasing, "
            # Lines that stay constant
            if (len(lineNames_constant_c) > 1):
                globalTrendRate_summary += commaAnd(lineNames_constant_c) + " are roughly constant, "
            elif (len(lineNames_constant_c) == 1):
                globalTrendRate_summary += commaAnd(lineNames_constant_c) + " is roughly constant, "
            globalTrendRate_summary += " throughout the " + stringLabels[0] + ". "
            summaryArr.append(globalTrendRate_summary)
##Zig Zag
## If >zigZagNum points and lines not constant then they are considered zig zag
sum_zigzag_arr = []
if (len(yVals_sorted[0]) > zigZagNum and len(zig_zagLineNames) != 0):
sum_zigzag = str(commaAnd(zig_zagLineNames)) + " has in general many fluctuations."
sum_zigzag_arr.append(sum_zigzag)
sum_zigzag = "The lines" + str(commaAnd(zig_zagLineNames)) + " in general has a zig zag shape."
sum_zigzag_arr.append(sum_zigzag)
summaryArr.append(random.choice(sum_zigzag_arr))
            #### Order/Ranking of all lines given total no of lines is < 5
            sum_rank_arr = []
            if (len(sortedLines_descending) < 5):  # Given there are no more than 5 lines
                summary_rank1 = "The ranking of the lines from topmost to botommmost is as follows: "
                for i in range(0, len(sortedLines_descending) - 1):
                    summary_rank1 += str(i + 1) + ", " + sortedLines_descending[i][0] + ", "
                summary_rank1 += "and lastly, " + str(len(sortedLines_descending)) + ", " + \
                                 sortedLines_descending[len(sortedLines_descending) - 1][0] + ". "
                sum_rank_arr.append(summary_rank1)
                # 2nd Version of wording the sentence
                summary_rank2 = "The lines ordered according to average values of " + y_label + " in descending order is: "
                for i in range(0, len(sortedLines_descending) - 1):
                    summary_rank2 += str(i + 1) + ", " + sortedLines_descending[i][0] + ", "
                summary_rank2 += "and lastly, " + str(len(sortedLines_descending)) + ", " + \
                                 sortedLines_descending[len(sortedLines_descending) - 1][0] + ". "
                sum_rank_arr.append(summary_rank2)
                # Choose randomly between 2 versions
                summaryArr.append(random.choice(sum_rank_arr))
            ## Talks about the topmost line (two phrasings, one picked at random)
            summary2 = []
            summary2.append("During this period, " + str(maxLine[
                0]) + " generally had the highest " + y_label + " relative to others" + " with an average of " + str(
                meanOfTopmost) + ", and it reached its maximum at " + str(
                max_xVal_ofTopmost) + " with a value of " + str(
                max_yVal_ofTopmost) + ". ")  # revised
            # Version 2
            summary2.append("Overall across the " + stringLabels[0] + ", " + str(maxLine[
                0]) + " mostly maintained the highest " + y_label + " when compared to others" + " with a mean value of " + str(
                meanOfTopmost) + ", and it peaked at " + str(max_xVal_ofTopmost) + ". ")
            summaryArr.append(random.choice(summary2))
            ## Talks about the second topmost line (only with 3+ lines)
            summ_2top_arr = []
            if lineCount > 2:
                summary4 = "After " + str(maxLine[0]) + ", " + str(
                    secondLine[0]) + " overall has the second highest values " + ", with a mean value of " + str(
                    secondLine[1]) + ", peaking at " + str(secondXValue) + ". "
                summ_2top_arr.append(summary4)
                # Version 2
                summary4 = "Followed by " + str(
                    secondLine[0]) + " which ranks as the second topmost line " + ", with an average of " + str(
                    secondLine[1]) + " " + y_label + ",reaching its highest point at " + str(
                    secondXValue) + " with a value of " + str(secondLineData) + ". "
                summ_2top_arr.append(summary4)
                summaryArr.append(random.choice(summ_2top_arr))
            ## Talks about the bottomost line
            sum_bottom_arr = []
            summary6 = str(minLine[0]) + " mostly had the least " + y_label + " with a mean value of " + str(
                meanOfBotommost) + ", which peaked at " + str(max_xVal_ofBotommost) + " with a value of " + str(
                max_yVal_ofBotommost) + ". "
            sum_bottom_arr.append(summary6)
            # 2nd version
            summary6 = "The bottommost line, " + str(minLine[0]) + ", " + " has a mean of " + str(
                meanOfBotommost) + ", and peaked at " + str(max_xVal_ofBotommost) + ". "
            sum_bottom_arr.append(summary6)
            summaryArr.append(random.choice(sum_bottom_arr))
            # Additional summaries -shehnaz
            # Global Max: only mentioned when the topmost line does NOT hold the
            # global maximum, and at most 5 x positions are involved.
            sum_max_arr = []
            if (max_yVal_ofTopmost != max(maxLocal_array) and len(maxLine_xVals) < 5):
                summary8 = maxLineNames + " reported the highest " + y_label + " about " + str(
                    max(maxLocal_array)) + " in " + stringLabels[0] + " " + maxLine_xVals
                sum_max_arr.append(summary8)
                # 2nd Version
                summary8 = "The maximum " + y_label + " about " + str(
                    max(maxLocal_array)) + "," + " occured at " + maxLine_xVals + " by " + maxLineNames + ". "
                sum_max_arr.append(summary8)
                summaryArr.append(random.choice(sum_max_arr))
            # Global Min
            sum_min_arr = []
            if (len(minLine_xVals) < 5):  # given no more than 5 x values are reported
                summary9 = minLineNames + " reported the lowest " + y_label + " about " + str(
                    min(minLocal_array)) + " in " + stringLabels[0] + " " + minLine_xVals
                sum_min_arr.append(summary9)
                # Version 2
                summary9 = "The minimum " + y_label + " about " + str(
                    min(minLocal_array)) + "," + " occured at " + minLine_xVals + " by " + minLineNames + ". "
                sum_min_arr.append(summary9)
                summaryArr.append(random.choice(sum_min_arr))
            #### Global Trend without rate
            # (superseded commented-out draft removed)
            # Comparison: append the randomly selected percent/factor change
            # sentence when any line changed rapidly.
            if (len(lineNames_increasing_r) != 0 or len(lineNames_decreasing_r) != 0):
                summaryArr.append(selectedChange)
            # Gap
            ###### The gap between two lines (only for exactly-two-line charts)
            summary_Gap = []
            if (len(lineNames) == 2):
                summary10 = "The difference of " + y_label + " between " + lineNames[0] + " and " + lineNames[
                    1] + " is " + diff_direction + " at " + stringLabels[0] + " " + xVal_sorted[
                                -1] + " compared to the " + stringLabels[0] + " " + xVal_sorted[0] + ". "
                summary_Gap.append(summary10)
                summary11 = "The greatest difference of " + y_label + " between " + lineNames[0] + " and " + \
                            lineNames[1] + " occurs at " + stringLabels[0] + " " + str(
                    xVal_sorted[max_diff_indx[0]]) + " and the smallest difference occurs at " + str(
                    xVal_sorted[min_diff_indx[0]]) + ". "  # Assumes there is only one max and min gap or difference
                summary_Gap.append(summary11)
                summaryArr.append(random.choice(summary_Gap))
            ####### Min, Mid, Max Summaries
            # Three summary lengths assembled from the sentence pools above; the
            # mid-length one is what gets published as "summary".
            min_summary = []  # Minimum length summary
            mid_summary = []  # Medium length summary
            max_summary = []  # Maximum length summary
            min_summary.append(random.choice(summary1))  # intro
            min_summary.append(globalTrendRate_summary)  # Global Trend
            min_summary.append(random.choice(summary2))  # Topmost
            if (len(summ_2top_arr) != 0):
                min_summary.append(random.choice(summ_2top_arr))  # Second Topmost
            min_summary.append(random.choice(sum_bottom_arr))  # Botommost
            if (len(sum_zigzag_arr) != 0):
                min_summary.append(random.choice(sum_zigzag_arr))  # Zig Zag
            # Medium Summary
            mid_summary.append(random.choice(summary1))  # intro
            mid_summary.append(globalTrendRate_summary)  # Global Trend
            if (len(lineNames_increasing_r) != 0 or len(lineNames_decreasing_r) != 0):
                mid_summary.append(selectedChange)  # Comparison
            if (len(sum_rank_arr) != 0):
                mid_summary.append(random.choice(sum_rank_arr))  # Order/Rank
            mid_summary.append(random.choice(summary2))  # Topmost
            if (len(summ_2top_arr) != 0):
                mid_summary.append(random.choice(summ_2top_arr))  # Second Topmost
            mid_summary.append(random.choice(sum_bottom_arr))  # Botommost
            if (len(sum_zigzag_arr) != 0):
                mid_summary.append(random.choice(sum_zigzag_arr))  # Zig Zag
            # Maximum Summary
            max_summary.append(random.choice(summary1))  # intro
            max_summary.append(globalTrendRate_summary)  # Global Trend
            if (len(lineNames_increasing_r) != 0 or len(lineNames_decreasing_r) != 0):
                max_summary.append(selectedChange)  # Comparison
            if (len(sum_rank_arr) != 0):
                max_summary.append(random.choice(sum_rank_arr))  # Order/Rank
            max_summary.append(random.choice(summary2))  # Topmost
            if (len(summ_2top_arr) != 0):
                max_summary.append(random.choice(summ_2top_arr))  # Second Topmost
            max_summary.append(random.choice(sum_bottom_arr))  # Botommost
            if (len(sum_max_arr) != 0):
                max_summary.append(random.choice(sum_max_arr))  # Global Max
            if (len(sum_min_arr) != 0):
                max_summary.append(random.choice(sum_min_arr))  # Global Min
            if (len(sum_zigzag_arr) != 0):
                max_summary.append(random.choice(sum_zigzag_arr))  # Zig Zag
            if (len(summary_Gap) != 0):
                max_summary.append(random.choice(summary_Gap))  # Gap (if 2 lines only )
            # NOTE(review): "/n" below is a literal, not a newline — debug print only.
            print("max_summary" + str(max_summary) + "/n")
            summaryArray = mid_summary
            # Trend highlight coordinates: [row, column] markers for the max-mean
            # line (index1) and second line (index2) and their peaks.
            trendsArray = [{},
                           {"2": ["0", str(index1)], "16": [str(rowCount - 1), str(index1)]},
                           {"1": [str(rowIndex1), str(index1)], "9": [str(rowIndex1), str(index1)]},
                           {"2": ["0", str(index2)], "15": [str(rowCount - 1), str(index2)]},
                           {"1": [str(rowIndex2), str(index2)], "10": [str(rowIndex2), str(index2)]}
                           ]
            # NOTE(review): dataJson is not rebuilt in this "line" branch — it is
            # reused from an earlier branch (outside this view); confirm intended.
            websiteInput = {"title": title.strip(),
                            "labels": [' '.join(label) for label in labelArr],
                            "columnType": "multi",
                            "graphType": chartType, "summaryType": "baseline", "summary": summaryArray,
                            "xAxis": x_label,
                            "yAxis": y_label,
                            "min_summary": min_summary,
                            "mid_summary": mid_summary,
                            "max_summary": max_summary,
                            "trends": trendsArray,
                            "data": dataJson}
            with open(f'{websitePath}/{name}.json', 'w', encoding='utf-8') as websiteFile:
                json.dump(websiteInput, websiteFile, indent=3)
            # oneFile.writelines(''.join(summaryArr)+'\n')
else:
            # --- Two-column charts (pie/bar): parse "label|value|..." cells. ---
            # datum alternates x cells (even indices) and y cells (odd indices);
            # each cell is '|'-separated with the label in field 0, value in field 1.
            xValueArr = []
            yValueArr = []
            cleanXArr = []
            cleanYArr = []
            xLabel = ' '.join(datum[0].split('|')[0].split('_'))
            yLabel = ' '.join(datum[1].split('|')[0].split('_'))
            # Chart type is encoded in field 3 of the first cell.
            chartType = datum[0].split('|')[3].split('_')[0]
            print(xLabel)
            print(yLabel)
            print(chartType)
            for i in range(0, len(datum)):
                if i % 2 == 0:
                    xValueArr.append((datum[i].split('|')[1]))
                    cleanXArr.append((datum[i].split('|')[1].replace('_', ' ')))
                else:
                    # Keep digits and '.' only, then parse as float.
                    yValueArr.append(float(re.sub("[^\d\.]", "", datum[i].split('|')[1])))
                    cleanYArr.append(float(re.sub("[^\d\.]", "", datum[i].split('|')[1])))
            titleArr = title.split()
            # Basic statistics used by the sentence templates below.
            maxValue = str(max(yValueArr))
            minValue = str(min(yValueArr))
            maxValueIndex = pd.Series(yValueArr).idxmax()
            minValueIndex = pd.Series(yValueArr).idxmin()
            summaryArray = []
            totalValue = sum(yValueArr)
            avgValueOfAllBars = totalValue / len(yValueArr)
            maxPercentage = int(math.ceil((max(yValueArr) / totalValue) * 100.00))
            minPercentage = int(math.ceil((min(yValueArr) / totalValue) * 100.00))
            position_in_X_axis_for_second_max_value = ""  # Added to deal with following error: UnboundLocalError: local variable 'secondMaxIndex' referenced before assignment
            # Second/third largest values only exist with 3+ categories.
            if len(xValueArr) > 2:
                sortedDataY = sorted(yValueArr, reverse=True)
                secondMaxPercentage = int(math.ceil((int(sortedDataY[1]) / totalValue) * 100))
                secondMaxIndex = 0
                thirdMaxIndex = 0
                for a in range(len(yValueArr)):
                    if yValueArr[a] == sortedDataY[1]:
                        secondMaxIndex = a
                    if yValueArr[a] == sortedDataY[2]:
                        thirdMaxIndex = a
                position_in_X_axis_for_second_max_value = str(xValueArr[secondMaxIndex])
                position_in_X_axis_for_second_max_value = position_in_X_axis_for_second_max_value.replace("_", " ")
                y_axis_for_second_max_value = str(yValueArr[secondMaxIndex])
                position_in_X_axis_for_third_max_value = str(xValueArr[thirdMaxIndex]).replace("_", " ")
                y_axis_for_third_max_value = str(yValueArr[thirdMaxIndex])
            num_of_category = str(len(xValueArr))
            position_in_X_axis_for_max_value = str(xValueArr[maxValueIndex])
            position_in_X_axis_for_max_value = position_in_X_axis_for_max_value.replace("_", " ")
            y_axis_for_max_value = str(yValueArr[maxValueIndex])
            position_in_X_axis_for_min_value = str(xValueArr[minValueIndex])
            position_in_X_axis_for_min_value = position_in_X_axis_for_min_value.replace("_", " ")
            y_axis_for_min_value = str(yValueArr[minValueIndex])
            # Ratio/difference statistics shared by the pie and bar templates.
            if (chartType == "pie" or chartType == "bar"):
                if type(yValueArr[maxValueIndex]) == int or type(yValueArr[maxValueIndex]) == float:
                    try:
                        proportion = round((yValueArr[maxValueIndex] / yValueArr[minValueIndex]), 2)
                    except ZeroDivisionError:
                        proportion = round((yValueArr[maxValueIndex] / 0.00000000001), 2)  # To avoid x/0 math error
                    max_avg_diff_rel = round((yValueArr[maxValueIndex] / avgValueOfAllBars), 2)
                    max_min_diff = (yValueArr[maxValueIndex] - yValueArr[minValueIndex])
                    max_avg_diff_abs = (yValueArr[maxValueIndex] - avgValueOfAllBars)
                    median_val = median(yValueArr)
                else:
                    print('The variable is not a number')
# run pie
if (chartType == "pie"):
    # --- Pie-chart summary ----------------------------------------------
    # Four fixed sentences: intro, largest slice, second-largest, smallest,
    # then the chart-description JSON consumed by the website front end.
    #
    # BUGFIX: secondMaxIndex / secondMaxPercentage are only computed when
    # there are more than two categories (see the `len(xValueArr) > 2`
    # block above), so a pie with <= 2 slices previously raised NameError
    # in summary3. With two slices the "second largest" is the smallest
    # slice, so fall back to the minimum.
    if len(xValueArr) <= 2:
        secondMaxIndex = minValueIndex
        secondMaxPercentage = minPercentage
    summary1 = "This is a pie chart showing the distribution of " + str(
        len(xValueArr)) + " different " + xLabel + ". "
    summary2 = xValueArr[maxValueIndex] + " " + xLabel + " has the highest proportion with " + str(
        maxPercentage) + "% of the pie chart area"
    summary3 = "followed by " + xLabel + " " + xValueArr[secondMaxIndex] + ", with a proportion of " + str(
        secondMaxPercentage) + "%. "
    summary4 = "Finally, " + xLabel + " " + xValueArr[
        minValueIndex] + " has the minimum contribution of " + str(minPercentage) + "%."
    summaryArray.append(summary1)
    summaryArray.append(summary2)
    summaryArray.append(summary3)
    summaryArray.append(summary4)
    # One data record per slice, using the cleaned (display-ready) values.
    dataJson = [{xLabel: xVal, yLabel: yVal} for xVal, yVal in zip(cleanXArr, cleanYArr)]
    trendsArray = [{}]
    websiteInput = {"title": title, "name": xLabel, "percent": yLabel,
                    "columnType": "two",
                    "graphType": chartType, "summaryType": "baseline", "summary": summaryArray,
                    "trends": trendsArray,
                    "data": dataJson}
    with open(f'{websitePath}/{name}.json', 'w', encoding='utf-8') as websiteFile:
        json.dump(websiteInput, websiteFile, indent=3)
# run bar
elif (chartType == "bar"):
secondMaxIndex = 0 # to deal with error: local variable 'secondMaxIndex' referenced before assignment
intro = []
intro.append(
"This is a bar chart representing " + xLabel + " in the x axis and " + yLabel + " in the y axis. ")
intro.append("This bar chart has " + str(
len(xValueArr)) + " columns on the x axis representing " + xLabel + ", and " + yLabel + " in each " + xLabel + " on the y axis. ")
intro.append("This is a bar chart. It shows " + yLabel + " for " + str(
len(xValueArr)) + " number of " + xLabel + "s. ")
print("INTRO : " + intro[random.randint(0, len(intro) - 1)])
print(intro)
summaryArray.append(intro[random.randint(0, len(intro) - 1)])
# Extrema [max/min]
summary2_extrema_max_min = []
summary2_extrema_max_min.append(
"The maximum " + yLabel + " " + str(yValueArr[
maxValueIndex]) + " is found at " + xLabel + " " + position_in_X_axis_for_max_value + " and the minimum is found at " + position_in_X_axis_for_min_value + " where " + yLabel + " is " + str(
yValueArr[minValueIndex]) + ". ")
summary2_extrema_max_min.append(
"The " + yLabel + " is highest at " + xLabel + " " + position_in_X_axis_for_max_value + " and lowest at " + xLabel + " " + position_in_X_axis_for_min_value + ". ")
summary2_extrema_max_min.append(
xLabel + " " + position_in_X_axis_for_max_value + " has the highest " + yLabel + " and " + position_in_X_axis_for_min_value + " has the lowest " + yLabel + ". ")
summary2_extrema_max_min.append(
"The " + yLabel + " is appeared to be the highest at " + xLabel + " " + position_in_X_axis_for_max_value + " and lowest at " + xLabel + " " + position_in_X_axis_for_min_value + ". ")
print("summary2_extrema_max_min")
print(summary2_extrema_max_min)
print(
"Extrema [max/min] : " + summary2_extrema_max_min[random.randint(0, len(summary2_extrema_max_min) - 1)])
summaryArray.append(summary2_extrema_max_min[random.randint(0, len(summary2_extrema_max_min) - 1)])
global_trend_text = []
# Trend [Pos/Neg]
if xLabel.lower() == "year" or xLabel.lower() == "years" or xLabel.lower() == "month" or xLabel.lower() == "months" or xLabel.lower() == "quarter" or xLabel.lower() == "quarters":
single_bar_trend = globalTrendBarChart(yValueArr)
global_trend_text.append(
"Overall " + yLabel + " has " + single_bar_trend + " over the " + xLabel + "s. ")
global_trend_text.append("The " + yLabel + " has " + single_bar_trend + " over the past " + str(
len(yValueArr)) + " " + xLabel + "s. ")
global_trend_text.append("Over the past " + str(
len(yValueArr)) + " " + xLabel + "s, the " + yLabel + " has " + single_bar_trend + ". ")
print("Trend [Pos/Neg] : " + global_trend_text[random.randint(0, len(global_trend_text) - 1)])
summaryArray.append(global_trend_text[random.randint(0, len(global_trend_text) - 1)])
print("global_trend_text")
print(global_trend_text)
# Order [position]
summary3_order_2nd_max = []
if len(xValueArr) > 2:
summary3_order_2nd_max.append(
"The second highest " + yLabel + " is appeared to be the " + xLabel + " " + position_in_X_axis_for_second_max_value + ". ")
summary3_order_2nd_max.append(
"Second maximum " + yLabel + " is found at " + xLabel + " " + position_in_X_axis_for_second_max_value + ". ")
summary3_order_2nd_max.append(
xLabel + " " + position_in_X_axis_for_second_max_value + " has the second highest value for " + yLabel + ". ")
print(
"Order [position] : " + summary3_order_2nd_max[random.randint(0, len(summary3_order_2nd_max) - 1)])
print("summary3_order_2nd_max")
print(summary3_order_2nd_max)
# Order [rank]
summary_order_rank = []
if len(xValueArr) > 3:
summary_order_rank.append(
"The " + xLabel + " " + position_in_X_axis_for_max_value + " has the highest " + yLabel + ", followed by " + position_in_X_axis_for_second_max_value + ", and " + position_in_X_axis_for_third_max_value + ". Down to the " + xLabel + " " + position_in_X_axis_for_min_value + " which is the lowest. ")
summary_order_rank.append(
xLabel + " " + position_in_X_axis_for_max_value + " is higher than any other " + xLabel + "s with value " + str(
yValueArr[
maxValueIndex]) + ", followed by " + position_in_X_axis_for_second_max_value + ", and " + position_in_X_axis_for_third_max_value + ". Down to " + xLabel + " " + position_in_X_axis_for_min_value + " with the lowest value " + str(
yValueArr[minValueIndex]) + ". ")
summary_order_rank.append(
yLabel + " at " + xLabel + " " + position_in_X_axis_for_max_value + " is " + str(yValueArr[
maxValueIndex]) + " , second place is " + position_in_X_axis_for_second_max_value + " at " + str(
yValueArr[
secondMaxIndex]) + ", and thirdly is " + position_in_X_axis_for_third_max_value + " at " + str(
yValueArr[thirdMaxIndex]) + ". ")
print("Order [rank] : " + summary_order_rank[random.randint(0, len(summary_order_rank) - 1)])
summaryArray.append(summary_order_rank[random.randint(0, len(summary_order_rank) - 1)])
# Comparison [Absolute]
comparison_abs = []
comparison_abs.append("There is a difference of " + str(round(max_min_diff,
2)) + " between the maximum " + xLabel + " " + position_in_X_axis_for_max_value + " and minimum " + xLabel + " " + position_in_X_axis_for_min_value + ". ")
comparison_abs.append(
"The difference of " + yLabel + " between the highest and lowest " + xLabel + " is " + str(
round(max_min_diff, 2)) + ". ")
comparison_abs.append("The highest " + xLabel + " " + position_in_X_axis_for_max_value + " has " + str(
round(max_min_diff,
2)) + " more " + yLabel + " than the lowest " + xLabel + " " + position_in_X_axis_for_min_value + ". ")
print("Comparison [Absolute] : " + comparison_abs[random.randint(0, len(comparison_abs) - 1)])
# Comparison [Relative]
comparison_rel = []
comparison_rel.append(xLabel + " " + position_in_X_axis_for_max_value + " has " + str(
proportion) + " times more " + yLabel + " than " + xLabel + " " + position_in_X_axis_for_min_value + " which is has the lowest. ")
comparison_rel.append(xLabel + " " + position_in_X_axis_for_min_value + " has " + str(
proportion) + " times less " + yLabel + " than " + xLabel + " " + position_in_X_axis_for_max_value + " which is the highest. ")
comparison_rel.append(
"The highest value at " + xLabel + " " + position_in_X_axis_for_max_value + " is " + str(
proportion) + "x times more than the lowest value at " + position_in_X_axis_for_min_value + ". ")
comparison_rel.append(
"The lowest value at " + xLabel + " " + position_in_X_axis_for_min_value + " is " + str(
proportion) + "x times less than the highest value at " + position_in_X_axis_for_max_value + ". ")
comparison_rel.append(
"The " + yLabel + " of " + xLabel + " " + position_in_X_axis_for_max_value + " is " + str(
proportion) + "% larger than the minimum value at " + position_in_X_axis_for_min_value + ". ")
comparison_rel.append(
"The " + yLabel + " of " + xLabel + " " + position_in_X_axis_for_min_value + " is " + str(
proportion) + "% smaller than the maximum value at " + position_in_X_axis_for_max_value + ". ")
comparison_rel.append("The maximum " + xLabel + " " + position_in_X_axis_for_max_value + " has got " + str(
proportion) + " times higher " + yLabel + " than the minimum " + xLabel + " " + position_in_X_axis_for_min_value + ". ")
comparison_rel.append("The minimum " + xLabel + " " + position_in_X_axis_for_min_value + " has got " + str(
proportion) + " times less " + yLabel + " than the maximum " + xLabel + " " + position_in_X_axis_for_max_value + ". ")
print("Comparison [Relative] : " + comparison_rel[random.randint(0, len(comparison_rel) - 1)])
if float(random.uniform(0, 1)) > 0.75:
summaryArray.append(comparison_rel[random.randint(0, len(comparison_rel) - 1)])
else:
summaryArray.append(comparison_abs[random.randint(0, len(comparison_abs) - 1)])
# Compute derived val [avg]
derived_val_avg = []
derived_val_avg.append(
"The average " + yLabel + " in all " + str(len(yValueArr)) + " " + xLabel + "s is " + str(
round(avgValueOfAllBars, 2)) + ". ")
derived_val_avg.append("The average " + yLabel + " in all " + str(
len(yValueArr)) + " " + xLabel + "s is roughly " + str(round(avgValueOfAllBars, 2)) + ". ")
print("Compute derived val [avg] : " + derived_val_avg[random.randint(0, len(derived_val_avg) - 1)])
# Comparison [Relative, vs Avg]
comparison_rel_with_avg = []
comparison_rel_with_avg.append("The highest value " + str(
yValueArr[maxValueIndex]) + " at " + position_in_X_axis_for_max_value + " is almost " + str(
max_avg_diff_rel) + " times larger than the average value " + str(round(avgValueOfAllBars, 2)) + ". ")
comparison_rel_with_avg.append("The lowest value " + str(
yValueArr[minValueIndex]) + " at " + position_in_X_axis_for_min_value + " is almost " + str(
max_avg_diff_rel) + " times smaller than the average value " + str(round(avgValueOfAllBars, 2)) + ". ")
comparison_rel_with_avg.append("The " + xLabel + " " + position_in_X_axis_for_max_value + " has " + str(
max_avg_diff_rel) + " times more " + yLabel + " than average. ")
comparison_rel_with_avg.append("The " + xLabel + " " + position_in_X_axis_for_min_value + " has " + str(
max_avg_diff_rel) + " times less " + yLabel + " than average. ")
comparison_rel_with_avg.append(
"The " + xLabel + " " + position_in_X_axis_for_max_value + " tends to be " + str(
max_avg_diff_rel) + " percent higher than average. ")
comparison_rel_with_avg.append(
"The " + xLabel + " " + position_in_X_axis_for_min_value + " tends to be " + str(
max_avg_diff_rel) + " percent lower than average. ")
print("Comparison [Relative, vs Avg] : " + comparison_rel_with_avg[
random.randint(0, len(comparison_rel_with_avg) - 1)])
if float(random.uniform(0, 1)) > 0.75:
summaryArray.append(comparison_rel_with_avg[random.randint(0, len(comparison_rel_with_avg) - 1)])
else:
summaryArray.append(derived_val_avg[random.randint(0, len(derived_val_avg) - 1)])
# Compute derived val [sum]
sum_text = []
sum_text.append(
"The " + yLabel + " is " + str(round(totalValue, 2)) + " if we add up values of all " + xLabel + "s. ")
sum_text.append(
"Summing up the values of all " + xLabel + "s, we get total " + str(round(totalValue, 2)) + ". ")
print("Compute derived val [sum] : " + sum_text[random.randint(0, len(sum_text) - 1)])
summaryArray.append(sum_text[random.randint(0, len(sum_text) - 1)])
# Compute derived val [shared value]
shared_value = []
res = checkIfDuplicates(yValueArr)
if res:
# print('Yes, list contains duplicates')
most_freq_value = most_frequent(yValueArr)
most_freq_pos = []
most_freq_x_label = []
for i in range(len(yValueArr)):
if yValueArr[i] == most_freq_value:
most_freq_pos.append(i)
most_freq_x_label.append(xValueArr[i])
shared_value_labels = ""
for a in range(len(most_freq_x_label)):
if a == len(most_freq_x_label) - 1:
shared_value_labels += "and " + str(most_freq_x_label[a]).replace('_', ' ')
else:
shared_value_labels += str(most_freq_x_label[a]).replace('_', ' ') + ", "
shared_value.append(
xLabel + " " + shared_value_labels + " have a similar " + yLabel + " that is " + str(
most_freq_value) + ". ")
shared_value.append(
xLabel + " " + shared_value_labels + " share the same value " + str(most_freq_value) + ". ")
shared_value.append(xLabel + " " + shared_value_labels + " have the same " + yLabel + ". ")
shared_value.append("Similar " + yLabel + " is found in " + xLabel + " " + shared_value_labels + ". ")
print("Compute derived val [shared value] : " + shared_value[random.randint(0, len(shared_value) - 1)])
summaryArray.append(shared_value[random.randint(0, len(shared_value) - 1)])
min_summary = []
mid_summary = []
max_summary = []
min_summary.append(random.choice(intro))
min_summary.append(random.choice(summary2_extrema_max_min))
if len(global_trend_text) > 0:
min_summary.append(random.choice(global_trend_text))
mid_summary.append(random.choice(intro))
mid_summary.append(random.choice(summary2_extrema_max_min))
if len(global_trend_text) > 0:
mid_summary.append(random.choice(global_trend_text))
if float(random.uniform(0, 1)) > 0.75:
mid_summary.append(random.choice(comparison_rel))
else:
mid_summary.append(random.choice(comparison_abs))
max_summary.append(random.choice(intro))
max_summary.append(random.choice(summary2_extrema_max_min))
if len(global_trend_text) > 0:
min_summary.append(random.choice(global_trend_text))
if float(random.uniform(0, 1)) > 0.35 and len(summary3_order_2nd_max) > 0:
max_summary.append(random.choice(summary3_order_2nd_max))
if float(random.uniform(0, 1)) > 0.75:
max_summary.append(random.choice(comparison_rel))
else:
max_summary.append(random.choice(comparison_abs))
if len(summary_order_rank) > 0:
max_summary.append(random.choice(summary_order_rank))
if len(shared_value) > 0:
max_summary.append(random.choice(shared_value))
if float(random.uniform(0, 1)) > 0.75:
max_summary.append(random.choice(derived_val_avg))
else:
max_summary.append(random.choice(comparison_rel_with_avg))
if float(random.uniform(0, 1)) > 0.35:
max_summary.append(random.choice(sum_text))
print("max_summary")
print(max_summary)
summaryArray = mid_summary
trendsArray = [{}, {"7": maxValueIndex, "12": maxValueIndex},
{"7": minValueIndex, "12": minValueIndex}, {}]
dataJson = [{xLabel: xVal, yLabel: yVal} for xVal, yVal in zip(cleanXArr, cleanYArr)]
websiteInput = {"title": title, "xAxis": xLabel, "yAxis": yLabel,
"columnType": "two",
"graphType": chartType, "summaryType": "baseline", "summary": summaryArray,
"min_summary": min_summary,
"mid_summary": mid_summary,
"max_summary": max_summary,
"trends": trendsArray,
"data": dataJson}
with open(f'{websitePath}/{name}.json', 'w', encoding='utf-8') as websiteFile:
json.dump(websiteInput, websiteFile, indent=3)
# oneFile.writelines(''.join(summaryArray)+'\n')
## for single line charts
# run line
elif (chartType == "line"):
trendArray = []
numericXValueArr = []
for xVal, index in zip(xValueArr, range(
len(xValueArr))): # Every x value is assigned an index from 0 to 11 (e.g. xval1: 0, xval2: 1)
if xVal.isnumeric():
numericXValueArr.append(float(xVal))
else:
# see if regex works better
cleanxVal = re.sub("[^\d\.]", "", xVal)
if len(cleanxVal) > 0:
numericXValueArr.append(float(cleanxVal[:4]))
else:
numericXValueArr.append(float(index))
# determine local trends
graphTrendArray = []
i = 1
# calculate variance between each adjacent y values
# print(xValueArr)
# print(yValueArr)
##For jason's smoothing
while i < (len(yValueArr)):
variance1 = float(yValueArr[i]) - float(yValueArr[
i - 1]) # 2nd yVal- Prev yVal # Note that xValueArr and yValueArr are ordered such that the start values are written at the end of the array
if (variance1 > 0):
type1 = "decreasing" # Drop/ falls/ goes down
elif (variance1 < 0):
type1 = "increasing" # Rise/ goes up
else:
type1 = "constant" # Stays the same
trendArray.append(type1)
i = i + 1
##### end of jason code
##Finding the direction of trend -shehnaz
yVals_float = yValueArr # yVals_float= stringToFloat(yValueArr)
yVal = np.array(yVals_float).astype(np.float) # yVal is now in float type
# print(xValueArr)
# print(yVal)
coordinates = dict(zip(xValueArr, yVal))
# print(coordinates)
sorted_coordinates = dict(sorted(coordinates.items()))
# print(sorted_coordinates)
keys, values = zip(*sorted_coordinates.items()) # keys, values = zip(sorted_coordinates.items())
print(keys)
print(values)
yValueArrCorrectOrder = np.array(values) # yValueArr[len(yValueArr)::-1] ## Ordered correctly this time
xValueArrCorrectOrder = np.array(keys) # xValueArr[len(xValueArr)::-1] ## Ordered correctly this time
############# GlobalTrend ##############
globalDifference = float(yValueArrCorrectOrder[len(yValueArrCorrectOrder) - 1]) - float(
yValueArrCorrectOrder[0])
globalPercentChange = (globalDifference / float(yValueArr[len(yValueArr) - 1])) * 100
############# LocalTrend ##############
varianceArray = []
### Percentage change appraoch
percentArray = []
# directionArray = []
i = 1
while i < (len(yValueArrCorrectOrder)):
old = yValueArrCorrectOrder[i - 1]
if (old == 0 or old == 0.0):
old = 0.00000000001
variance1 = float(yValueArrCorrectOrder[i]) - float(
old) # 2nd yVal- Prev yVal # Note that xValueArr and yValueArr are ordered such that the start values are written at the end of the array
localPercentChange = (variance1 / float(old)) * 100
varianceArray.append(variance1)
percentArray.append(localPercentChange)
# directionArray.append(d)
i = i + 1
varianceArrayCorrectOrder = varianceArray # varianceArray[len(varianceArray)::-1] ## Ordered correctly this time
percentArrayCorrectOrder = percentArray # percentArray[len(percentArray)::-1] ## Ordered correctly this time
# print(varianceArrayCorrectOrder)
# print(percentArrayCorrectOrder) #neww
## percentArray Appraoch
## Mean of abs_percentArrayCorrectOrder
abs_percentArrayCorrectOrder = [abs(number) for number in percentArrayCorrectOrder] # neww
# print(abs_percentArrayCorrectOrder)
mean_percentArray = mean(abs_percentArrayCorrectOrder) # mean of abosulte values of percentArray
constant_rate = c_rate * mean_percentArray # avg(% chnage)*0.1 # Meaning any chnage less than 5% is considered roughly constant slope # Determines if a trend is increasing, decreasing or constant
significant_rate = s_rate * mean_percentArray
gradually_rate = g_rate * mean_percentArray
rapidly_rate = r_rate * mean_percentArray
directionArray = []
i = 1
while i < (len(yValueArrCorrectOrder)):
d = directionTrend(yValueArrCorrectOrder[i],
yValueArrCorrectOrder[i - 1],
constant_rate) # direction e.g. increase, decrease or constant
directionArray.append(d)
i = i + 1
# print("Orginal Direction Trend:")
# print(directionArray)
### Previously indexs reported for only increasing and decresing trends
# trendChangeIdx = []
# for idx in range(0, len(varianceArrayCorrectOrder) - 1):
# # checking for successive opposite index
# if varianceArrayCorrectOrder[idx] > 0 and varianceArrayCorrectOrder[idx + 1] < 0 or varianceArrayCorrectOrder[idx] < 0 and varianceArrayCorrectOrder[idx + 1] > 0:
# trendChangeIdx.append(idx)
# print("Sign shift indices : " + str(trendChangeIdx))
# percentArray approach to smoothing
## Smoothing directionArray. If percentChange >10% then direction of trend is that of the next interval (regardless if it was increasing or decreasing)
directionArraySmoothed = []
for idx in range(0, len(percentArrayCorrectOrder) - 1): # neww
# checking for percent chnage >5% (not constant) and <10% (not significant) and chnaging their direction to be the direction of the succesive interval
if (abs(percentArrayCorrectOrder[idx]) > constant_rate and abs(
percentArrayCorrectOrder[idx]) < significant_rate): # neww
d = directionArray[idx + 1]
directionArraySmoothed.append(d)
else:
directionArraySmoothed.append(directionArray[idx])
directionArraySmoothed.append(directionArray[len(
percentArrayCorrectOrder) - 1]) # neww # The last value doesn't have a succesive interval so it will be appended as is
# print("Smoothed Direction Trend:")
# print(directionArraySmoothed)
# constant_rate = meanSlope- 1*(sdSlope)
# significant_rate = meanSlope
# gradually_rate= meanSlope+ 1*(sdSlope)
# rapidly_rate= meanSlope + 2*(sdSlope)
# slopeArray approach to smoothing
## Smoothing directionArray. If percentChange >10% then direction of trend is that of the next interval (regardless if it was increasing or decreasing)
# directionArraySmoothed = []
# for idx in range(0, len(normalized_slopeArray) - 1): #neww
# # checking for percent chnage >5% (not constant) and <10% (not significant) and chnaging their direction to be the direction of the succesive interval
# if (abs(normalized_slopeArray[idx]) > constant_rate and abs(normalized_slopeArray[idx]) < significant_rate): #neww
# d = directionArray[idx + 1]
# directionArraySmoothed.append(d)
# else:
# directionArraySmoothed.append(directionArray[idx])
# directionArraySmoothed.append(directionArray[len(
# normalized_slopeArray) - 1]) #neww # The last value doesn't have a succesive interval so it will be appended as is
# print("Smoothed Direction Trend:")
# print(directionArraySmoothed)
trendChangeIdx = []
for idx in range(0, len(directionArraySmoothed) - 1):
# checking for successive opposite index
if directionArraySmoothed[idx] != directionArraySmoothed[idx + 1]:
trendChangeIdx.append(idx)
print("Sign shift indices : " + str(trendChangeIdx))
# yValueArrCorrectOrder = yValueArr[len(yValueArr)::-1] ## Ordered correctly this time
# print(yValueArrCorrectOrder)
# xValueArrCorrectOrder = xValueArr[len(xValueArr)::-1] ## Ordered correctly this time
# print(xValueArrCorrectOrder)
# trendArrayCorrectOrder = trendArray[len(trendArray)::-1] # no need since have my own directionArray now ordered correctly
# print(trendArrayCorrectOrder)
# print(trendChangeIdx)
# Slope Approach
## Find the new slopes for the trendChangeIdx points
# refinedSlope_array= []
refinedPercentChnage_array = []
x = 0
# max_y= max(yValueArrCorrectOrder)
if trendChangeIdx: # if trendChangeIdx is not empty
for i in trendChangeIdx:
if (x == 0):
# neumerator= yValueArrCorrectOrder[i+1]- yValueArrCorrectOrder[0]
# denominator= (i+1)- (0)
# slope= neumerator/denominator
# refinedSlope_array.append(slope)
new = yValueArrCorrectOrder[i + 1]
old = yValueArrCorrectOrder[0]
# percentChange= ((new-old)/old)*100
percentChange = percentChnageFunc(new, old) # to account for error: float division by zero
refinedPercentChnage_array.append(percentChange)
# localPercentChange= percentChnageRangeFunc(new, old, max_y)
# refinedPercentChnage_array.append(localPercentChange)
elif (x > 0 or x < len(trendChangeIdx) - 1):
# neumerator= yValueArrCorrectOrder[i+1]- yValueArrCorrectOrder[trendChangeIdx[x - 1] + 1]
# denominator= (i+1)- (trendChangeIdx[x - 1] + 1)
# slope= neumerator/denominator
# refinedSlope_array.append(slope)
new = yValueArrCorrectOrder[i + 1]
old = yValueArrCorrectOrder[trendChangeIdx[x - 1] + 1]
# percentChange= ((new-old)/old)*100
percentChange = percentChnageFunc(new, old) # to account for error: float division by zero
refinedPercentChnage_array.append(percentChange)
# localPercentChange= percentChnageRangeFunc(new, old, max_y)
# refinedPercentChnage_array.append(localPercentChange)
x = x + 1
# neumerator= yValueArrCorrectOrder[-1]- yValueArrCorrectOrder[trendChangeIdx[-1] + 1]
# denominator= (x)- (trendChangeIdx[-1] + 1)
# slope= neumerator/denominator
# refinedSlope_array.append(slope)
new = yValueArrCorrectOrder[-1]
old = yValueArrCorrectOrder[trendChangeIdx[-1] + 1]
# percentChange= ((new-old)/old)*100
percentChange = percentChnageFunc(new, old) # to account for error: float division by zero
refinedPercentChnage_array.append(percentChange)
# localPercentChange= percentChnageRangeFunc(new, old, max_y)
# refinedPercentChnage_array.append(localPercentChange)
else:
# neumerator= yValueArrCorrectOrder[-1]- yValueArrCorrectOrder[0]
# denominator= (len(yValueArrCorrectOrder)-1)- 0
# slope= neumerator/denominator
# refinedSlope_array.append(slope)
new = yValueArrCorrectOrder[-1]
old = yValueArrCorrectOrder[0]
# percentChange= ((new-old)/old)*100
percentChange = percentChnageFunc(new, old) # to account for error: float division by zero
refinedPercentChnage_array.append(percentChange)
# localPercentChange= percentChnageRangeFunc(new, old, max_y)
# refinedPercentChnage_array.append(localPercentChange)
# print("Refined Slope")
# print(refinedSlope_array)
print("Refined Percent Change")
print(refinedPercentChnage_array)
# Mean of abs_refinedPercentChnage_array
abs_refinedPercentChnage_array = [abs(number) for number in refinedPercentChnage_array] # neww
# print(abs_percentArrayCorrectOrder)
mean_abs_refinedPercentChnage = mean(
abs_refinedPercentChnage_array) # mean of abosulte values of percentArray
print(mean_abs_refinedPercentChnage)
# sd_abs_refinedPercentChnage= stdev(abs_refinedPercentChnage_array)
# print(sd_abs_refinedPercentChnage)
constant_rate = c_rate * mean_abs_refinedPercentChnage # avg(% chnage)*0.1 # Meaning any chnage less than 5% is considered roughly constant slope # Determines if a trend is increasing, decreasing or constant
significant_rate = s_rate * mean_abs_refinedPercentChnage
gradually_rate = g_rate * mean_abs_refinedPercentChnage
rapidly_rate = r_rate * mean_abs_refinedPercentChnage
# constant_rate = mean_abs_refinedPercentChnage- 1*(sd_abs_refinedPercentChnage) # avg(% chnage)*0.1 # Meaning any chnage less than 5% is considered roughly constant slope # Determines if a trend is increasing, decreasing or constant
# significant_rate = mean_abs_refinedPercentChnage# avg(% chnage)*0.1 # Meaning any chnage >constant rate and less than this rate is considered not significant and so it's trend direction is chnaged to the trend of the succesive interval # Determines the start and end of the trend
# gradually_rate= mean_abs_refinedPercentChnage+ 1*(sd_abs_refinedPercentChnage)
# rapidly_rate= mean_abs_refinedPercentChnage+ 2*(sd_abs_refinedPercentChnage)
# Trying out the percentage using max-0 range of charts instead of dividing by old
# constant_rate = constant
# gradually_rate= gradual
# rapidly_rate= rapid
print(constant_rate)
print(significant_rate)
print(gradually_rate)
print(rapidly_rate)
## Normalize refined Slope
# abs_refinedSlope_array= [abs(number) for number in refinedSlope_array] #neww
# print(abs_refinedSlope_array)
# normalized_refinedSlope_array= []
# minValRefinedSlope= min(abs_refinedSlope_array)
# maxValRefinedSlope= max(abs_refinedSlope_array)
# for i in range(0, len(abs_refinedSlope_array)):
# normalized_slope= (100*(abs_refinedSlope_array[i]- minValRefinedSlope))/(maxValRefinedSlope-minValRefinedSlope)
# normalized_refinedSlope_array.append(normalized_slope)
# print("normalized_refinedSlopeArray")
# meanRefinedSlope= mean(abs_refinedSlope_array)
# sdRefinedSlope= stdev(abs_refinedSlope_array)
# for i in range(0, len(abs_refinedSlope_array)):
# normalized_slope= (abs_refinedSlope_array[i]- meanRefinedSlope)/sdRefinedSlope
# normalized_refinedSlope_array.append(normalized_slope)
# print("normalized_refinedSlopeArray")
# print(normalized_refinedSlope_array)
# constant_rate = meanRefinedSlope- 1*(sdRefinedSlope)
# significant_rate = meanRefinedSlope
# gradually_rate= meanRefinedSlope+ 1*(sdRefinedSlope)
# rapidly_rate= meanRefinedSlope + 2*(sdRefinedSlope)
# print(constant_rate)
# print(significant_rate)
# print(gradually_rate)
# print(rapidly_rate)
############# Steepest Slope ##############
# Absolute value of varianceArrayCorrectOrder elements
absoluteVariance = [abs(ele) for ele in varianceArrayCorrectOrder]
max_value = max(absoluteVariance)
max_index = absoluteVariance.index(max_value)
# print(absoluteVariance)
# print(max_value)
# print(max_index)
# print(directionArraySmoothed)
##### Print the summary
###### Print all summaries for single line chart: #########
##### INTRO
summary1 = []
localTrendSentence1 = "This is a line chart with an x axis representing " + xLabel + " and a y axis representing " + yLabel + ", containing a total of " + str(
len(yValueArrCorrectOrder)) \
+ " data points."
summary1.append(localTrendSentence1)
# Version 2
localTrendSentence1 = "The chart at hand is a line chart where the x axis denotes " + xLabel + " and a y axis denotes " + yLabel + ". In total the number of data points it has is " + str(
len(yValueArrCorrectOrder)) \
+ ". "
summary1.append(localTrendSentence1)
summaryArray.append(random.choice(summary1))
#### GLOBAL TREND
summary2_arr = []
summary2 = " Overall " + yLabel + " has "
if globalPercentChange > 0:
summary2 += "increased"
elif globalPercentChange < 0:
summary2 += "decreased"
else:
summary2 += "constant"
# summary2 +=direction
summary2 += " over the " + xLabel + ". "
summary2_arr.append(summary2)
# Version 2
summary2 = " In general, " + yLabel + " has "
if globalPercentChange > 0 and abs(globalPercentChange) > constant:
summary2 += "rose"
elif globalPercentChange < 0 and abs(globalPercentChange) > constant:
summary2 += "fallen"
else:
summary2 += "stayed the same"
# summary2 +=direction
summary2 += " over the " + xLabel + ". "
summary2_arr.append(summary2)
summaryArray.append(random.choice(summary2_arr))
# LOCAL TREND
summary3 = yLabel
rateOfchange_array = []
# rateOfchange_num_array= []
x = 0
if trendChangeIdx: # if trendChangeIdx is not empty
for i in trendChangeIdx:
if (x == 0):
# rateOfChange_num= rateOfChnageVal(yValueArrCorrectOrder[i + 1],yValueArrCorrectOrder[0], directionArraySmoothed[i], (i + 1), 0, max_val, min_val)
# rateOfchange_num_array.append(rateOfChange_num)
rateOfChange = rateOfChnage(refinedPercentChnage_array[x], directionArraySmoothed[i],
constant_rate, gradually_rate, rapidly_rate)
rateOfchange_array.append(rateOfChange)
summary3 += " is " + rateOfChange + " " + directionArraySmoothed[
i] + " from " + str(xValueArrCorrectOrder[0]) + " to " + str(xValueArrCorrectOrder[
i + 1]) + ", "
elif (x > 0 or x < len(trendChangeIdx) - 1):
# rateOfChange_num= rateOfChange(yValueArrCorrectOrder[i + 1], yValueArrCorrectOrder[trendChangeIdx[x - 1] + 1], directionArraySmoothed[i], (i + 1), (trendChangeIdx[x - 1] + 1), max_val, min_val)
# rateOfchange_num_array.append(rateOfChange_num)
rateOfChange = rateOfChnage(refinedPercentChnage_array[x], directionArraySmoothed[i],
constant_rate, gradually_rate, rapidly_rate)
rateOfchange_array.append(rateOfChange)
summary3 += rateOfChange + " " + \
directionArraySmoothed[i] + " from " + str(xValueArrCorrectOrder[
trendChangeIdx[
x - 1] + 1]) + " to " + str(
xValueArrCorrectOrder[i + 1]) + ", "
x = x + 1
# rateOfChange_num= rateOfChnageVal(yValueArrCorrectOrder[-1], yValueArrCorrectOrder[trendChangeIdx[-1] + 1], directionArraySmoothed[-1], (-1), (trendChangeIdx[-1] + 1), max_val, min_val)
# rateOfchange_num_array.append(rateOfChange_num)
rateOfChange = rateOfChnage(refinedPercentChnage_array[x], directionArraySmoothed[-1], constant_rate,
gradually_rate, rapidly_rate)
rateOfchange_array.append(rateOfChange)
synonym = ["lastly", "finally"]
word = random.choice(synonym)
summary3 += "and " + str(word) + " " + rateOfChange + " " + \
directionArraySmoothed[-1] + " from " + str(xValueArrCorrectOrder[
trendChangeIdx[-1] + 1]) + " to " + str(
xValueArrCorrectOrder[-1]) + ". "
else:
# rateOfChange_num= rateOfChnageVal(yValueArrCorrectOrder[-1], yValueArrCorrectOrder[0], directionArraySmoothed[-1], (-1), (0), max_val, min_val)
# rateOfchange_num_array.append(rateOfChange_num)
rateOfChange = rateOfChnage(refinedPercentChnage_array[x], directionArraySmoothed[-1], constant_rate,
gradually_rate, rapidly_rate)
rateOfchange_array.append(rateOfChange)
summary3 += " is " + rateOfChange + " " + \
directionArraySmoothed[-1] + " from " + str(xValueArrCorrectOrder[0]) + " to " + \
str(xValueArrCorrectOrder[-1]) + ". "
sum_zigzag_arr = [] # for ZIG Zag
if (len(trendChangeIdx) < 5):
summaryArray.append(summary3)
# ZIG ZAG
elif (len(yValueArrCorrectOrder) > zigZagNum):
sum_zigzag = "The chart in general has a zig-zag shape."
sum_zigzag_arr.append(sum_zigzag)
sum_zigzag = "The chart generally has many fluctuations."
sum_zigzag_arr.append(sum_zigzag)
summaryArray.append(random.choice(sum_zigzag_arr))
# print(rateOfchange_num_array)
# print("percentArrayCorrectOrder: " + str(percentArrayCorrectOrder))
# print("directionArray: " + str(directionArray))
# COMPARISON
print("")
print("")
print("######################################")
print("C O M P A R I S O N")
print("######################################")
print("")
print("")
summ_Comp = []
summ_Comp1 = "The line rapidly "
summ_Comp2 = "The line "
summ_Comp3 = "The line significantly "
i = 0
# print(rapid)
# To find the number of rapid trends
x = 0
for i in range(0, len(percentArrayCorrectOrder)):
if (abs(percentArrayCorrectOrder[i]) > rapid):
x = x + 1
m = 0
for i in range(0, len(percentArrayCorrectOrder)):
if (abs(percentArrayCorrectOrder[i]) > rapid):
m = m + 1
n = float(yValueArrCorrectOrder[i + 1])
o = float(yValueArrCorrectOrder[i])
print(n)
print(o)
if (o == 0):
o = 0.00000000001
if (n == 0):
n = 0.00000000001
# percentage chnage
p = abs(percentChnageFunc(n, o))
# Factor
f = ""
if (n != 0.00000000001 and o != 0.00000000001):
if (n > o):
f = round(n / o, 1)
else:
f = round(o / n, 1)
# Absolue difference
absolute_diff = abs(n - o)
end = ","
conjucntion = ""
if (m == x): # It is the last line to be printed and it is not only 1 line
end = ". "
if (x != 1):
conjucntion = " and lastly, "
# Version1
summ_Comp1 += conjucntion + str(increasedDecreased(directionArray[i])) + " by " + str(
round(p, 2)) + "% from " + str(xLabel) + " " + str(xValueArrCorrectOrder[i]) + " to " + str(
xValueArrCorrectOrder[i + 1]) + end
# Version 2
if (bool(f)):
summ_Comp2 += conjucntion + str(increasedDecreased(directionArray[i])) + " by " + str(
f) + " times from " + str(xLabel) + " " + str(xValueArrCorrectOrder[i]) + " to " + str(
xValueArrCorrectOrder[i + 1]) + end
# Version 3
summ_Comp3 += conjucntion + str(increasedDecreased(directionArray[i])) + " by " + str(
round(absolute_diff, 2)) + " from " + str(xLabel) + " " + str(
xValueArrCorrectOrder[i]) + " to " + str(xValueArrCorrectOrder[i + 1]) + end
summ_Comp.append(summ_Comp1)
if (len(summ_Comp2) != 0):
summ_Comp.append(summ_Comp2)
summ_Comp.append(summ_Comp3)
summaryArray.append(random.choice(summ_Comp))
# STEEPEST SLOPE
summary4_arr = []
if increaseDecrease(directionArraySmoothed[max_index]) != "stays the same":
summary4 = "The steepest " + increaseDecrease(
directionArraySmoothed[max_index]) + " occurs in between the " + xLabel + " " + str(
xValueArrCorrectOrder[
max_index]) + " and " + str(xValueArrCorrectOrder[max_index + 1]) + ". "
summary4_arr.append(summary4)
# Version 2
summary4 = "The most drastic " + increaseDecrease(
directionArraySmoothed[max_index]) + " took place within the " + xLabel + " " + str(
xValueArrCorrectOrder[
max_index]) + " and " + str(xValueArrCorrectOrder[max_index + 1]) + ". "
summary4_arr.append(summary4)
summaryArray.append(random.choice(summary4_arr))
# EXTREMA MAX
# print(yValueArrCorrectOrder)
max_index = get_indexes_max_value(yValueArrCorrectOrder)
# print(max_index)
# print(len(max_index))
summ_max_arr = []
# version 1
summary_v1 = "Maximum " + yLabel + ", about " + str(
yValueArrCorrectOrder[max_index[0]]) + " was reported at " + xLabel
summ_max_arr.append(summary_v1)
# version2
summary_v2 = "The highest " + yLabel + ", of value " + str(
yValueArrCorrectOrder[max_index[0]]) + " was reached at " + xLabel
summ_max_arr.append(summary_v2)
chosen_max = random.choice(summ_max_arr)
if len(max_index) > 1:
i = 0
while i < (len(max_index) - 1):
chosen_max += " " + str(xValueArrCorrectOrder[max_index[i]]) + ", "
i = i + 1
chosen_max += "and " + str(xValueArrCorrectOrder[max_index[-1]])
else:
chosen_max += " " + str(xValueArrCorrectOrder[max_index[0]]) + ". "
summaryArray.append(chosen_max)
## EXTREMA MIN
# print(yValueArrCorrectOrder)
min_index = get_indexes_min_value(yValueArrCorrectOrder)
# print(min_index)
# print(len(min_index))
summ_min_arr = []
# version 1
summ_v1 = "Minimum " + yLabel + " about " + str(
yValueArrCorrectOrder[min_index[0]]) + " was reached at " + xLabel
summ_min_arr.append(summ_v1)
# version 2
summ_v2 = "The lowest " + yLabel + ", of value " + str(
yValueArrCorrectOrder[min_index[0]]) + " was reported at " + xLabel
summ_min_arr.append(summ_v2)
chosen_min = random.choice(summ_min_arr)
if len(min_index) > 1:
i = 0
while i < (len(min_index) - 1):
chosen_min += " " + str(xValueArrCorrectOrder[min_index[i]]) + ", "
i = i + 1
chosen_min += "and " + str(xValueArrCorrectOrder[min_index[-1]])
else:
chosen_min += " " + str(xValueArrCorrectOrder[min_index[0]]) + ". "
summaryArray.append(chosen_min)
####### Min, Mid, Max Summaries
# Minimum Summary
min_summary = [] # Minimum length summary
mid_summary = [] # Medium length summary
max_summary = [] # Maximum length summary
min_summary.append(random.choice(summary1)) # intro
min_summary.append(random.choice(summary2_arr)) # Global Trend
if (len(yValueArrCorrectOrder) > zigZagNum and len(sum_zigzag_arr) != 0):
max_summary.append(random.choice(sum_zigzag_arr)) # Zig zag
if (len(trendChangeIdx) < 5):
min_summary.append(summary3) # Local Trends
print("min_summary: " + str(min_summary) + "/n")
# Medium Summary
mid_summary.append(random.choice(summary1)) # intro
mid_summary.append(random.choice(summary2_arr)) # Global Trend
if (len(yValueArrCorrectOrder) > zigZagNum and len(sum_zigzag_arr) != 0):
max_summary.append(random.choice(sum_zigzag_arr)) # Zig zag
if (len(trendChangeIdx) < 5):
mid_summary.append(summary3) # Local Trends
if (len(summary4_arr) != 0):
mid_summary.append(random.choice(summary4_arr)) # Steepest Slope
mid_summary.append(chosen_max) # Extrema max
mid_summary.append(chosen_min) # Extrema Min
print("mid_summary: " + str(mid_summary) + "/n")
# Maximum Summary
max_summary.append(random.choice(summary1)) # intro
max_summary.append(random.choice(summary2_arr)) # Global Trend
if (len(yValueArrCorrectOrder) > zigZagNum and len(sum_zigzag_arr) != 0):
max_summary.append(random.choice(sum_zigzag_arr)) # Zig zag
if (len(trendChangeIdx) < 5):
max_summary.append(summary3) # Local Trends
if (len(summary4_arr) != 0):
max_summary.append(random.choice(summary4_arr)) # Steepest Slope
max_summary.append(chosen_max) # Extrema max
max_summary.append(chosen_min) # Extrema Min
if (len(summ_Comp) != 0):
max_summary.append(random.choice(summ_Comp)) # Comparison
print("max_summary: " + str(max_summary) + "/n")
summaryArray = mid_summary
dataJson = [{xLabel: xVal, yLabel: yVal} for xVal, yVal in zip(cleanXArr, cleanYArr)]
websiteInput = {"title": title, "xAxis": xLabel, "yAxis": yLabel,
"columnType": "two",
"graphType": chartType, "summaryType": "baseline", "summary": summaryArray,
"min_summary": min_summary,
"mid_summary": mid_summary,
"max_summary": max_summary,
"trends": graphTrendArray,
"data": dataJson}
with open(f'{websitePath}/{name}.json', 'w', encoding='utf-8') as websiteFile:
json.dump(websiteInput, websiteFile, indent=3)
# oneFile.writelines(''.join(summaryArray) + '\n')
if partial is True:
summaryArray.pop(0)
print(summaryArray)
summaryStr = ""
for a in range(len(summaryArray)):
summaryStr += summaryArray[a]
return summaryStr
# input_data = "Year|2010|x|line_chart Trade_in_thousands_metric_tons|57152.3|y|line_chart Year|2009|x|line_chart Trade_in_thousands_metric_tons|44580.8|y|line_chart Year|2008|x|line_chart Trade_in_thousands_metric_tons|62685.1|y|line_chart Year|2007|x|line_chart Trade_in_thousands_metric_tons|59961.2|y|line_chart Year|2006|x|line_chart Trade_in_thousands_metric_tons|42992.7|y|line_chart "
#
# output_data = summarize(data=input_data, all_y_label="yLabel", name="Partial", title="Partial")
# print("output_data")
# print(output_data["summary"])
pie_chart = "Strategy|advertising|x|pie_chart Amount|20|y|pie_chart Strategy|email|x|pie_chart Amount|40|y|pie_chart Strategy|sale_offers|x|pie_chart Amount|25|y|pie_chart Strategy|leaflet|x|pie_chart Amount|10|y|pie_chart "
scatter = "Manufacturer|Nabisco|0|scatter_chart Calories|50|1|scatter_chart Protein_(g)|1|2|scatter_chart Manufacturer|Quaker_Oats|0|scatter_chart Calories|115|1|scatter_chart Protein_(g)|2.5|2|scatter_chart Manufacturer|Kelloggs|0|scatter_chart Calories|75|1|scatter_chart Protein_(g)|3|2|scatter_chart Manufacturer|Kelloggs|0|scatter_chart Calories|63|1|scatter_chart Protein_(g)|4|2|scatter_chart Manufacturer|Ralston_Purina|0|scatter_chart Calories|160|1|scatter_chart Protein_(g)|5|2|scatter_chart Manufacturer|General_Mills|0|scatter_chart Calories|130|1|scatter_chart Protein_(g)|6|2|scatter_chart Manufacturer|Kelloggs|0|scatter_chart Calories|89|1|scatter_chart Protein_(g)|7|2|scatter_chart Manufacturer|General_Mills|0|scatter_chart Calories|70|1|scatter_chart Protein_(g)|1|2|scatter_chart Manufacturer|Ralston_Purina|0|scatter_chart Calories|140|1|scatter_chart Protein_(g)|2|2|scatter_chart Manufacturer|Post|0|scatter_chart Calories|135|1|scatter_chart Protein_(g)|3|2|scatter_chart Manufacturer|Quaker_Oats|0|scatter_chart Calories|85|1|scatter_chart Protein_(g)|4|2|scatter_chart Manufacturer|General_Mills|0|scatter_chart Calories|80|1|scatter_chart Protein_(g)|6|2|scatter_chart Manufacturer|General_Mills|0|scatter_chart Calories|127|1|scatter_chart Protein_(g)|5|2|scatter_chart Manufacturer|General_Mills|0|scatter_chart Calories|140|1|scatter_chart Protein_(g)|7|2|scatter_chart Manufacturer|General_Mills|0|scatter_chart Calories|145|1|scatter_chart Protein_(g)|1|2|scatter_chart Manufacturer|Ralston_Purina|0|scatter_chart Calories|90|1|scatter_chart Protein_(g)|2|2|scatter_chart Manufacturer|Kelloggs|0|scatter_chart Calories|111|1|scatter_chart Protein_(g)|1|2|scatter_chart Manufacturer|Kelloggs|0|scatter_chart Calories|63|1|scatter_chart Protein_(g)|3|2|scatter_chart Manufacturer|General_Mills|0|scatter_chart Calories|57|1|scatter_chart Protein_(g)|4|2|scatter_chart Manufacturer|Kelloggs|0|scatter_chart Calories|82|1|scatter_chart Protein_(g)|5|2|scatter_chart 
Manufacturer|Nabisco|0|scatter_chart Calories|72|1|scatter_chart Protein_(g)|6|2|scatter_chart Manufacturer|Kelloggs|0|scatter_chart Calories|132|1|scatter_chart Protein_(g)|7|2|scatter_chart Manufacturer|General_Mills|0|scatter_chart Calories|142|1|scatter_chart Protein_(g)|5|2|scatter_chart "
# line1 = "Year|2018|x|line_chart Sales_volume_in_millions|12.88|y|line_chart Year|2017|x|line_chart Sales_volume_in_millions|13.51|y|line_chart Year|2016|x|line_chart Sales_volume_in_millions|16.17|y|line_chart Year|2015|x|line_chart Sales_volume_in_millions|15.94|y|line_chart Year|2014|x|line_chart Sales_volume_in_millions|15.46|y|line_chart Year|2013|x|line_chart Sales_volume_in_millions|13.5|y|line_chart Year|2012|x|line_chart Sales_volume_in_millions|15.85|y|line_chart Year|2011|x|line_chart Sales_volume_in_millions|13.82|y|line_chart Year|2010|x|line_chart Sales_volume_in_millions|11.78|y|line_chart Year|2009|x|line_chart Sales_volume_in_millions|12.99|y|line_chart Year|2008|x|line_chart Sales_volume_in_millions|13.0|y|line_chart Year|2007|x|line_chart Sales_volume_in_millions|8.18|y|line_chart Year|2006|x|line_chart Sales_volume_in_millions|5.0|y|line_chart Year|2005|x|line_chart Sales_volume_in_millions|3.2|y|line_chart Year|2004|x|line_chart Sales_volume_in_millions|2.03|y|line_chart "
line1 = "Year|2018|x|line_chart Sales_volume_in_millions|1288|y|line_chart Year|2017|x|line_chart Sales_volume_in_millions|1351|y|line_chart Year|2016|x|line_chart Sales_volume_in_millions|1617|y|line_chart Year|2015|x|line_chart Sales_volume_in_millions|1594|y|line_chart Year|2014|x|line_chart Sales_volume_in_millions|1546|y|line_chart Year|2013|x|line_chart Sales_volume_in_millions|135|y|line_chart Year|2012|x|line_chart Sales_volume_in_millions|1585|y|line_chart Year|2011|x|line_chart Sales_volume_in_millions|1382|y|line_chart Year|2010|x|line_chart Sales_volume_in_millions|1178|y|line_chart Year|2009|x|line_chart Sales_volume_in_millions|1299|y|line_chart Year|2008|x|line_chart Sales_volume_in_millions|130|y|line_chart Year|2007|x|line_chart Sales_volume_in_millions|818|y|line_chart Year|2006|x|line_chart Sales_volume_in_millions|50|y|line_chart Year|2005|x|line_chart Sales_volume_in_millions|32|y|line_chart Year|2004|x|line_chart Sales_volume_in_millions|203|y|line_chart "
line2 = "Year|2019|x|line_chart Net_income_in_million_U.S._dollars|15119|y|line_chart Year|2018|x|line_chart Net_income_in_million_U.S._dollars|15297|y|line_chart Year|2017|x|line_chart Net_income_in_million_U.S._dollars|1300|y|line_chart Year|2016|x|line_chart Net_income_in_million_U.S._dollars|16540|y|line_chart Year|2015|x|line_chart Net_income_in_million_U.S._dollars|15409|y|line_chart Year|2014|x|line_chart Net_income_in_million_U.S._dollars|16323|y|line_chart Year|2013|x|line_chart Net_income_in_million_U.S._dollars|13831|y|line_chart Year|2012|x|line_chart Net_income_in_million_U.S._dollars|10853|y|line_chart Year|2011|x|line_chart Net_income_in_million_U.S._dollars|9672|y|line_chart Year|2010|x|line_chart Net_income_in_million_U.S._dollars|13334|y|line_chart Year|2009|x|line_chart Net_income_in_million_U.S._dollars|12266|y|line_chart Year|2008|x|line_chart Net_income_in_million_U.S._dollars|12949|y|line_chart Year|2007|x|line_chart Net_income_in_million_U.S._dollars|10576|y|line_chart Year|2006|x|line_chart Net_income_in_million_U.S._dollars|11053|y|line_chart Year|2005|x|line_chart Net_income_in_million_U.S._dollars|10060|y|line_chart "
bar1 = "Month|Dec_19|x|bar_chart Units_sold|708|y|bar_chart Month|Nov_19|x|bar_chart Units_sold|157|y|bar_chart Month|Oct_19|x|bar_chart Units_sold|88|y|bar_chart Month|Sep_19|x|bar_chart Units_sold|526|y|bar_chart Month|Aug_19|x|bar_chart Units_sold|52|y|bar_chart Month|Jul_19|x|bar_chart Units_sold|103|y|bar_chart Month|Jun_19|x|bar_chart Units_sold|244|y|bar_chart Month|May_19|x|bar_chart Units_sold|138|y|bar_chart Month|Apr_19|x|bar_chart Units_sold|101|y|bar_chart Month|Mar_19|x|bar_chart Units_sold|632|y|bar_chart Month|Feb_19|x|bar_chart Units_sold|74|y|bar_chart Month|Jan_19|x|bar_chart Units_sold|174|y|bar_chart Month|Dec_18|x|bar_chart Units_sold|193|y|bar_chart Month|Nov_18|x|bar_chart Units_sold|145|y|bar_chart Month|Oct_18|x|bar_chart Units_sold|135|y|bar_chart Month|Sep_18|x|bar_chart Units_sold|829|y|bar_chart Month|Aug_18|x|bar_chart Units_sold|100|y|bar_chart Month|Jul_18|x|bar_chart Units_sold|112|y|bar_chart Month|Jun_18|x|bar_chart Units_sold|265|y|bar_chart Month|May_18|x|bar_chart Units_sold|231|y|bar_chart Month|Apr_18|x|bar_chart Units_sold|153|y|bar_chart Month|Mar_18|x|bar_chart Units_sold|761|y|bar_chart Month|Feb_18|x|bar_chart Units_sold|62|y|bar_chart Month|Jan_18|x|bar_chart Units_sold|155|y|bar_chart Month|Dec_17|x|bar_chart Units_sold|246|y|bar_chart Month|Nov_17|x|bar_chart Units_sold|216|y|bar_chart Month|Oct_17|x|bar_chart Units_sold|99|y|bar_chart Month|Sep_17|x|bar_chart Units_sold|510|y|bar_chart Month|Aug_17|x|bar_chart Units_sold|44|y|bar_chart Month|Jul_17|x|bar_chart Units_sold|152|y|bar_chart Month|Jun_17|x|bar_chart Units_sold|202|y|bar_chart Month|May_17|x|bar_chart Units_sold|155|y|bar_chart Month|Apr_17|x|bar_chart Units_sold|123|y|bar_chart Month|Mar_17|x|bar_chart Units_sold|706|y|bar_chart Month|Feb_17|x|bar_chart Units_sold|48|y|bar_chart Month|Jan_17|x|bar_chart Units_sold|178|y|bar_chart Month|Dec_16|x|bar_chart Units_sold|330|y|bar_chart Month|Nov_16|x|bar_chart Units_sold|219|y|bar_chart 
Month|Oct_16|x|bar_chart Units_sold|256|y|bar_chart Month|Sep_16|x|bar_chart Units_sold|762|y|bar_chart Month|Aug_16|x|bar_chart Units_sold|69|y|bar_chart Month|Jul_16|x|bar_chart Units_sold|148|y|bar_chart "
hchart1 = "Characteristic|Q3_'08|x|line_chart Number_of_users_in_millions|100|y|line_chart Characteristic|Q2_'09|x|line_chart Number_of_users_in_millions|242|y|line_chart Characteristic|Q4_'09|x|line_chart Number_of_users_in_millions|360|y|line_chart Characteristic|Q2_'10|x|line_chart Number_of_users_in_millions|482|y|line_chart Characteristic|Q4_'10|x|line_chart Number_of_users_in_millions|608|y|line_chart Characteristic|Q2_'11|x|line_chart Number_of_users_in_millions|739|y|line_chart Characteristic|Q4_'11|x|line_chart Number_of_users_in_millions|845|y|line_chart Characteristic|Q2_'12|x|line_chart Number_of_users_in_millions|955|y|line_chart Characteristic|Q4_'12|x|line_chart Number_of_users_in_millions|1056|y|line_chart Characteristic|Q2_'13|x|line_chart Number_of_users_in_millions|1155|y|line_chart Characteristic|Q4_'13|x|line_chart Number_of_users_in_millions|1228|y|line_chart Characteristic|Q2_'14|x|line_chart Number_of_users_in_millions|1317|y|line_chart Characteristic|Q4_'14|x|line_chart Number_of_users_in_millions|1393|y|line_chart Characteristic|Q2_'15|x|line_chart Number_of_users_in_millions|1490|y|line_chart Characteristic|Q4_'15|x|line_chart Number_of_users_in_millions|1591|y|line_chart Characteristic|Q2_'16|x|line_chart Number_of_users_in_millions|1712|y|line_chart Characteristic|Q4_'16|x|line_chart Number_of_users_in_millions|1860|y|line_chart Characteristic|Q2_'17|x|line_chart Number_of_users_in_millions|2006|y|line_chart Characteristic|Q4_'17|x|line_chart Number_of_users_in_millions|2129|y|line_chart Characteristic|Q2_'18|x|line_chart Number_of_users_in_millions|2234|y|line_chart Characteristic|Q4_'18|x|line_chart Number_of_users_in_millions|2320|y|line_chart Characteristic|Q2_'19|x|line_chart Number_of_users_in_millions|2414|y|line_chart Characteristic|Q4_'19|x|line_chart Number_of_users_in_millions|2498|y|line_chart Characteristic|Q2_'20|x|line_chart Number_of_users_in_millions|2701|y|line_chart Characteristic|Q4_'20|x|line_chart 
Number_of_users_in_millions|2797|y|line_chart"
hchart6 = "Label|White|0|bar_chart Active_duty_enlisted_women|53.76|1|bar_chart Active_duty_enlisted_men|69.98|2|bar_chart Label|Black|0|bar_chart Active_duty_enlisted_women|29.22|1|bar_chart Active_duty_enlisted_men|16.82|2|bar_chart Label|American|0|bar_chart Active_duty_enlisted_women|1.42|1|bar_chart Active_duty_enlisted_men|1.2|2|bar_chart Label|Asian|0|bar_chart Active_duty_enlisted_women|4.8|1|bar_chart Active_duty_enlisted_men|4.28|2|bar_chart Label|Native|0|bar_chart Active_duty_enlisted_women|1.62|1|bar_chart Active_duty_enlisted_men|1.18|2|bar_chart Label|Two or more|0|bar_chart Active_duty_enlisted_women|4.5|1|bar_chart Active_duty_enlisted_men|3.01|2|bar_chart Label|Unknown|0|bar_chart Active_duty_enlisted_women|4.68|1|bar_chart Active_duty_enlisted_men|3.51|2|bar_chart Label|Hispanic|0|bar_chart Active_duty_enlisted_women|20.55|1|bar_chart Active_duty_enlisted_men|17.32|2|bar_chart "
# output = summarize(data=hchart6, all_y_label="yLabel", name="Partial", title="Partial", partial=False)
# print("output")
# print(output)
### USE THIS PORTION TO RUN ALL CHARTS AT ONCE WITH Y LABELS
# with open(dataPath, 'r', encoding='utf-8') as dataFile, \
# open(titlePath, 'r', encoding='utf-8') as titleFile, open(yLabelPath, 'r', encoding='utf-8') as all_y_label:
# count = 1
# fileIterators = zip(dataFile.readlines(), titleFile.readlines(), all_y_label.readlines())
# for data, title, y_label in fileIterators:
# summarize(data=data, all_y_label=y_label.rstrip('\n'), name=count, title=title.rstrip('\n'))
# count += 1
| 149,901 | 51.875485 | 2,291 | py |
SeeChart | SeeChart-main/users.py | import csv
class User:
    """A minimal in-memory record of a login account."""

    def __init__(self, id, username, password):
        # Keep the credentials exactly as supplied; no validation happens here.
        self.id, self.username, self.password = id, username, password

    def __repr__(self):
        # Show only the username so the password never leaks into logs.
        return "<User: {}>".format(self.username)
# Load every account from the CSV once at import time.  Each stored password
# is the literal prefix "password" followed by the value from the file.
with open('static/users/users.csv', mode='r') as csv_file:
    users = [
        User(
            id=str(row["id"]),
            username=str(row["username"]),
            password="password" + str(row["password"]),
        )
        for row in csv.DictReader(csv_file)
    ]
| 483 | 20.043478 | 112 | py |
lama-cleaner | lama-cleaner-main/main.py | from lama_cleaner import entry_point
# Run the CLI entry point only when executed as a script (not on import).
if __name__ == "__main__":
    entry_point()
| 83 | 15.8 | 36 | py |
lama-cleaner | lama-cleaner-main/setup.py | import setuptools
from pathlib import Path
# Collect every file of the pre-built web UI so it ships inside the wheel;
# paths are rewritten relative to the `lama_cleaner` package directory, the
# form expected by the `package_data` argument passed to setup() below.
web_files = Path("lama_cleaner/app/build/").glob("**/*")
web_files = [str(it).replace("lama_cleaner/", "") for it in web_files]

# README.md becomes the PyPI long description.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
def load_requirements(requirements_file_name="requirements.txt"):
    """Read a pip-style requirements file into a list of requirement strings.

    Args:
        requirements_file_name: Path to the requirements file.  Defaults to
            "requirements.txt" in the current working directory, so existing
            zero-argument calls keep working.

    Returns:
        A list of requirement lines stripped of surrounding whitespace.
        Blank lines and ``#`` comment lines are skipped so they are never
        handed to setuptools as (invalid) requirements.
    """
    requires = []
    with open(requirements_file_name) as f:
        for line in f:
            stripped = line.strip()
            # The original ``if line:`` was always true (iterated lines keep
            # their trailing newline), which let blank lines through as ""
            # entries and comment lines through verbatim.
            if stripped and not stripped.startswith("#"):
                requires.append(stripped)
    return requires
# https://setuptools.readthedocs.io/en/latest/setuptools.html#including-data-files
setuptools.setup(
    name="lama-cleaner",
    version="1.2.2",
    author="PanicByte",
    author_email="cwq1913@gmail.com",
    description="Image inpainting tool powered by SOTA AI Model",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Sanster/lama-cleaner",
    packages=setuptools.find_packages("./"),
    # Bundle the pre-built web UI (collected into `web_files` above) with the
    # package so the server can serve it from inside the wheel.
    package_data={"lama_cleaner": web_files},
    install_requires=load_requirements(),
    python_requires=">=3.7",
    # Installs a `lama-cleaner` console command that dispatches to
    # lama_cleaner.entry_point().
    entry_points={"console_scripts": ["lama-cleaner=lama_cleaner:entry_point"]},
    classifiers=[
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
)
| 1,616 | 33.404255 | 82 | py |
lama-cleaner | lama-cleaner-main/scripts/tool.py | import glob
import os
from typing import Dict, List, Union
import torch
from diffusers.utils import is_safetensors_available
if is_safetensors_available():
import safetensors.torch
from huggingface_hub import snapshot_download
from diffusers import DiffusionPipeline, __version__
from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
from diffusers.utils import (
CONFIG_NAME,
DIFFUSERS_CACHE,
ONNX_WEIGHTS_NAME,
WEIGHTS_NAME,
)
class CheckpointMergerPipeline(DiffusionPipeline):
    """
    A class that supports merging diffusion models based on the discussion here:
    https://github.com/huggingface/diffusers/issues/877

    Example usage:-

    pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", custom_pipeline="checkpoint_merger.py")
    merged_pipe = pipe.merge(["CompVis/stable-diffusion-v1-4","prompthero/openjourney"], interp = 'inv_sigmoid', alpha = 0.8, force = True)
    merged_pipe.to('cuda')

    prompt = "An astronaut riding a unicycle on Mars"

    results = merged_pipe(prompt)

    ## For more details, see the docstring for the merge method.

    """

    def __init__(self):
        # NOTE(review): config registration happens before the base-class
        # constructor, mirroring the upstream community pipeline; the
        # DiffusionPipeline base tolerates this order.
        self.register_to_config()
        super().__init__()

    def _compare_model_configs(self, dict0, dict1):
        """Return True when two model_index.json dicts describe compatible pipelines.

        Keys starting with '_' are treated as metadata: a mismatch limited to
        them only triggers a warning, not an incompatibility.
        """
        if dict0 == dict1:
            return True
        else:
            config0, meta_keys0 = self._remove_meta_keys(dict0)
            config1, meta_keys1 = self._remove_meta_keys(dict1)
            if config0 == config1:
                print(f"Warning !: Mismatch in keys {meta_keys0} and {meta_keys1}.")
                return True
        return False

    def _remove_meta_keys(self, config_dict: Dict):
        """Split *config_dict* into (dict without '_'-prefixed keys, list of removed keys)."""
        meta_keys = []
        temp_dict = config_dict.copy()
        for key in config_dict.keys():
            if key.startswith("_"):
                temp_dict.pop(key)
                meta_keys.append(key)
        return (temp_dict, meta_keys)

    @torch.no_grad()
    def merge(
        self,
        pretrained_model_name_or_path_list: List[Union[str, os.PathLike]],
        **kwargs,
    ):
        """
        Returns a new pipeline object of the class 'DiffusionPipeline' with the merged checkpoints(weights) of the models passed
        in the argument 'pretrained_model_name_or_path_list' as a list.

        Parameters:
        -----------
            pretrained_model_name_or_path_list : A list of valid pretrained model names in the HuggingFace hub or paths to locally stored models in the HuggingFace format.

            **kwargs:
                Supports all the default DiffusionPipeline.get_config_dict kwargs viz..

                cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map.

                alpha - The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha
                    would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2

                interp - The interpolation method to use for the merging. Supports "sigmoid", "inv_sigmoid", "add_diff" and None.
                    Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_diff" is supported.

                force - Whether to ignore mismatch in model_config.json for the current models. Defaults to False.
        """
        # Default kwargs from DiffusionPipeline
        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
        resume_download = kwargs.pop("resume_download", False)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        torch_dtype = kwargs.pop("torch_dtype", None)
        device_map = kwargs.pop("device_map", None)

        alpha = kwargs.pop("alpha", 0.5)
        interp = kwargs.pop("interp", None)

        print("Received list", pretrained_model_name_or_path_list)
        print(f"Combining with alpha={alpha}, interpolation mode={interp}")

        checkpoint_count = len(pretrained_model_name_or_path_list)
        # Ignore result from model_index_json comparision of the two checkpoints
        force = kwargs.pop("force", False)

        # If less than 2 checkpoints, nothing to merge. If more than 3, not supported for now.
        if checkpoint_count > 3 or checkpoint_count < 2:
            raise ValueError(
                "Received incorrect number of checkpoints to merge. Ensure that either 2 or 3 checkpoints are being"
                " passed."
            )

        print("Received the right number of checkpoints")

        # Validate that the checkpoints can be merged.
        # Step 1: Load the model config and compare the checkpoints. We'll compare the model_index.json first while ignoring the keys starting with '_'
        config_dicts = []
        for pretrained_model_name_or_path in pretrained_model_name_or_path_list:
            config_dict = DiffusionPipeline.load_config(
                pretrained_model_name_or_path,
                cache_dir=cache_dir,
                resume_download=resume_download,
                force_download=force_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
            )
            config_dicts.append(config_dict)

        comparison_result = True
        for idx in range(1, len(config_dicts)):
            comparison_result &= self._compare_model_configs(
                config_dicts[idx - 1], config_dicts[idx]
            )
            if not force and comparison_result is False:
                raise ValueError(
                    "Incompatible checkpoints. Please check model_index.json for the models."
                )
        print(config_dicts[0], config_dicts[1])
        print("Compatible model_index.json files found")

        # Step 2: Basic Validation has succeeded. Let's download the models and save them into our local files.
        cached_folders = []
        for pretrained_model_name_or_path, config_dict in zip(
            pretrained_model_name_or_path_list, config_dicts
        ):
            # Download only the sub-model folders named in model_index.json
            # plus the pipeline-level config/weight files.
            folder_names = [k for k in config_dict.keys() if not k.startswith("_")]
            allow_patterns = [os.path.join(k, "*") for k in folder_names]
            allow_patterns += [
                WEIGHTS_NAME,
                SCHEDULER_CONFIG_NAME,
                CONFIG_NAME,
                ONNX_WEIGHTS_NAME,
                DiffusionPipeline.config_name,
            ]
            requested_pipeline_class = config_dict.get("_class_name")
            user_agent = {
                "diffusers": __version__,
                "pipeline_class": requested_pipeline_class,
            }

            # Local directories are used as-is; hub ids are snapshotted.
            cached_folder = (
                pretrained_model_name_or_path
                if os.path.isdir(pretrained_model_name_or_path)
                else snapshot_download(
                    pretrained_model_name_or_path,
                    cache_dir=cache_dir,
                    resume_download=resume_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    allow_patterns=allow_patterns,
                    user_agent=user_agent,
                )
            )
            print("Cached Folder", cached_folder)
            cached_folders.append(cached_folder)

        # Step 3:-
        # Load the first checkpoint as a diffusion pipeline and modify its module state_dict in place
        final_pipe = DiffusionPipeline.from_pretrained(
            cached_folders[0], torch_dtype=torch_dtype, device_map=device_map
        )
        final_pipe.to(self.device)

        if interp == "sigmoid":
            theta_func = CheckpointMergerPipeline.sigmoid
        elif interp == "inv_sigmoid":
            theta_func = CheckpointMergerPipeline.inv_sigmoid
        elif interp == "add_diff":
            theta_func = CheckpointMergerPipeline.add_difference
        else:
            theta_func = CheckpointMergerPipeline.weighted_sum

        # Find each module's state dict.
        for attr in final_pipe.config.keys():
            if not attr.startswith("_"):
                checkpoint_path_1 = os.path.join(cached_folders[1], attr)
                if os.path.exists(checkpoint_path_1):
                    files = list(
                        (
                            *glob.glob(
                                os.path.join(checkpoint_path_1, "*.safetensors")
                            ),
                            *glob.glob(os.path.join(checkpoint_path_1, "*.bin")),
                        )
                    )
                    checkpoint_path_1 = files[0] if len(files) > 0 else None
                if len(cached_folders) < 3:
                    checkpoint_path_2 = None
                else:
                    checkpoint_path_2 = os.path.join(cached_folders[2], attr)
                    if os.path.exists(checkpoint_path_2):
                        files = list(
                            (
                                *glob.glob(
                                    os.path.join(checkpoint_path_2, "*.safetensors")
                                ),
                                *glob.glob(os.path.join(checkpoint_path_2, "*.bin")),
                            )
                        )
                        checkpoint_path_2 = files[0] if len(files) > 0 else None
                # For an attr if both checkpoint_path_1 and 2 are None, ignore.
                # If atleast one is present, deal with it according to interp method, of course only if the state_dict keys match.
                # (A one-sided None, or a directory path with no weight files,
                # surfaces as an exception below and skips the attr.)
                if checkpoint_path_1 is None and checkpoint_path_2 is None:
                    print(f"Skipping {attr}: not present in 2nd or 3d model")
                    continue
                try:
                    module = getattr(final_pipe, attr)
                    if isinstance(
                        module, bool
                    ):  # ignore requires_safety_checker boolean
                        continue
                    theta_0 = getattr(module, "state_dict")
                    theta_0 = theta_0()

                    update_theta_0 = getattr(module, "load_state_dict")
                    theta_1 = (
                        safetensors.torch.load_file(checkpoint_path_1)
                        if (
                            is_safetensors_available()
                            and checkpoint_path_1.endswith(".safetensors")
                        )
                        else torch.load(checkpoint_path_1, map_location="cpu")
                    )

                    # Local modification: the vae and text encoder are taken
                    # wholesale from the second model instead of being merged.
                    if attr in ['vae', 'text_encoder']:
                        print(f"Direct use theta1 {attr}: {checkpoint_path_1}")
                        update_theta_0(theta_1)
                        del theta_1
                        del theta_0
                        continue

                    theta_2 = None
                    if checkpoint_path_2:
                        theta_2 = (
                            safetensors.torch.load_file(checkpoint_path_2)
                            if (
                                is_safetensors_available()
                                and checkpoint_path_2.endswith(".safetensors")
                            )
                            else torch.load(checkpoint_path_2, map_location="cpu")
                        )

                    if not theta_0.keys() == theta_1.keys():
                        print(f"Skipping {attr}: key mismatch")
                        continue
                    if theta_2 and not theta_1.keys() == theta_2.keys():
                        print(f"Skipping {attr}: key mismatch")
                        # BUGFIX: without this `continue` the merge loop below
                        # ran with mismatched key sets and crashed with a
                        # KeyError on theta_2[key].
                        continue
                except Exception as e:
                    print(f"Skipping {attr} due to an unexpected error: {str(e)}")
                    continue
                print(f"MERGING {attr}")

                for key in theta_0.keys():
                    if theta_2:
                        theta_0[key] = theta_func(
                            theta_0[key], theta_1[key], theta_2[key], alpha
                        )
                    else:
                        theta_0[key] = theta_func(
                            theta_0[key], theta_1[key], None, alpha
                        )

                del theta_1
                del theta_2
                update_theta_0(theta_0)

                del theta_0
        return final_pipe

    @staticmethod
    def weighted_sum(theta0, theta1, theta2, alpha):
        """Linear interpolation (1 - alpha) * theta0 + alpha * theta1; theta2 is unused."""
        return ((1 - alpha) * theta0) + (alpha * theta1)

    # Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
    @staticmethod
    def sigmoid(theta0, theta1, theta2, alpha):
        """Smoothstep-weighted interpolation between theta0 and theta1; theta2 is unused."""
        alpha = alpha * alpha * (3 - (2 * alpha))
        return theta0 + ((theta1 - theta0) * alpha)

    # Inverse Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
    @staticmethod
    def inv_sigmoid(theta0, theta1, theta2, alpha):
        """Inverse-smoothstep-weighted interpolation between theta0 and theta1; theta2 is unused."""
        import math

        alpha = 0.5 - math.sin(math.asin(1.0 - 2.0 * alpha) / 3.0)
        return theta0 + ((theta1 - theta0) * alpha)

    @staticmethod
    def add_difference(theta0, theta1, theta2, alpha):
        """Return theta0 + (theta1 - theta2) * (1.0 - alpha).

        When the shapes differ (e.g. an inpainting UNet conv_in has more
        input channels than a text-to-image one), the diff is applied to the
        first 4 input channels of theta0 only.
        """
        diff = (theta1 - theta2) * (1.0 - alpha)
        if theta0.shape != diff.shape:
            theta0[:, 0:4, :, :] = theta0[:, 0:4, :, :] + diff
        else:
            theta0 = theta0 + diff
        return theta0
# Example/driver script: merge two extra checkpoints into the inpainting pipeline.
pipe = CheckpointMergerPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
# interp="add_diff" with alpha=0 applies the full (model2 - model3) delta onto model1.
merged_pipe = pipe.merge(
    [
        "runwayml/stable-diffusion-inpainting",
        #"SG161222/Realistic_Vision_V1.4",
        "dreamlike-art/dreamlike-diffusion-1.0",
        "runwayml/stable-diffusion-v1-5",
    ],
    force=True,
    interp="add_diff",
    alpha=0,
)
# Save in fp16 to halve the on-disk/GPU size of the merged weights.
merged_pipe = merged_pipe.to(torch.float16)
merged_pipe.save_pretrained("dreamlike-diffusion-1.0-inpainting", safe_serialization=True)
| 14,778 | 39.825967 | 171 | py |
lama-cleaner | lama-cleaner-main/scripts/convert_vae_pt_to_diffusers.py | import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """Remap an LDM/CompVis VAE state dict to diffusers `AutoencoderKL` key names.

    Args:
        checkpoint: the raw VAE state dict (tensor values are copied by reference).
        config: diffusers VAE config dict, forwarded to `assign_to_checkpoint`.

    Returns:
        A new state-dict-like dict with diffusers-style keys.

    Note: relies on diffusers' `renew_vae_resnet_paths` /
    `renew_vae_attention_paths` / `assign_to_checkpoint` /
    `conv_attn_to_linear` helpers for the per-layer renaming.
    """
    vae_state_dict = checkpoint
    new_checkpoint = {}
    # Straight 1:1 renames for the encoder/decoder stems and quant convs.
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict[
        "encoder.conv_out.weight"
    ]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict[
        "encoder.norm_out.weight"
    ]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict[
        "encoder.norm_out.bias"
    ]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict[
        "decoder.conv_out.weight"
    ]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict[
        "decoder.norm_out.weight"
    ]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict[
        "decoder.norm_out.bias"
    ]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    # (count distinct "encoder.down.N" prefixes to learn how many blocks exist).
    num_down_blocks = len(
        {
            ".".join(layer.split(".")[:3])
            for layer in vae_state_dict
            if "encoder.down" in layer
        }
    )
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key]
        for layer_id in range(num_down_blocks)
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len(
        {
            ".".join(layer.split(".")[:3])
            for layer in vae_state_dict
            if "decoder.up" in layer
        }
    )
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key]
        for layer_id in range(num_up_blocks)
    }
    # Encoder down blocks: move downsampler convs, then remap each resnet.
    for i in range(num_down_blocks):
        resnets = [
            key
            for key in down_blocks[i]
            if f"down.{i}" in key and f"down.{i}.downsample" not in key
        ]
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            # pop() so the downsample keys are not re-processed below.
            new_checkpoint[
                f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"
            ] = vae_state_dict.pop(f"encoder.down.{i}.downsample.conv.weight")
            new_checkpoint[
                f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"
            ] = vae_state_dict.pop(f"encoder.down.{i}.downsample.conv.bias")
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(
            paths,
            new_checkpoint,
            vae_state_dict,
            additional_replacements=[meta_path],
            config=config,
        )
    # Encoder mid block: 2 resnets plus one attention layer.
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(
            paths,
            new_checkpoint,
            vae_state_dict,
            additional_replacements=[meta_path],
            config=config,
        )
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(
        paths,
        new_checkpoint,
        vae_state_dict,
        additional_replacements=[meta_path],
        config=config,
    )
    # Attention projections were 1x1 convs in LDM; squeeze them to linear weights.
    conv_attn_to_linear(new_checkpoint)
    # Decoder up blocks: note LDM orders them reversed vs diffusers, hence block_id.
    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key
            for key in up_blocks[block_id]
            if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]
        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[
                f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"
            ] = vae_state_dict[f"decoder.up.{block_id}.upsample.conv.weight"]
            new_checkpoint[
                f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"
            ] = vae_state_dict[f"decoder.up.{block_id}.upsample.conv.bias"]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(
            paths,
            new_checkpoint,
            vae_state_dict,
            additional_replacements=[meta_path],
            config=config,
        )
    # Decoder mid block: same structure as the encoder's.
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(
            paths,
            new_checkpoint,
            vae_state_dict,
            additional_replacements=[meta_path],
            config=config,
        )
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(
        paths,
        new_checkpoint,
        vae_state_dict,
        additional_replacements=[meta_path],
        config=config,
    )
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(
    checkpoint_path: str,
    output_path: str,
):
    """Convert a Stable Diffusion v1 VAE `.pt`/`.ckpt` checkpoint to diffusers format.

    Downloads the reference SD v1 inference config, remaps the checkpoint's
    state dict keys via `custom_convert_ldm_vae_checkpoint`, and saves an
    `AutoencoderKL` to `output_path`.

    Args:
        checkpoint_path: path to the original VAE checkpoint (expects a
            `state_dict` entry).
        output_path: directory to write the converted diffusers VAE into.
    """
    # Only support V1
    # Fixed: the URL previously had a stray leading space.
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    # Fail fast on a bad HTTP response instead of feeding an error page to OmegaConf.
    r.raise_for_status()
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    checkpoint = torch.load(checkpoint_path, map_location=device)

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(
        checkpoint["state_dict"], vae_config
    )
    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    # CLI entry point: convert a local VAE .pt checkpoint into diffusers format.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--vae_pt_path",
        default="/Users/cwq/code/github/lama-cleaner/scripts/anything-v4.0.vae.pt",
        type=str,
        help="Path to the VAE.pt to convert.",
    )
    parser.add_argument(
        "--dump_path",
        default="diffusion_pytorch_model.bin",
        type=str,
        # Fixed: help text was a copy-paste of --vae_pt_path's description.
        help="Output directory for the converted diffusers VAE.",
    )
    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 7,961 | 33.318966 | 117 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model_manager.py | import torch
import gc
from loguru import logger
from lama_cleaner.const import SD15_MODELS
from lama_cleaner.helper import switch_mps_device
from lama_cleaner.model.controlnet import ControlNet
from lama_cleaner.model.fcf import FcF
from lama_cleaner.model.lama import LaMa
from lama_cleaner.model.ldm import LDM
from lama_cleaner.model.manga import Manga
from lama_cleaner.model.mat import MAT
from lama_cleaner.model.paint_by_example import PaintByExample
from lama_cleaner.model.instruct_pix2pix import InstructPix2Pix
from lama_cleaner.model.sd import SD15, SD2, Anything4, RealisticVision14
from lama_cleaner.model.utils import torch_gc
from lama_cleaner.model.zits import ZITS
from lama_cleaner.model.opencv2 import OpenCV2
from lama_cleaner.schema import Config
# Registry mapping model name (as used on the CLI / frontend) to its class.
# Each class is instantiated as cls(device, **kwargs) in ModelManager.init_model.
models = {
    "lama": LaMa,
    "ldm": LDM,
    "zits": ZITS,
    "mat": MAT,
    "fcf": FcF,
    SD15.name: SD15,
    Anything4.name: Anything4,
    RealisticVision14.name: RealisticVision14,
    "cv2": OpenCV2,
    "manga": Manga,
    "sd2": SD2,
    "paint_by_example": PaintByExample,
    "instruct_pix2pix": InstructPix2Pix,
}
class ModelManager:
    """Owns the currently loaded inpainting model and handles model switching."""

    def __init__(self, name: str, device: torch.device, **kwargs):
        """Load model `name` onto `device`; extra kwargs go to the model constructor."""
        self.name = name
        self.device = device
        self.kwargs = kwargs
        self.model = self.init_model(name, device, **kwargs)

    def init_model(self, name: str, device, **kwargs):
        """Instantiate a model by registry name.

        SD 1.5 variants are wrapped in ControlNet when `sd_controlnet` is set.

        Raises:
            NotImplementedError: if `name` is not in the `models` registry.
        """
        if name in SD15_MODELS and kwargs.get("sd_controlnet", False):
            return ControlNet(device, **{**kwargs, "name": name})

        if name in models:
            model = models[name](device, **kwargs)
        else:
            raise NotImplementedError(f"Not supported model: {name}")
        return model

    def is_downloaded(self, name: str) -> bool:
        """Return True if the weights for `name` are already on disk."""
        if name in models:
            return models[name].is_downloaded()
        else:
            raise NotImplementedError(f"Not supported model: {name}")

    def __call__(self, image, mask, config: Config):
        """Run inpainting; switches ControlNet method first if the config asks for it."""
        self.switch_controlnet_method(control_method=config.controlnet_method)
        return self.model(image, mask, config)

    def switch(self, new_name: str, **kwargs):
        """Replace the current model with `new_name` (no-op if already loaded)."""
        if new_name == self.name:
            return
        try:
            # Fixed: guard with is_available() so CPU-only installs don't
            # crash when querying CUDA allocator state.
            if torch.cuda.is_available() and torch.cuda.memory_allocated() > 0:
                # Clear current loaded model from memory
                torch.cuda.empty_cache()
                del self.model
                gc.collect()

            self.model = self.init_model(
                new_name, switch_mps_device(new_name, self.device), **self.kwargs
            )
            self.name = new_name
        except NotImplementedError:
            # Bare raise preserves the original traceback for the caller.
            raise

    def switch_controlnet_method(self, control_method: str):
        """Rebuild the ControlNet pipeline when the requested control method changes."""
        if not self.kwargs.get("sd_controlnet"):
            return
        if self.kwargs["sd_controlnet_method"] == control_method:
            return

        if self.model.is_local_sd_model:
            # is_native_control_inpaint means a plain (non-inpainting) SD model was loaded
            if (
                self.model.is_native_control_inpaint
                and control_method != "control_v11p_sd15_inpaint"
            ):
                raise RuntimeError(
                    f"--sd-local-model-path load a normal SD model, "
                    f"to use {control_method} you should load an inpainting SD model"
                )
            elif (
                not self.model.is_native_control_inpaint
                and control_method == "control_v11p_sd15_inpaint"
            ):
                raise RuntimeError(
                    f"--sd-local-model-path load an inpainting SD model, "
                    # Fixed typo: "norml" -> "normal"
                    f"to use {control_method} you should load a normal SD model"
                )

        # Drop the old pipeline and free GPU memory before rebuilding.
        del self.model
        torch_gc()

        old_method = self.kwargs["sd_controlnet_method"]
        self.kwargs["sd_controlnet_method"] = control_method
        self.model = self.init_model(
            self.name, switch_mps_device(self.name, self.device), **self.kwargs
        )
        logger.info(f"Switch ControlNet method from {old_method} to {control_method}")
| 4,073 | 34.12069 | 86 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/const.py | import json
import os
from enum import Enum
from pydantic import BaseModel
# Models known to work on Apple MPS devices.
MPS_SUPPORT_MODELS = [
    "instruct_pix2pix",
    "sd1.5",
    "anything4",
    "realisticVision1.4",
    "sd2",
    "paint_by_example",
    "controlnet",
]

DEFAULT_MODEL = "lama"
# All model names accepted by --model / the frontend switcher.
AVAILABLE_MODELS = [
    "lama",
    "ldm",
    "zits",
    "mat",
    "fcf",
    "sd1.5",
    "anything4",
    "realisticVision1.4",
    "cv2",
    "manga",
    "sd2",
    "paint_by_example",
    "instruct_pix2pix",
]
# Stable Diffusion 1.5 family (eligible for ControlNet wrapping).
SD15_MODELS = ["sd1.5", "anything4", "realisticVision1.4"]

AVAILABLE_DEVICES = ["cuda", "cpu", "mps"]
DEFAULT_DEVICE = "cuda"

# CLI --help strings below are shown verbatim to users.
NO_HALF_HELP = """
Using full precision model.
If your generate result is always black or green, use this argument. (sd/paint_by_example)
"""

CPU_OFFLOAD_HELP = """
Offloads all models to CPU, significantly reducing vRAM usage. (sd/paint_by_example)
"""

DISABLE_NSFW_HELP = """
Disable NSFW checker. (sd/paint_by_example)
"""

SD_CPU_TEXTENCODER_HELP = """
Run Stable Diffusion text encoder model on CPU to save GPU memory.
"""

SD_CONTROLNET_HELP = """
Run Stable Diffusion inpainting model with ControlNet. You can switch control method in webui.
"""
DEFAULT_CONTROLNET_METHOD = "control_v11p_sd15_canny"
SD_CONTROLNET_CHOICES = [
    "control_v11p_sd15_canny",
    "control_v11p_sd15_openpose",
    "control_v11p_sd15_inpaint",
    "control_v11f1p_sd15_depth"
]

SD_LOCAL_MODEL_HELP = """
Load Stable Diffusion 1.5 model(ckpt/safetensors) from local path.
"""

LOCAL_FILES_ONLY_HELP = """
Use local files only, not connect to Hugging Face server. (sd/paint_by_example)
"""

ENABLE_XFORMERS_HELP = """
Enable xFormers optimizations. Requires xformers package has been installed. See: https://github.com/facebookresearch/xformers (sd/paint_by_example)
"""

# Honors XDG_CACHE_HOME; falls back to ~/.cache.
DEFAULT_MODEL_DIR = os.getenv(
    "XDG_CACHE_HOME", os.path.join(os.path.expanduser("~"), ".cache")
)
MODEL_DIR_HELP = """
Model download directory (by setting XDG_CACHE_HOME environment variable), by default model downloaded to ~/.cache
"""

OUTPUT_DIR_HELP = """
Result images will be saved to output directory automatically without confirmation.
"""

INPUT_HELP = """
If input is image, it will be loaded by default.
If input is directory, you can browse and select image in file manager.
"""

GUI_HELP = """
Launch Lama Cleaner as desktop app
"""
NO_GUI_AUTO_CLOSE_HELP = """
Prevent backend auto close after the GUI window closed.
"""

QUALITY_HELP = """
Quality of image encoding, 0-100. Default is 95, higher quality will generate larger file size.
"""
class RealESRGANModelName(str, Enum):
    """Names of the supported RealESRGAN super-resolution weights."""
    realesr_general_x4v3 = "realesr-general-x4v3"
    RealESRGAN_x4plus = "RealESRGAN_x4plus"
    RealESRGAN_x4plus_anime_6B = "RealESRGAN_x4plus_anime_6B"
# Plain list of enum values, for argparse `choices=`.
RealESRGANModelNameList = [e.value for e in RealESRGANModelName]

# Plugin-related CLI help strings and per-plugin device choices.
INTERACTIVE_SEG_HELP = "Enable interactive segmentation using Segment Anything."
INTERACTIVE_SEG_MODEL_HELP = "Model size: vit_b < vit_l < vit_h. Bigger model size means better segmentation but slower speed."
AVAILABLE_INTERACTIVE_SEG_MODELS = ["vit_b", "vit_l", "vit_h"]
AVAILABLE_INTERACTIVE_SEG_DEVICES = ["cuda", "cpu", "mps"]
REMOVE_BG_HELP = "Enable remove background. Always run on CPU"
ANIMESEG_HELP = "Enable anime segmentation. Always run on CPU"
REALESRGAN_HELP = "Enable realesrgan super resolution"
REALESRGAN_AVAILABLE_DEVICES = ["cpu", "cuda", "mps"]
GFPGAN_HELP = (
    "Enable GFPGAN face restore. To enhance background, use with --enable-realesrgan"
)
GFPGAN_AVAILABLE_DEVICES = ["cpu", "cuda", "mps"]
RESTOREFORMER_HELP = "Enable RestoreFormer face restore. To enhance background, use with --enable-realesrgan"
RESTOREFORMER_AVAILABLE_DEVICES = ["cpu", "cuda", "mps"]
GIF_HELP = "Enable GIF plugin. Make GIF to compare original and cleaned image"
class Config(BaseModel):
    """Installer/launcher configuration, loaded from JSON by `load_config`.

    Field defaults mirror the CLI defaults in lama_cleaner's argument parser.
    """
    # Server binding
    host: str = "127.0.0.1"
    port: int = 8080
    # Model selection (pydantic treats `str = None` as Optional[str])
    model: str = DEFAULT_MODEL
    sd_local_model_path: str = None
    sd_controlnet: bool = False
    sd_controlnet_method: str = DEFAULT_CONTROLNET_METHOD
    device: str = DEFAULT_DEVICE
    # Desktop GUI behavior
    gui: bool = False
    no_gui_auto_close: bool = False
    # Precision / memory trade-offs
    no_half: bool = False
    cpu_offload: bool = False
    disable_nsfw: bool = False
    sd_cpu_textencoder: bool = False
    enable_xformers: bool = False
    local_files_only: bool = False
    model_dir: str = DEFAULT_MODEL_DIR
    # Input file/dir and auto-save output dir (None disables each)
    input: str = None
    output_dir: str = None
    # plugins
    enable_interactive_seg: bool = False
    interactive_seg_model: str = "vit_l"
    interactive_seg_device: str = "cpu"
    enable_remove_bg: bool = False
    enable_anime_seg: bool = False
    enable_realesrgan: bool = False
    realesrgan_device: str = "cpu"
    realesrgan_model: str = RealESRGANModelName.realesr_general_x4v3.value
    realesrgan_no_half: bool = False
    enable_gfpgan: bool = False
    gfpgan_device: str = "cpu"
    enable_restoreformer: bool = False
    restoreformer_device: str = "cpu"
    enable_gif: bool = False
def load_config(installer_config: str):
    """Load a `Config` from the given JSON file, or return defaults if it is absent."""
    if not os.path.exists(installer_config):
        return Config()
    with open(installer_config, "r", encoding="utf-8") as f:
        return Config(**json.load(f))
| 5,145 | 28.574713 | 148 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/benchmark.py | #!/usr/bin/env python3
import argparse
import os
import time
import numpy as np
import nvidia_smi
import psutil
import torch
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import Config, HDStrategy, SDSampler
# Disable TorchScript fusers for stable benchmarking; the private _C hooks
# vary across torch versions, so failures are ignored on purpose.
try:
    torch._C._jit_override_can_fuse_on_cpu(False)
    torch._C._jit_override_can_fuse_on_gpu(False)
    torch._C._jit_set_texpr_fuser_enabled(False)
    torch._C._jit_set_nvfuser_enabled(False)
except:
    pass

# Pin BLAS/OpenMP thread pools to a fixed size so runs are comparable.
NUM_THREADS = str(4)
os.environ["OMP_NUM_THREADS"] = NUM_THREADS
os.environ["OPENBLAS_NUM_THREADS"] = NUM_THREADS
os.environ["MKL_NUM_THREADS"] = NUM_THREADS
os.environ["VECLIB_MAXIMUM_THREADS"] = NUM_THREADS
os.environ["NUMEXPR_NUM_THREADS"] = NUM_THREADS
# Optional override for where torch hub/model weights are cached.
if os.environ.get("CACHE_DIR"):
    os.environ["TORCH_HOME"] = os.environ["CACHE_DIR"]
def run_model(model, size):
    """Run one inpainting pass on a random image/mask pair of the given (H, W) size."""
    height, width = size[0], size[1]
    # Random RGB image and a random grayscale mask at the same spatial size.
    image = np.random.randint(0, 256, (height, width, 3)).astype(np.uint8)
    mask = np.random.randint(0, 255, size).astype(np.uint8)

    config = Config(
        ldm_steps=2,
        hd_strategy=HDStrategy.ORIGINAL,
        hd_strategy_crop_margin=128,
        hd_strategy_crop_trigger_size=128,
        hd_strategy_resize_limit=128,
        prompt="a fox is sitting on a bench",
        sd_steps=5,
        sd_sampler=SDSampler.ddim,
    )
    model(image, mask, config)
def benchmark(model, times: int, empty_cache: bool):
    """Measure latency, process RSS, and GPU memory for `times` runs per size.

    NOTE(review): the `empty_cache` parameter is never used inside this
    function — the cache is unconditionally emptied once per size.
    Requires an NVIDIA GPU (uses nvidia_smi/NVML).
    """
    sizes = [(512, 512)]

    nvidia_smi.nvmlInit()
    device_id = 0
    handle = nvidia_smi.nvmlDeviceGetHandleByIndex(device_id)

    # mean ± std formatter (shadows the builtin `format` inside this scope).
    def format(metrics):
        return f"{np.mean(metrics):.2f} ± {np.std(metrics):.2f}"

    process = psutil.Process(os.getpid())
    # For each size, report latency plus CPU/GPU memory usage metrics.
    for size in sizes:
        torch.cuda.empty_cache()
        time_metrics = []
        cpu_metrics = []
        memory_metrics = []
        gpu_memory_metrics = []
        for _ in range(times):
            start = time.time()
            run_model(model, size)
            # Wait for queued CUDA work so the timing covers the full run.
            torch.cuda.synchronize()

            # cpu_metrics.append(process.cpu_percent())
            time_metrics.append((time.time() - start) * 1000)
            memory_metrics.append(process.memory_info().rss / 1024 / 1024)
            gpu_memory_metrics.append(nvidia_smi.nvmlDeviceGetMemoryInfo(handle).used / 1024 / 1024)

        print(f"size: {size}".center(80, "-"))
        # print(f"cpu: {format(cpu_metrics)}")
        print(f"latency: {format(time_metrics)}ms")
        print(f"memory: {format(memory_metrics)} MB")
        print(f"gpu memory: {format(gpu_memory_metrics)} MB")

    nvidia_smi.nvmlShutdown()
def get_args_parser():
    """Parse benchmark CLI args: model name, device, repeat count, cache flag."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--name")
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--times", type=int, default=10)
    parser.add_argument("--empty-cache", action="store_true")
    return parser.parse_args()
if __name__ == "__main__":
    args = get_args_parser()
    device = torch.device(args.device)
    # Build the model to benchmark; NSFW checker is disabled and the text
    # encoder forced to CPU to keep GPU measurements focused on the model.
    # NOTE(review): hf_access_token is a placeholder value.
    model = ModelManager(
        name=args.name,
        device=device,
        sd_run_local=True,
        disable_nsfw=True,
        sd_cpu_textencoder=True,
        hf_access_token="123"
    )
    benchmark(model, args.times, args.empty_cache)
| 3,215 | 28.236364 | 100 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/server.py | #!/usr/bin/env python3
import os
import hashlib
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
import imghdr
import io
import logging
import multiprocessing
import random
import time
from pathlib import Path
import cv2
import numpy as np
import torch
from PIL import Image
from loguru import logger
from lama_cleaner.const import SD15_MODELS
from lama_cleaner.file_manager import FileManager
from lama_cleaner.model.utils import torch_gc
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.plugins import (
InteractiveSeg,
RemoveBG,
RealESRGANUpscaler,
MakeGIF,
GFPGANPlugin,
RestoreFormerPlugin,
AnimeSeg,
)
from lama_cleaner.schema import Config
# Disable TorchScript fusers (same workaround as benchmark.py); the private
# torch._C hooks differ across torch versions, so failures are ignored.
try:
    torch._C._jit_override_can_fuse_on_cpu(False)
    torch._C._jit_override_can_fuse_on_gpu(False)
    torch._C._jit_set_texpr_fuser_enabled(False)
    torch._C._jit_set_nvfuser_enabled(False)
except:
    pass
from flask import (
Flask,
request,
send_file,
cli,
make_response,
send_from_directory,
jsonify,
)
from flask_socketio import SocketIO
# Disable ability for Flask to display warning about using a development server in a production environment.
# https://gist.github.com/jerblack/735b9953ba1ab6234abb43174210d356
cli.show_server_banner = lambda *_: None
from flask_cors import CORS
from lama_cleaner.helper import (
load_img,
numpy_to_bytes,
resize_max_size,
pil_to_bytes,
)
# Cap BLAS/OpenMP thread pools at the CPU count to avoid oversubscription.
NUM_THREADS = str(multiprocessing.cpu_count())
# fix libomp problem on windows https://github.com/Sanster/lama-cleaner/issues/56
os.environ["KMP_DUPLICATE_LIB_OK"] = "True"
os.environ["OMP_NUM_THREADS"] = NUM_THREADS
os.environ["OPENBLAS_NUM_THREADS"] = NUM_THREADS
os.environ["MKL_NUM_THREADS"] = NUM_THREADS
os.environ["VECLIB_MAXIMUM_THREADS"] = NUM_THREADS
os.environ["NUMEXPR_NUM_THREADS"] = NUM_THREADS
# Optional override for where torch model weights are cached.
if os.environ.get("CACHE_DIR"):
    os.environ["TORCH_HOME"] = os.environ["CACHE_DIR"]

# Directory holding the pre-built frontend assets served by Flask.
BUILD_DIR = os.environ.get("LAMA_CLEANER_BUILD_DIR", "app/build")
class NoFlaskwebgui(logging.Filter):
    """Logging filter that silences noisy werkzeug/flaskwebgui records.

    It also echoes the "Running on http:..." portion of a record to stdout so
    the serving URL stays visible to the user.
    """

    _SUPPRESSED = (
        "flaskwebgui-keep-server-alive",
        "socket.io",
        "This is a development server.",
    )

    def filter(self, record):
        message = record.getMessage()
        marker = "Running on http:"
        if marker in message:
            # Surface the serving address on stdout.
            print(message[message.index(marker):])
        return not any(token in message for token in self._SUPPRESSED)
# Attach the noise filter to werkzeug's logger.
logging.getLogger("werkzeug").addFilter(NoFlaskwebgui())

app = Flask(__name__, static_folder=os.path.join(BUILD_DIR, "static"))
# Allow non-ASCII characters in JSON responses unescaped.
app.config["JSON_AS_ASCII"] = False
CORS(app, expose_headers=["Content-Disposition"])

sio_logger = logging.getLogger("sio-logger")
sio_logger.setLevel(logging.ERROR)
socketio = SocketIO(app, cors_allowed_origins="*", async_mode="threading")

# Module-level server state, populated by main() before the app serves requests.
model: ModelManager = None
thumb: FileManager = None
output_dir: str = None
device = None
input_image_path: str = None
is_disable_model_switch: bool = False
is_controlnet: bool = False
controlnet_method: str = "control_v11p_sd15_canny"
is_enable_file_manager: bool = False
is_enable_auto_saving: bool = False
is_desktop: bool = False
image_quality: int = 95
plugins = {}
def get_image_ext(img_bytes):
    """Detect the image format of raw bytes; fall back to "jpeg" when unknown."""
    detected = imghdr.what("", img_bytes)
    return "jpeg" if detected is None else detected
def diffuser_callback(i, t, latents):
    # Diffusion per-step callback: push the current step index to the frontend
    # over the websocket. `t` (timestep) and `latents` are intentionally unused.
    socketio.emit("diffusion_progress", {"step": i})
@app.route("/save_image", methods=["POST"])
def save_image():
    """Save the posted image to the configured output directory.

    Re-attaches the alpha channel and EXIF data before encoding.
    Returns 500 when auto-saving is not configured.
    """
    if output_dir is None:
        return "--output-dir is None", 500

    input = request.files
    filename = request.form["filename"]
    origin_image_bytes = input["image"].read()  # RGB
    ext = get_image_ext(origin_image_bytes)
    image, alpha_channel, exif_infos = load_img(origin_image_bytes, return_exif=True)
    # NOTE(review): `filename` comes from the client and is joined unchecked —
    # a path-traversal name could escape output_dir; consider sanitizing.
    save_path = os.path.join(output_dir, filename)

    if alpha_channel is not None:
        # Resize the alpha to match the (possibly resized) RGB before merging.
        if alpha_channel.shape[:2] != image.shape[:2]:
            alpha_channel = cv2.resize(
                alpha_channel, dsize=(image.shape[1], image.shape[0])
            )
        image = np.concatenate((image, alpha_channel[:, :, np.newaxis]), axis=-1)

    pil_image = Image.fromarray(image)

    img_bytes = pil_to_bytes(
        pil_image,
        ext,
        quality=image_quality,
        exif_infos=exif_infos,
    )
    with open(save_path, "wb") as fw:
        fw.write(img_bytes)

    return "ok", 200
@app.route("/medias/<tab>")
def medias(tab):
    """List media filenames for the file manager; `tab` picks input vs output."""
    names = thumb.media_names if tab == "image" else thumb.output_media_names
    response = make_response(jsonify(names), 200)

    # response.last_modified = thumb.modified_time[tab]
    # response.cache_control.no_cache = True
    # response.cache_control.max_age = 0
    # response.make_conditional(request)
    return response
@app.route("/media/<tab>/<filename>")
def media_file(tab, filename):
    """Serve an original media file from the input dir ("image" tab) or output dir."""
    directory = thumb.root_directory if tab == "image" else thumb.output_dir
    return send_from_directory(directory, filename)
@app.route("/media_thumbnail/<tab>/<filename>")
def media_thumbnail_file(tab, filename):
    """Serve a (cached) thumbnail of a media file; size via ?width=/&height= query args."""
    args = request.args
    width = args.get("width")
    height = args.get("height")
    # Default to a 256px-wide thumbnail when neither dimension is requested.
    if width is None and height is None:
        width = 256
    # Query args arrive as strings; float() first tolerates "256.0"-style values.
    if width:
        width = int(float(width))
    if height:
        height = int(float(height))

    directory = thumb.root_directory
    if tab == "output":
        directory = thumb.output_dir
    # get_thumbnail returns the generated file name and its final dimensions.
    thumb_filename, (width, height) = thumb.get_thumbnail(
        directory, filename, width, height
    )
    thumb_filepath = f"{app.config['THUMBNAIL_MEDIA_THUMBNAIL_ROOT']}{thumb_filename}"

    response = make_response(send_file(thumb_filepath))
    # Report the actual thumbnail size so the frontend can lay out the grid.
    response.headers["X-Width"] = str(width)
    response.headers["X-Height"] = str(height)
    return response
@app.route("/inpaint", methods=["POST"])
def process():
    """Main inpainting endpoint.

    Expects multipart form data with "image" and "mask" files plus all model
    parameters as form fields. Returns the inpainted image with the original
    alpha channel and EXIF data re-attached; the used seed is reported in the
    X-Seed response header.
    """
    input = request.files
    # RGB
    origin_image_bytes = input["image"].read()
    image, alpha_channel, exif_infos = load_img(origin_image_bytes, return_exif=True)

    mask, _ = load_img(input["mask"].read(), gray=True)
    # Binarize the mask: any value > 127 becomes 255.
    mask = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)[1]

    if image.shape[:2] != mask.shape[:2]:
        return (
            # Fixed typo in user-facing message: "queal" -> "equal".
            f"Mask shape{mask.shape[:2]} not equal to Image shape{image.shape[:2]}",
            400,
        )

    original_shape = image.shape
    interpolation = cv2.INTER_CUBIC

    form = request.form
    size_limit = max(image.shape)

    # Optional reference image for the paint-by-example model.
    if "paintByExampleImage" in input:
        paint_by_example_example_image, _ = load_img(
            input["paintByExampleImage"].read()
        )
        paint_by_example_example_image = Image.fromarray(paint_by_example_example_image)
    else:
        paint_by_example_example_image = None

    # Collect every model parameter from the form into a single Config.
    config = Config(
        ldm_steps=form["ldmSteps"],
        ldm_sampler=form["ldmSampler"],
        hd_strategy=form["hdStrategy"],
        zits_wireframe=form["zitsWireframe"],
        hd_strategy_crop_margin=form["hdStrategyCropMargin"],
        hd_strategy_crop_trigger_size=form["hdStrategyCropTrigerSize"],
        hd_strategy_resize_limit=form["hdStrategyResizeLimit"],
        prompt=form["prompt"],
        negative_prompt=form["negativePrompt"],
        use_croper=form["useCroper"],
        croper_x=form["croperX"],
        croper_y=form["croperY"],
        croper_height=form["croperHeight"],
        croper_width=form["croperWidth"],
        sd_scale=form["sdScale"],
        sd_mask_blur=form["sdMaskBlur"],
        sd_strength=form["sdStrength"],
        sd_steps=form["sdSteps"],
        sd_guidance_scale=form["sdGuidanceScale"],
        sd_sampler=form["sdSampler"],
        sd_seed=form["sdSeed"],
        sd_match_histograms=form["sdMatchHistograms"],
        cv2_flag=form["cv2Flag"],
        cv2_radius=form["cv2Radius"],
        paint_by_example_steps=form["paintByExampleSteps"],
        paint_by_example_guidance_scale=form["paintByExampleGuidanceScale"],
        paint_by_example_mask_blur=form["paintByExampleMaskBlur"],
        paint_by_example_seed=form["paintByExampleSeed"],
        paint_by_example_match_histograms=form["paintByExampleMatchHistograms"],
        paint_by_example_example_image=paint_by_example_example_image,
        p2p_steps=form["p2pSteps"],
        p2p_image_guidance_scale=form["p2pImageGuidanceScale"],
        p2p_guidance_scale=form["p2pGuidanceScale"],
        controlnet_conditioning_scale=form["controlnet_conditioning_scale"],
        controlnet_method=form["controlnet_method"],
    )

    # Seed -1 means "random": draw a concrete seed so it can be reported back.
    if config.sd_seed == -1:
        config.sd_seed = random.randint(1, 999999999)
    if config.paint_by_example_seed == -1:
        config.paint_by_example_seed = random.randint(1, 999999999)

    logger.info(f"Origin image shape: {original_shape}")
    image = resize_max_size(image, size_limit=size_limit, interpolation=interpolation)
    mask = resize_max_size(mask, size_limit=size_limit, interpolation=interpolation)

    start = time.time()
    try:
        res_np_img = model(image, mask, config)
    except RuntimeError as e:
        if "CUDA out of memory. " in str(e):
            # NOTE: the string may change?
            return "CUDA out of memory", 500
        else:
            logger.exception(e)
            return f"{str(e)}", 500
    finally:
        logger.info(f"process time: {(time.time() - start) * 1000}ms")
        torch_gc()

    # Model output is BGR; convert back and restore the alpha channel.
    res_np_img = cv2.cvtColor(res_np_img.astype(np.uint8), cv2.COLOR_BGR2RGB)
    if alpha_channel is not None:
        if alpha_channel.shape[:2] != res_np_img.shape[:2]:
            alpha_channel = cv2.resize(
                alpha_channel, dsize=(res_np_img.shape[1], res_np_img.shape[0])
            )
        res_np_img = np.concatenate(
            (res_np_img, alpha_channel[:, :, np.newaxis]), axis=-1
        )

    ext = get_image_ext(origin_image_bytes)

    bytes_io = io.BytesIO(
        pil_to_bytes(
            Image.fromarray(res_np_img),
            ext,
            quality=image_quality,
            exif_infos=exif_infos,
        )
    )

    response = make_response(
        send_file(
            # io.BytesIO(numpy_to_bytes(res_np_img, ext)),
            bytes_io,
            mimetype=f"image/{ext}",
        )
    )
    response.headers["X-Seed"] = str(config.sd_seed)

    socketio.emit("diffusion_finish")
    return response
@app.route("/run_plugin", methods=["POST"])
def run_plugin():
    """Run the named plugin on the posted image and return its result.

    Response encoding depends on the plugin: GIF download for MakeGIF, PNG for
    InteractiveSeg/RemoveBG/AnimeSeg, otherwise the original image format with
    alpha/EXIF restored.
    """
    form = request.form
    files = request.files
    name = form["name"]
    if name not in plugins:
        return "Plugin not found", 500

    origin_image_bytes = files["image"].read()  # RGB
    rgb_np_img, alpha_channel, exif_infos = load_img(
        origin_image_bytes, return_exif=True
    )

    start = time.time()
    try:
        form = dict(form)
        if name == InteractiveSeg.name:
            # Image hash lets the plugin cache embeddings per image.
            img_md5 = hashlib.md5(origin_image_bytes).hexdigest()
            form["img_md5"] = img_md5
        bgr_res = plugins[name](rgb_np_img, files, form)
    except RuntimeError as e:
        torch.cuda.empty_cache()
        if "CUDA out of memory. " in str(e):
            # NOTE: the string may change?
            return "CUDA out of memory", 500
        else:
            logger.exception(e)
            return "Internal Server Error", 500

    logger.info(f"{name} process time: {(time.time() - start) * 1000}ms")
    torch_gc()

    # MakeGIF returns encoded GIF bytes, served as a download.
    if name == MakeGIF.name:
        return send_file(
            io.BytesIO(bgr_res),
            mimetype="image/gif",
            as_attachment=True,
            download_name=form["filename"],
        )
    # InteractiveSeg returns a mask image, always PNG.
    if name == InteractiveSeg.name:
        return make_response(
            send_file(
                io.BytesIO(numpy_to_bytes(bgr_res, "png")),
                mimetype="image/png",
            )
        )

    # Background-removal plugins already return RGBA-style output; keep PNG.
    if name in [RemoveBG.name, AnimeSeg.name]:
        rgb_res = bgr_res
        ext = "png"
    else:
        rgb_res = cv2.cvtColor(bgr_res, cv2.COLOR_BGR2RGB)
        ext = get_image_ext(origin_image_bytes)
        # Restore the original alpha channel on the processed image.
        if alpha_channel is not None:
            if alpha_channel.shape[:2] != rgb_res.shape[:2]:
                alpha_channel = cv2.resize(
                    alpha_channel, dsize=(rgb_res.shape[1], rgb_res.shape[0])
                )
            rgb_res = np.concatenate(
                (rgb_res, alpha_channel[:, :, np.newaxis]), axis=-1
            )

    response = make_response(
        send_file(
            io.BytesIO(
                pil_to_bytes(
                    Image.fromarray(rgb_res),
                    ext,
                    quality=image_quality,
                    exif_infos=exif_infos,
                )
            ),
            mimetype=f"image/{ext}",
        )
    )
    return response
@app.route("/server_config", methods=["GET"])
def get_server_config():
    """Report backend feature flags and enabled plugins to the frontend."""
    server_config = {
        "isControlNet": is_controlnet,
        "controlNetMethod": controlnet_method,
        "isDisableModelSwitchState": is_disable_model_switch,
        "isEnableAutoSaving": is_enable_auto_saving,
        "enableFileManager": is_enable_file_manager,
        "plugins": list(plugins.keys()),
    }
    return server_config, 200
@app.route("/model")
def current_model():
    # Return the name of the currently loaded model as plain text.
    return model.name, 200
@app.route("/model_downloaded/<name>")
def model_downloaded(name):
    # Report whether the weights for `name` exist on disk ("True"/"False").
    return str(model.is_downloaded(name)), 200
@app.route("/is_desktop")
def get_is_desktop():
    # Report whether the backend was launched as a desktop GUI app.
    return str(is_desktop), 200
@app.route("/model", methods=["POST"])
def switch_model():
    """Switch the backend inpainting model to the one named in the form data."""
    if is_disable_model_switch:
        return "Switch model is disabled", 400

    new_name = request.form.get("name")
    if new_name == model.name:
        return "Same model", 200

    try:
        model.switch(new_name)
    except NotImplementedError:
        return f"{new_name} not implemented", 403
    return f"ok, switch to {new_name}", 200
@app.route("/")
def index():
    # Serve the pre-built frontend's entry page.
    return send_file(os.path.join(BUILD_DIR, "index.html"))
@app.route("/inputimage")
def set_input_photo():
    """Serve the image passed via --input (if any) for the frontend to preload."""
    if input_image_path:
        # Read the bytes only to sniff the format for the mimetype.
        with open(input_image_path, "rb") as f:
            image_in_bytes = f.read()
        return send_file(
            input_image_path,
            as_attachment=True,
            download_name=Path(input_image_path).name,
            mimetype=f"image/{get_image_ext(image_in_bytes)}",
        )
    else:
        return "No Input Image"
def build_plugins(args):
    """Instantiate every plugin enabled on the command line into the global `plugins` dict."""
    global plugins
    if args.enable_interactive_seg:
        logger.info(f"Initialize {InteractiveSeg.name} plugin")
        plugins[InteractiveSeg.name] = InteractiveSeg(
            args.interactive_seg_model, args.interactive_seg_device
        )

    if args.enable_remove_bg:
        logger.info(f"Initialize {RemoveBG.name} plugin")
        plugins[RemoveBG.name] = RemoveBG()

    if args.enable_anime_seg:
        logger.info(f"Initialize {AnimeSeg.name} plugin")
        plugins[AnimeSeg.name] = AnimeSeg()

    if args.enable_realesrgan:
        logger.info(
            f"Initialize {RealESRGANUpscaler.name} plugin: {args.realesrgan_model}, {args.realesrgan_device}"
        )
        plugins[RealESRGANUpscaler.name] = RealESRGANUpscaler(
            args.realesrgan_model,
            args.realesrgan_device,
            no_half=args.realesrgan_no_half,
        )

    # Face-restore plugins reuse RealESRGAN (if enabled) as background upscaler.
    if args.enable_gfpgan:
        logger.info(f"Initialize {GFPGANPlugin.name} plugin")
        if args.enable_realesrgan:
            logger.info("Use realesrgan as GFPGAN background upscaler")
        else:
            logger.info(
                f"GFPGAN no background upscaler, use --enable-realesrgan to enable it"
            )
        plugins[GFPGANPlugin.name] = GFPGANPlugin(
            args.gfpgan_device, upscaler=plugins.get(RealESRGANUpscaler.name, None)
        )

    if args.enable_restoreformer:
        logger.info(f"Initialize {RestoreFormerPlugin.name} plugin")
        plugins[RestoreFormerPlugin.name] = RestoreFormerPlugin(
            args.restoreformer_device,
            upscaler=plugins.get(RealESRGANUpscaler.name, None),
        )

    if args.enable_gif:
        logger.info(f"Initialize GIF plugin")
        plugins[MakeGIF.name] = MakeGIF()
def main(args):
    """Configure module globals from CLI args, build the model, start serving.

    Depending on ``args.gui`` this either launches a desktop window via
    flaskwebgui or runs the plain socketio web server (blocking in both
    cases).
    """
    global model
    global device
    global input_image_path
    global is_disable_model_switch
    global is_enable_file_manager
    global is_desktop
    global thumb
    global output_dir
    global is_enable_auto_saving
    global is_controlnet
    global controlnet_method
    global image_quality

    # Optional plugins (segmentation, upscalers, ...) are created first.
    build_plugins(args)

    image_quality = args.quality

    if args.sd_controlnet and args.model in SD15_MODELS:
        is_controlnet = True
        controlnet_method = args.sd_controlnet_method

    output_dir = args.output_dir
    if output_dir:
        # An output directory implies auto-saving of results.
        is_enable_auto_saving = True

    device = torch.device(args.device)
    is_disable_model_switch = args.disable_model_switch
    is_desktop = args.gui
    if is_disable_model_switch:
        logger.info(
            f"Start with --disable-model-switch, model switch on frontend is disable"
        )

    if args.input and os.path.isdir(args.input):
        # Directory input: expose a browsable file manager with thumbnails.
        logger.info(f"Initialize file manager")
        thumb = FileManager(app)
        is_enable_file_manager = True
        app.config["THUMBNAIL_MEDIA_ROOT"] = args.input
        app.config["THUMBNAIL_MEDIA_THUMBNAIL_ROOT"] = os.path.join(
            args.output_dir, "lama_cleaner_thumbnails"
        )
        thumb.output_dir = Path(args.output_dir)
        # thumb.start()
        # try:
        #     while True:
        #         time.sleep(1)
        # finally:
        #     thumb.image_dir_observer.stop()
        #     thumb.image_dir_observer.join()
        #     thumb.output_dir_observer.stop()
        #     thumb.output_dir_observer.join()
    else:
        # Single-file input: served to the frontend via /inputimage.
        input_image_path = args.input

    model = ModelManager(
        name=args.model,
        sd_controlnet=args.sd_controlnet,
        sd_controlnet_method=args.sd_controlnet_method,
        device=device,
        no_half=args.no_half,
        hf_access_token=args.hf_access_token,
        disable_nsfw=args.sd_disable_nsfw or args.disable_nsfw,
        sd_cpu_textencoder=args.sd_cpu_textencoder,
        sd_run_local=args.sd_run_local,
        sd_local_model_path=args.sd_local_model_path,
        local_files_only=args.local_files_only,
        cpu_offload=args.cpu_offload,
        enable_xformers=args.sd_enable_xformers or args.enable_xformers,
        callback=diffuser_callback,
    )

    if args.gui:
        app_width, app_height = args.gui_size
        from flaskwebgui import FlaskUI

        ui = FlaskUI(
            app,
            socketio=socketio,
            width=app_width,
            height=app_height,
            host=args.host,
            port=args.port,
            close_server_on_exit=not args.no_gui_auto_close,
        )
        ui.run()
    else:
        socketio.run(
            app,
            host=args.host,
            port=args.port,
            debug=args.debug,
            allow_unsafe_werkzeug=True,
        )
| 18,878 | 29.303371 | 109 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/helper.py | import io
import os
import sys
from typing import List, Optional
from urllib.parse import urlparse
import cv2
from PIL import Image, ImageOps, PngImagePlugin
import numpy as np
import torch
from lama_cleaner.const import MPS_SUPPORT_MODELS
from loguru import logger
from torch.hub import download_url_to_file, get_dir
import hashlib
def md5sum(filename):
    """Return the MD5 hex digest of a file, read in block-sized chunks."""
    digest = hashlib.md5()
    with open(filename, "rb") as f:
        chunk_size = 128 * digest.block_size
        while True:
            chunk = f.read(chunk_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def switch_mps_device(model_name, device):
    """Fall back to CPU for models that are not in MPS_SUPPORT_MODELS."""
    wants_mps = str(device) == "mps"
    if wants_mps and model_name not in MPS_SUPPORT_MODELS:
        logger.info(f"{model_name} not support mps, switch to cpu")
        return torch.device("cpu")
    return device
def get_cache_path_by_url(url):
    """Return the local checkpoint path torch.hub uses for this URL.

    Creates the ``checkpoints`` directory under the hub dir if missing.
    """
    model_dir = os.path.join(get_dir(), "checkpoints")
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    filename = os.path.basename(urlparse(url).path)
    return os.path.join(model_dir, filename)
def download_model(url, model_md5: str = None):
    """Download ``url`` into torch.hub's checkpoint cache if not yet present.

    When ``model_md5`` is given the downloaded file is verified; on mismatch
    the corrupt file is removed (best effort) and the process exits with -1.

    Returns the local cached file path.
    """
    cached_file = get_cache_path_by_url(url)
    if not os.path.exists(cached_file):
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
        hash_prefix = None
        download_url_to_file(url, cached_file, hash_prefix, progress=True)
        if model_md5:
            _md5 = md5sum(cached_file)
            if model_md5 == _md5:
                logger.info(f"Download model success, md5: {_md5}")
            else:
                try:
                    os.remove(cached_file)
                    logger.error(
                        f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart lama-cleaner."
                        f"If you still have errors, please try download model manually first https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n"
                    )
                except:
                    # Deletion can fail (e.g. permissions); tell the user to do it.
                    logger.error(
                        f"Model md5: {_md5}, expected md5: {model_md5}, please delete {cached_file} and restart lama-cleaner."
                    )
                # A wrong checkpoint must never be loaded.
                exit(-1)

    return cached_file
def ceil_modulo(x, mod):
    """Round ``x`` up to the nearest multiple of ``mod``."""
    remainder = x % mod
    if remainder == 0:
        return x
    return x + (mod - remainder)
def handle_error(model_path, model_md5, e):
    """Diagnose a model-loading failure, log guidance, and exit the process.

    If the on-disk md5 differs from the expected one, the corrupt file is
    deleted (best effort) and the user is told to restart; otherwise the
    original exception ``e`` is reported as a bug. Always exits with -1.
    """
    _md5 = md5sum(model_path)
    if _md5 != model_md5:
        try:
            os.remove(model_path)
            logger.error(
                f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart lama-cleaner."
                f"If you still have errors, please try download model manually first https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n"
            )
        except:
            # Deletion may fail (e.g. permissions); ask the user to delete it.
            logger.error(
                f"Model md5: {_md5}, expected md5: {model_md5}, please delete {model_path} and restart lama-cleaner."
            )
    else:
        logger.error(
            f"Failed to load model {model_path},"
            f"please submit an issue at https://github.com/Sanster/lama-cleaner/issues and include a screenshot of the error:\n{e}"
        )
    exit(-1)
def load_jit_model(url_or_path, device, model_md5: str):
    """Load a TorchScript model from a local path or a (cached) URL.

    On any load failure, handle_error logs diagnostics and exits the
    process. Returns the model in eval mode on ``device``.
    """
    if os.path.exists(url_or_path):
        model_path = url_or_path
    else:
        model_path = download_model(url_or_path, model_md5)

    logger.info(f"Loading model from: {model_path}")
    try:
        # Loaded on CPU first, then moved to the target device.
        model = torch.jit.load(model_path, map_location="cpu").to(device)
    except Exception as e:
        handle_error(model_path, model_md5, e)
    model.eval()
    return model
def load_model(model: torch.nn.Module, url_or_path, device, model_md5):
    """Load a state dict into ``model`` from a local path or a (cached) URL.

    On any load failure, handle_error logs diagnostics and exits the
    process. Returns the model in eval mode on ``device``.
    """
    if os.path.exists(url_or_path):
        model_path = url_or_path
    else:
        model_path = download_model(url_or_path, model_md5)

    try:
        logger.info(f"Loading model from: {model_path}")
        # Weights are loaded onto CPU first, then the module moves to device.
        state_dict = torch.load(model_path, map_location="cpu")
        model.load_state_dict(state_dict, strict=True)
        model.to(device)
    except Exception as e:
        handle_error(model_path, model_md5, e)
    model.eval()
    return model
def numpy_to_bytes(image_numpy: np.ndarray, ext: str) -> bytes:
    """Encode a numpy image to ``ext`` bytes (max JPEG quality, PNG level 0)."""
    encode_params = [
        int(cv2.IMWRITE_JPEG_QUALITY), 100,
        int(cv2.IMWRITE_PNG_COMPRESSION), 0,
    ]
    _, buffer = cv2.imencode(f".{ext}", image_numpy, encode_params)
    return buffer.tobytes()
def pil_to_bytes(pil_img, ext: str, quality: int = 95, exif_infos=None) -> bytes:
    """Encode a PIL image to bytes in the given format.

    Args:
        pil_img: the PIL image to encode.
        ext: output format name passed to ``Image.save`` (e.g. "png", "jpeg").
        quality: encoder quality setting.
        exif_infos: optional metadata dict; None-valued entries are dropped,
            and a "parameters" entry is written as a PNG text chunk for png.

    Returns:
        The encoded image bytes.
    """
    # Fix: the default used to be a mutable `{}`, which is shared between
    # calls; use the None-sentinel idiom instead (behavior unchanged).
    if exif_infos is None:
        exif_infos = {}
    with io.BytesIO() as output:
        kwargs = {k: v for k, v in exif_infos.items() if v is not None}
        if ext == "png" and "parameters" in kwargs:
            # PNG stores free-form text via a PngInfo object, not a kwarg.
            pnginfo_data = PngImagePlugin.PngInfo()
            pnginfo_data.add_text("parameters", kwargs["parameters"])
            kwargs["pnginfo"] = pnginfo_data

        pil_img.save(
            output,
            format=ext,
            quality=quality,
            **kwargs,
        )
        image_bytes = output.getvalue()
    return image_bytes
def load_img(img_bytes, gray: bool = False, return_exif: bool = False):
    """Decode image bytes to a numpy array.

    Args:
        img_bytes: the raw encoded image bytes.
        gray: convert to single-channel grayscale instead of RGB.
        return_exif: also return an exif/metadata dict.

    Returns:
        ``(np_img, alpha_channel)`` or, when ``return_exif`` is True,
        ``(np_img, alpha_channel, exif_infos)``. ``alpha_channel`` is None
        unless the source image was RGBA.
    """
    alpha_channel = None
    image = Image.open(io.BytesIO(img_bytes))

    if return_exif:
        info = image.info or {}
        exif_infos = {"exif": image.getexif(), "parameters": info.get("parameters")}

    try:
        image = ImageOps.exif_transpose(image)
    except Exception:
        # Fix: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate. Orientation fixing stays best-effort: a corrupt
        # EXIF block must not make decoding fail.
        pass

    if gray:
        image = image.convert("L")
        np_img = np.array(image)
    else:
        if image.mode == "RGBA":
            np_img = np.array(image)
            # Keep the alpha plane separately; the returned image is RGB.
            alpha_channel = np_img[:, :, -1]
            np_img = cv2.cvtColor(np_img, cv2.COLOR_RGBA2RGB)
        else:
            image = image.convert("RGB")
            np_img = np.array(image)

    if return_exif:
        return np_img, alpha_channel, exif_infos
    return np_img, alpha_channel
def norm_img(np_img):
    """Convert an HWC (or HW) uint8 image to CHW float32 scaled to [0, 1]."""
    if np_img.ndim == 2:
        # Promote HW to HWC with a single channel.
        np_img = np_img[:, :, np.newaxis]
    chw = np.transpose(np_img, (2, 0, 1))
    return chw.astype("float32") / 255
def resize_max_size(
    np_img, size_limit: int, interpolation=cv2.INTER_CUBIC
) -> np.ndarray:
    """Downscale so the longer side is at most ``size_limit``; never upscale."""
    height, width = np_img.shape[:2]
    longer = max(height, width)
    if longer <= size_limit:
        return np_img
    scale = size_limit / longer
    # Round half-up to the nearest pixel.
    target = (int(width * scale + 0.5), int(height * scale + 0.5))
    return cv2.resize(np_img, dsize=target, interpolation=interpolation)
def pad_img_to_modulo(
    img: np.ndarray, mod: int, square: bool = False, min_size: Optional[int] = None
):
    """Symmetric-pad an (H, W[, C]) image so both sides are multiples of mod.

    Args:
        img: input image, HW or HWC; HW input gains a channel axis.
        mod: side lengths of the result are multiples of this value.
        square: force the output to be square.
        min_size: optional lower bound on both sides (must divide by mod).

    Returns:
        The padded HWC array (mode "symmetric").
    """
    if img.ndim == 2:
        img = img[:, :, np.newaxis]
    height, width = img.shape[:2]

    # Ceil each side up to the next multiple of mod.
    out_height = -(-height // mod) * mod
    out_width = -(-width // mod) * mod

    if min_size is not None:
        assert min_size % mod == 0
        out_width = max(min_size, out_width)
        out_height = max(min_size, out_height)

    if square:
        side = max(out_height, out_width)
        out_height = out_width = side

    return np.pad(
        img,
        ((0, out_height - height), (0, out_width - width), (0, 0)),
        mode="symmetric",
    )
def boxes_from_mask(mask: np.ndarray) -> List[np.ndarray]:
    """Bounding boxes [x1, y1, x2, y2] of each connected region in a 0~255 mask.

    Args:
        mask: (h, w, 1) 0~255

    Returns:
        One int ndarray per external contour, clipped to the image bounds.
    """
    height, width = mask.shape[:2]
    _, binary = cv2.threshold(mask, 127, 255, 0)
    contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    boxes = []
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        box = np.array([x, y, x + w, y + h]).astype(int)
        # Even indices are x coords (clip to width), odd are y (clip to height).
        box[::2] = np.clip(box[::2], 0, width)
        box[1::2] = np.clip(box[1::2], 0, height)
        boxes.append(box)
    return boxes
def only_keep_largest_contour(mask: np.ndarray) -> np.ndarray:
    """Keep only the largest connected region of a 0~255 mask.

    Args:
        mask: (h, w) 0~255

    Returns:
        A mask of the same shape containing only the largest contour filled
        with 255, or the input mask unchanged when no contour is found.
        (Fix: the return annotation used to claim ``List[np.ndarray]`` but a
        single ndarray is returned on both paths.)
    """
    _, thresh = cv2.threshold(mask, 127, 255, 0)
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    max_area = 0
    max_index = -1
    for i, cnt in enumerate(contours):
        area = cv2.contourArea(cnt)
        if area > max_area:
            max_area = area
            max_index = i

    if max_index != -1:
        new_mask = np.zeros_like(mask)
        return cv2.drawContours(new_mask, contours, max_index, 255, -1)
    else:
        return mask
| 8,639 | 28.488055 | 165 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/runtime.py | # https://github.com/huggingface/huggingface_hub/blob/5a12851f54bf614be39614034ed3a9031922d297/src/huggingface_hub/utils/_runtime.py
import platform
import sys
import packaging.version
from rich import print
from typing import Dict, Any
# Interpreter version string, e.g. "3.10.4" (any trailing "+" is stripped).
_PY_VERSION: str = sys.version.split()[0].rstrip("+")

# importlib.metadata is stdlib from Python 3.8; use the backport before that.
if packaging.version.Version(_PY_VERSION) < packaging.version.Version("3.8.0"):
    import importlib_metadata  # type: ignore
else:
    import importlib.metadata as importlib_metadata  # type: ignore
# Installed version of each interesting package, resolved once at import time.
_package_versions = {}

# Packages whose versions are reported by dump_environment_info().
_CANDIDATES = [
    "torch",
    "torchvision",
    "Pillow",
    "diffusers",
    "transformers",
    "opencv-python",
    "xformers",
    "accelerate",
    "lama-cleaner",
    "rembg",
    "realesrgan",
    "gfpgan",
]

# Check once at runtime
for name in _CANDIDATES:
    _package_versions[name] = "N/A"
    try:
        _package_versions[name] = importlib_metadata.version(name)
    except importlib_metadata.PackageNotFoundError:
        # Optional dependency not installed; keep the "N/A" placeholder.
        pass
def dump_environment_info() -> Dict[str, str]:
    """Print and return machine/package information to help debug issues."""
    info: Dict[str, Any] = {
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
    }
    info.update(_package_versions)
    lines = [f"- {prop}: {val}" for prop, val in info.items()]
    print("\n".join(lines) + "\n")
    return info
| 1,374 | 25.960784 | 132 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/web_config.py | import json
import os
from datetime import datetime
import gradio as gr
from loguru import logger
from lama_cleaner.const import *
_config_file = None
def save_config(
    host,
    port,
    model,
    sd_local_model_path,
    sd_controlnet,
    sd_controlnet_method,
    device,
    gui,
    no_gui_auto_close,
    no_half,
    cpu_offload,
    disable_nsfw,
    sd_cpu_textencoder,
    enable_xformers,
    local_files_only,
    model_dir,
    input,
    output_dir,
    quality,
    enable_interactive_seg,
    interactive_seg_model,
    interactive_seg_device,
    enable_remove_bg,
    enable_anime_seg,
    enable_realesrgan,
    realesrgan_device,
    realesrgan_model,
    enable_gfpgan,
    gfpgan_device,
    enable_restoreformer,
    restoreformer_device,
    enable_gif,
):
    """Persist the web-form values as JSON to the module-level ``_config_file``.

    Returns a status message string shown in the UI.

    NOTE: ``Config(**locals())`` must remain the first statement — it relies
    on ``locals()`` containing exactly the function parameters, so no local
    variable may be introduced before it.
    """
    config = Config(**locals())
    print(config)

    if config.input and not os.path.exists(config.input):
        return "[Error] Input file or directory does not exist"

    current_time = datetime.now().strftime("%H:%M:%S")
    msg = f"[{current_time}] Successful save config to: {os.path.abspath(_config_file)}"
    logger.info(msg)
    try:
        with open(_config_file, "w", encoding="utf-8") as f:
            json.dump(config.dict(), f, indent=4, ensure_ascii=False)
    except Exception as e:
        return f"Save failed: {str(e)}"

    return msg
def close_server(*args):
    """Signal the current process to shut down (gradio close callback)."""
    # TODO: make close both browser and server works
    import os
    import signal

    current_pid = os.getpid()
    os.kill(current_pid, signal.SIGUSR1)
def main(config_file: str):
    """Launch the gradio page that edits the installer's JSON config.

    Loads the current values from ``config_file``, renders one widget per
    option, and writes the form back through :func:`save_config`.
    """
    global _config_file
    _config_file = config_file

    init_config = load_config(config_file)

    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column(scale=1):
                save_btn = gr.Button(value="Save configurations")
                message = gr.HTML()

        with gr.Tabs():
            with gr.Tab("Common"):
                with gr.Row():
                    host = gr.Textbox(init_config.host, label="Host")
                    port = gr.Number(init_config.port, label="Port", precision=0)

                model = gr.Radio(
                    AVAILABLE_MODELS, label="Model", value=init_config.model
                )
                device = gr.Radio(
                    AVAILABLE_DEVICES, label="Device", value=init_config.device
                )
                quality = gr.Slider(
                    # Fix: show the saved value; every other widget loads from
                    # init_config but this one was hard-coded to 95.
                    value=init_config.quality,
                    label=f"Image Quality ({QUALITY_HELP})",
                    minimum=75,
                    maximum=100,
                    step=1,
                )

                with gr.Column():
                    gui = gr.Checkbox(init_config.gui, label=f"{GUI_HELP}")
                    no_gui_auto_close = gr.Checkbox(
                        init_config.no_gui_auto_close, label=f"{NO_GUI_AUTO_CLOSE_HELP}"
                    )

                with gr.Column():
                    model_dir = gr.Textbox(
                        init_config.model_dir, label=f"{MODEL_DIR_HELP}"
                    )
                    input = gr.Textbox(
                        init_config.input,
                        label=f"Input file or directory. {INPUT_HELP}",
                    )
                    output_dir = gr.Textbox(
                        init_config.output_dir,
                        label=f"Output directory. {OUTPUT_DIR_HELP}",
                    )

            with gr.Tab("Plugins"):
                enable_interactive_seg = gr.Checkbox(
                    init_config.enable_interactive_seg, label=INTERACTIVE_SEG_HELP
                )
                interactive_seg_model = gr.Radio(
                    AVAILABLE_INTERACTIVE_SEG_MODELS,
                    label=f"Segment Anything models. {INTERACTIVE_SEG_MODEL_HELP}",
                    value=init_config.interactive_seg_model,
                )
                interactive_seg_device = gr.Radio(
                    AVAILABLE_INTERACTIVE_SEG_DEVICES,
                    label="Segment Anything Device",
                    value=init_config.interactive_seg_device,
                )
                with gr.Row():
                    enable_remove_bg = gr.Checkbox(
                        init_config.enable_remove_bg, label=REMOVE_BG_HELP
                    )
                with gr.Row():
                    enable_anime_seg = gr.Checkbox(
                        init_config.enable_anime_seg, label=ANIMESEG_HELP
                    )

                with gr.Row():
                    enable_realesrgan = gr.Checkbox(
                        init_config.enable_realesrgan, label=REALESRGAN_HELP
                    )
                    realesrgan_device = gr.Radio(
                        REALESRGAN_AVAILABLE_DEVICES,
                        label="RealESRGAN Device",
                        value=init_config.realesrgan_device,
                    )
                    realesrgan_model = gr.Radio(
                        RealESRGANModelNameList,
                        label="RealESRGAN model",
                        value=init_config.realesrgan_model,
                    )
                with gr.Row():
                    enable_gfpgan = gr.Checkbox(
                        init_config.enable_gfpgan, label=GFPGAN_HELP
                    )
                    gfpgan_device = gr.Radio(
                        GFPGAN_AVAILABLE_DEVICES,
                        label="GFPGAN Device",
                        value=init_config.gfpgan_device,
                    )
                with gr.Row():
                    enable_restoreformer = gr.Checkbox(
                        init_config.enable_restoreformer, label=RESTOREFORMER_HELP
                    )
                    restoreformer_device = gr.Radio(
                        RESTOREFORMER_AVAILABLE_DEVICES,
                        label="RestoreFormer Device",
                        value=init_config.restoreformer_device,
                    )
                enable_gif = gr.Checkbox(init_config.enable_gif, label=GIF_HELP)

            with gr.Tab("Diffusion Model"):
                sd_local_model_path = gr.Textbox(
                    init_config.sd_local_model_path, label=f"{SD_LOCAL_MODEL_HELP}"
                )
                sd_controlnet = gr.Checkbox(
                    init_config.sd_controlnet, label=f"{SD_CONTROLNET_HELP}"
                )
                sd_controlnet_method = gr.Radio(
                    SD_CONTROLNET_CHOICES,
                    # Fix: was misspelled "lable=", so gr.Radio never received
                    # its label keyword argument.
                    label="ControlNet method",
                    value=init_config.sd_controlnet_method,
                )
                no_half = gr.Checkbox(init_config.no_half, label=f"{NO_HALF_HELP}")
                cpu_offload = gr.Checkbox(
                    init_config.cpu_offload, label=f"{CPU_OFFLOAD_HELP}"
                )
                sd_cpu_textencoder = gr.Checkbox(
                    init_config.sd_cpu_textencoder, label=f"{SD_CPU_TEXTENCODER_HELP}"
                )
                disable_nsfw = gr.Checkbox(
                    init_config.disable_nsfw, label=f"{DISABLE_NSFW_HELP}"
                )
                enable_xformers = gr.Checkbox(
                    init_config.enable_xformers, label=f"{ENABLE_XFORMERS_HELP}"
                )
                local_files_only = gr.Checkbox(
                    init_config.local_files_only, label=f"{LOCAL_FILES_ONLY_HELP}"
                )

        # The input-component order must match save_config's parameter order.
        save_btn.click(
            save_config,
            [
                host,
                port,
                model,
                sd_local_model_path,
                sd_controlnet,
                sd_controlnet_method,
                device,
                gui,
                no_gui_auto_close,
                no_half,
                cpu_offload,
                disable_nsfw,
                sd_cpu_textencoder,
                enable_xformers,
                local_files_only,
                model_dir,
                input,
                output_dir,
                quality,
                enable_interactive_seg,
                interactive_seg_model,
                interactive_seg_device,
                enable_remove_bg,
                enable_anime_seg,
                enable_realesrgan,
                realesrgan_device,
                realesrgan_model,
                enable_gfpgan,
                gfpgan_device,
                enable_restoreformer,
                restoreformer_device,
                enable_gif,
            ],
            message,
        )
    demo.launch(inbrowser=True, show_api=False)
| 8,530 | 33.538462 | 88 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/__init__.py | import os
# Let PyTorch fall back to CPU for operators not implemented on Apple MPS.
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"

import warnings

# Globally silence UserWarning for the whole process.
warnings.simplefilter("ignore", UserWarning)
from lama_cleaner.parse_args import parse_args
def entry_point():
    """Console-script entry point: parse CLI args, then start the server."""
    args = parse_args()
    # To make os.environ["XDG_CACHE_HOME"] = args.model_cache_dir works for diffusers
    # https://github.com/huggingface/diffusers/blob/be99201a567c1ccd841dc16fb24e88f7f239c187/src/diffusers/utils/constants.py#L18
    # (the import is deferred so parse_args can set the env var first)
    from lama_cleaner.server import main

    main(args)
| 488 | 24.736842 | 129 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/installer.py | import subprocess
import sys
def install(package):
    """pip-install ``package`` into the current interpreter's environment."""
    cmd = [sys.executable, "-m", "pip", "install", package]
    subprocess.check_call(cmd)
def install_plugins_package():
    """Install the optional third-party packages used by the plugins."""
    for package in ("rembg", "realesrgan", "gfpgan"):
        install(package)
| 232 | 16.923077 | 76 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/parse_args.py | import os
import imghdr
import argparse
from pathlib import Path
from loguru import logger
from lama_cleaner.const import *
from lama_cleaner.runtime import dump_environment_info
def parse_args():
    """Build the CLI parser, parse sys.argv, validate, and return the args.

    Besides parsing, this applies startup side effects: it may run the
    installer-config flow or plugin installation (and exit), merges values
    from an installer config file, validates device/model/input options,
    sets XDG_CACHE_HOME / U2NET_HOME for model caching, and creates the
    model cache and output directories.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--host", default="127.0.0.1")
    parser.add_argument("--port", default=8080, type=int)

    # Windows-installer integration flags.
    parser.add_argument(
        "--config-installer",
        action="store_true",
        help="Open config web page, mainly for windows installer",
    )
    parser.add_argument(
        "--load-installer-config",
        action="store_true",
        help="Load all cmd args from installer config file",
    )
    parser.add_argument(
        "--installer-config", default=None, help="Config file for windows installer"
    )

    parser.add_argument("--model", default=DEFAULT_MODEL, choices=AVAILABLE_MODELS)
    parser.add_argument("--no-half", action="store_true", help=NO_HALF_HELP)
    parser.add_argument("--cpu-offload", action="store_true", help=CPU_OFFLOAD_HELP)
    parser.add_argument("--disable-nsfw", action="store_true", help=DISABLE_NSFW_HELP)
    parser.add_argument(
        "--sd-cpu-textencoder", action="store_true", help=SD_CPU_TEXTENCODER_HELP
    )
    parser.add_argument("--sd-controlnet", action="store_true", help=SD_CONTROLNET_HELP)
    parser.add_argument(
        "--sd-controlnet-method",
        default=DEFAULT_CONTROLNET_METHOD,
        choices=SD_CONTROLNET_CHOICES,
    )
    parser.add_argument("--sd-local-model-path", default=None, help=SD_LOCAL_MODEL_HELP)
    parser.add_argument(
        "--local-files-only", action="store_true", help=LOCAL_FILES_ONLY_HELP
    )
    parser.add_argument(
        "--enable-xformers", action="store_true", help=ENABLE_XFORMERS_HELP
    )
    parser.add_argument(
        "--device", default=DEFAULT_DEVICE, type=str, choices=AVAILABLE_DEVICES
    )
    parser.add_argument("--gui", action="store_true", help=GUI_HELP)
    parser.add_argument(
        "--no-gui-auto-close", action="store_true", help=NO_GUI_AUTO_CLOSE_HELP
    )
    parser.add_argument(
        "--gui-size",
        default=[1600, 1000],
        nargs=2,
        type=int,
        help="Set window size for GUI",
    )
    parser.add_argument("--input", type=str, default=None, help=INPUT_HELP)
    parser.add_argument("--output-dir", type=str, default=None, help=OUTPUT_DIR_HELP)
    parser.add_argument(
        "--model-dir", type=str, default=DEFAULT_MODEL_DIR, help=MODEL_DIR_HELP
    )
    parser.add_argument(
        "--disable-model-switch",
        action="store_true",
        help="Disable model switch in frontend",
    )
    parser.add_argument(
        "--quality",
        default=95,
        type=int,
        help=QUALITY_HELP,
    )

    # Plugins
    parser.add_argument(
        "--enable-interactive-seg",
        action="store_true",
        help=INTERACTIVE_SEG_HELP,
    )
    parser.add_argument(
        "--interactive-seg-model",
        default="vit_l",
        choices=AVAILABLE_INTERACTIVE_SEG_MODELS,
        help=INTERACTIVE_SEG_MODEL_HELP,
    )
    parser.add_argument(
        "--interactive-seg-device",
        default="cpu",
        choices=AVAILABLE_INTERACTIVE_SEG_DEVICES,
    )
    parser.add_argument(
        "--enable-remove-bg",
        action="store_true",
        help=REMOVE_BG_HELP,
    )
    parser.add_argument(
        "--enable-anime-seg",
        action="store_true",
        help=ANIMESEG_HELP,
    )
    parser.add_argument(
        "--enable-realesrgan",
        action="store_true",
        help=REALESRGAN_HELP,
    )
    parser.add_argument(
        "--realesrgan-device",
        default="cpu",
        type=str,
        choices=REALESRGAN_AVAILABLE_DEVICES,
    )
    parser.add_argument(
        "--realesrgan-model",
        default=RealESRGANModelName.realesr_general_x4v3.value,
        type=str,
        choices=RealESRGANModelNameList,
    )
    parser.add_argument(
        "--realesrgan-no-half",
        action="store_true",
        help="Disable half precision for RealESRGAN",
    )
    parser.add_argument("--enable-gfpgan", action="store_true", help=GFPGAN_HELP)
    parser.add_argument(
        "--gfpgan-device", default="cpu", type=str, choices=GFPGAN_AVAILABLE_DEVICES
    )
    parser.add_argument(
        "--enable-restoreformer", action="store_true", help=RESTOREFORMER_HELP
    )
    parser.add_argument(
        "--restoreformer-device",
        default="cpu",
        type=str,
        choices=RESTOREFORMER_AVAILABLE_DEVICES,
    )
    parser.add_argument(
        "--enable-gif",
        action="store_true",
        help=GIF_HELP,
    )
    parser.add_argument(
        "--install-plugins-package",
        action="store_true",
    )

    #########
    # useless args
    parser.add_argument("--debug", action="store_true", help=argparse.SUPPRESS)
    parser.add_argument("--hf_access_token", default="", help=argparse.SUPPRESS)
    parser.add_argument(
        "--sd-disable-nsfw", action="store_true", help=argparse.SUPPRESS
    )
    parser.add_argument("--sd-run-local", action="store_true", help=argparse.SUPPRESS)
    parser.add_argument(
        "--sd-enable-xformers", action="store_true", help=argparse.SUPPRESS
    )

    args = parser.parse_args()

    # collect system info to help debug
    dump_environment_info()

    if args.install_plugins_package:
        from lama_cleaner.installer import install_plugins_package

        install_plugins_package()
        exit()

    if args.config_installer:
        if args.installer_config is None:
            parser.error(
                "args.config_installer==True, must set args.installer_config to store config file"
            )
        from lama_cleaner.web_config import main

        logger.info("Launching installer web config page")
        main(args.installer_config)
        exit()

    if args.load_installer_config:
        if args.installer_config and not os.path.exists(args.installer_config):
            parser.error(f"args.installer_config={args.installer_config} not exists")
        logger.info(f"Loading installer config from {args.installer_config}")
        _args = load_config(args.installer_config)
        # Config-file values override anything given on the command line.
        for k, v in vars(_args).items():
            if k in vars(args):
                setattr(args, k, v)

    if args.device == "cuda":
        import platform

        if platform.system() == "Darwin":
            logger.info("MacOS does not support cuda, use cpu instead")
            setattr(args, "device", "cpu")
        else:
            import torch

            if torch.cuda.is_available() is False:
                parser.error(
                    "torch.cuda.is_available() is False, please use --device cpu or check your pytorch installation"
                )

    if args.sd_local_model_path and args.model == "sd1.5":
        if not os.path.exists(args.sd_local_model_path):
            parser.error(
                f"invalid --sd-local-model-path: {args.sd_local_model_path} not exists"
            )
        if not os.path.isfile(args.sd_local_model_path):
            parser.error(
                f"invalid --sd-local-model-path: {args.sd_local_model_path} is a directory"
            )

    # U2NET_HOME: presumably read by the rembg plugin's model loader — TODO confirm.
    os.environ["U2NET_HOME"] = DEFAULT_MODEL_DIR
    if args.model_dir and args.model_dir is not None:
        if os.path.isfile(args.model_dir):
            parser.error(f"invalid --model-dir: {args.model_dir} is a file")

        if not os.path.exists(args.model_dir):
            logger.info(f"Create model cache directory: {args.model_dir}")
            Path(args.model_dir).mkdir(exist_ok=True, parents=True)

        # Redirect model caches into the user-chosen directory.
        os.environ["XDG_CACHE_HOME"] = args.model_dir
        os.environ["U2NET_HOME"] = args.model_dir

    if args.input and args.input is not None:
        if not os.path.exists(args.input):
            parser.error(f"invalid --input: {args.input} not exists")
        if os.path.isfile(args.input):
            if imghdr.what(args.input) is None:
                parser.error(f"invalid --input: {args.input} is not a valid image file")
        else:
            # Directory input requires somewhere to auto-save results.
            if args.output_dir is None:
                parser.error(
                    f"invalid --input: {args.input} is a directory, --output-dir is required"
                )

    if args.output_dir is not None:
        output_dir = Path(args.output_dir)
        if not output_dir.exists():
            logger.info(f"Creating output directory: {output_dir}")
            output_dir.mkdir(parents=True)
        else:
            if not output_dir.is_dir():
                parser.error(f"invalid --output-dir: {output_dir} is not a directory")

    return args
| 8,695 | 32.836576 | 116 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/schema.py | from typing import Optional
from enum import Enum
from PIL.Image import Image
from pydantic import BaseModel
class HDStrategy(str, Enum):
    """High-resolution handling strategies used before inpainting."""

    # Use original image size
    ORIGINAL = "Original"
    # Resize the longer side of the image to a specific size(hd_strategy_resize_limit),
    # then do inpainting on the resized image. Finally, resize the inpainting result to the original size.
    # The area outside the mask will not lose quality.
    RESIZE = "Resize"
    # Crop masking area(with a margin controlled by hd_strategy_crop_margin) from the original image to do inpainting
    CROP = "Crop"
class LDMSampler(str, Enum):
    """Sampler choices for the LDM model (see Config.ldm_sampler)."""

    ddim = "ddim"
    plms = "plms"
class SDSampler(str, Enum):
    """Sampler choices for Stable Diffusion (see Config.sd_sampler)."""

    ddim = "ddim"
    pndm = "pndm"
    k_lms = "k_lms"
    k_euler = "k_euler"
    k_euler_a = "k_euler_a"
    dpm_plus_plus = "dpm++"
    uni_pc = "uni_pc"
class Config(BaseModel):
    """Per-request inpainting settings, grouped by model/strategy below."""

    class Config:
        # pydantic model config: required so the PIL Image field below
        # (paint_by_example_example_image) is accepted as a field type.
        arbitrary_types_allowed = True

    # Configs for ldm model
    ldm_steps: int
    ldm_sampler: str = LDMSampler.plms

    # Configs for zits model
    zits_wireframe: bool = True

    # Configs for High Resolution Strategy(different way to preprocess image)
    hd_strategy: str  # See HDStrategy Enum
    hd_strategy_crop_margin: int
    # If the longer side of the image is larger than this value, use crop strategy
    hd_strategy_crop_trigger_size: int
    hd_strategy_resize_limit: int

    # Configs for Stable Diffusion 1.5
    prompt: str = ""
    negative_prompt: str = ""
    # Crop image to this size before doing sd inpainting
    # The value is always on the original image scale
    use_croper: bool = False
    croper_x: int = None
    croper_y: int = None
    croper_height: int = None
    croper_width: int = None

    # Resize the image before doing sd inpainting, the area outside the mask will not lose quality.
    # Used by sd models and paint_by_example model
    sd_scale: float = 1.0
    # Blur the edge of mask area. The higher the number the smoother blend with the original image
    sd_mask_blur: int = 0
    # Ignore this value, it's useless for inpainting
    sd_strength: float = 0.75
    # The number of denoising steps. More denoising steps usually lead to a
    # higher quality image at the expense of slower inference.
    sd_steps: int = 50
    # Higher guidance scale encourages to generate images that are closely linked
    # to the text prompt, usually at the expense of lower image quality.
    sd_guidance_scale: float = 7.5
    sd_sampler: str = SDSampler.uni_pc
    # -1 mean random seed
    sd_seed: int = 42
    sd_match_histograms: bool = False

    # Configs for opencv inpainting
    # opencv document https://docs.opencv.org/4.6.0/d7/d8b/group__photo__inpaint.html#gga8002a65f5a3328fbf15df81b842d3c3ca05e763003a805e6c11c673a9f4ba7d07
    cv2_flag: str = "INPAINT_NS"
    cv2_radius: int = 4

    # Paint by Example
    paint_by_example_steps: int = 50
    paint_by_example_guidance_scale: float = 7.5
    paint_by_example_mask_blur: int = 0
    paint_by_example_seed: int = 42
    paint_by_example_match_histograms: bool = False
    paint_by_example_example_image: Optional[Image] = None

    # InstructPix2Pix
    p2p_steps: int = 50
    p2p_image_guidance_scale: float = 1.5
    p2p_guidance_scale: float = 7.5

    # ControlNet
    controlnet_conditioning_scale: float = 0.4
    controlnet_method: str = "control_v11p_sd15_canny"
| 3,399 | 32.333333 | 154 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/file_manager/utils.py | # Copy from: https://github.com/silentsokolov/flask-thumbnails/blob/master/flask_thumbnails/utils.py
import importlib
import os
from pathlib import Path
from typing import Union
def generate_filename(original_filename, *options):
    """Append each truthy option to the stem: ("a.png", "x", "", "y") -> "a_x_y.png"."""
    stem, ext = os.path.splitext(original_filename)
    suffix = "".join(f"_{opt}" for opt in options if opt)
    return stem + suffix + ext
def parse_size(size):
    """Normalize a thumbnail size spec (int, 1/2-seq, or "WxH") to two elements."""
    if isinstance(size, int):
        # A single number means a square aspect.
        return [size, size]

    if isinstance(size, (tuple, list)):
        if len(size) == 1:
            # A one-element sequence expands to a square of the same type.
            return size + type(size)(size)
        return size

    try:
        parsed = [int(part) for part in size.lower().split("x", 1)]
    except ValueError:
        raise ValueError(  # pylint: disable=raise-missing-from
            "Bad thumbnail size format. Valid format is INTxINT."
        )

    if len(parsed) == 1:
        # A bare "N" string also means a square aspect.
        parsed.append(parsed[0])

    return parsed
def aspect_to_string(size):
    """Render a size sequence as "WxH"; strings pass through untouched."""
    if isinstance(size, str):
        return size
    return "x".join(str(dim) for dim in size)
IMG_SUFFIX = {'.jpg', '.jpeg', '.png', '.JPG', '.JPEG', '.PNG'}


def glob_img(p: Union[Path, str], recursive: bool = False):
    """Yield image files under ``p`` (or ``p`` itself if it is an image file)."""
    path = Path(p)
    if path.is_file() and path.suffix in IMG_SUFFIX:
        yield path
        return
    pattern = "**/*.*" if recursive else "*.*"
    for candidate in Path(path).glob(pattern):
        if candidate.suffix in IMG_SUFFIX:
            yield candidate
| 1,758 | 24.867647 | 100 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/file_manager/storage_backends.py | # Copy from https://github.com/silentsokolov/flask-thumbnails/blob/master/flask_thumbnails/storage_backends.py
import errno
import os
from abc import ABC, abstractmethod
class BaseStorageBackend(ABC):
    """Abstract read/exists/save interface over a file store."""

    def __init__(self, app=None):
        # Optional Flask app reference, stored for subclasses.
        self.app = app

    @abstractmethod
    def read(self, filepath, mode="rb", **kwargs):
        """Return the contents of filepath."""
        raise NotImplementedError

    @abstractmethod
    def exists(self, filepath):
        """Return True if filepath exists in the store."""
        raise NotImplementedError

    @abstractmethod
    def save(self, filepath, data):
        """Write data to filepath."""
        raise NotImplementedError
class FilesystemStorageBackend(BaseStorageBackend):
    """Storage backend that reads and writes the local filesystem."""

    def read(self, filepath, mode="rb", **kwargs):
        """Return the full contents of ``filepath`` (bytes by default)."""
        with open(filepath, mode) as f:  # pylint: disable=unspecified-encoding
            return f.read()

    def exists(self, filepath):
        """Return True if ``filepath`` exists."""
        return os.path.exists(filepath)

    def save(self, filepath, data):
        """Write ``data`` bytes to ``filepath``, creating parent dirs as needed.

        Raises OSError when the parent path exists but is not a directory.
        """
        directory = os.path.dirname(filepath)
        # Simplified from the legacy pre-3.2 try/except-EEXIST dance:
        # exist_ok=True tolerates an existing directory but still raises
        # (FileExistsError, an OSError) when the path exists as a non-dir.
        os.makedirs(directory, exist_ok=True)

        if not os.path.isdir(directory):
            raise IOError("{} is not a directory".format(directory))

        with open(filepath, "wb") as f:
            f.write(data)
| 1,293 | 26.531915 | 110 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/file_manager/__init__.py | from .file_manager import FileManager
| 38 | 18.5 | 37 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/file_manager/file_manager.py | # Copy from https://github.com/silentsokolov/flask-thumbnails/blob/master/flask_thumbnails/thumbnail.py
import os
from datetime import datetime
import cv2
import time
from io import BytesIO
from pathlib import Path
import numpy as np
# from watchdog.events import FileSystemEventHandler
# from watchdog.observers import Observer
from PIL import Image, ImageOps, PngImagePlugin
from loguru import logger
LARGE_ENOUGH_NUMBER = 100
# Raise Pillow's per-chunk PNG text limit to 100 MB so images carrying very
# large text metadata can still be decoded.
PngImagePlugin.MAX_TEXT_CHUNK = LARGE_ENOUGH_NUMBER * (1024**2)
from .storage_backends import FilesystemStorageBackend
from .utils import aspect_to_string, generate_filename, glob_img
class FileManager:
    """List images in an input/output directory and serve cached thumbnails.

    Adapted from flask-thumbnails
    (https://github.com/silentsokolov/flask-thumbnails); registered as a
    Flask extension via ``init_app``.
    """

    def __init__(self, app=None):
        """Create the manager; ``app`` may be bound later via ``init_app``."""
        # Standard Flask extension pattern: app is optional at construction.
        self.app = app
        self._default_root_directory = "media"
        self._default_thumbnail_directory = "media"
        self._default_root_url = "/"
        self._default_thumbnail_root_url = "/"
        self._default_format = "JPEG"
        # NOTE(review): assigned by external setup code after construction —
        # confirm; read by output_media_names/on_modified below.
        self.output_dir: Path = None

        if app is not None:
            self.init_app(app)

        # Cached listings used by the (currently disabled) watchdog path.
        self.image_dir_filenames = []
        self.output_dir_filenames = []

        self.image_dir_observer = None
        self.output_dir_observer = None

        # Timestamps clients can poll to detect directory changes.
        self.modified_time = {
            "image": datetime.utcnow(),
            "output": datetime.utcnow(),
        }

    # def start(self):
    #     self.image_dir_filenames = self._media_names(self.root_directory)
    #     self.output_dir_filenames = self._media_names(self.output_dir)
    #
    #     logger.info(f"Start watching image directory: {self.root_directory}")
    #     self.image_dir_observer = Observer()
    #     self.image_dir_observer.schedule(self, self.root_directory, recursive=False)
    #     self.image_dir_observer.start()
    #
    #     logger.info(f"Start watching output directory: {self.output_dir}")
    #     self.output_dir_observer = Observer()
    #     self.output_dir_observer.schedule(self, self.output_dir, recursive=False)
    #     self.output_dir_observer.start()

    def on_modified(self, event):
        """watchdog callback: refresh cached listings for a changed directory.

        Only invoked if the commented-out observers in ``start`` are enabled.
        """
        if not os.path.isdir(event.src_path):
            return
        if event.src_path == str(self.root_directory):
            logger.info(f"Image directory {event.src_path} modified")
            self.image_dir_filenames = self._media_names(self.root_directory)
            self.modified_time["image"] = datetime.utcnow()
        elif event.src_path == str(self.output_dir):
            logger.info(f"Output directory {event.src_path} modified")
            self.output_dir_filenames = self._media_names(self.output_dir)
            self.modified_time["output"] = datetime.utcnow()

    def init_app(self, app):
        """Register this instance on ``app`` and fill in default config keys."""
        if self.app is None:
            self.app = app
        app.thumbnail_instance = self

        if not hasattr(app, "extensions"):
            app.extensions = {}

        # Guard against double registration on the same app.
        if "thumbnail" in app.extensions:
            raise RuntimeError("Flask-thumbnail extension already initialized")

        app.extensions["thumbnail"] = self

        # setdefault: user-provided config wins over the built-in defaults.
        app.config.setdefault("THUMBNAIL_MEDIA_ROOT", self._default_root_directory)
        app.config.setdefault(
            "THUMBNAIL_MEDIA_THUMBNAIL_ROOT", self._default_thumbnail_directory
        )
        app.config.setdefault("THUMBNAIL_MEDIA_URL", self._default_root_url)
        app.config.setdefault(
            "THUMBNAIL_MEDIA_THUMBNAIL_URL", self._default_thumbnail_root_url
        )
        app.config.setdefault("THUMBNAIL_DEFAULT_FORMAT", self._default_format)

    @property
    def root_directory(self):
        """Absolute path of the input-image directory."""
        path = self.app.config["THUMBNAIL_MEDIA_ROOT"]

        if os.path.isabs(path):
            return path
        else:
            # Relative paths are resolved against the Flask app root.
            return os.path.join(self.app.root_path, path)

    @property
    def thumbnail_directory(self):
        """Absolute path of the directory where thumbnails are cached."""
        path = self.app.config["THUMBNAIL_MEDIA_THUMBNAIL_ROOT"]

        if os.path.isabs(path):
            return path
        else:
            return os.path.join(self.app.root_path, path)

    @property
    def root_url(self):
        """URL prefix under which original media is served."""
        return self.app.config["THUMBNAIL_MEDIA_URL"]

    @property
    def media_names(self):
        """Metadata for every image in the input directory (fresh scan)."""
        # return self.image_dir_filenames
        return self._media_names(self.root_directory)

    @property
    def output_media_names(self):
        """Metadata for every image in the output directory (fresh scan)."""
        return self._media_names(self.output_dir)
        # return self.output_dir_filenames

    @staticmethod
    def _media_names(directory: Path):
        """Return [{name, height, width, ctime, mtime}, ...] sorted by name.

        NOTE(review): opens every image to read its dimensions, so this is a
        full-directory disk scan on each call.
        """
        names = sorted([it.name for it in glob_img(directory)])
        res = []
        for name in names:
            path = os.path.join(directory, name)
            img = Image.open(path)
            res.append(
                {
                    "name": name,
                    "height": img.height,
                    "width": img.width,
                    "ctime": os.path.getctime(path),
                    "mtime": os.path.getmtime(path),
                }
            )
        return res

    @property
    def thumbnail_url(self):
        """URL prefix under which thumbnails are served."""
        return self.app.config["THUMBNAIL_MEDIA_THUMBNAIL_URL"]

    def get_thumbnail(
        self, directory: Path, original_filename: str, width, height, **options
    ):
        """Return (thumbnail_url, (width, height)) for an image, creating it if needed.

        Exactly one of ``width``/``height`` may be None; the other dimension is
        derived from the original aspect ratio. The thumbnail is cached on disk
        under a name derived from size/crop/background/quality, so a matching
        file short-circuits regeneration.
        """
        storage = FilesystemStorageBackend(self.app)
        crop = options.get("crop", "fit")
        background = options.get("background")
        quality = options.get("quality", 90)

        original_path, original_filename = os.path.split(original_filename)
        original_filepath = os.path.join(directory, original_path, original_filename)
        image = Image.open(BytesIO(storage.read(original_filepath)))

        # keep ratio resize: fill in the missing dimension from aspect ratio.
        if width is not None:
            height = int(image.height * width / image.width)
        else:
            width = int(image.width * height / image.height)

        thumbnail_size = (width, height)

        # Cache key encodes all generation parameters.
        thumbnail_filename = generate_filename(
            original_filename,
            aspect_to_string(thumbnail_size),
            crop,
            background,
            quality,
        )

        thumbnail_filepath = os.path.join(
            self.thumbnail_directory, original_path, thumbnail_filename
        )
        thumbnail_url = os.path.join(
            self.thumbnail_url, original_path, thumbnail_filename
        )

        if storage.exists(thumbnail_filepath):
            return thumbnail_url, (width, height)

        try:
            image.load()
        except (IOError, OSError):
            # Corrupt/unreadable image: log and return the (nonexistent) URL
            # rather than raising.
            self.app.logger.warning("Thumbnail not load image: %s", original_filepath)
            return thumbnail_url, (width, height)

        # get original image format so the thumbnail keeps it by default
        options["format"] = options.get("format", image.format)

        image = self._create_thumbnail(
            image, thumbnail_size, crop, background=background
        )

        raw_data = self.get_raw_data(image, **options)
        storage.save(thumbnail_filepath, raw_data)

        return thumbnail_url, (width, height)

    def get_raw_data(self, image, **options):
        """Encode a PIL image to bytes using the requested format/quality."""
        data = {
            "format": self._get_format(image, **options),
            "quality": options.get("quality", 90),
        }

        _file = BytesIO()
        image.save(_file, **data)
        return _file.getvalue()

    @staticmethod
    def colormode(image, colormode="RGB"):
        """Convert ``image`` to ``colormode``, preserving alpha when present."""
        if colormode == "RGB" or colormode == "RGBA":
            if image.mode == "RGBA":
                return image
            if image.mode == "LA":
                return image.convert("RGBA")
            return image.convert(colormode)

        if colormode == "GRAY":
            return image.convert("L")

        return image.convert(colormode)

    @staticmethod
    def background(original_image, color=0xFF):
        """Paste ``original_image`` centered on a square grayscale canvas.

        NOTE(review): ``(x[0] - x[1]) / 2`` produces floats under Python 3;
        ``Image.paste`` expects int coordinates — looks like Python-2-era
        code, confirm before relying on this path.
        """
        size = (max(original_image.size),) * 2
        image = Image.new("L", size, color)
        image.paste(
            original_image,
            tuple(map(lambda x: (x[0] - x[1]) / 2, zip(size, original_image.size))),
        )

        return image

    def _get_format(self, image, **options):
        """Resolve output format: explicit option > source format > app default."""
        if options.get("format"):
            return options.get("format")
        if image.format:
            return image.format

        return self.app.config["THUMBNAIL_DEFAULT_FORMAT"]

    def _create_thumbnail(self, image, size, crop="fit", background=None):
        """Resize ``image`` to ``size`` (cropping when crop == "fit")."""
        try:
            resample = Image.Resampling.LANCZOS
        except AttributeError:  # pylint: disable=raise-missing-from
            # Pillow < 9.1 fallback.
            resample = Image.ANTIALIAS

        if crop == "fit":
            # Crop to fill the target aspect ratio exactly.
            image = ImageOps.fit(image, size, resample)
        else:
            image = image.copy()
            image.thumbnail(size, resample=resample)

        if background is not None:
            image = self.background(image)

        image = self.colormode(image)

        return image
| 8,685 | 31.654135 | 103 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/realesrgan.py | from enum import Enum
import cv2
from loguru import logger
from lama_cleaner.const import RealESRGANModelName
from lama_cleaner.helper import download_model
from lama_cleaner.plugins.base_plugin import BasePlugin
class RealESRGANUpscaler(BasePlugin):
    """Image super-resolution plugin backed by Real-ESRGAN."""

    name = "RealESRGAN"

    def __init__(self, name, device, no_half=False):
        """Download the requested Real-ESRGAN checkpoint and build the upsampler.

        Args:
            name: One of the RealESRGANModelName enum values below.
            device: torch device the model runs on.
            no_half: When True, disable fp16 even on CUDA.
        """
        super().__init__()

        # Heavy third-party imports kept local so the module imports even when
        # realesrgan/basicsr are not installed (check_dep reports that case).
        from basicsr.archs.rrdbnet_arch import RRDBNet
        from realesrgan import RealESRGANer
        from realesrgan.archs.srvgg_arch import SRVGGNetCompact

        # Registry of supported checkpoints: download URL, native scale factor,
        # architecture factory, and md5 used to validate the download.
        REAL_ESRGAN_MODELS = {
            RealESRGANModelName.realesr_general_x4v3: {
                "url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
                "scale": 4,
                "model": lambda: SRVGGNetCompact(
                    num_in_ch=3,
                    num_out_ch=3,
                    num_feat=64,
                    num_conv=32,
                    upscale=4,
                    act_type="prelu",
                ),
                "model_md5": "91a7644643c884ee00737db24e478156",
            },
            RealESRGANModelName.RealESRGAN_x4plus: {
                "url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
                "scale": 4,
                "model": lambda: RRDBNet(
                    num_in_ch=3,
                    num_out_ch=3,
                    num_feat=64,
                    num_block=23,
                    num_grow_ch=32,
                    scale=4,
                ),
                "model_md5": "99ec365d4afad750833258a1a24f44ca",
            },
            RealESRGANModelName.RealESRGAN_x4plus_anime_6B: {
                "url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
                "scale": 4,
                "model": lambda: RRDBNet(
                    num_in_ch=3,
                    num_out_ch=3,
                    num_feat=64,
                    num_block=6,
                    num_grow_ch=32,
                    scale=4,
                ),
                "model_md5": "d58ce384064ec1591c2ea7b79dbf47ba",
            },
        }
        if name not in REAL_ESRGAN_MODELS:
            raise ValueError(f"Unknown RealESRGAN model name: {name}")
        model_info = REAL_ESRGAN_MODELS[name]

        model_path = download_model(model_info["url"], model_info["model_md5"])
        logger.info(f"RealESRGAN model path: {model_path}")

        self.model = RealESRGANer(
            scale=model_info["scale"],
            model_path=model_path,
            model=model_info["model"](),
            # fp16 only on CUDA and only when not explicitly disabled.
            half=True if "cuda" in str(device) and not no_half else False,
            tile=512,
            tile_pad=10,
            pre_pad=10,
            device=device,
        )

    def __call__(self, rgb_np_img, files, form):
        """Upscale an RGB ndarray; ``form["upscale"]`` holds the target scale."""
        bgr_np_img = cv2.cvtColor(rgb_np_img, cv2.COLOR_RGB2BGR)
        scale = float(form["upscale"])
        logger.info(f"RealESRGAN input shape: {bgr_np_img.shape}, scale: {scale}")
        result = self.forward(bgr_np_img, scale)
        logger.info(f"RealESRGAN output shape: {result.shape}")
        return result

    def forward(self, bgr_np_img, scale: float):
        # Output is BGR (same channel order as the BGR input).
        upsampled = self.model.enhance(bgr_np_img, outscale=scale)[0]
        return upsampled

    def check_dep(self):
        """Return an error message if realesrgan is missing, else None."""
        try:
            import realesrgan
        except ImportError:
            return "RealESRGAN is not installed, please install it first. pip install realesrgan"
| 3,567 | 35.783505 | 122 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/interactive_seg.py | import json
import cv2
import numpy as np
from loguru import logger
from lama_cleaner.helper import download_model
from lama_cleaner.plugins.base_plugin import BasePlugin
from lama_cleaner.plugins.segment_anything import SamPredictor, sam_model_registry
# Segment-Anything checkpoints, ordered from smallest to largest.
SEGMENT_ANYTHING_MODELS = {
    "vit_b": {
        "url": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth",
        "md5": "01ec64d29a2fca3f0661936605ae66f8",
    },
    "vit_l": {
        "url": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth",
        "md5": "0b3195507c641ddb6910d2bb5adee89c",
    },
    "vit_h": {
        "url": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth",
        "md5": "4b8939a88964f0f4ff5f5b2642c598a6",
    },
}
class InteractiveSeg(BasePlugin):
    """Click-driven object segmentation using Segment Anything (SAM)."""

    name = "InteractiveSeg"

    def __init__(self, model_name, device):
        """Download the SAM checkpoint for ``model_name`` and build a predictor."""
        super().__init__()
        model_path = download_model(
            SEGMENT_ANYTHING_MODELS[model_name]["url"],
            SEGMENT_ANYTHING_MODELS[model_name]["md5"],
        )
        logger.info(f"SegmentAnything model path: {model_path}")
        self.predictor = SamPredictor(
            sam_model_registry[model_name](checkpoint=model_path).to(device)
        )
        # md5 of the last image whose embedding was computed; lets repeated
        # clicks on the same image skip the expensive set_image() call.
        self.prev_img_md5 = None

    def __call__(self, rgb_np_img, files, form):
        # ``clicks`` is a JSON list of [x, y, label] triples from the frontend.
        clicks = json.loads(form["clicks"])
        return self.forward(rgb_np_img, clicks, form["img_md5"])

    def forward(self, rgb_np_img, clicks, img_md5):
        """Return an RGBA overlay mask for the object selected by ``clicks``.

        Each click is [x, y, label]; label semantics follow SAM's
        point_labels convention.
        """
        input_point = []
        input_label = []
        for click in clicks:
            x = click[0]
            y = click[1]
            input_point.append([x, y])
            input_label.append(click[2])

        # Only recompute the image embedding when the image actually changed.
        if img_md5 and img_md5 != self.prev_img_md5:
            self.prev_img_md5 = img_md5
            self.predictor.set_image(rgb_np_img)

        masks, scores, _ = self.predictor.predict(
            point_coords=np.array(input_point),
            point_labels=np.array(input_label),
            multimask_output=False,
        )
        mask = masks[0].astype(np.uint8) * 255

        # TODO: how to set kernel size? Dilation slightly grows the mask so
        # inpainting covers object edges.
        kernel_size = 9
        mask = cv2.dilate(
            mask, np.ones((kernel_size, kernel_size), np.uint8), iterations=1
        )
        # frontend brush color "ffcc00bb" rendered as an RGBA overlay
        res_mask = np.zeros((mask.shape[0], mask.shape[1], 4), dtype=np.uint8)
        res_mask[mask == 255] = [255, 203, 0, int(255 * 0.73)]
        res_mask = cv2.cvtColor(res_mask, cv2.COLOR_BGRA2RGBA)
        return res_mask
| 2,547 | 32.526316 | 86 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/base_plugin.py | from loguru import logger
class BasePlugin:
    """Common base class for plugins.

    Subclasses override ``__call__`` (the actual processing) and
    ``check_dep`` (dependency probing). Construction aborts the process
    when a required dependency is reported missing.
    """

    def __init__(self):
        # Fail fast: a missing dependency makes the plugin unusable.
        dep_error = self.check_dep()
        if not dep_error:
            return
        logger.error(dep_error)
        exit(-1)

    def __call__(self, rgb_np_img, files, form):
        """Process an RGB ndarray; overridden by subclasses."""
        ...

    def check_dep(self):
        """Return an error string when a dependency is missing, else None."""
        ...
| 280 | 16.5625 | 48 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/gfpgan_plugin.py | import cv2
from loguru import logger
from lama_cleaner.helper import download_model
from lama_cleaner.plugins.base_plugin import BasePlugin
class GFPGANPlugin(BasePlugin):
    """Face restoration plugin backed by GFPGAN v1.4."""

    name = "GFPGAN"

    def __init__(self, device, upscaler=None):
        """Download the GFPGAN checkpoint and build the face enhancer.

        Args:
            device: torch device to run on.
            upscaler: optional RealESRGAN plugin whose model upsamples the
                background around restored faces.
        """
        super().__init__()
        from .gfpganer import MyGFPGANer

        url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth"
        model_md5 = "94d735072630ab734561130a47bc44f8"
        model_path = download_model(url, model_md5)
        logger.info(f"GFPGAN model path: {model_path}")

        import facexlib

        # Steer facexlib's face detector onto the same device, when supported.
        if hasattr(facexlib.detection.retinaface, "device"):
            facexlib.detection.retinaface.device = device

        # Use GFPGAN for face enhancement
        self.face_enhancer = MyGFPGANer(
            model_path=model_path,
            upscale=1,
            arch="clean",
            channel_multiplier=2,
            device=device,
            bg_upsampler=upscaler.model if upscaler is not None else None,
        )
        # Explicitly move the face detector (and its mean tensor) to `device`.
        self.face_enhancer.face_helper.face_det.mean_tensor.to(device)
        self.face_enhancer.face_helper.face_det = (
            self.face_enhancer.face_helper.face_det.to(device)
        )

    def __call__(self, rgb_np_img, files, form):
        """Restore faces in an RGB ndarray.

        NOTE(review): the returned ndarray is BGR (same as RealESRGAN's
        forward) — confirm the caller expects that channel order.
        """
        weight = 0.5  # blend weight passed through to GFPGAN's enhance()
        bgr_np_img = cv2.cvtColor(rgb_np_img, cv2.COLOR_RGB2BGR)
        logger.info(f"GFPGAN input shape: {bgr_np_img.shape}")
        _, _, bgr_output = self.face_enhancer.enhance(
            bgr_np_img,
            has_aligned=False,
            only_center_face=False,
            paste_back=True,
            weight=weight,
        )
        logger.info(f"GFPGAN output shape: {bgr_output.shape}")

        # try:
        #     if scale != 2:
        #         interpolation = cv2.INTER_AREA if scale < 2 else cv2.INTER_LANCZOS4
        #         h, w = img.shape[0:2]
        #         output = cv2.resize(
        #             output,
        #             (int(w * scale / 2), int(h * scale / 2)),
        #             interpolation=interpolation,
        #         )
        # except Exception as error:
        #     print("wrong scale input.", error)
        return bgr_output

    def check_dep(self):
        """Return an error message if gfpgan is missing, else None."""
        try:
            import gfpgan
        except ImportError:
            return (
                "gfpgan is not installed, please install it first. pip install gfpgan"
            )
| 2,400 | 32.347222 | 92 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/gfpganer.py | import os
import torch
from facexlib.utils.face_restoration_helper import FaceRestoreHelper
from gfpgan import GFPGANv1Clean, GFPGANer
from torch.hub import get_dir
class MyGFPGANer(GFPGANer):
    """Helper for restoration with GFPGAN.

    It will detect and crop faces, and then resize the faces to 512x512.
    GFPGAN is used to restored the resized faces.
    The background is upsampled with the bg_upsampler.
    Finally, the faces will be pasted back to the upsample background image.

    Differs from upstream ``GFPGANer.__init__`` mainly in that facexlib's
    auxiliary models are rooted at torch hub's checkpoints directory
    (``model_rootpath=model_dir`` below) and the checkpoint is loaded from a
    local path rather than a URL.

    Args:
        model_path (str): The path to the GFPGAN model. It can be urls (will first download it automatically).
        upscale (float): The upscale of the final output. Default: 2.
        arch (str): The GFPGAN architecture. Option: clean | original. Default: clean.
        channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2.
        bg_upsampler (nn.Module): The upsampler for the background. Default: None.
    """

    def __init__(
        self,
        model_path,
        upscale=2,
        arch="clean",
        channel_multiplier=2,
        bg_upsampler=None,
        device=None,
    ):
        self.upscale = upscale
        self.bg_upsampler = bg_upsampler

        # initialize model
        self.device = (
            torch.device("cuda" if torch.cuda.is_available() else "cpu")
            if device is None
            else device
        )
        # initialize the GFP-GAN
        if arch == "clean":
            self.gfpgan = GFPGANv1Clean(
                out_size=512,
                num_style_feat=512,
                channel_multiplier=channel_multiplier,
                decoder_load_path=None,
                fix_decoder=False,
                num_mlp=8,
                input_is_latent=True,
                different_w=True,
                narrow=1,
                sft_half=True,
            )
        elif arch == "RestoreFormer":
            from gfpgan.archs.restoreformer_arch import RestoreFormer

            self.gfpgan = RestoreFormer()
        # NOTE(review): any other ``arch`` value leaves self.gfpgan unset and
        # fails below at load_state_dict — confirm only the two values above
        # are ever passed.

        # Keep facexlib's detection/parsing weights under torch hub's
        # checkpoints directory instead of facexlib's default location.
        hub_dir = get_dir()
        model_dir = os.path.join(hub_dir, "checkpoints")

        # initialize face helper
        self.face_helper = FaceRestoreHelper(
            upscale,
            face_size=512,
            crop_ratio=(1, 1),
            det_model="retinaface_resnet50",
            save_ext="png",
            use_parse=True,
            device=self.device,
            model_rootpath=model_dir,
        )

        loadnet = torch.load(model_path)
        # Prefer the EMA weights when the checkpoint provides them.
        if "params_ema" in loadnet:
            keyname = "params_ema"
        else:
            keyname = "params"
        self.gfpgan.load_state_dict(loadnet[keyname], strict=True)
        self.gfpgan.eval()
        self.gfpgan = self.gfpgan.to(self.device)
| 2,750 | 31.364706 | 110 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/gif.py | import io
import math
from PIL import Image, ImageDraw
from lama_cleaner.helper import load_img
from lama_cleaner.plugins.base_plugin import BasePlugin
def keep_ratio_resize(img, size, resample=Image.BILINEAR):
    """Resize ``img`` so its longer side equals ``size``, preserving aspect ratio."""
    if img.width > img.height:
        new_w, new_h = size, int(img.height * size / img.width)
    else:
        new_w, new_h = int(img.width * size / img.height), size
    return img.resize((new_w, new_h), resample)
def cubic_bezier(p1, p2, duration: int, frames: int):
    """Sample a cubic Bézier curve running from (0, 0) to (1, 1).

    Args:
        p1: First control point (x1, y1).
        p2: Second control point (x2, y2).
        duration: Step between sampled frame indices (callers pass 1).
        frames: Number of frames; the parameter t is sampled at i / frames.

    Returns:
        List of (x, y) points on the curve, ending at the curve's true
        endpoint (1, 1).
    """
    x0, y0 = (0, 0)
    x1, y1 = p1
    x2, y2 = p2
    x3, y3 = (1, 1)

    def _component(t, a0, a1, a2, a3):
        # Standard cubic Bézier polynomial for a single coordinate.
        mt = 1 - t
        return (
            mt**3 * a0
            + 3 * mt**2 * t * a1
            + 3 * mt * t**2 * a2
            + t**3 * a3
        )

    res = []
    for i in range(0, frames, duration):
        t = i / frames
        res.append(
            (_component(t, x0, x1, x2, x3), _component(t, y0, y1, y2, y3))
        )
    # Bug fix: the loop stops just before t == 1, and the original code then
    # appended (1, 0), which is not on the curve — the endpoint of a cubic
    # Bézier through p3 = (1, 1) is (1, 1). Downstream code (make_compare_gif)
    # only reads the x coordinate, so callers are unaffected.
    res.append((1, 1))
    return res
def make_compare_gif(
    clean_img: Image.Image,
    src_img: Image.Image,
    max_side_length: int = 600,
    splitter_width: int = 5,
    splitter_color=(255, 203, 0, int(255 * 0.73)),
):
    """Build an animated before/after "wipe" GIF and return its bytes.

    A colored splitter sweeps right-to-left revealing the cleaned image, pauses
    on the source, then sweeps left-to-right back, pausing on the cleaned
    result. The sweep is eased with a cubic Bézier.

    Args:
        clean_img: The processed (cleaned) image.
        src_img: The original image; ``clean_img`` is resized to match it.
        max_side_length: Longest side of the output frames.
        splitter_width: Width in px of the sweep line.
        splitter_color: RGBA color of the sweep line.
    """
    if clean_img.size != src_img.size:
        clean_img = clean_img.resize(src_img.size, Image.BILINEAR)

    duration_per_frame = 20  # ms per frame
    num_frames = 50
    # erase-in-out easing; reversed so the sweep starts at the right edge.
    cubic_bezier_points = cubic_bezier((0.33, 0), (0.66, 1), 1, num_frames)
    cubic_bezier_points.reverse()

    max_side_length = min(max_side_length, max(clean_img.size))

    src_img = keep_ratio_resize(src_img, max_side_length)
    clean_img = keep_ratio_resize(clean_img, max_side_length)
    width, height = src_img.size

    # Generate images to make Gif from right to left
    images = []

    for i in range(num_frames):
        new_frame = Image.new("RGB", (width, height))
        new_frame.paste(clean_img, (0, 0))

        # Only the eased x coordinate drives the splitter position.
        left = int(cubic_bezier_points[i][0] * width)
        cropped_src_img = src_img.crop((left, 0, width, height))
        new_frame.paste(cropped_src_img, (left, 0, width, height))
        if i != num_frames - 1:
            # draw a yellow splitter on the edge of the cropped image
            draw = ImageDraw.Draw(new_frame)
            draw.line(
                [(left, 0), (left, height)], width=splitter_width, fill=splitter_color
            )
        images.append(new_frame)

    # Hold on the source image (30 frames).
    for i in range(30):
        images.append(src_img)

    cubic_bezier_points.reverse()
    # Generate images to make Gif from left to right
    for i in range(num_frames):
        new_frame = Image.new("RGB", (width, height))
        new_frame.paste(src_img, (0, 0))

        right = int(cubic_bezier_points[i][0] * width)
        cropped_src_img = clean_img.crop((0, 0, right, height))
        new_frame.paste(cropped_src_img, (0, 0, right, height))

        if i != num_frames - 1:
            # draw a yellow splitter on the edge of the cropped image
            draw = ImageDraw.Draw(new_frame)
            draw.line(
                [(right, 0), (right, height)], width=splitter_width, fill=splitter_color
            )
        images.append(new_frame)

    # Hold on the cleaned image (30 frames).
    for _ in range(30):
        images.append(clean_img)

    # First frame is clean_img; the rest are appended via append_images.
    img_byte_arr = io.BytesIO()
    clean_img.save(
        img_byte_arr,
        format="GIF",
        save_all=True,
        include_color_table=True,
        append_images=images,
        optimize=False,
        duration=duration_per_frame,
        loop=0,
    )
    return img_byte_arr.getvalue()
class MakeGIF(BasePlugin):
    """Plugin producing a before/after comparison GIF as raw bytes."""

    name = "MakeGIF"

    def __call__(self, rgb_np_img, files, form):
        # The cleaned result is uploaded alongside the request.
        raw_clean_bytes = files["clean_img"].read()
        clean_np_img, _ = load_img(raw_clean_bytes)
        return make_compare_gif(
            Image.fromarray(rgb_np_img), Image.fromarray(clean_np_img)
        )
| 4,156 | 26.713333 | 88 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/__init__.py | from .interactive_seg import InteractiveSeg
from .remove_bg import RemoveBG
from .realesrgan import RealESRGANUpscaler
from .gfpgan_plugin import GFPGANPlugin
from .restoreformer import RestoreFormerPlugin
from .gif import MakeGIF
from .anime_seg import AnimeSeg
| 263 | 32 | 46 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/anime_seg.py | import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from PIL import Image
from lama_cleaner.helper import load_model
from lama_cleaner.plugins.base_plugin import BasePlugin
class REBNCONV(nn.Module):
def __init__(self, in_ch=3, out_ch=3, dirate=1, stride=1):
super(REBNCONV, self).__init__()
self.conv_s1 = nn.Conv2d(
in_ch, out_ch, 3, padding=1 * dirate, dilation=1 * dirate, stride=stride
)
self.bn_s1 = nn.BatchNorm2d(out_ch)
self.relu_s1 = nn.ReLU(inplace=True)
def forward(self, x):
hx = x
xout = self.relu_s1(self.bn_s1(self.conv_s1(hx)))
return xout
## upsample tensor 'src' to have the same spatial size with tensor 'tar'
def _upsample_like(src, tar):
src = F.interpolate(src, size=tar.shape[2:], mode="bilinear", align_corners=False)
return src
### RSU-7 ###
class RSU7(nn.Module):
    """Residual U-block with 7 levels (U^2-Net building block).

    Encoder-decoder with max-pool downsampling, dilated bottleneck, skip
    concatenations, and a residual connection around the whole block.
    Attribute names match the pretrained checkpoint's state_dict keys.
    """

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3, img_size=512):
        # NOTE(review): img_size is unused; in_ch/mid_ch/out_ch are stored but
        # never read back.
        super(RSU7, self).__init__()

        self.in_ch = in_ch
        self.mid_ch = mid_ch
        self.out_ch = out_ch

        # NOTE(review): original comment said "1 -> 1/2", but rebnconvin uses
        # stride 1, so there is no downsampling here.
        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)

        # Encoder: conv + /2 max-pool at each level.
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=1)

        # Dilated bottleneck (no further downsampling).
        self.rebnconv7 = REBNCONV(mid_ch, mid_ch, dirate=2)

        # Decoder: each level consumes [upsampled deeper feature, skip].
        self.rebnconv6d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        # NOTE(review): b, c, h, w are unpacked but unused.
        b, c, h, w = x.shape

        hx = x
        hxin = self.rebnconvin(hx)

        # Encoder path.
        hx1 = self.rebnconv1(hxin)
        hx = self.pool1(hx1)

        hx2 = self.rebnconv2(hx)
        hx = self.pool2(hx2)

        hx3 = self.rebnconv3(hx)
        hx = self.pool3(hx3)

        hx4 = self.rebnconv4(hx)
        hx = self.pool4(hx4)

        hx5 = self.rebnconv5(hx)
        hx = self.pool5(hx5)

        hx6 = self.rebnconv6(hx)

        hx7 = self.rebnconv7(hx6)

        # Decoder path: concat skip, conv, upsample to next level's size.
        hx6d = self.rebnconv6d(torch.cat((hx7, hx6), 1))
        hx6dup = _upsample_like(hx6d, hx5)

        hx5d = self.rebnconv5d(torch.cat((hx6dup, hx5), 1))
        hx5dup = _upsample_like(hx5d, hx4)

        hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1))
        hx4dup = _upsample_like(hx4d, hx3)

        hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))
        hx3dup = _upsample_like(hx3d, hx2)

        hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
        hx2dup = _upsample_like(hx2d, hx1)

        hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))

        # Residual connection around the whole block.
        return hx1d + hxin
### RSU-6 ###
class RSU6(nn.Module):
    """Residual U-block with 6 levels; see RSU7 for the general structure."""

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU6, self).__init__()

        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)

        # Encoder.
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1)

        # Dilated bottleneck.
        self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=2)

        # Decoder.
        self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        hx = x

        hxin = self.rebnconvin(hx)

        # Encoder path.
        hx1 = self.rebnconv1(hxin)
        hx = self.pool1(hx1)

        hx2 = self.rebnconv2(hx)
        hx = self.pool2(hx2)

        hx3 = self.rebnconv3(hx)
        hx = self.pool3(hx3)

        hx4 = self.rebnconv4(hx)
        hx = self.pool4(hx4)

        hx5 = self.rebnconv5(hx)

        hx6 = self.rebnconv6(hx5)

        # Decoder path with skip concatenations.
        hx5d = self.rebnconv5d(torch.cat((hx6, hx5), 1))
        hx5dup = _upsample_like(hx5d, hx4)

        hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1))
        hx4dup = _upsample_like(hx4d, hx3)

        hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))
        hx3dup = _upsample_like(hx3d, hx2)

        hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
        hx2dup = _upsample_like(hx2d, hx1)

        hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))

        # Residual connection.
        return hx1d + hxin
### RSU-5 ###
class RSU5(nn.Module):
    """Residual U-block with 5 levels; see RSU7 for the general structure."""

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU5, self).__init__()

        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)

        # Encoder.
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)

        # Dilated bottleneck.
        self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=2)

        # Decoder.
        self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        hx = x

        hxin = self.rebnconvin(hx)

        # Encoder path.
        hx1 = self.rebnconv1(hxin)
        hx = self.pool1(hx1)

        hx2 = self.rebnconv2(hx)
        hx = self.pool2(hx2)

        hx3 = self.rebnconv3(hx)
        hx = self.pool3(hx3)

        hx4 = self.rebnconv4(hx)

        hx5 = self.rebnconv5(hx4)

        # Decoder path with skip concatenations.
        hx4d = self.rebnconv4d(torch.cat((hx5, hx4), 1))
        hx4dup = _upsample_like(hx4d, hx3)

        hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))
        hx3dup = _upsample_like(hx3d, hx2)

        hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
        hx2dup = _upsample_like(hx2d, hx1)

        hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))

        # Residual connection.
        return hx1d + hxin
### RSU-4 ###
class RSU4(nn.Module):
    """Residual U-block with 4 levels; see RSU7 for the general structure."""

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU4, self).__init__()

        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)

        # Encoder.
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)

        # Dilated bottleneck.
        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=2)

        # Decoder.
        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        hx = x

        hxin = self.rebnconvin(hx)

        # Encoder path.
        hx1 = self.rebnconv1(hxin)
        hx = self.pool1(hx1)

        hx2 = self.rebnconv2(hx)
        hx = self.pool2(hx2)

        hx3 = self.rebnconv3(hx)

        hx4 = self.rebnconv4(hx3)

        # Decoder path with skip concatenations.
        hx3d = self.rebnconv3d(torch.cat((hx4, hx3), 1))
        hx3dup = _upsample_like(hx3d, hx2)

        hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
        hx2dup = _upsample_like(hx2d, hx1)

        hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))

        # Residual connection.
        return hx1d + hxin
### RSU-4F ###
class RSU4F(nn.Module):
    """Dilation-only RSU-4 variant ("F" = full resolution).

    Uses growing dilation rates (1/2/4/8) instead of pooling, so spatial size
    never changes and no upsampling is required in the decoder.
    """

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU4F, self).__init__()

        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)

        # Encoder with increasing dilation instead of pooling.
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=2)
        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=4)

        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=8)

        # Decoder mirrors the encoder dilation rates.
        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=4)
        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=2)
        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        hx = x

        hxin = self.rebnconvin(hx)

        hx1 = self.rebnconv1(hxin)
        hx2 = self.rebnconv2(hx1)
        hx3 = self.rebnconv3(hx2)

        hx4 = self.rebnconv4(hx3)

        # Decoder with skip concatenations (no upsampling needed).
        hx3d = self.rebnconv3d(torch.cat((hx4, hx3), 1))
        hx2d = self.rebnconv2d(torch.cat((hx3d, hx2), 1))
        hx1d = self.rebnconv1d(torch.cat((hx2d, hx1), 1))

        # Residual connection.
        return hx1d + hxin
class ISNetDIS(nn.Module):
    """IS-Net (DIS) saliency network built from RSU blocks.

    Input: NCHW image tensor; output: a single-channel saliency map at the
    input resolution, passed through sigmoid.
    """

    def __init__(self, in_ch=3, out_ch=1):
        super(ISNetDIS, self).__init__()

        # Stride-2 stem halves the input resolution before stage 1.
        self.conv_in = nn.Conv2d(in_ch, 64, 3, stride=2, padding=1)
        self.pool_in = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.stage1 = RSU7(64, 32, 64)
        self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.stage2 = RSU6(64, 32, 128)
        self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.stage3 = RSU5(128, 64, 256)
        self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.stage4 = RSU4(256, 128, 512)
        self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.stage5 = RSU4F(512, 256, 512)
        self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.stage6 = RSU4F(512, 256, 512)

        # decoder
        self.stage5d = RSU4F(1024, 256, 512)
        self.stage4d = RSU4(1024, 128, 256)
        self.stage3d = RSU5(512, 64, 128)
        self.stage2d = RSU6(256, 32, 64)
        self.stage1d = RSU7(128, 16, 64)

        # Single side output head producing the saliency logits.
        self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)

    def forward(self, x):
        hx = x

        hxin = self.conv_in(hx)
        # NOTE(review): this pooled value is overwritten below without being
        # used — stage1 consumes hxin directly.
        hx = self.pool_in(hxin)

        # stage 1
        hx1 = self.stage1(hxin)
        hx = self.pool12(hx1)

        # stage 2
        hx2 = self.stage2(hx)
        hx = self.pool23(hx2)

        # stage 3
        hx3 = self.stage3(hx)
        hx = self.pool34(hx3)

        # stage 4
        hx4 = self.stage4(hx)
        hx = self.pool45(hx4)

        # stage 5
        hx5 = self.stage5(hx)
        hx = self.pool56(hx5)

        # stage 6
        hx6 = self.stage6(hx)
        hx6up = _upsample_like(hx6, hx5)

        # -------------------- decoder --------------------
        hx5d = self.stage5d(torch.cat((hx6up, hx5), 1))
        hx5dup = _upsample_like(hx5d, hx4)

        hx4d = self.stage4d(torch.cat((hx5dup, hx4), 1))
        hx4dup = _upsample_like(hx4d, hx3)

        hx3d = self.stage3d(torch.cat((hx4dup, hx3), 1))
        hx3dup = _upsample_like(hx3d, hx2)

        hx2d = self.stage2d(torch.cat((hx3dup, hx2), 1))
        hx2dup = _upsample_like(hx2d, hx1)

        hx1d = self.stage1d(torch.cat((hx2dup, hx1), 1))

        # side output, upsampled back to the input resolution
        d1 = self.side1(hx1d)
        d1 = _upsample_like(d1, x)

        return d1.sigmoid()
# Anime-segmentation (ISNet) checkpoint: download URL plus md5 used to
# validate the download.
ANIME_SEG_MODELS = {
    "url": "https://github.com/Sanster/models/releases/download/isnetis/isnetis.pth",
    "md5": "5f25479076b73074730ab8de9e8f2051",
}
class AnimeSeg(BasePlugin):
    """Anime character matting: returns the input with background made transparent."""

    # Model from: https://github.com/SkyTNT/anime-segmentation
    name = "AnimeSeg"

    def __init__(self):
        super().__init__()
        # Weights are downloaded (md5-checked) and loaded on CPU.
        self.model = load_model(
            ISNetDIS(),
            ANIME_SEG_MODELS["url"],
            "cpu",
            ANIME_SEG_MODELS["md5"],
        )

    def __call__(self, rgb_np_img, files, form):
        return self.forward(rgb_np_img)

    @torch.no_grad()
    def forward(self, rgb_np_img):
        """Return an RGBA ndarray: input pixels kept where the model sees foreground.

        The image is letterboxed into a 1024x1024 canvas (aspect preserved,
        zero padding), run through ISNet, and the predicted mask is cropped
        back out and resized to the original resolution.
        """
        s = 1024
        h0, w0 = h, w = rgb_np_img.shape[0], rgb_np_img.shape[1]
        # Scale so the longer side becomes s.
        if h > w:
            h, w = s, int(s * w / h)
        else:
            h, w = int(s * h / w), s
        # Padding needed to center the resized image in the square canvas.
        ph, pw = s - h, s - w
        tmpImg = np.zeros([s, s, 3], dtype=np.float32)
        # Normalize to [0, 1] and paste centered.
        tmpImg[ph // 2 : ph // 2 + h, pw // 2 : pw // 2 + w] = (
            cv2.resize(rgb_np_img, (w, h)) / 255
        )
        # HWC -> CHW, add batch dimension.
        tmpImg = tmpImg.transpose((2, 0, 1))
        tmpImg = torch.from_numpy(tmpImg).unsqueeze(0).type(torch.FloatTensor)
        mask = self.model(tmpImg)
        # Crop the letterbox padding back off, then resize to original size.
        mask = mask[0, :, ph // 2 : ph // 2 + h, pw // 2 : pw // 2 + w]
        mask = cv2.resize(mask.cpu().numpy().transpose((1, 2, 0)), (w0, h0))
        mask = Image.fromarray((mask * 255).astype("uint8"), mode="L")
        # Composite the original over a fully transparent canvas using the mask.
        empty = Image.new("RGBA", (w0, h0), 0)
        img = Image.fromarray(rgb_np_img)
        cutout = Image.composite(img, empty, mask)
        return np.asarray(cutout)
| 13,465 | 28.530702 | 86 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/restoreformer.py | import cv2
from loguru import logger
from lama_cleaner.helper import download_model
from lama_cleaner.plugins.base_plugin import BasePlugin
class RestoreFormerPlugin(BasePlugin):
    """Face restoration plugin backed by RestoreFormer (via the gfpgan package)."""

    name = "RestoreFormer"

    def __init__(self, device, upscaler=None):
        """Download the RestoreFormer checkpoint and build the face enhancer.

        Args:
            device: torch device to run on.
            upscaler: optional RealESRGAN plugin whose model upsamples the
                background around restored faces.
        """
        super().__init__()
        from .gfpganer import MyGFPGANer

        url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/RestoreFormer.pth"
        model_md5 = "eaeeff6c4a1caa1673977cb374e6f699"
        model_path = download_model(url, model_md5)
        logger.info(f"RestoreFormer model path: {model_path}")

        import facexlib

        # Steer facexlib's face detector onto the same device, when supported.
        if hasattr(facexlib.detection.retinaface, "device"):
            facexlib.detection.retinaface.device = device

        self.face_enhancer = MyGFPGANer(
            model_path=model_path,
            upscale=1,
            arch="RestoreFormer",
            channel_multiplier=2,
            device=device,
            bg_upsampler=upscaler.model if upscaler is not None else None,
        )

    def __call__(self, rgb_np_img, files, form):
        """Restore faces in an RGB ndarray.

        NOTE(review): the returned ndarray is BGR (mirrors GFPGANPlugin) —
        confirm the caller expects that channel order.
        """
        weight = 0.5  # blend weight passed through to enhance()
        bgr_np_img = cv2.cvtColor(rgb_np_img, cv2.COLOR_RGB2BGR)
        logger.info(f"RestoreFormer input shape: {bgr_np_img.shape}")
        _, _, bgr_output = self.face_enhancer.enhance(
            bgr_np_img,
            has_aligned=False,
            only_center_face=False,
            paste_back=True,
            weight=weight,
        )
        logger.info(f"RestoreFormer output shape: {bgr_output.shape}")
        return bgr_output

    def check_dep(self):
        """Return an error message if gfpgan is missing, else None."""
        try:
            import gfpgan
        except ImportError:
            return (
                "gfpgan is not installed, please install it first. pip install gfpgan"
            )
| 1,747 | 30.781818 | 95 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/remove_bg.py | import os
import cv2
import numpy as np
from torch.hub import get_dir
from lama_cleaner.plugins.base_plugin import BasePlugin
class RemoveBG(BasePlugin):
    """Background removal plugin backed by rembg's u2net model."""

    name = "RemoveBG"

    def __init__(self):
        super().__init__()
        from rembg import new_session

        # Point rembg's model cache at torch hub's checkpoints directory so
        # all plugin weights live in one place.
        hub_dir = get_dir()
        model_dir = os.path.join(hub_dir, "checkpoints")
        os.environ["U2NET_HOME"] = model_dir

        self.session = new_session(model_name="u2net")

    def __call__(self, rgb_np_img, files, form):
        bgr_np_img = cv2.cvtColor(rgb_np_img, cv2.COLOR_RGB2BGR)
        return self.forward(bgr_np_img)

    def forward(self, bgr_np_img) -> np.ndarray:
        """Run rembg on a BGR ndarray and return an RGBA ndarray."""
        from rembg import remove

        # rembg returns a BGRA image here; convert to RGBA for the caller.
        output = remove(bgr_np_img, session=self.session)
        return cv2.cvtColor(output, cv2.COLOR_BGRA2RGBA)

    def check_dep(self):
        """Return an error message if rembg is missing, else None."""
        try:
            import rembg
        except ImportError:
            return (
                "RemoveBG is not installed, please install it first. pip install rembg"
            )
| 1,053 | 25.35 | 87 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/predictor.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from .modeling import Sam
from typing import Optional, Tuple
class SamPredictor:
    """Convenience wrapper around a Sam model: embed an image once with
    set_image(), then run repeated, cheap mask predictions from point / box /
    mask prompts via predict() or predict_torch()."""
    def __init__(
        self,
        sam_model: Sam,
    ) -> None:
        """
        Uses SAM to calculate the image embedding for an image, and then
        allow repeated, efficient mask prediction given prompts.
        Arguments:
          sam_model (Sam): The model to use for mask prediction.
        """
        super().__init__()
        self.model = sam_model
        # NOTE(review): imported locally, presumably to avoid a circular
        # import with the package root — confirm.
        from .utils.transforms import ResizeLongestSide
        self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)
        self.reset_image()
    def set_image(
        self,
        image: np.ndarray,
        image_format: str = "RGB",
    ) -> None:
        """
        Calculates the image embeddings for the provided image, allowing
        masks to be predicted with the 'predict' method.
        Arguments:
          image (np.ndarray): The image for calculating masks. Expects an
            image in HWC uint8 format, with pixel values in [0, 255].
          image_format (str): The color format of the image, in ['RGB', 'BGR'].
        """
        assert image_format in [
            "RGB",
            "BGR",
        ], f"image_format must be in ['RGB', 'BGR'], is {image_format}."
        # Flip channel order when the caller's format differs from the model's.
        if image_format != self.model.image_format:
            image = image[..., ::-1]
        # Transform the image to the form expected by the model
        input_image = self.transform.apply_image(image)
        input_image_torch = torch.as_tensor(input_image, device=self.device)
        # HWC -> 1xCxHxW
        input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[
            None, :, :, :
        ]
        self.set_torch_image(input_image_torch, image.shape[:2])
    @torch.no_grad()
    def set_torch_image(
        self,
        transformed_image: torch.Tensor,
        original_image_size: Tuple[int, ...],
    ) -> None:
        """
        Calculates the image embeddings for the provided image, allowing
        masks to be predicted with the 'predict' method. Expects the input
        image to be already transformed to the format expected by the model.
        Arguments:
          transformed_image (torch.Tensor): The input image, with shape
            1x3xHxW, which has been transformed with ResizeLongestSide.
          original_image_size (tuple(int, int)): The size of the image
            before transformation, in (H, W) format.
        """
        assert (
            len(transformed_image.shape) == 4
            and transformed_image.shape[1] == 3
            and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size
        ), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}."
        self.reset_image()
        self.original_size = original_image_size
        self.input_size = tuple(transformed_image.shape[-2:])
        # Cache the encoder output so predict() can be called repeatedly
        # without re-encoding the image.
        input_image = self.model.preprocess(transformed_image)
        self.features = self.model.image_encoder(input_image)
        self.is_image_set = True
    def predict(
        self,
        point_coords: Optional[np.ndarray] = None,
        point_labels: Optional[np.ndarray] = None,
        box: Optional[np.ndarray] = None,
        mask_input: Optional[np.ndarray] = None,
        multimask_output: bool = True,
        return_logits: bool = False,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Predict masks for the given input prompts, using the currently set image.
        Arguments:
          point_coords (np.ndarray or None): A Nx2 array of point prompts to the
            model. Each point is in (X,Y) in pixels.
          point_labels (np.ndarray or None): A length N array of labels for the
            point prompts. 1 indicates a foreground point and 0 indicates a
            background point.
          box (np.ndarray or None): A length 4 array given a box prompt to the
            model, in XYXY format.
          mask_input (np.ndarray): A low resolution mask input to the model, typically
            coming from a previous prediction iteration. Has form 1xHxW, where
            for SAM, H=W=256.
          multimask_output (bool): If true, the model will return three masks.
            For ambiguous input prompts (such as a single click), this will often
            produce better masks than a single prediction. If only a single
            mask is needed, the model's predicted quality score can be used
            to select the best mask. For non-ambiguous prompts, such as multiple
            input prompts, multimask_output=False can give better results.
          return_logits (bool): If true, returns un-thresholded masks logits
            instead of a binary mask.
        Returns:
          (np.ndarray): The output masks in CxHxW format, where C is the
            number of masks, and (H, W) is the original image size.
          (np.ndarray): An array of length C containing the model's
            predictions for the quality of each mask.
          (np.ndarray): An array of shape CxHxW, where C is the number
            of masks and H=W=256. These low resolution logits can be passed to
            a subsequent iteration as mask input.
        """
        if not self.is_image_set:
            raise RuntimeError(
                "An image must be set with .set_image(...) before mask prediction."
            )
        # Transform input prompts
        coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None
        if point_coords is not None:
            assert (
                point_labels is not None
            ), "point_labels must be supplied if point_coords is supplied."
            # Rescale prompt coordinates into the resized-image frame.
            point_coords = self.transform.apply_coords(point_coords, self.original_size)
            coords_torch = torch.as_tensor(
                point_coords, dtype=torch.float, device=self.device
            )
            labels_torch = torch.as_tensor(
                point_labels, dtype=torch.int, device=self.device
            )
            coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]
        if box is not None:
            box = self.transform.apply_boxes(box, self.original_size)
            box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)
            box_torch = box_torch[None, :]
        if mask_input is not None:
            mask_input_torch = torch.as_tensor(
                mask_input, dtype=torch.float, device=self.device
            )
            mask_input_torch = mask_input_torch[None, :, :, :]
        masks, iou_predictions, low_res_masks = self.predict_torch(
            coords_torch,
            labels_torch,
            box_torch,
            mask_input_torch,
            multimask_output,
            return_logits=return_logits,
        )
        # Drop the batch dimension (single image) and convert to numpy.
        masks = masks[0].detach().cpu().numpy()
        iou_predictions = iou_predictions[0].detach().cpu().numpy()
        low_res_masks = low_res_masks[0].detach().cpu().numpy()
        return masks, iou_predictions, low_res_masks
    @torch.no_grad()
    def predict_torch(
        self,
        point_coords: Optional[torch.Tensor],
        point_labels: Optional[torch.Tensor],
        boxes: Optional[torch.Tensor] = None,
        mask_input: Optional[torch.Tensor] = None,
        multimask_output: bool = True,
        return_logits: bool = False,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Predict masks for the given input prompts, using the currently set image.
        Input prompts are batched torch tensors and are expected to already be
        transformed to the input frame using ResizeLongestSide.
        Arguments:
          point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
            model. Each point is in (X,Y) in pixels.
          point_labels (torch.Tensor or None): A BxN array of labels for the
            point prompts. 1 indicates a foreground point and 0 indicates a
            background point.
          box (np.ndarray or None): A Bx4 array given a box prompt to the
            model, in XYXY format.
          mask_input (np.ndarray): A low resolution mask input to the model, typically
            coming from a previous prediction iteration. Has form Bx1xHxW, where
            for SAM, H=W=256. Masks returned by a previous iteration of the
            predict method do not need further transformation.
          multimask_output (bool): If true, the model will return three masks.
            For ambiguous input prompts (such as a single click), this will often
            produce better masks than a single prediction. If only a single
            mask is needed, the model's predicted quality score can be used
            to select the best mask. For non-ambiguous prompts, such as multiple
            input prompts, multimask_output=False can give better results.
          return_logits (bool): If true, returns un-thresholded masks logits
            instead of a binary mask.
        Returns:
          (torch.Tensor): The output masks in BxCxHxW format, where C is the
            number of masks, and (H, W) is the original image size.
          (torch.Tensor): An array of shape BxC containing the model's
            predictions for the quality of each mask.
          (torch.Tensor): An array of shape BxCxHxW, where C is the number
            of masks and H=W=256. These low res logits can be passed to
            a subsequent iteration as mask input.
        """
        if not self.is_image_set:
            raise RuntimeError(
                "An image must be set with .set_image(...) before mask prediction."
            )
        if point_coords is not None:
            points = (point_coords, point_labels)
        else:
            points = None
        # Embed prompts
        sparse_embeddings, dense_embeddings = self.model.prompt_encoder(
            points=points,
            boxes=boxes,
            masks=mask_input,
        )
        # Predict masks
        low_res_masks, iou_predictions = self.model.mask_decoder(
            image_embeddings=self.features,
            image_pe=self.model.prompt_encoder.get_dense_pe(),
            sparse_prompt_embeddings=sparse_embeddings,
            dense_prompt_embeddings=dense_embeddings,
            multimask_output=multimask_output,
        )
        # Upscale the masks to the original image resolution
        masks = self.model.postprocess_masks(
            low_res_masks, self.input_size, self.original_size
        )
        if not return_logits:
            masks = masks > self.model.mask_threshold
        return masks, iou_predictions, low_res_masks
    def get_image_embedding(self) -> torch.Tensor:
        """
        Returns the image embeddings for the currently set image, with
        shape 1xCxHxW, where C is the embedding dimension and (H,W) are
        the embedding spatial dimension of SAM (typically C=256, H=W=64).
        """
        if not self.is_image_set:
            raise RuntimeError(
                "An image must be set with .set_image(...) to generate an embedding."
            )
        assert (
            self.features is not None
        ), "Features must exist if an image has been set."
        return self.features
    @property
    def device(self) -> torch.device:
        """Device the wrapped model lives on."""
        return self.model.device
    def reset_image(self) -> None:
        """Resets the currently set image."""
        self.is_image_set = False
        # Cached encoder output for the current image.
        self.features = None
        # NOTE(review): orig_h/orig_w/input_h/input_w are cleared here but are
        # not assigned anywhere else in this class (original_size/input_size
        # are used instead) — possibly dead state.
        self.orig_h = None
        self.orig_w = None
        self.input_h = None
        self.input_w = None
| 11,845 | 40.41958 | 100 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/build_sam.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from functools import partial
from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer
def build_sam_vit_h(checkpoint=None):
    """Construct the ViT-H (huge) SAM model, optionally loading *checkpoint*."""
    vit_h_config = dict(
        encoder_embed_dim=1280,
        encoder_depth=32,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[7, 15, 23, 31],
    )
    return _build_sam(checkpoint=checkpoint, **vit_h_config)
# The ViT-H variant is the default SAM model.
build_sam = build_sam_vit_h
def build_sam_vit_l(checkpoint=None):
    """Construct the ViT-L (large) SAM model, optionally loading *checkpoint*."""
    vit_l_config = dict(
        encoder_embed_dim=1024,
        encoder_depth=24,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[5, 11, 17, 23],
    )
    return _build_sam(checkpoint=checkpoint, **vit_l_config)
def build_sam_vit_b(checkpoint=None):
    """Construct the ViT-B (base) SAM model, optionally loading *checkpoint*."""
    vit_b_config = dict(
        encoder_embed_dim=768,
        encoder_depth=12,
        encoder_num_heads=12,
        encoder_global_attn_indexes=[2, 5, 8, 11],
    )
    return _build_sam(checkpoint=checkpoint, **vit_b_config)
# Maps model-type strings (e.g. from CLI/config) to their builder functions.
sam_model_registry = {
    "default": build_sam,
    "vit_h": build_sam,
    "vit_l": build_sam_vit_l,
    "vit_b": build_sam_vit_b,
}
def _build_sam(
    encoder_embed_dim,
    encoder_depth,
    encoder_num_heads,
    encoder_global_attn_indexes,
    checkpoint=None,
):
    """Assemble a Sam model from the given ViT image-encoder hyper-parameters.

    Args:
        encoder_embed_dim (int): channel width of the ViT image encoder.
        encoder_depth (int): number of transformer blocks in the encoder.
        encoder_num_heads (int): attention heads per encoder block.
        encoder_global_attn_indexes (list[int]): indexes of encoder blocks
            that use global (non-windowed) attention.
        checkpoint (str or None): optional path of a state-dict file to load.

    Returns:
        Sam: the assembled model, in eval mode.
    """
    # Fixed SAM architecture constants shared by all ViT variants.
    prompt_embed_dim = 256
    image_size = 1024
    vit_patch_size = 16
    image_embedding_size = image_size // vit_patch_size  # 64x64 token grid
    sam = Sam(
        image_encoder=ImageEncoderViT(
            depth=encoder_depth,
            embed_dim=encoder_embed_dim,
            img_size=image_size,
            mlp_ratio=4,
            norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
            num_heads=encoder_num_heads,
            patch_size=vit_patch_size,
            qkv_bias=True,
            use_rel_pos=True,
            global_attn_indexes=encoder_global_attn_indexes,
            window_size=14,
            out_chans=prompt_embed_dim,
        ),
        prompt_encoder=PromptEncoder(
            embed_dim=prompt_embed_dim,
            image_embedding_size=(image_embedding_size, image_embedding_size),
            input_image_size=(image_size, image_size),
            mask_in_chans=16,
        ),
        mask_decoder=MaskDecoder(
            num_multimask_outputs=3,
            transformer=TwoWayTransformer(
                depth=2,
                embedding_dim=prompt_embed_dim,
                mlp_dim=2048,
                num_heads=8,
            ),
            transformer_dim=prompt_embed_dim,
            iou_head_depth=3,
            iou_head_hidden_dim=256,
        ),
        pixel_mean=[123.675, 116.28, 103.53],
        pixel_std=[58.395, 57.12, 57.375],
    )
    sam.eval()
    if checkpoint is not None:
        with open(checkpoint, "rb") as f:
            # map_location="cpu" lets GPU-trained checkpoints load on hosts
            # without CUDA; the caller moves the model to its device later.
            state_dict = torch.load(f, map_location="cpu")
        sam.load_state_dict(state_dict)
    return sam
| 2,929 | 26.12963 | 89 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/__init__.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .build_sam import (
build_sam,
build_sam_vit_h,
build_sam_vit_l,
build_sam_vit_b,
sam_model_registry,
)
from .predictor import SamPredictor
| 363 | 23.266667 | 61 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/utils/__init__.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| 197 | 32 | 61 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/utils/transforms.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from torch.nn import functional as F
from torchvision.transforms.functional import resize, to_pil_image # type: ignore
from copy import deepcopy
from typing import Tuple
class ResizeLongestSide:
    """
    Resizes images to longest side 'target_length', as well as provides
    methods for resizing coordinates and boxes. Provides methods for
    transforming both numpy array and batched torch tensors.
    """

    def __init__(self, target_length: int) -> None:
        # Length (in pixels) the longer image side is resized to.
        self.target_length = target_length

    def apply_image(self, image: np.ndarray) -> np.ndarray:
        """
        Expects a numpy array with shape HxWxC in uint8 format.
        """
        target_size = self.get_preprocess_shape(
            image.shape[0], image.shape[1], self.target_length
        )
        return np.array(resize(to_pil_image(image), target_size))

    def apply_coords(
        self, coords: np.ndarray, original_size: Tuple[int, ...]
    ) -> np.ndarray:
        """
        Expects a numpy array of length 2 in the final dimension. Requires the
        original image size in (H, W) format.
        """
        old_h, old_w = original_size
        new_h, new_w = self.get_preprocess_shape(
            original_size[0], original_size[1], self.target_length
        )
        # Copy so the caller's array is not mutated in place.
        coords = deepcopy(coords).astype(float)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        return coords

    def apply_boxes(
        self, boxes: np.ndarray, original_size: Tuple[int, ...]
    ) -> np.ndarray:
        """
        Expects a numpy array shape Bx4. Requires the original image size
        in (H, W) format.
        """
        # A box is just two corner points; reuse the coordinate transform.
        boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size)
        return boxes.reshape(-1, 4)

    def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor:
        """
        Expects batched images with shape BxCxHxW and float format. This
        transformation may not exactly match apply_image. apply_image is
        the transformation expected by the model.
        """
        # Expects an image in BCHW format. May not exactly match apply_image.
        # Use the spatial dims (H, W): image.shape[0]/[1] are batch and
        # channel, which previously produced a wrongly-sized output.
        target_size = self.get_preprocess_shape(
            image.shape[2], image.shape[3], self.target_length
        )
        return F.interpolate(
            image, target_size, mode="bilinear", align_corners=False, antialias=True
        )

    def apply_coords_torch(
        self, coords: torch.Tensor, original_size: Tuple[int, ...]
    ) -> torch.Tensor:
        """
        Expects a torch tensor with length 2 in the last dimension. Requires the
        original image size in (H, W) format.
        """
        old_h, old_w = original_size
        new_h, new_w = self.get_preprocess_shape(
            original_size[0], original_size[1], self.target_length
        )
        # Copy so the caller's tensor is not mutated in place.
        coords = deepcopy(coords).to(torch.float)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        return coords

    def apply_boxes_torch(
        self, boxes: torch.Tensor, original_size: Tuple[int, ...]
    ) -> torch.Tensor:
        """
        Expects a torch tensor with shape Bx4. Requires the original image
        size in (H, W) format.
        """
        boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size)
        return boxes.reshape(-1, 4)

    @staticmethod
    def get_preprocess_shape(
        oldh: int, oldw: int, long_side_length: int
    ) -> Tuple[int, int]:
        """
        Compute the output size given input size and target long side length.
        """
        scale = long_side_length * 1.0 / max(oldh, oldw)
        newh, neww = oldh * scale, oldw * scale
        # Round to nearest integer pixel count.
        neww = int(neww + 0.5)
        newh = int(newh + 0.5)
        return (newh, neww)
| 4,054 | 34.884956 | 84 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/modeling/mask_decoder.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from torch.nn import functional as F
from typing import List, Tuple, Type
from .common import LayerNorm2d
class MaskDecoder(nn.Module):
    def __init__(
        self,
        *,
        transformer_dim: int,
        transformer: nn.Module,
        num_multimask_outputs: int = 3,
        activation: Type[nn.Module] = nn.GELU,
        iou_head_depth: int = 3,
        iou_head_hidden_dim: int = 256,
    ) -> None:
        """
        Predicts masks given an image and prompt embeddings, using a
        transformer architecture.
        Arguments:
          transformer_dim (int): the channel dimension of the transformer
          transformer (nn.Module): the transformer used to predict masks
          num_multimask_outputs (int): the number of masks to predict
            when disambiguating masks
          activation (nn.Module): the type of activation to use when
            upscaling masks
          iou_head_depth (int): the depth of the MLP used to predict
            mask quality
          iou_head_hidden_dim (int): the hidden dimension of the MLP
            used to predict mask quality
        """
        super().__init__()
        self.transformer_dim = transformer_dim
        self.transformer = transformer
        self.num_multimask_outputs = num_multimask_outputs
        self.iou_token = nn.Embedding(1, transformer_dim)
        # One extra token for the single-mask output (selected when
        # multimask_output=False in forward()).
        self.num_mask_tokens = num_multimask_outputs + 1
        self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
        # Two transposed convs upscale the image embedding by 4x in total.
        self.output_upscaling = nn.Sequential(
            nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
            LayerNorm2d(transformer_dim // 4),
            activation(),
            nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
            activation(),
        )
        # One hypernetwork MLP per mask token.
        self.output_hypernetworks_mlps = nn.ModuleList(
            [
                MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
                for i in range(self.num_mask_tokens)
            ]
        )
        self.iou_prediction_head = MLP(
            transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth
        )
    def forward(
        self,
        image_embeddings: torch.Tensor,
        image_pe: torch.Tensor,
        sparse_prompt_embeddings: torch.Tensor,
        dense_prompt_embeddings: torch.Tensor,
        multimask_output: bool,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Predict masks given image and prompt embeddings.
        Arguments:
          image_embeddings (torch.Tensor): the embeddings from the image encoder
          image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
          sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
          dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
          multimask_output (bool): Whether to return multiple masks or a single
            mask.
        Returns:
          torch.Tensor: batched predicted masks
          torch.Tensor: batched predictions of mask quality
        """
        masks, iou_pred = self.predict_masks(
            image_embeddings=image_embeddings,
            image_pe=image_pe,
            sparse_prompt_embeddings=sparse_prompt_embeddings,
            dense_prompt_embeddings=dense_prompt_embeddings,
        )
        # Select the correct mask or masks for output
        if multimask_output:
            # Skip index 0 (the single-mask token's output).
            mask_slice = slice(1, None)
        else:
            mask_slice = slice(0, 1)
        masks = masks[:, mask_slice, :, :]
        iou_pred = iou_pred[:, mask_slice]
        # Prepare output
        return masks, iou_pred
    def predict_masks(
        self,
        image_embeddings: torch.Tensor,
        image_pe: torch.Tensor,
        sparse_prompt_embeddings: torch.Tensor,
        dense_prompt_embeddings: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Predicts masks. See 'forward' for more details."""
        # Concatenate output tokens
        output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
        output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
        tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
        # Expand per-image data in batch direction to be per-mask
        src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
        src = src + dense_prompt_embeddings
        pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
        b, c, h, w = src.shape
        # Run the transformer
        hs, src = self.transformer(src, pos_src, tokens)
        iou_token_out = hs[:, 0, :]
        mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
        # Upscale mask embeddings and predict masks using the mask tokens
        src = src.transpose(1, 2).view(b, c, h, w)
        upscaled_embedding = self.output_upscaling(src)
        hyper_in_list: List[torch.Tensor] = []
        for i in range(self.num_mask_tokens):
            hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
        hyper_in = torch.stack(hyper_in_list, dim=1)
        b, c, h, w = upscaled_embedding.shape
        # Masks are a dot product of per-token hypernetwork outputs with the
        # upscaled image embedding.
        masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
        # Generate mask quality predictions
        iou_pred = self.iou_prediction_head(iou_token_out)
        return masks, iou_pred
# Lightly adapted from
# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
class MLP(nn.Module):
    """Simple multi-layer perceptron with ReLU between hidden layers.

    Args:
        input_dim (int): size of the input features.
        hidden_dim (int): size of every hidden layer.
        output_dim (int): size of the output features.
        num_layers (int): total number of linear layers.
        sigmoid_output (bool): if True, apply a sigmoid to the final output.
    """

    def __init__(
        self,
        input_dim: int,
        hidden_dim: int,
        output_dim: int,
        num_layers: int,
        sigmoid_output: bool = False,
    ) -> None:
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList(
            nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
        )
        self.sigmoid_output = sigmoid_output

    def forward(self, x):
        for i, layer in enumerate(self.layers):
            # ReLU between hidden layers; the final layer stays linear.
            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        if self.sigmoid_output:
            # torch.sigmoid: F.sigmoid is deprecated since PyTorch 1.0.
            x = torch.sigmoid(x)
        return x
| 6,614 | 36.372881 | 123 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/modeling/image_encoder.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Type
from .common import LayerNorm2d, MLPBlock
# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
class ImageEncoderViT(nn.Module):
    def __init__(
        self,
        img_size: int = 1024,
        patch_size: int = 16,
        in_chans: int = 3,
        embed_dim: int = 768,
        depth: int = 12,
        num_heads: int = 12,
        mlp_ratio: float = 4.0,
        out_chans: int = 256,
        qkv_bias: bool = True,
        norm_layer: Type[nn.Module] = nn.LayerNorm,
        act_layer: Type[nn.Module] = nn.GELU,
        use_abs_pos: bool = True,
        use_rel_pos: bool = False,
        rel_pos_zero_init: bool = True,
        window_size: int = 0,
        global_attn_indexes: Tuple[int, ...] = (),
    ) -> None:
        """
        Args:
            img_size (int): Input image size.
            patch_size (int): Patch size.
            in_chans (int): Number of input image channels.
            embed_dim (int): Patch embedding dimension.
            depth (int): Depth of ViT.
            num_heads (int): Number of attention heads in each ViT block.
            mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
            qkv_bias (bool): If True, add a learnable bias to query, key, value.
            norm_layer (nn.Module): Normalization layer.
            act_layer (nn.Module): Activation layer.
            use_abs_pos (bool): If True, use absolute positional embeddings.
            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
            window_size (int): Window size for window attention blocks.
            global_attn_indexes (list): Indexes for blocks using global attention.
        """
        super().__init__()
        self.img_size = img_size
        self.patch_embed = PatchEmbed(
            kernel_size=(patch_size, patch_size),
            stride=(patch_size, patch_size),
            in_chans=in_chans,
            embed_dim=embed_dim,
        )
        self.pos_embed: Optional[nn.Parameter] = None
        if use_abs_pos:
            # Initialize absolute positional embedding with pretrain image size.
            self.pos_embed = nn.Parameter(
                torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
            )
        self.blocks = nn.ModuleList()
        for i in range(depth):
            block = Block(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                norm_layer=norm_layer,
                act_layer=act_layer,
                use_rel_pos=use_rel_pos,
                rel_pos_zero_init=rel_pos_zero_init,
                # Blocks listed in global_attn_indexes attend globally
                # (window_size=0); all others use windowed attention.
                window_size=window_size if i not in global_attn_indexes else 0,
                input_size=(img_size // patch_size, img_size // patch_size),
            )
            self.blocks.append(block)
        # Neck: 1x1 conv to out_chans followed by a 3x3 conv, each with LayerNorm2d.
        self.neck = nn.Sequential(
            nn.Conv2d(
                embed_dim,
                out_chans,
                kernel_size=1,
                bias=False,
            ),
            LayerNorm2d(out_chans),
            nn.Conv2d(
                out_chans,
                out_chans,
                kernel_size=3,
                padding=1,
                bias=False,
            ),
            LayerNorm2d(out_chans),
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.patch_embed(x)
        if self.pos_embed is not None:
            x = x + self.pos_embed
        for blk in self.blocks:
            x = blk(x)
        # Blocks operate on (B, H, W, C); the conv neck expects channels-first.
        x = self.neck(x.permute(0, 3, 1, 2))
        return x
class Block(nn.Module):
    """Transformer blocks with support of window attention and residual propagation blocks"""
    def __init__(
        self,
        dim: int,
        num_heads: int,
        mlp_ratio: float = 4.0,
        qkv_bias: bool = True,
        norm_layer: Type[nn.Module] = nn.LayerNorm,
        act_layer: Type[nn.Module] = nn.GELU,
        use_rel_pos: bool = False,
        rel_pos_zero_init: bool = True,
        window_size: int = 0,
        input_size: Optional[Tuple[int, int]] = None,
    ) -> None:
        """
        Args:
            dim (int): Number of input channels.
            num_heads (int): Number of attention heads in each ViT block.
            mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
            qkv_bias (bool): If True, add a learnable bias to query, key, value.
            norm_layer (nn.Module): Normalization layer.
            act_layer (nn.Module): Activation layer.
            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
            window_size (int): Window size for window attention blocks. If it equals 0, then
                use global attention.
            input_size (int or None): Input resolution for calculating the relative positional
                parameter size.
        """
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            use_rel_pos=use_rel_pos,
            rel_pos_zero_init=rel_pos_zero_init,
            # Windowed attention only sees window_size x window_size tokens.
            input_size=input_size if window_size == 0 else (window_size, window_size),
        )
        self.norm2 = norm_layer(dim)
        self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer)
        self.window_size = window_size
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Pre-norm residual block: x + attn(norm(x)), then x + mlp(norm(x)).
        shortcut = x
        x = self.norm1(x)
        # Window partition
        if self.window_size > 0:
            H, W = x.shape[1], x.shape[2]
            x, pad_hw = window_partition(x, self.window_size)
        x = self.attn(x)
        # Reverse window partition
        if self.window_size > 0:
            x = window_unpartition(x, self.window_size, pad_hw, (H, W))
        x = shortcut + x
        x = x + self.mlp(self.norm2(x))
        return x
class Attention(nn.Module):
    """Multi-head Attention block with relative position embeddings."""
    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
        qkv_bias: bool = True,
        use_rel_pos: bool = False,
        rel_pos_zero_init: bool = True,
        input_size: Optional[Tuple[int, int]] = None,
    ) -> None:
        """
        Args:
            dim (int): Number of input channels.
            num_heads (int): Number of attention heads.
            qkv_bias (bool): If True, add a learnable bias to query, key, value.
            rel_pos (bool): If True, add relative positional embeddings to the attention map.
            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
            input_size (int or None): Input resolution for calculating the relative positional
                parameter size.
        """
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Standard scaled dot-product attention scaling factor.
        self.scale = head_dim**-0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.proj = nn.Linear(dim, dim)
        self.use_rel_pos = use_rel_pos
        if self.use_rel_pos:
            assert (
                input_size is not None
            ), "Input size must be provided if using relative positional encoding."
            # initialize relative positional embeddings
            self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
            self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        B, H, W, _ = x.shape
        # qkv with shape (3, B, nHead, H * W, C)
        qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        # q, k, v with shape (B * nHead, H * W, C)
        q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
        attn = (q * self.scale) @ k.transpose(-2, -1)
        if self.use_rel_pos:
            attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
        attn = attn.softmax(dim=-1)
        # Merge heads back into the channel dimension: (B, H, W, C).
        x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
        x = self.proj(x)
        return x
def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
    """
    Split a feature map into non-overlapping square windows, zero-padding the
    bottom/right edges when H or W is not a multiple of the window size.

    Args:
        x (tensor): input tokens with [B, H, W, C].
        window_size (int): window size.

    Returns:
        windows: windows after partition with [B * num_windows, window_size, window_size, C].
        (Hp, Wp): padded height and width before partition
    """
    B, H, W, C = x.shape
    # Amount of bottom/right padding needed to reach a multiple of window_size.
    pad_h = (-H) % window_size
    pad_w = (-W) % window_size
    if pad_h or pad_w:
        x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
    Hp, Wp = H + pad_h, W + pad_w
    grid = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
    windows = (
        grid.permute(0, 1, 3, 2, 4, 5)
        .contiguous()
        .view(-1, window_size, window_size, C)
    )
    return windows, (Hp, Wp)
def window_unpartition(
    windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int]
) -> torch.Tensor:
    """
    Reassemble windows produced by window_partition into one feature map and
    strip any bottom/right padding that was added.

    Args:
        windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
        window_size (int): window size.
        pad_hw (Tuple): padded height and width (Hp, Wp).
        hw (Tuple): original height and width (H, W) before padding.

    Returns:
        x: unpartitioned sequences with [B, H, W, C].
    """
    Hp, Wp = pad_hw
    H, W = hw
    num_windows = (Hp // window_size) * (Wp // window_size)
    B = windows.shape[0] // num_windows
    grid = windows.view(
        B, Hp // window_size, Wp // window_size, window_size, window_size, -1
    )
    x = grid.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
    # Drop any bottom/right padding added by window_partition.
    if Hp > H or Wp > W:
        x = x[:, :H, :W, :].contiguous()
    return x
def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
    """
    Look up relative positional embeddings for every (query, key) index pair.

    Args:
        q_size (int): size of query q.
        k_size (int): size of key k.
        rel_pos (Tensor): relative position embeddings (L, C).

    Returns:
        Extracted positional embeddings according to relative positions,
        shaped (q_size, k_size, C).
    """
    max_rel_dist = 2 * max(q_size, k_size) - 1
    if rel_pos.shape[0] == max_rel_dist:
        resized = rel_pos
    else:
        # Linearly interpolate the embedding table to the required length.
        resized = F.interpolate(
            rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
            size=max_rel_dist,
            mode="linear",
        )
        resized = resized.reshape(-1, max_rel_dist).permute(1, 0)

    # Scale coordinates by the long/short ratio so q and k grids align
    # when their sizes differ.
    q_ratio = max(k_size / q_size, 1.0)
    k_ratio = max(q_size / k_size, 1.0)
    q_coords = torch.arange(q_size)[:, None] * q_ratio
    k_coords = torch.arange(k_size)[None, :] * k_ratio
    relative_coords = (q_coords - k_coords) + (k_size - 1) * k_ratio
    return resized[relative_coords.long()]
def add_decomposed_rel_pos(
    attn: torch.Tensor,
    q: torch.Tensor,
    rel_pos_h: torch.Tensor,
    rel_pos_w: torch.Tensor,
    q_size: Tuple[int, int],
    k_size: Tuple[int, int],
) -> torch.Tensor:
    """
    Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
    https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950

    Args:
        attn (Tensor): attention map.
        q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
        rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
        rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
        q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
        k_size (Tuple): spatial sequence size of key k with (k_h, k_w).

    Returns:
        attn (Tensor): attention map with added relative positional embeddings.
    """
    q_h, q_w = q_size
    k_h, k_w = k_size
    # Per-axis relative position tables, resampled to the current sizes.
    Rh = get_rel_pos(q_h, k_h, rel_pos_h)
    Rw = get_rel_pos(q_w, k_w, rel_pos_w)

    B, _, dim = q.shape
    r_q = q.reshape(B, q_h, q_w, dim)
    # Height / width positional biases via a per-axis dot product.
    rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
    rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)

    # Broadcast-add each axis bias over the complementary key axis.
    biased = attn.view(B, q_h, q_w, k_h, k_w)
    biased = biased + rel_h[:, :, :, :, None]
    biased = biased + rel_w[:, :, :, None, :]
    return biased.view(B, q_h * q_w, k_h * k_w)
class PatchEmbed(nn.Module):
    """
    Convolutional image-to-patch embedding layer.
    """

    def __init__(
        self,
        kernel_size: Tuple[int, int] = (16, 16),
        stride: Tuple[int, int] = (16, 16),
        padding: Tuple[int, int] = (0, 0),
        in_chans: int = 3,
        embed_dim: int = 768,
    ) -> None:
        """
        Args:
            kernel_size (Tuple): kernel size of the projection layer.
            stride (Tuple): stride of the projection layer.
            padding (Tuple): padding size of the projection layer.
            in_chans (int): Number of input image channels.
            embed_dim (int): Patch embedding dimension.
        """
        super().__init__()
        # A single strided convolution maps each patch to an embed_dim vector.
        self.proj = nn.Conv2d(
            in_chans,
            embed_dim,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Project, then move channels last: (B, C, H, W) -> (B, H, W, C).
        return self.proj(x).permute(0, 2, 3, 1)
| 14,407 | 35.383838 | 202 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/modeling/prompt_encoder.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from torch import nn
from typing import Any, Optional, Tuple, Type
from .common import LayerNorm2d
class PromptEncoder(nn.Module):
    """
    Encodes point, box, and mask prompts into sparse/dense embeddings for
    input to SAM's mask decoder.
    """

    def __init__(
        self,
        embed_dim: int,
        image_embedding_size: Tuple[int, int],
        input_image_size: Tuple[int, int],
        mask_in_chans: int,
        activation: Type[nn.Module] = nn.GELU,
    ) -> None:
        """
        Arguments:
          embed_dim (int): The prompts' embedding dimension
          image_embedding_size (tuple(int, int)): The spatial size of the
            image embedding, as (H, W).
          input_image_size (int): The padded size of the image as input
            to the image encoder, as (H, W).
          mask_in_chans (int): The number of hidden channels used for
            encoding input masks.
          activation (nn.Module): The activation to use when encoding
            input masks.
        """
        super().__init__()
        self.embed_dim = embed_dim
        self.input_image_size = input_image_size
        self.image_embedding_size = image_embedding_size
        self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)

        # One learned token each for: negative point, positive point,
        # box top-left corner, box bottom-right corner.
        self.num_point_embeddings: int = 4
        self.point_embeddings = nn.ModuleList(
            [nn.Embedding(1, embed_dim) for _ in range(self.num_point_embeddings)]
        )
        # Token used in place of the positional term for padding points.
        self.not_a_point_embed = nn.Embedding(1, embed_dim)

        # Input masks arrive at 4x the embedding resolution and are reduced
        # back to (embed_H, embed_W) by two stride-2 convolutions.
        self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])
        self.mask_downscaling = nn.Sequential(
            nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
            LayerNorm2d(mask_in_chans // 4),
            activation(),
            nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
            LayerNorm2d(mask_in_chans),
            activation(),
            nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
        )
        # Dense embedding broadcast over the grid when no mask prompt is given.
        self.no_mask_embed = nn.Embedding(1, embed_dim)

    def get_dense_pe(self) -> torch.Tensor:
        """
        Returns the positional encoding used to encode point prompts,
        applied to a dense set of points the shape of the image encoding.

        Returns:
          torch.Tensor: Positional encoding with shape
            1x(embed_dim)x(embedding_h)x(embedding_w)
        """
        return self.pe_layer(self.image_embedding_size).unsqueeze(0)

    def _embed_points(
        self,
        points: torch.Tensor,
        labels: torch.Tensor,
        pad: bool,
    ) -> torch.Tensor:
        """Embeds point prompts (pixel coords; labels in {-1, 0, 1})."""
        points = points + 0.5  # Shift to center of pixel
        if pad:
            # Append one "not a point" entry per batch element so point-only
            # prompts line up with point+box prompts.
            pad_xy = torch.zeros((points.shape[0], 1, 2), device=points.device)
            pad_label = -torch.ones((labels.shape[0], 1), device=labels.device)
            points = torch.cat([points, pad_xy], dim=1)
            labels = torch.cat([labels, pad_label], dim=1)
        embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)
        # Padding entries: zero out the positional term, keep only the learned token.
        not_a_point = labels == -1
        embedding[not_a_point] = 0.0
        embedding[not_a_point] += self.not_a_point_embed.weight
        # Real points: add the negative (label 0) or positive (label 1) token.
        embedding[labels == 0] += self.point_embeddings[0].weight
        embedding[labels == 1] += self.point_embeddings[1].weight
        return embedding

    def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
        """Embeds box prompts as a pair of corner tokens."""
        boxes = boxes + 0.5  # Shift to center of pixel
        corners = boxes.reshape(-1, 2, 2)
        embedding = self.pe_layer.forward_with_coords(corners, self.input_image_size)
        embedding[:, 0, :] += self.point_embeddings[2].weight  # first corner token
        embedding[:, 1, :] += self.point_embeddings[3].weight  # second corner token
        return embedding

    def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
        """Embeds mask inputs by downscaling them to the embedding size."""
        return self.mask_downscaling(masks)

    def _get_batch_size(
        self,
        points: Optional[Tuple[torch.Tensor, torch.Tensor]],
        boxes: Optional[torch.Tensor],
        masks: Optional[torch.Tensor],
    ) -> int:
        """Infer the output batch size from whichever prompt is present."""
        if points is not None:
            return points[0].shape[0]
        if boxes is not None:
            return boxes.shape[0]
        if masks is not None:
            return masks.shape[0]
        return 1

    def _get_device(self) -> torch.device:
        return self.point_embeddings[0].weight.device

    def forward(
        self,
        points: Optional[Tuple[torch.Tensor, torch.Tensor]],
        boxes: Optional[torch.Tensor],
        masks: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Embeds different types of prompts, returning both sparse and dense
        embeddings.

        Arguments:
          points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
            and labels to embed.
          boxes (torch.Tensor or none): boxes to embed
          masks (torch.Tensor or none): masks to embed

        Returns:
          torch.Tensor: sparse embeddings for the points and boxes, with shape
            BxNx(embed_dim), where N is determined by the number of input points
            and boxes.
          torch.Tensor: dense embeddings for the masks, in the shape
            Bx(embed_dim)x(embed_H)x(embed_W)
        """
        bs = self._get_batch_size(points, boxes, masks)
        sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())
        if points is not None:
            coords, labels = points
            # Pad with a sentinel point only when no box supplies extra tokens.
            point_tokens = self._embed_points(coords, labels, pad=(boxes is None))
            sparse_embeddings = torch.cat([sparse_embeddings, point_tokens], dim=1)
        if boxes is not None:
            sparse_embeddings = torch.cat([sparse_embeddings, self._embed_boxes(boxes)], dim=1)
        if masks is not None:
            dense_embeddings = self._embed_masks(masks)
        else:
            # Broadcast the learned "no mask" token over the embedding grid.
            dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
                bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
            )
        return sparse_embeddings, dense_embeddings
class PositionEmbeddingRandom(nn.Module):
    """
    Positional encoding built from random spatial frequencies
    (random Fourier features over 2-D coordinates).
    """

    def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
        super().__init__()
        if scale is None or scale <= 0.0:
            scale = 1.0
        # Fixed random projection of 2-D coords onto num_pos_feats frequencies.
        self.register_buffer(
            "positional_encoding_gaussian_matrix",
            scale * torch.randn((2, num_pos_feats)),
        )

    def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
        """Positionally encode points that are normalized to [0,1]."""
        # Map [0, 1] -> [-1, 1], project onto the random frequencies,
        # then scale to angular units.
        projected = (2 * coords - 1) @ self.positional_encoding_gaussian_matrix
        projected = 2 * np.pi * projected
        # Output shape: d_1 x ... x d_n x (2 * num_pos_feats).
        return torch.cat([torch.sin(projected), torch.cos(projected)], dim=-1)

    def forward(self, size: Tuple[int, int]) -> torch.Tensor:
        """Generate positional encoding for a grid of the specified size."""
        h, w = size
        device: Any = self.positional_encoding_gaussian_matrix.device
        ones = torch.ones((h, w), device=device, dtype=torch.float32)
        # Pixel-center coordinates, normalized to [0, 1].
        y_embed = (ones.cumsum(dim=0) - 0.5) / h
        x_embed = (ones.cumsum(dim=1) - 0.5) / w
        encoding = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
        return encoding.permute(2, 0, 1)  # C x H x W

    def forward_with_coords(
        self, coords_input: torch.Tensor, image_size: Tuple[int, int]
    ) -> torch.Tensor:
        """Positionally encode points that are not normalized to [0,1]."""
        coords = coords_input.clone()
        coords[:, :, 0] = coords[:, :, 0] / image_size[1]  # x / W
        coords[:, :, 1] = coords[:, :, 1] / image_size[0]  # y / H
        return self._pe_encoding(coords.to(torch.float))  # B x N x C
| 8,594 | 38.976744 | 97 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.