Search is not available for this dataset
text stringlengths 75 104k |
|---|
def fetch_metric(self, metric, start, end, tags=None, aggregator="sum",
                 downsample=None, ms_resolution=True):
    """Fetch time series data from OpenTSDB.

    Parameters:
        metric:
            A string representing a valid OpenTSDB metric.
        start:
            A datetime.datetime-like object representing the start of the
            range to query over.
        end:
            A datetime.datetime-like object representing the end of the
            range to query over.
        tags:
            A dict mapping tag names to tag values (both strings), e.g.
            ``{'user_id': '44'}``.  Defaults to no tag filtering.
        aggregator:
            The function for merging multiple time series together.  Not
            relevant for queries returning a single series.
            Valid values: "sum", "min", "max", "avg", "dev"
            See: http://opentsdb.net/docs/build/html/user_guide/query/aggregators.html
        downsample:
            A relative time interval to "downsample".  If you specify a
            downsampling of "5m" (five minutes), OpenTSDB will split data
            into five minute intervals and return one averaged data point
            per interval.  Format: "<amount><time_unit>", with valid time
            units "ms", "s", "m", "h", "d", "w", "n", "y".
            See: http://opentsdb.net/docs/build/html/user_guide/query/dates.html
        ms_resolution:
            Whether to output data point timestamps in milliseconds (True)
            or seconds (False).  If False and there are multiple data
            points within a second, those points are downsampled using the
            query's aggregation function.

    Returns:
        A dict mapping timestamps to data points ({} when no series matched).

    Raises:
        QueryError: if OpenTSDB answers with a non-200 status code.
    """
    # BUGFIX: 'tags' was a mutable default argument ({}) which is shared
    # between calls; use None as the sentinel instead.
    if tags is None:
        tags = {}
    query = "{aggregator}:{downsample}{metric}{{{tags}}}".format(
        aggregator=aggregator,
        # OpenTSDB downsample specs look like "5m-avg:".
        downsample=downsample + "-avg:" if downsample else "",
        metric=metric,
        tags=','.join("%s=%s" % (k, v) for k, v in tags.items())
    )
    params = {
        'ms': ms_resolution,
        # OpenTSDB accepts epoch timestamps with millisecond precision.
        'start': '{0:.3f}'.format(start.timestamp()),
        'end': '{0:.3f}'.format(end.timestamp()),
        'm': query
    }
    response = self.__request("/query", params)
    if response.status_code == 200:
        try:
            return response.json()[0]['dps']
        except IndexError:
            # Empty data set: the query matched no time series.
            return {}
    raise QueryError(response.json())
def fetch_sorted_metric(self, *args, **kwargs):
    """Fetch time series data from OpenTSDB, sorted by timestamp.

    Accepts exactly the same parameters as `fetch_metric`; the result is
    returned as a list of (timestamp, value) tuples ordered by their
    numeric timestamp value.
    """
    samples = self.fetch_metric(*args, **kwargs)

    def by_timestamp(pair):
        # Timestamps arrive as strings; order them numerically.
        return float(pair[0])

    return sorted(samples.items(), key=by_timestamp)
def pfreduce(func, iterable, initial=None):
    """A pointfree reduce / left fold function: Applies a function of two
    arguments cumulatively to the items supplied by the given iterable, so
    as to reduce the iterable to a single value. If an initial value is
    supplied, it is placed before the items from the iterable in the
    calculation, and serves as the default when the iterable is empty.

    :param func: A function of two arguments
    :param iterable: An iterable yielding input for the function
    :param initial: An optional initial input for the function
    :rtype: Single value

    Example::

        >>> from operator import add
        >>> sum_of_squares = pfreduce(add, initial=0) * pfmap(lambda n: n**2)
        >>> sum_of_squares([3, 4, 5, 6])
        86
    """
    iterator = iter(iterable)
    try:
        first_item = next(iterator)
    except StopIteration:
        # Empty iterable: the initial value (possibly None) is the result.
        return initial
    # BUGFIX: compare against None explicitly.  A falsy initial value such
    # as 0 or "" must still take part in the fold — it matters for
    # non-commutative functions (e.g. operator.sub with initial=0).
    if initial is not None:
        value = func(initial, first_item)
    else:
        value = first_item
    for item in iterator:
        value = func(value, item)
    return value
def pfcollect(iterable, n=None):
    """Collects and returns a list of values from the given iterable. If
    the n parameter is not specified, collects all values from the
    iterable.

    :param iterable: An iterable yielding values for the list
    :param n: An optional maximum number of items to collect
    :rtype: List of values from the iterable

    Example::

        >>> @pointfree
        ... def fibonaccis():
        ...     a, b = 0, 1
        ...     while True:
        ...         a, b = b, a+b
        ...         yield a
        >>> (pfcollect(n=10) * fibonaccis)()
        [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
    """
    # BUGFIX: compare against None rather than truthiness so that n=0
    # collects zero items instead of draining the whole iterable.
    if n is not None:
        return list(itertools.islice(iterable, n))
    return list(iterable)
def pfprint(item, end='\n', file=None):
    """Print a single item.

    :param item: The item to print
    :param end: String appended after the printed output
    :param file: File to which output is written (defaults to sys.stdout)
    :rtype: None

    Example::

        >>> from operator import add
        >>> fn = pfreduce(add, initial=0) >> pfprint
        >>> fn([1, 2, 3, 4])
        10
    """
    # sys.stdout must be resolved at call time rather than captured as a
    # default argument value; tools that rebind sys.stdout (doctest, test
    # harnesses) would otherwise never see the output.
    target = sys.stdout if file is None else file
    print(item, end=end, file=target)
def pfprint_all(iterable, end='\n', file=None):
    """Print every item yielded by an iterable.

    :param iterable: An iterable yielding values to print
    :param end: String appended after each printed item
    :param file: File to which output is written
    :rtype: None

    Example::

        >>> @pointfree
        ... def prefix_all(prefix, iterable):
        ...     for item in iterable:
        ...         yield "%s%s" % (prefix, item)
        >>> fn = prefix_all("An item: ") >> pfprint_all
        >>> fn(["foo", "bar", "baz"])
        An item: foo
        An item: bar
        An item: baz
    """
    # Delegate the file-default handling to pfprint, one item at a time.
    for element in iterable:
        pfprint(element, end=end, file=file)
def __sig_from_func(self, func):
    """Extract function signature, default arguments, keyword-only
    arguments, and whether or not variable positional or keyword
    arguments are allowed. This also supports calling unbound instance
    methods by passing an object instance as the first argument;
    however, unbound classmethod and staticmethod objects are not
    callable, so we do not attempt to support them here."""
    # getfullargspec tuple layout (by index):
    #   0: args, 1: varargs, 2: varkw, 3: defaults,
    #   4: kwonlyargs, 5: kwonlydefaults, 6: annotations
    if isinstance(func, types.MethodType):
        # A bound instance or class method: drop the implicit first
        # parameter (self/cls), which is already bound.
        argspec = getfullargspec(func.__func__)
        self.pargl = argspec[0][1:]
    else:
        # A regular function, an unbound instance method, or a
        # bound static method: keep a copy of the full argument list.
        argspec = getfullargspec(func)
        self.pargl = argspec[0][:]
    if argspec[3] is not None:
        # Defaults align with the *last* len(defaults) positional
        # parameters; map each defaulted parameter name to its value.
        def_offset = len(self.pargl) - len(argspec[3])
        self.def_argv = dict((self.pargl[def_offset+i], argspec[3][i])
                             for i in range(len(argspec[3])))
    else:
        self.def_argv = {}
    self.var_pargs = argspec[1] is not None  # function declares *args?
    self.var_kargs = argspec[2] is not None  # function declares **kwargs?
    self.kargl = argspec[4]  # keyword-only argument names
    # We need keyword-only arguments' default values too.
    if argspec[5] is not None:
        self.def_argv.update(argspec[5])
def __sig_from_partial(self, inst):
    """Copy the cached call signature from another partial instance.

    The flag attributes are plain booleans; the name lists and default
    mapping are duplicated so later mutation of either instance cannot
    leak into the other.
    """
    self.var_pargs = inst.var_pargs
    self.var_kargs = inst.var_kargs
    self.pargl = list(inst.pargl)
    self.kargl = list(inst.kargl)
    self.def_argv = dict(inst.def_argv)
def make_copy(klass, inst, func=None, argv=None, extra_argv=None, copy_sig=True):
    """Makes a new instance of the partial application wrapper based on
    an existing instance, optionally overriding the original's wrapped
    function and/or saved arguments.

    :param inst: The partial instance we're copying
    :param func: Override the original's wrapped function
    :param argv: Override saved argument values
    :param extra_argv: Override saved extra positional arguments
    :param copy_sig: Copy original's signature?
    :rtype: New partial wrapper instance
    """
    # BUGFIX: compare the overrides against None instead of truthiness so
    # that an explicitly passed empty dict/list still overrides the saved
    # state instead of being silently ignored.
    dest = klass(func if func is not None else inst.func)
    dest.argv = (argv if argv is not None else inst.argv).copy()
    dest.extra_argv = list(extra_argv if extra_argv is not None else inst.extra_argv)
    if copy_sig:
        dest.__sig_from_partial(inst)
    return dest
def __new_argv(self, *new_pargs, **new_kargs):
    """Calculate new argv and extra_argv values resulting from adding
    the specified positional and keyword arguments.

    Returns a (new_argv, new_extra_argv) tuple; the instance itself is
    not modified.  Raises TypeError when a positional argument cannot be
    placed (no free parameter slot and no *args) or when a keyword
    argument is unknown (and the function takes no **kwargs).
    """
    new_argv = self.argv.copy()
    new_extra_argv = list(self.extra_argv)
    for v in new_pargs:
        # Find the first declared positional parameter that has not been
        # filled by an earlier partial application.
        arg_name = None
        for name in self.pargl:
            if not name in new_argv:
                arg_name = name
                break
        if arg_name:
            new_argv[arg_name] = v
        elif self.var_pargs:
            # No free named slot, but the function accepts *args.
            new_extra_argv.append(v)
        else:
            # Count how many positional slots were already filled before
            # this call, to produce an accurate error message.
            num_prev_pargs = len([name for name in self.pargl if name in self.argv])
            raise TypeError("%s() takes exactly %d positional arguments (%d given)" \
                % (self.__name__,
                   len(self.pargl),
                   num_prev_pargs + len(new_pargs)))
    for k, v in new_kargs.items():
        # A keyword argument is acceptable when the function takes
        # **kwargs, or when it names a declared positional or
        # keyword-only parameter.
        if not (self.var_kargs or (k in self.pargl) or (k in self.kargl)):
            raise TypeError("%s() got an unexpected keyword argument '%s'" \
                % (self.__name__, k))
        new_argv[k] = v
    return (new_argv, new_extra_argv)
def ignore_certain_metainf_files(filename):
    """
    We do not support multiple signatures in XPI signing because the client
    side code makes some pretty reasonable assumptions about a single signature
    on any given JAR. This function returns True if the file name given is one
    that we dispose of to prevent multiple signatures.
    """
    disposable_patterns = ("META-INF/manifest.mf",
                           "META-INF/*.sf",
                           "META-INF/*.rsa",
                           "META-INF/*.dsa",
                           "META-INF/ids.json")
    # Explicitly match against all upper case to prevent the kind of
    # runtime errors that lead to https://bugzil.la/1169574
    upper_name = filename.upper()
    return any(fnmatch.fnmatchcase(upper_name, pattern.upper())
               for pattern in disposable_patterns)
def file_key(filename):
    '''Sort key for xpi files.

    The filenames in a manifest are ordered so that files not in a
    directory come before files in any directory, ordered
    alphabetically but ignoring case, with a few exceptions
    (install.rdf, chrome.manifest, icon.png and icon64.png come at the
    beginning; licenses come at the end).

    This order does not appear to affect anything in any way, but it
    looks nicer.
    '''
    if filename == 'install.rdf':
        priority = 1
    elif filename in ("chrome.manifest", "icon.png", "icon64.png"):
        priority = 2
    elif filename in ("MPL", "GPL", "LGPL", "COPYING",
                      "LICENSE", "license.txt"):
        priority = 5
    else:
        priority = 4
    # os.path.split makes top-level files ('' head) sort before any
    # file inside a directory.
    return (priority, os.path.split(filename.lower()))
def vlq2int(data):
    """Read one VLQ-encoded integer value from an input data stream."""
    # The VLQ is little-endian: the low 7 bits of each byte carry
    # payload, and the high bit marks that another byte follows.
    current = ord(data.read(1))
    value = current & 0x7F
    shift = 7
    while current & 0x80:
        current = ord(data.read(1))
        value |= (current & 0x7F) << shift
        shift += 7
    return value
def read_table(data, fields):
    """Read a table structure.

    These are used by Blizzard to collect pieces of data together. Each
    value is prefixed by two bytes, first denoting (doubled) index and the
    second denoting some sort of key -- so far it has always been '09'. The
    actual value follows as a Variable-Length Quantity, also known as uintvar.
    The actual value is also doubled.

    In some tables the keys might jump from 0A 09 to 04 09 for example.
    I have no idea why this happens, as the next logical key is 0C. Perhaps
    it's a table in a table? Some sort of headers might exist for these
    tables, I'd imagine at least denoting length. Further research required.
    """
    table = {}
    for field_name in fields:
        data.read(2)  # Skip the two-byte (index, key) prefix.
        value = vlq2int(data) / 2  # Stored values are doubled.
        if field_name == 'unknown':
            # Unknown fields are consumed from the stream but discarded.
            continue
        table[field_name] = value
    return table
def _parse_header(self):
    """Parse the user data header portion of the replay.

    Returns an OrderedDict holding the release flag, version components,
    a combined 'version' string, and the game duration.  Raises
    ValueError if the archive is not a StarCraft II replay.
    """
    header = OrderedDict()
    user_data_header = self.archive.header['user_data_header']['content']
    if re.search(r'StarCraft II replay', user_data_header):
        # Python 2 style StringIO module: wrap the raw content so
        # read_table() can consume it as a stream.
        user_data_header = StringIO.StringIO(user_data_header)
        user_data_header.seek(30)  # Just skip the beginning.
        header.update(read_table(user_data_header, ['release_flag',
                                                    'major_version',
                                                    'minor_version',
                                                    'maintenance_version',
                                                    'build_number',
                                                    'unknown',
                                                    'unknown',
                                                    'duration']))
        # Some post processing is required.
        header['version'] = '%s.%s.%s.%s' % (header['major_version'],
                                             header['minor_version'],
                                             header['maintenance_version'],
                                             header['build_number'])
        if not header['release_flag']:
            # A zero release flag marks a development build.
            header['version'] += ' (dev)'
        # Duration is actually stored as 1/16th of a seconds. Go figure.
        # (Under Python 2 this is integer division — presumably intended;
        # under Python 3 it would yield a float.  TODO confirm.)
        header['duration'] /= 16
    else:
        raise ValueError("The given file is not a StarCraft II replay.")
    return header
def get_duration(self, seconds):
    """Transform a duration in seconds into a human-readable string.

    Examples: 125 -> "2m 5s", 3725 -> "1h 2m 5s".
    """
    minutes, remaining_seconds = divmod(seconds, 60)
    prefix = ""
    if minutes >= 60:
        # Only split out hours when there is at least one full hour.
        hours, minutes = divmod(minutes, 60)
        prefix = "%sh " % hours
    return "%s%sm %ss" % (prefix, minutes, remaining_seconds)
def print_details(self):
    """Print a summary of the game details: map, duration, version and
    one aligned line per player.

    BUGFIX: converted Python-2-only print statements to the print()
    function.  Output is unchanged — print() with multiple arguments
    separates them with a single space exactly like the old
    comma-statement form did.
    """
    print('Map     ', self.map)
    print('Duration', self.duration)
    print('Version ', self.version)
    print('Team  Player       Race       Color')
    print('-----------------------------------')
    for player in self.players:
        # Columns sized to match the header above.
        print('{team:<5} {name:12} {race:10} {color}'.format(**player))
def data(self):
    """
    Pull the values the user typed from the UI widgets into plain
    attributes on this object.
    """
    ui = self.ui
    # Plain text fields: (attribute name on self, widget name on the UI).
    text_fields = (
        ('batch_name_value', 'batch_name_value'),
        ('saa_values', 'saa_values'),
        ('sza_values', 'sza_values'),
        ('p_values', 'p_values'),
        ('x_value', 'x_value'),
        ('y_value', 'y_value'),
        ('g_value', 'g_value'),
        ('s_value', 's_value'),
        ('z_value', 'z_value'),
        ('wavelength_values', 'wavelength_values'),
        ('verbose_value', 'verbose_value'),
        ('phytoplankton_path', 'phyto_path'),
        ('bottom_path', 'bottom_path'),
        ('executive_path', 'exec_path'),
    )
    for attr_name, widget_name in text_fields:
        setattr(self, attr_name, getattr(ui, widget_name).text())
    # The CPU count comes from a combo box, not a line edit.
    self.nb_cpu = ui.nb_cpu.currentText()
    # Explicitly coerced to str, as in the original.
    self.report_parameter_value = str(ui.report_parameter_value.text())
def search_file_result(self):
    """
    Ask the user for a result file; once one is chosen, load its data
    and display the associated graphic.
    """
    # Only active on the normal-mode tab.
    if self.ui.tabWidget.currentIndex() != TabWidget.NORMAL_MODE:
        return
    self.result_file = self.file_dialog.getOpenFileName(
        caption=str("Open Report File"), directory="./outputs")
    if self.result_file == '':
        # Dialog cancelled: nothing to display.
        return
    self.ui.show_all_curves.setDisabled(False)
    self.ui.show_grid.setDisabled(False)
    self.data_processing()
    self.display_the_graphic(self.num_line, self.wavelength, self.data_wanted, self.information)
    self.authorized_display = True
def check_values(self):
    """
    Validate the values previously cached from the UI by `data()`.

    Every field gets a boolean `error_*` attribute; the label(s) tied to
    a field are coloured red on error or light grey when valid.  The
    aggregate result is stored in `self.without_error`.

    No particular checking is done for the path fields (the '(.*)'
    pattern accepts anything).  The comma-separated list fields
    (saa/sza/p/wavelength) are currently not checked at all: the regular
    expression written for them never handled single letters or
    leading/trailing dots and remains disabled, so their error flags
    always stay False.

    BUGFIX: the s_value check used to store its outcome in
    `error_x_result` (copy/paste slip), and a failing x_value check left
    the shared "particles" label in the no-error colour.  Both fixed.
    """
    error_color = 'color: red'
    no_error_color = 'color: 0.75'  # light gray

    def paint(labels, color):
        # Colour every label associated with one input field.
        for label in labels:
            label.setStyleSheet(color)

    def check_qstring(match, widget, labels):
        # Empty or non-matching text is an error.  An AttributeError
        # (no match object, or no .isEmpty() on the text) also counts
        # as an error, mirroring the original try/except behaviour.
        try:
            error = bool(widget.text().isEmpty()) or (match.group() != widget.text())
        except AttributeError:
            error = True
        paint(labels, error_color if error else no_error_color)
        return error

    def check_plain(match, widget, labels):
        # Error when the regex did not match, or the match does not
        # cover the exact widget text.
        try:
            error = match.group() != widget.text()
        except AttributeError:
            error = True
        paint(labels, error_color if error else no_error_color)
        return error

    # Start from a clean slate: every field presumed valid.
    self.error_batch_name = False
    self.error_report_parameter = False
    self.error_saa_result = False
    self.error_sza_result = False
    self.error_p_result = False
    self.error_wavelength_result = False
    self.error_x_result = False
    self.error_y_result = False
    self.error_g_result = False
    self.error_s_result = False
    self.error_z_result = False
    self.error_verbose_result = False
    self.error_phytoplankton_path_result = False
    self.error_bottom_path_result = False
    self.error_executive_path_result = False
    # NOTE(review): as in the original, validation only runs for the
    # normal-mode tab — confirm reverse mode really needs no checks.
    if self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE:
        # Free-text fields: '(.*)' accepts any string.
        self.prog = re.compile('(.*)')
        self.error_batch_name = check_qstring(
            self.prog.search(self.batch_name_value),
            self.ui.batch_name_value, [self.ui.batch_name_label])
        self.error_report_parameter = check_qstring(
            self.prog.search(self.report_parameter_value),
            self.ui.report_parameter_value, [self.ui.report_parameter_label])
        # ---------------------------------------------------#
        # The following checks values containing only numbers.
        # ---------------------------------------------------#
        self.prog_2 = re.compile('(^([0-9]+[.]?[0-9]*[,]?){0,}[^,])|(^([0-9]+[,]){1,}[^,])')
        self.error_x_result = check_plain(
            self.prog_2.search(self.x_value), self.ui.x_value,
            [self.ui.particles_label, self.ui.x_label])
        self.error_y_result = check_plain(
            self.prog_2.search(self.y_value), self.ui.y_value,
            [self.ui.particles_label, self.ui.y_label])
        self.error_g_result = check_plain(
            self.prog_2.search(self.g_value), self.ui.g_value,
            [self.ui.organic_label, self.ui.g_label])
        self.error_s_result = check_plain(
            self.prog_2.search(self.s_value), self.ui.s_value,
            [self.ui.organic_label, self.ui.s_label])
        self.error_z_result = check_plain(
            self.prog_2.search(self.z_value), self.ui.z_value,
            [self.ui.z_label])
        # Verbosity level: a digit from 1 to 6.
        self.prog_3 = re.compile('[1-6]+')
        self.error_verbose_result = check_plain(
            self.prog_3.search(self.verbose_value), self.ui.verbose_value,
            [self.ui.verbose_label])
        # ------------------------------------------------#
        # The following checks values containing only path.
        # ------------------------------------------------#
        # (A stricter path regex never worked; '(.*)' accepts anything.)
        self.prog_4 = re.compile('(.*)')
        self.error_phytoplankton_path_result = check_plain(
            self.prog_4.search(self.phytoplankton_path), self.ui.phyto_path,
            [self.ui.phyto_label])
        self.error_bottom_path_result = check_plain(
            self.prog_4.search(self.bottom_path), self.ui.bottom_path,
            [self.ui.bottom_label])
        self.error_executive_path_result = check_plain(
            self.prog_4.search(self.executive_path), self.ui.exec_path,
            [self.ui.execPath_label])
        self.without_error = not any((
            self.error_batch_name, self.error_report_parameter,
            self.error_saa_result, self.error_sza_result,
            self.error_p_result, self.error_wavelength_result,
            self.error_x_result, self.error_y_result,
            self.error_g_result, self.error_s_result,
            self.error_z_result, self.error_verbose_result,
            self.error_phytoplankton_path_result,
            self.error_bottom_path_result,
            self.error_executive_path_result))
def write_to_file(self):
    """
    This function calls "gui_batch.py" with inputs values to write the batch file.
    """
    batch = BatchFile(self.batch_name_value, self.p_values, self.x_value,
                      self.y_value, self.g_value, self.s_value,
                      self.z_value, self.wavelength_values, self.verbose_value,
                      self.phytoplankton_path, self.bottom_path, self.nb_cpu,
                      self.executive_path, self.saa_values,
                      self.sza_values, self.report_parameter_value)
    # The output file is named after the batch itself.
    batch.write_batch_to_file(str(self.batch_name_value + "_batch.txt"))
def data_processing(self):
    """
    Split the result file into the tabular information displayed under
    the curves and the numeric wavelength data used to plot them.

    Sets: self.information (descriptive cells of every line),
          self.wavelength (first data row, the wavelengths),
          self.data_wanted (remaining data rows, one per curve),
          self.num_line (number of lines read).

    Raises SystemExit when no "wave length (nm)" column is present.
    """
    # 'with' guarantees the handle is closed even if parsing fails
    # (the original leaked it on the error path).
    with open(str(self.result_file), 'r') as result_file:
        # Each comma starts a new cell.
        lines_array = [line.split(',') for line in result_file]
    labels_line = lines_array[0]
    # Locate the column that separates information from wavelength data.
    separator_index = None
    for position, label in enumerate(labels_line):
        if "wave length (nm)" in label:
            separator_index = position
            break
    if separator_index is None:
        # BUGFIX: the original wrote `raise sys.exit(...)`; sys.exit()
        # itself raises SystemExit, so the `raise` never executed.
        sys.exit("Warning : There is no value named 'wavelength' in the file used to plot curves. "
                 "So, I can't separate data to plot curves and data about tests linking with these curves.")
    # Cells before the separator are descriptive information; cells after
    # it are wavelength data.  The separator cell itself is dropped.
    self.information = [line[:separator_index] for line in lines_array]
    data_wavelength = [
        [float(cell.strip('\n').strip('\"')) for cell in line[separator_index + 1:]]
        for line in lines_array]
    self.num_line = len(lines_array)
    self.wavelength = data_wavelength[0]    # First line: the wavelengths.
    self.data_wanted = data_wavelength[1:]  # Other lines: curve data.
def display_the_graphic(self, num_line, wavelength, data_wanted, information):
    """
    Plot the results through the "MplCanvas" class of
    "gui_matplotlibwidgetFile.py".

    Inputs : num_line    : The number of cases.
             wavelength  : The wavelengths.
             data_wanted : The data for wavelengths.
             information : The information of all curves to display.
    """
    self.nb_case = num_line - 1  # Number of lines, i.e. number of tests.
    self.graphic_slider(self.nb_case)
    self.mpl_canvas.update_fields(wavelength, data_wanted, self.slider_value)
    # Qt reports 2 for a fully checked checkbox.
    grid = self.ui.show_grid.checkState() == 2
    self.flag_curves = self.ui.show_all_curves.checkState() == 2
    # Both checkbox states lead to the same pair of calls; only the
    # flag passed along differs.
    self.mpl_canvas.display_graphic(self.flag_curves, self.ui, grid)
    self.print_graphic_information(self.slider_value, information)
def display_the_graphic_connection(self):
    """
    Zero-argument wrapper around `display_the_graphic` for the slider.

    Qt signal connections require a callable without parameters, so this
    simply forwards the values cached on the instance.
    """
    self.display_the_graphic(self.num_line, self.wavelength,
                             self.data_wanted, self.information)
def print_graphic_information(self, num_curve, information):
    """
    Display the tabular information attached to the curves.

    Inputs : num_curve   : index of the curve's data line to display.
             information : rows of information for all curves; row 0
                           holds the column labels.

    BUGFIX: the original while loop re-assigned the same eight header
    labels len(labels)+1 times (and used `<=` on the bound); the labels
    are now set exactly once.  The fixed eight-column layout itself is
    unchanged (TODO kept from the original: create the labels
    dynamically from the number of columns).
    """
    label_information = information[0]
    data_information = information[1:]
    # Header row: one UI label per column (fixed eight-column layout).
    header_widgets = (self.ui.column1_label, self.ui.column2_label,
                      self.ui.column3_label, self.ui.column4_label,
                      self.ui.column5_label, self.ui.column6_label,
                      self.ui.column7_label, self.ui.column8_label)
    for widget, text in zip(header_widgets, label_information):
        widget.setText(text.strip('\"'))
    # Fill the result row matching the requested curve, when it exists.
    if 0 <= num_curve < len(data_information):
        row = data_information[num_curve]
        result_widgets = (self.ui.column1_result, self.ui.column2_result,
                          self.ui.column3_result, self.ui.column4_result,
                          self.ui.column5_result, self.ui.column6_result,
                          self.ui.column7_result, self.ui.column8_result)
        for widget, value in zip(result_widgets, row):
            widget.setText(value)
def graphic_slider(self, nb_case):
    """
    Scale the slider to the number of curves displayed.

    Input : nb_case : the number of cases (curves).
    Return : the current slider value.

    TODO kept from the original: the range is re-created on every call;
    only set it when actually necessary.
    """
    slider = self.ui.sens
    slider.setDisabled(False)
    slider.setRange(0, int(nb_case - 1))
    self.slider_value = slider.value()
    return self.slider_value
def display_error_message(self):
    """
    Show the warning image and the red error message used when a wrong
    value has been typed.
    """
    ui = self.ui
    ui.error_label.setScaledContents(True)   # Warning image shown.
    ui.error_text_label.show()               # Warning message shown.
    ui.error_text_label.setStyleSheet('color: red')
def hide_error_message(self):
    """
    Hide the warning image and the error message once all values are
    correct again.
    """
    ui = self.ui
    ui.error_label.setScaledContents(False)  # Warning image hidden.
    ui.error_text_label.hide()
def run(self):
    """
    Execute planarRad using the batch file built from the UI values.

    Known issue kept from the original: planarrad may fail to start with
    "/bin/sh: 1: ../planarrad.py: not found" when paths are wrong.
    """
    print('Executing planarrad')
    # Only the normal (non-reverse) mode is handled here.
    if self.ui.tabWidget.currentIndex() != TabWidget.NORMAL_MODE:
        return
    self.data()
    self.check_values()
    if not self.without_error:
        self.display_error_message()
        return
    self.is_running = True
    self.hide_error_message()
    self.write_to_file()
    os.chdir('./')
    self.progress_bar()
    # BUGFIX: the original used rstrip('gui/'), which strips any trailing
    # run of the characters 'g', 'u', 'i' and '/' — it can eat valid
    # characters from the parent directory name.  Go up one directory
    # level explicitly instead.
    gui_dir = os.path.dirname(os.path.realpath(__file__))
    this_dir = os.path.dirname(gui_dir)
    batch_file = os.path.join(this_dir, "inputs/batch_files/" + str(self.batch_name_value) + "_batch.txt")
    print(batch_file)
    # NOTE(review): shell=True with an interpolated path; acceptable for
    # a local GUI, but an argument list without a shell would be safer.
    self.p = subprocess.Popen(
        ["./planarrad.py -i " + batch_file],
        shell=True)
    if self.ui.progressBar.value() == 100:
        self.display_the_graphic(self.num_line, self.wavelength, self.data_wanted, self.information)
def cancel_planarrad(self):
    """
    Cancel a running PlanarRad job after user confirmation.

    Sends SIGTERM to the subprocess and resets the progress bar.
    """
    # TODO(review): this path still needs to be exercised/tested.
    # Fixed: use logical 'and' instead of bitwise '&' on booleans.
    if self.is_running and self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE:
        cancel = QtGui.QMessageBox.question(self.ui.cancel, 'Cancel PlanarRad', "Are you sure to cancel ?",
                                            QtGui.QMessageBox.Yes,
                                            QtGui.QMessageBox.No)
        if cancel == QtGui.QMessageBox.Yes:
            self.is_running = False
            os.kill(self.p.pid, signal.SIGTERM)
            print("Necessary to check if cancel_planarrad works well !")
            self.ui.progressBar.reset()
def quit(self):
    """
    Quit PlanarRad, refusing to exit while a job is still running.
    """
    # TODO(review): no confirmation dialog is wired to the window close
    # button (clicking the cross bypasses this method).
    if self.is_running:
        QtGui.QMessageBox.warning(self.ui.quit, 'Warning !',
                                  "PlanarRad is running. Stop it before quit !",
                                  QtGui.QMessageBox.Ok)
        return
    answer = QtGui.QMessageBox.question(self.ui.quit, 'Quit PlanarRad', "Are you sure to quit ?",
                                        QtGui.QMessageBox.Yes,
                                        QtGui.QMessageBox.No)
    if answer == QtGui.QMessageBox.Yes:
        QtGui.qApp.quit()
def save_figure(self):
    """
    Save the currently displayed figure as 'Default_figure.png' and move
    it into the './Artists_saved' directory.
    """
    # TODO(review): increment the file name so repeated saves do not
    # overwrite the previous figure.
    import shutil
    default_name = 'Default_figure.png'
    self.ui.graphic_widget.canvas.print_figure(default_name)
    # shutil.move is portable (works on Windows) and raises on failure,
    # unlike the former os.system("mv ...").
    shutil.move('./' + default_name, './Artists_saved')
def save_figure_as(self):
    """
    Ask the user for a destination via a file dialog and save the
    displayed figure there as a PNG.
    """
    file_name = QtGui.QFileDialog.getSaveFileName()
    # Guard against a cancelled dialog (empty name); previously this
    # wrote a file literally named '.png'.
    if not file_name:
        return
    self.file_name = file_name + ".png"
    self.ui.graphic_widget.canvas.print_figure(str(self.file_name))
def open_log_file(self):
    """
    Open the PlanarRad log file and display its contents in the log window.
    """
    # Use a context manager so the file handle is always closed
    # (previously the handle leaked).
    with open(os.path.expanduser('~/.planarradpy/log/libplanarradpy.log')) as f:
        self.uiLog.textEdit.setPlainText(str(f.read()))
    self.log_window.show()
def open_documentation(self):
    """
    Open the locally-built HTML documentation in a browser window.
    """
    doc_window = Window()
    index_path = os.path.join(os.getcwd(), './docs/_build/html/index.html')
    doc_window.view.load(QtCore.QUrl.fromLocalFile(index_path))
    doc_window.show()
    doc_window.exec_()
def prerequisite_actions(self):
    """
    This function does all required actions at the beginning when we run the GUI.

    Hides the error widgets, disables the graphic controls until data is
    available, and seeds default values for verbosity and the report
    parameter.
    """
    self.hide_error_message()
    self.ui.show_all_curves.setDisabled(True)
    self.ui.sens.setDisabled(True)
    self.ui.show_grid.setDisabled(True)
    pathname = os.path.dirname(sys.argv[0])
    path = os.path.abspath(pathname)
    # self.phytoplankton_path = self.ui.phyto_path.setText(path.replace('gui', 'inputs/iop_files'))
    # self.bottom_path = self.ui.bottom_path.setText(path.replace('gui', 'inputs/bottom_files'))
    # self.executive_path = self.ui.exec_path.setText("Decide where will be 'jude2_install/bin'")
    # NOTE(review): setText() returns None, so the two attributes below are
    # always None after assignment — presumably only the setText side effect
    # matters; confirm nothing reads these attributes.
    self.verbose_value = self.ui.verbose_value.setText("6")
    self.report_parameter_value = self.ui.report_parameter_value.setText("Rrs")
    self.ui.progressBar.reset()
def click(self, event):
    """
    Intercept a right click on the canvas and open the context menu at
    the cursor position.
    """
    right_button = 3  # matplotlib's code for the right mouse button
    if event.button == right_button and \
            self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE:
        self.pos = QtGui.QCursor().pos()
        self.graphic_context_menu(self.pos)
def mouse_move(self, event):
    """
    Track the mouse position on the canvas and refresh the target display.
    """
    if self.ui.tabWidget.currentIndex() != TabWidget.NORMAL_MODE:
        return
    self.posX = event.xdata
    self.posY = event.ydata
    self.graphic_target(self.posX, self.posY)
def graphic_context_menu(self, pos):
    """
    Open a context menu on the graphic offering to save the figure.

    Inputs : pos -- the position of the mouse cursor.
    """
    menu = QtGui.QMenu()
    self.actionSave_bis = menu.addAction("Save Figure")
    self.actionSave_as_bis = menu.addAction("Save Figure As ...")
    chosen = menu.exec_(self.table_widget.mapFromGlobal(pos))
    if chosen == self.actionSave_bis:
        self.save_figure()
    elif chosen == self.actionSave_as_bis:
        self.save_figure_as()
def graphic_target(self, x, y):
    """
    Refresh the plot and display the mouse coordinates.

    x and y may be None when the cursor leaves the axes; the update is
    then skipped silently.
    """
    if self.authorized_display:
        try:
            self.display_the_graphic(self.num_line, self.wavelength, self.data_wanted, self.information)
            self.ui.mouse_coordinate.setText("(%0.3f, %0.3f)" % (x, y))
        except Exception:
            # Narrowed from a bare 'except:' (which also swallowed
            # KeyboardInterrupt/SystemExit); formatting None coordinates
            # raises TypeError, which is expected on mouse exit.
            pass
def genesis_signing_lockset(genesis, privkey):
    """
    Build the bootstrap lockset for the genesis block.

    To avoid a complicated bootstrapping procedure, the genesis signing
    lockset is defined as a lockset with a single vote by any validator.
    """
    vote = VoteBlock(0, 0, genesis.hash)
    vote.sign(privkey)
    lockset = LockSet(num_eligible_votes=1)
    lockset.add(vote)
    assert lockset.has_quorum
    return lockset
def sign(self, privkey):
    """Sign this with a private key.

    Computes a recoverable ECDSA signature over the sha3 of the RLP
    encoding of this object (excluding the signature fields v, r, s)
    and stores the components on self.

    :param privkey: 32-byte binary private key, or 64-char hex string.
    :raises InvalidSignature: if already signed or the key is zero.
    :return: self, for chaining.
    """
    if self.v:
        raise InvalidSignature("already signed")
    if privkey in (0, '', '\x00' * 32):
        raise InvalidSignature("Zero privkey cannot sign")
    # Hash the RLP encoding without the signature fields themselves.
    rawhash = sha3(rlp.encode(self, self.__class__.exclude(['v', 'r', 's'])))
    if len(privkey) == 64:
        # 64 characters means a hex-encoded key; convert to binary.
        privkey = encode_privkey(privkey, 'bin')
    pk = PrivateKey(privkey, raw=True)
    signature = pk.ecdsa_recoverable_serialize(pk.ecdsa_sign_recoverable(rawhash, raw=True))
    # signature[0] is the 64-byte (r, s) blob, signature[1] the recovery id
    # (appended via chr(); this module is Python 2 style).
    signature = signature[0] + chr(signature[1])
    self.v = ord(signature[64]) + 27
    self.r = big_endian_to_int(signature[0:32])
    self.s = big_endian_to_int(signature[32:64])
    # Invalidate any cached sender; it must be re-recovered from v/r/s.
    self._sender = None
    return self
def hash(self):
    "signatures are non deterministic"
    # Because ECDSA signatures are non-deterministic, hash over the
    # recovered sender instead of (v, r, s) so identical messages from
    # the same signer hash identically.
    if self.sender is None:
        raise MissingSignatureError()

    class HashSerializable(rlp.Serializable):
        # Same fields minus the signature, plus the recovered sender.
        fields = [(field, sedes) for field, sedes in self.fields
                  if field not in ('v', 'r', 's')] + [('_sender', binary)]
        _sedes = None
    return sha3(rlp.encode(self, HashSerializable))
def hr(self):
    """compute (height, round)

    Multiple rounds may pass before consensus for a height is reached;
    if everything is good, round is always 0. All contained votes must
    agree on a single (height, round) pair.
    """
    assert len(self), 'no votes, can not determine height'
    pairs = {(vote.height, vote.round) for vote in self.votes}
    assert len(pairs) == 1, len(pairs)
    return pairs.pop()
def has_quorum(self):
    """
    Return the blockhash that gathered more than 2/3 of all eligible
    votes (a quorum), or None when no block reached one.
    """
    assert self.is_valid
    ranked = self.blockhashes()
    if ranked and ranked[0][1] > 2 / 3. * self.num_eligible_votes:
        return ranked[0][0]
def has_noquorum(self):
    """
    True when at most 1/3 of the eligible votes are on the most popular
    block, i.e. no quorum can be reached in this round.
    """
    assert self.is_valid
    ranked = self.blockhashes()
    if not ranked or ranked[0][1] <= 1 / 3. * self.num_eligible_votes:
        assert not self.has_quorum_possible
        return True
def check(self):
    "A valid lockset is in exactly one state: quorum, quorumpossible or noquorum."
    if not self.is_valid:
        return True
    outcomes = (self.has_quorum, self.has_quorum_possible, self.has_noquorum)
    assert 1 == len([o for o in outcomes if o is not None])
    return True
def to_block(self, env, parent=None):
    """Convert the transient block to a :class:`ethereum.blocks.Block`.

    :param env: the chain environment the block belongs to
    :param parent: optional parent block
    """
    return Block(self.header, self.transaction_list, self.uncles, env=env, parent=parent)
def validate_votes(self, validators_H, validators_prevH):
    "Validate both locksets; the set of validators may change between heights."
    assert self.sender

    def _check(lockset, validators):
        # Every eligible vote must map to exactly one known validator.
        if not lockset.num_eligible_votes == len(validators):
            raise InvalidProposalError('lockset num_eligible_votes mismatch')
        for vote in lockset:
            if vote.sender not in validators:
                raise InvalidProposalError('invalid signer')

    if self.round_lockset:
        _check(self.round_lockset, validators_H)
    _check(self.signing_lockset, validators_prevH)
    return True
def validate_votes(self, validators_H):
    "Validate the round lockset; the set of validators may change between heights."
    assert self.sender
    if len(validators_H) != self.round_lockset.num_eligible_votes:
        raise InvalidProposalError('round_lockset num_eligible_votes mismatch')
    for vote in self.round_lockset:
        if vote.sender not in validators_H:
            raise InvalidProposalError('invalid signer')
def transfer(ctx, _to='address', _value='uint256', returns=STATUS):
    """ Standardized Contract API:
    function transfer(address _to, uint256 _value) returns (bool _success)
    """
    log.DEV('In Fungible.transfer')
    sender = ctx.msg_sender
    # Guard clause: refuse when the sender's balance is insufficient.
    if ctx.accounts[sender] < _value:
        return INSUFFICIENTFUNDS
    ctx.accounts[sender] -= _value
    ctx.accounts[_to] += _value
    ctx.Transfer(sender, _to, _value)
    return OK
def transferFrom(ctx, _from='address', _to='address', _value='uint256', returns=STATUS):
    """ Standardized Contract API:
    function transferFrom(address _from, address _to, uint256 _value) returns (bool success)
    """
    allowance = ctx.allowances[_from][ctx.msg_sender]
    # Both the source balance and the caller's allowance must cover the amount.
    if ctx.accounts[_from] < _value or allowance < _value:
        return INSUFFICIENTFUNDS
    ctx.allowances[_from][ctx.msg_sender] -= _value
    ctx.accounts[_from] -= _value
    ctx.accounts[_to] += _value
    ctx.Transfer(_from, _to, _value)
    return OK
def approve(ctx, _spender='address', _value='uint256', returns=STATUS):
    """ Standardized Contract API:
    function approve(address _spender, uint256 _value) returns (bool success)
    """
    # NOTE(review): the standard ERC20 'approve' *sets* the allowance to
    # _value, but this implementation *adds* to it — confirm this is
    # intentional before relying on standard-client behavior.
    ctx.allowances[ctx.msg_sender][_spender] += _value
    ctx.Approval(ctx.msg_sender, _spender, _value)
    return OK
def issue_funds(ctx, amount='uint256', rtgs_hash='bytes32', returns=STATUS):
    "In the IOU fungible the supply is set by the Issuer, who issues funds."
    # Credit the issuer's balance and the per-issuer issued total as the
    # result of a new cash entry, then log the RTGS hash.
    issuer = ctx.msg_sender
    ctx.accounts[issuer] += amount
    ctx.issued_amounts[issuer] += amount
    ctx.Issuance(issuer, rtgs_hash, amount)
    return OK
def last_lock(self):
    "Return the lock from the highest round that has one, or None."
    ordered = list(self.rounds)
    assert len(ordered) < 2 or ordered[0] > ordered[1]  # FIXME REMOVE
    for rnd in self.rounds:  # iterated from highest to lowest round
        lock = self.rounds[rnd].lock
        if lock is not None:
            return lock
def last_voted_blockproposal(self):
    "Return the last block proposal this node voted on, or None."
    for rnd in self.rounds:
        proposal = self.rounds[rnd].proposal
        if isinstance(proposal, BlockProposal):
            lock = self.rounds[rnd].lock
            assert isinstance(lock, Vote)
            # Only counts if the vote locked the same block.
            if proposal.blockhash == lock.blockhash:
                return proposal
def last_valid_lockset(self):
    "Return the highest valid lockset on this height, or None."
    for rnd in self.rounds:
        lockset = self.rounds[rnd].lockset
        if lockset.is_valid:
            return lockset
    return None
def get_timeout(self):
    "Arm a timeout for waiting on a proposal; return the delay or None."
    if self.timeout_time is not None or self.proposal:
        return  # already armed, or nothing left to wait for
    now = self.cm.chainservice.now
    # Timeouts grow exponentially with the round number.
    base = ConsensusManager.round_timeout
    factor = ConsensusManager.round_timeout_factor
    delay = base * factor ** self.round
    self.timeout_time = now + delay
    return delay
def request(self):
    """
    sync the missing blocks between:
        head
        highest height with signing lockset
    we get these locksets by collecting votes on all heights
    """
    missing = self.missing
    self.cm.log('sync.request', missing=len(missing), requested=len(self.requested),
                received=len(self.received))
    # Bail out early while a previous request is still outstanding.
    if self.requested:
        self.cm.log('waiting for requested')
        return
    # Don't request more than the receive queue can absorb.
    if len(self.received) + self.max_getproposals_count >= self.max_queued:
        self.cm.log('queue is full')
        return
    if not missing:
        self.cm.log('insync')
        return
    if self.last_active_protocol is None:  # FIXME, check if it is active
        self.cm.log('no active protocol', last_active_protocol=self.last_active_protocol)
        return
    self.cm.log('collecting')
    # Collect up to max_getproposals_count heights that were neither
    # received nor already requested.
    blocknumbers = []
    for h in missing:
        if h not in self.received and h not in self.requested:
            blocknumbers.append(h)
            self.requested.add(h)
            if len(blocknumbers) == self.max_getproposals_count:
                break
    self.cm.log('collected', num=len(blocknumbers))
    if not blocknumbers:
        return
    self.cm.log('requesting', num=len(blocknumbers),
                requesting_range=(blocknumbers[0], blocknumbers[-1]))
    self.last_active_protocol.send_getblockproposals(*blocknumbers)
    # setup alarm: re-trigger via on_alarm if no answer arrives in time.
    self.cm.chainservice.setup_alarm(self.timeout, self.on_alarm, blocknumbers)
def on_proposal(self, proposal, proto):
    "called to inform about synced peers"
    assert isinstance(proto, HDCProtocol)
    assert isinstance(proposal, Proposal)
    # A proposal at or above our height proves the peer is up to date;
    # remember it as the protocol to direct future sync requests to.
    if proposal.height >= self.cm.height:
        assert proposal.lockset.is_valid
        self.last_active_protocol = proto
def wait_next_block_factory(app, timeout=None):
    """Create a `wait_next_block` function that blocks up to `timeout`
    seconds (`None` = indefinitely) until a new block appears.

    :param app: the app-instance the function should work for
    :param timeout: timeout in seconds
    """
    chain = app.services.chain
    new_block_evt = gevent.event.Event()

    def _on_new_block(app):
        # Fired by the chain service whenever a new head is set.
        log.DEV('new block mined')
        new_block_evt.set()

    chain.on_new_head_cbs.append(_on_new_block)

    def wait_next_block():
        start_number = chain.chain.head.number
        chain.consensus_manager.log('waiting for new block', block=start_number)
        new_block_evt.wait(timeout)
        new_block_evt.clear()
        if chain.chain.head.number > start_number:
            chain.consensus_manager.log('new block event', block=chain.chain.head.number)
        elif chain.chain.head.number == start_number:
            chain.consensus_manager.log('wait_next_block timed out', block=start_number)

    return wait_next_block
def mk_privkeys(num):
    "make privkeys that support coloring, see utils.cstr"
    assert num <= num_colors
    privkeys = []
    for color in range(num):
        # Brute-force a key whose address maps onto this color index.
        nonce = 0
        while True:
            key = sha3(str(nonce))
            addr_num = big_endian_to_int(privtoaddr(key))
            if addr_num % num_colors == color:
                break
            nonce += 1
        privkeys.append(key)
    return privkeys
def delay(self, sender, receiver, packet, add_delay=0):
    """
    Estimate the transfer delay for `packet` between two peers.

    Bandwidths are inaccurate, as parallel transfers are not accounted
    for here.
    """
    # Effective bandwidth is limited by the slower of the two ends.
    bandwidth = min(sender.ul_bandwidth, receiver.dl_bandwidth)
    total = sender.base_latency + receiver.base_latency
    total += len(packet) / bandwidth
    total += add_delay
    return total
def deliver(self, sender, receiver, packet):
    "deliver on edge of timeout_window"
    timeout = ConsensusManager.round_timeout
    assert timeout > 0
    # Parenthesized call so the line is valid Python 2 *and* 3
    # (the original bare print statement broke under Python 3).
    print("in slow transport deliver")
    super(SlowTransport, self).deliver(sender, receiver, packet, add_delay=timeout)
def abi_encode_args(method, args):
    "encode args for method: method_id|data"
    # Python 2: im_class is the class an unbound method belongs to.
    assert issubclass(method.im_class, NativeABIContract), method.im_class
    m_abi = method.im_class._get_method_abi(method)
    # 4-byte zero-padded method id followed by the ABI-encoded arguments.
    return zpad(encode_int(m_abi['id']), 4) + abi.encode_abi(m_abi['arg_types'], args)
def chain_nac_proxy(chain, sender, contract_address, value=0):
    "create an object which acts as a proxy for the contract on the chain"
    # Python 2: im_self on the registered bound method yields the class.
    klass = registry[contract_address].im_self
    assert issubclass(klass, NativeABIContract)

    def mk_method(method):
        # Each proxy method ABI-encodes its args and performs a read-only
        # test_call against the current head candidate block.
        def m(s, *args):
            data = abi_encode_args(method, args)
            block = chain.head_candidate
            output = test_call(block, sender, contract_address, data)
            if output is not None:
                return abi_decode_return_vals(method, output)
        return m

    class cproxy(object):
        pass
    # Attach one proxy method per ABI method of the contract
    # (func_name is the Python 2 spelling of __name__).
    for m in klass._abi_methods():
        setattr(cproxy, m.__func__.func_name, mk_method(m))
    return cproxy()
def address_to_native_contract_class(self, address):
    """Map an instance address to its registered native contract handler.

    Returns the contract's `_on_msg` callable; use `.im_self` to obtain
    the class itself.
    """
    assert isinstance(address, bytes) and len(address) == 20
    assert self.is_instance_address(address)
    # Instances share the native prefix; the last 4 bytes select the
    # concrete contract.
    key = self.native_contract_address_prefix + address[-4:]
    return self.native_contracts[key]
def register(self, contract):
    "Register a NativeContract class under its fixed address."
    assert issubclass(contract, NativeContractBase)
    assert len(contract.address) == 20
    assert contract.address.startswith(self.native_contract_address_prefix)
    if self.native_contracts.get(contract.address) == contract._on_msg:
        # Idempotent: the exact same handler is already registered.
        log.debug("already registered", contract=contract, address=contract.address)
        return
    assert contract.address not in self.native_contracts, 'address already taken'
    self.native_contracts[contract.address] = contract._on_msg
    log.debug("registered native contract", contract=contract, address=contract.address)
def validators_from_config(validators):
    """Consolidate a (potentially hex-encoded) list of validators into
    a list of binary address representations.
    """
    # 40 hex characters encode a 20-byte address; anything else is
    # assumed already binary and passed through unchanged.
    return [v.decode('hex') if len(v) == 40 else v for v in validators]
def update(self, data):
    "Record `data` in the rolling filter; return True if it was unknown."
    if data in self.filter:
        # Known item: rotate the oldest entry to the back to keep the
        # filter fresh, and report the duplicate.
        self.filter.append(self.filter.pop(0))
        return False
    self.filter.append(data)
    if len(self.filter) > self.max_items:
        self.filter.pop(0)  # evict the oldest entry
    return True
def add_transaction(self, tx, origin=None, force_broadcast=False):
    """
    Add a transaction to the head candidate under the proposal lock.

    Warning:
        Locking proposal_lock may block incoming events which are necessary to unlock!
        I.e. votes / blocks!
        Take care!
    """
    self.consensus_manager.log(
        'add_transaction', blk=self.chain.head_candidate, lock=self.proposal_lock)
    log.debug('add_transaction', lock=self.proposal_lock)
    # Remember which block the lock belongs to; a new head may arrive
    # while we are holding it.
    block = self.proposal_lock.block
    self.proposal_lock.acquire()
    self.consensus_manager.log('add_transaction acquired lock', lock=self.proposal_lock)
    assert not hasattr(self.chain.head_candidate, 'should_be_locked')
    success = super(ChainService, self).add_transaction(tx, origin, force_broadcast)
    # Can already be unlocked if we moved to a new block meanwhile; only
    # release for the block we acquired it for.
    if self.proposal_lock.is_locked():
        self.proposal_lock.release(if_block=block)
    log.debug('added transaction', num_txs=self.chain.head_candidate.num_transactions())
    return success
def on_receive_transactions(self, proto, transactions):
    "receives rlp.decoded serialized"
    log.debug('----------------------------------')
    log.debug('remote_transactions_received', count=len(transactions), remote_id=proto)

    def _add_txs():
        for tx in transactions:
            self.add_transaction(tx, origin=proto)
    # add_transaction may block on the proposal lock, so run it in a
    # separate greenlet instead of the protocol's receive path.
    gevent.spawn(_add_txs)
def img_from_vgg(x):
    '''Decondition an image from the VGG16 model.

    Moves channels last, restores the BGR channel means and converts
    back to RGB channel order.
    '''
    x = x.transpose((1, 2, 0))  # (C, H, W) -> (H, W, C)
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    return x[:, :, ::-1]  # BGR -> RGB
def img_to_vgg(x):
    '''Condition an image for use with the VGG16 model.

    Converts RGB to BGR, subtracts the VGG channel means and moves
    channels first.
    '''
    x = x[:, :, ::-1]  # RGB -> BGR
    x[:, :, 0] -= 103.939
    x[:, :, 1] -= 116.779
    x[:, :, 2] -= 123.68
    return x.transpose((2, 0, 1))  # (H, W, C) -> (C, H, W)
def get_f_layer(self, layer_name):
    '''Create a function for the response of a layer.'''
    inputs = [self.net_input]
    if self.learning_phase is not None:
        # NOTE(review): sibling method get_features appends
        # self.learning_phase (the tensor) rather than K.learning_phase(),
        # and uses self.net.input rather than self.net_input — confirm
        # which of the two is intended; they look inconsistent.
        inputs.append(K.learning_phase())
    return K.function(inputs, [self.get_layer_output(layer_name)])
def get_layer_output(self, name):
    '''Return (and memoize) the symbolic output tensor of a layer.'''
    if name not in self._f_layer_outputs:
        self._f_layer_outputs[name] = self.net.get_layer(name).output
    return self._f_layer_outputs[name]
def get_features(self, x, layers):
    '''Evaluate the outputs of `layers` for input `x`.

    Returns a dict mapping layer name -> output, or None when no
    layers are requested.
    '''
    if not layers:
        return None
    inputs = [self.net.input]
    if self.learning_phase is not None:
        inputs.append(self.learning_phase)
    outputs = [self.get_layer_output(name) for name in layers]
    f = K.function(inputs, outputs)
    return dict(zip(layers, f([x])))
def create_key_file(path):
    """
    Creates a new encryption key in the path provided and sets the file
    permissions. Setting the file permissions currently does not work
    on Windows platforms because of the differences in how file
    permissions are read and modified.
    """
    # Seed material: OS entropy plus the current time.
    # NOTE(review): under Python 3, formatting os.urandom() bytes embeds
    # their repr ("b'...'") — presumably still acceptable entropy, but
    # confirm this matches what generate_key expects.
    iv = "{}{}".format(os.urandom(32), time.time())
    new_key = generate_key(ensure_bytes(iv))
    with open(path, "wb") as f:
        f.write(base64.b64encode(new_key))
    # Owner read-only; ineffective on Windows (see docstring).
    os.chmod(path, 0o400)
def cleanup(self):
    """
    Drops any existing work tables, as returned by
    :meth:`~giraffez.load.TeradataBulkLoad.tables`.

    Tables are dropped concurrently, one thread per table.

    :raises `giraffez.TeradataPTError`: if a Teradata error ocurred
    """
    workers = []
    existing = [t for t in self.tables if self.mload.exists(t)]
    for table in existing:
        log.info("BulkLoad", "Dropping table '{}'...".format(table))
        worker = threading.Thread(target=self.mload.drop_table, args=(table,))
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
def finish(self):
    """
    Finishes the load job. Called automatically when the connection closes.

    :return: The exit code returned when applying rows to the table
    :raises `giraffez.TeradataPTError`: if the job ends with a nonzero
        return code
    """
    if self.finished:
        # Idempotent: a second call just reports the prior result.
        return self.exit_code
    checkpoint_status = self.checkpoint()
    self.exit_code = self._exit_code()
    if self.exit_code != 0:
        raise TeradataPTError("BulkLoad job finished with return code '{}'".format(self.exit_code))
    # TODO(chris): should this happen every time?
    if self.applied_count > 0:
        self._end_acquisition()
        self._apply_rows()
        # Re-check after applying: the apply phase can fail independently.
        self.exit_code = self._exit_code()
        if self.exit_code != 0:
            raise TeradataPTError("BulkLoad job finished with return code '{}'".format(self.exit_code))
    self.finished = True
    return self.exit_code
def from_file(self, filename, table=None, delimiter='|', null='NULL',
        panic=True, quotechar='"', parse_dates=False):
    """
    Load from a file into the target table, handling each step of the
    load process.

    Can load from text files, and properly formatted giraffez archive
    files. In both cases, if Gzip compression is detected the file will be
    decompressed while reading and handled appropriately. The encoding is
    determined automatically by the contents of the file.

    It is not necessary to set the columns in use prior to loading from a file.
    In the case of a text file, the header is used to determine column names
    and their order. Valid delimiters include '|', ',', and '\\t' (tab). When
    loading an archive file, the column information is decoded alongside the data.

    :param str filename: The location of the file to be loaded
    :param str table: The name of the target table, if it was not specified
        to the constructor for the isntance
    :param str null: The string that indicates a null value in the rows being
        inserted from a file. Defaults to 'NULL'
    :param str delimiter: When loading a file, indicates that fields are
        separated by this delimiter. Defaults to :code:`None`, which causes the
        delimiter to be determined from the header of the file. In most
        cases, this behavior is sufficient
    :param str quotechar: The character used to quote fields containing special characters,
        like the delimiter.
    :param bool panic: If :code:`True`, when an error is encountered it will be
        raised. Otherwise, the error will be logged and :code:`self.error_count`
        is incremented.
    :return: The output of the call to
        :meth:`~giraffez.load.TeradataBulkLoad.finish`
    :raises `giraffez.errors.GiraffeError`: if table was not set and :code:`table`
        is :code:`None`, or if a Teradata error ocurred while retrieving table info.
    :raises `giraffez.errors.GiraffeEncodeError`: if :code:`panic` is :code:`True` and there
        are format errors in the row values.
    """
    if not self.table:
        if not table:
            raise GiraffeError("Table must be set or specified to load a file.")
        self.table = table
    if not isinstance(null, basestring):
        raise GiraffeError("Expected 'null' to be str, received {}".format(type(null)))
    with Reader(filename, delimiter=delimiter, quotechar=quotechar) as f:
        if not isinstance(f.delimiter, basestring):
            raise GiraffeError("Expected 'delimiter' to be str, received {}".format(type(delimiter)))
        # Column names and order come from the file header (or the
        # archive metadata).
        self.columns = f.header
        if isinstance(f, ArchiveFileReader):
            # Archive rows are already encoded; pass them through as-is.
            self.mload.set_encoding(ROW_ENCODING_RAW)
            self.preprocessor = lambda s: s
        if parse_dates:
            self.preprocessor = DateHandler(self.columns)
        self._initiate()
        self.mload.set_null(null)
        self.mload.set_delimiter(delimiter)
        i = 0
        for i, line in enumerate(f, 1):
            self.put(line, panic=panic)
            # Periodic checkpoint so a mid-load failure surfaces early.
            if i % self.checkpoint_interval == 1:
                log.info("\rBulkLoad", "Processed {} rows".format(i), console=True)
                checkpoint_status = self.checkpoint()
                self.exit_code = self._exit_code()
                if self.exit_code != 0:
                    return self.exit_code
        log.info("\rBulkLoad", "Processed {} rows".format(i))
    return self.finish()
def put(self, items, panic=True):
    """
    Load a single row into the target table.

    :param list items: A list of values in the row corresponding to the
        fields specified by :code:`self.columns`
    :param bool panic: If :code:`True`, when an error is encountered it will be
        raised. Otherwise, the error will be logged and :code:`self.error_count`
        is incremented.
    :raises `giraffez.errors.GiraffeEncodeError`: if :code:`panic` is :code:`True` and there
        are format errors in the row values.
    :raises `giraffez.errors.GiraffeError`: if table name is not set.
    :raises `giraffez.TeradataPTError`: if there is a problem
        connecting to Teradata.
    """
    if not self.initiated:
        self._initiate()
    try:
        self.mload.put_row(self.preprocessor(items))
        self.applied_count += 1
    except (TeradataPTError, EncoderError) as error:
        self.error_count += 1
        if panic:
            raise error
        log.info("BulkLoad", error)
def release(self):
    """
    Attempt release of target mload table.

    :raises `giraffez.errors.GiraffeError`: if the target table has
        not been set (by the constructor, :code:`TeradataBulkLoad.table`,
        or :meth:`~giraffez.load.TeradataBulkLoad.from_file`).
    """
    if self.table is None:
        raise GiraffeError("Cannot release. Target table has not been set.")
    log.info("BulkLoad", "Attempting release for table {}".format(self.table))
    self.mload.release(self.table)
def tables(self):
    """
    The names of the work tables used for loading.

    :return: A list of four tables, each the name of the target table
        with the added suffixes, "_wt", "_log", "_e1", and "_e2"
    :raises `giraffez.errors.GiraffeError`: if the target table has
        not been set.
    """
    if self.table is None:
        raise GiraffeError("Target table has not been set.")
    return ["{}{}".format(self.table, suffix)
            for suffix in ("_wt", "_log", "_e1", "_e2")]
def fix_compile(remove_flags):
    """
    Monkey-patch compiler to allow for removal of default compiler flags.

    Replaces distutils' CCompiler.compile with a variant that strips
    every flag in `remove_flags` from the default command line.
    """
    import distutils.ccompiler

    def _fix_compile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0,
            extra_preargs=None, extra_postargs=None, depends=None):
        # Drop the unwanted default flags before compiling.
        for flag in remove_flags:
            if flag in self.compiler_so:
                self.compiler_so.remove(flag)
        # The remainder mirrors the stock CCompiler.compile implementation.
        macros, objects, extra_postargs, pp_opts, build = self._setup_compile(output_dir, macros,
            include_dirs, sources, depends, extra_postargs)
        cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
        for obj in objects:
            try:
                src, ext = build[obj]
            except KeyError:
                # Object with no corresponding source; nothing to compile.
                continue
            self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
        return objects

    distutils.ccompiler.CCompiler.compile = _fix_compile
def find_teradata_home():
    """
    Attempts to find the Teradata install directory with the defaults
    for a given platform. Should always return `None` when the defaults
    are not present and the TERADATA_HOME environment variable wasn't
    explicitly set to the correct install location.
    """
    system = platform.system()
    if system == 'Windows':
        # The default Windows install path is split between the 32-bit
        # and 64-bit program directories; the installed TTU architecture
        # must match the Python architecture in use.
        if is_64bit():
            root = "C:/Program Files/Teradata/Client"
        else:
            root = "C:/Program Files (x86)/Teradata/Client"
    elif system == 'Darwin':
        root = "/Library/Application Support/teradata/client"
    else:
        # Linux default; also used as a last-effort fallback on any
        # other platform.
        root = "/opt/teradata/client"
    return latest_teradata_version(root)
def get(self, key):
    """
    Retrieve the decrypted value of a key in a giraffez
    configuration file.

    :param str key: The key used to lookup the encrypted value
    """
    # Keys outside the 'connections' namespace live under 'secure.'.
    if not key.startswith(("secure.", "connections.")):
        key = "secure.{0}".format(key)
    value = self.config.get_value(key)
    # Only string values are meaningful here; anything else maps to None.
    if not isinstance(value, basestring):
        value = None
    return value
def set(self, key, value):
    """
    Set a decrypted value by key in a giraffez configuration file.

    :param str key: The key used to lookup the encrypted value
    :param value: Value to set at the given key; may be any value that
        is YAML serializeable.
    """
    key = key if key.startswith("secure.") else "secure.{0}".format(key)
    self.config.set_value(key, value)
    self.config.write()
def do_table(self, line):
    """Display results in table format"""
    choice = line.strip().lower() if line else ""
    if choice == "on":
        log.write("Table ON")
        self.table_output = True
        return
    if choice == "off":
        log.write("Table OFF")
        self.table_output = False
        return
    # No (recognized) argument: report the current setting.
    log.write("Table output: {}".format("ON" if self.table_output else "OFF"))
def to_dict(self):
    """
    Sets the current encoder output to Python `dict` and returns
    the cursor. This makes it possible to set the output encoding
    and iterate over the results:

    .. code-block:: python

        with giraffez.Cmd() as cmd:
            for row in cmd.execute(query).to_dict():
                print(row)

    Or can be passed as a parameter to an object that consumes an iterator:

    .. code-block:: python

        result = cmd.execute(query)
        list(result.to_dict())
    """
    self.conn.set_encoding(ROW_ENCODING_DICT)
    # Rows arrive pre-shaped as dicts; pass them through unchanged.
    self.processor = lambda columns, row: row
    return self
def to_list(self):
    """
    Set the current encoder output to :class:`giraffez.Row` objects
    and returns the cursor. This is the default value, so it is only
    needed after the encoder settings have been changed.
    """
    self.conn.set_encoding(ROW_ENCODING_LIST)
    # Wrap each raw row with its column metadata.
    self.processor = lambda columns, row: Row(columns, row)
    return self
def execute(self, command, coerce_floats=True, parse_dates=False, header=False, sanitize=True,
        silent=False, panic=None, multi_statement=False, prepare_only=False):
    """
    Execute commands using CLIv2.

    :param str command: The SQL command to be executed
    :param bool coerce_floats: Coerce Teradata decimal types into Python floats
    :param bool parse_dates: Parses Teradata datetime types into Python datetimes
    :param bool header: Include row header
    :param bool sanitize: Whether or not to call :func:`~giraffez.sql.prepare_statement`
        on the command
    :param bool silent: Silence console logging (within this function only)
    :param bool panic: If :code:`True`, when an error is encountered it will be
        raised.
    :param bool multi_statement: Execute in multi-statement mode
    :param bool prepare_only: Only prepare the command (no results)
    :return: a cursor over the results of each statement in the command
    :rtype: :class:`~giraffez.cmd.Cursor`
    :raises `giraffez.TeradataError`: if the query is invalid
    :raises `giraffez.errors.GiraffeError`: if the return data could not be decoded
    """
    if panic is None:
        # Fall back to the connection-level panic setting.
        panic = self.panic
    self.options("panic", panic)
    self.options("multi-statement mode", multi_statement, 3)
    if isfile(command):
        # A file path was given: read the SQL from that file.
        self.options("file", command, 2)
        with open(command, 'r') as f:
            command = f.read()
    else:
        # Only log the full query at VERBOSE level; truncate otherwise.
        if log.level >= VERBOSE:
            self.options("query", command, 2)
        else:
            self.options("query", truncate(command), 2)
    if not silent and not self.silent:
        log.info("Command", "Executing ...")
        log.info(self.options)
    if sanitize:
        command = prepare_statement(command)  # accounts for comments and newlines
        log.debug("Debug[2]", "Command (sanitized): {!r}".format(command))
    self.cmd.set_encoding(ENCODER_SETTINGS_DEFAULT)
    return Cursor(self.cmd, command, multi_statement=multi_statement, header=header,
        prepare_only=prepare_only, coerce_floats=coerce_floats, parse_dates=parse_dates,
        panic=panic)
def exists(self, object_name, silent=False):
    """
    Check that object (table or view) :code:`object_name` exists, by executing a :code:`show table object_name` query,
    followed by a :code:`show view object_name` query if :code:`object_name` is not a table.

    :param str object_name: The name of the object to check for existence.
    :param bool silent: Silence console logging (within this function only)
    :return: :code:`True` if the object exists, :code:`False` otherwise.
    :rtype: bool
    """
    try:
        self.execute("show table {}".format(object_name), silent=silent)
        return True
    except TeradataError as error:
        # Any error other than "object is not a table" (e.g. it does not
        # exist at all) answers the question immediately.
        if error.code != TD_ERROR_OBJECT_NOT_TABLE:
            return False
    try:
        self.execute("show view {}".format(object_name), silent=silent)
        return True
    except TeradataError as error:
        # NOTE(review): an unexpected error here yields True — presumably
        # because "not a view"/"does not exist" are the only not-found
        # codes and anything else (e.g. a macro) implies the object does
        # exist; confirm this inversion is intentional.
        if error.code not in [TD_ERROR_OBJECT_NOT_VIEW, TD_ERROR_OBJECT_NOT_EXIST]:
            return True
        return False
def fetch_columns(self, table_name, silent=False):
    """
    Return the column information for :code:`table_name` by preparing (without
    executing) a :code:`select top 1 * from table_name` statement.

    :param str table_name: The fully-qualified name of the table to retrieve schema for
    :param bool silent: Silence console logging (within this function only)
    :return: the columns of the table
    :rtype: :class:`~giraffez.types.Columns`
    """
    query = "select top 1 * from {}".format(table_name)
    # prepare_only=True asks the command for metadata without running the query
    result = self.execute(query, silent=silent, prepare_only=True)
    return result.columns
def insert(self, table_name, rows, fields=None, delimiter=None, null='NULL', parse_dates=False, quotechar='"'):
    """
    Load a text file into the specified :code:`table_name`, or insert Python
    :code:`list` rows into the specified :code:`table_name`.

    :param str table_name: The name of the destination table
    :param list/str rows: A list of rows **or** the name of an input file. Each row
        must be a :code:`list` of field values.
    :param list fields: The names of the target fields, in the order that
        the data will be presented (defaults to :code:`None` for all columns
        in the table).
    :param str delimiter: The delimiter used by the input file (or :code:`None`
        to infer it from the header).
    :param str null: The string used to indicate nulled values in the
        file (defaults to :code:`'NULL'`).
    :param str quotechar: The character used to quote fields containing special
        characters, like the delimiter.
    :param bool parse_dates: If :code:`True`, attempts to coerce date fields
        into a standard format (defaults to :code:`False`).
    :raises `giraffez.errors.GiraffeEncodeError`: if the number of values in a row does not match
        the length of :code:`fields`
    :raises `giraffez.errors.GiraffeError`: if :code:`panic` is set and the insert statement
        caused an error.
    :return: A dictionary containing counts of applied rows and errors
    :rtype: :class:`dict`

    For most insertions, this will be faster and produce less strain on
    Teradata than using :class:`~giraffez.load.TeradataBulkLoad`.
    Requires that any input file be a properly delimited text file, with a
    header that corresponds to the target fields for insertion.
    """
    # Fix: os.path.isfile() raises TypeError (it only swallows OSError and
    # ValueError) when handed a list, which crashed the documented
    # list-of-rows form of this call.  Only treat `rows` as a filename when
    # it actually is a string naming an existing file.
    if not (isinstance(rows, str) and isfile(rows)):
        return self._insert(table_name, rows, fields, parse_dates)
    with Reader(rows, delimiter=delimiter, quotechar=quotechar) as f:
        # Replace the configured null marker with actual None values per row.
        preprocessor = null_handler(null)
        rows = (preprocessor(l) for l in f)
        if isinstance(f, CSVReader):
            self.options("delimiter", unescape_string(f.reader.dialect.delimiter), 1)
            self.options("quote char", f.reader.dialect.quotechar, 2)
        elif isinstance(f, JSONReader):
            self.options("encoding", "json", 1)
        # Field names come from the file header when reading from a file.
        return self._insert(table_name, rows, f.header, parse_dates)
def connections(self):
    """
    Return a :code:`dict` of connections from the configuration settings.

    :raises `giraffez.errors.ConfigurationError`: if connections are not present
    """
    try:
        # EAFP: a missing key means the config file was never initialized
        # with a "connections" section.
        return self.settings["connections"]
    except KeyError:
        raise ConfigurationError("Could not retrieve connections from config file '{}'.".format(self._config_file))
def get_connection(self, dsn=None):
    """
    Retrieve a connection by the given :code:`dsn`, or the default connection.

    :param str dsn: The name of the connection to retrieve. Defaults to :code:`None`,
        which retrieves the default connection.
    :return: A dict of connection settings, with the resolved connection name
        stored under the :code:`'name'` key
    :raises `giraffez.errors.ConfigurationError`: if :code:`dsn` does not exist, or if
        :code:`dsn` is :code:`None` and a default is not set
    :raises `giraffez.errors.ConnectionLock`: if the corresponding connection is currently locked
    """
    if dsn is None:
        dsn = self.connections.get("default", None)
        if dsn is None:
            raise ConfigurationError("No default DSN set")
    connection = self.connections.get(dsn, None)
    if connection is None:
        raise ConfigurationError("DSN '{}' does not exist".format(dsn))
    # Decrypt a copy so the stored settings are never mutated in place.
    connection = self.decrypt(connection.copy())
    # A lock counter above 1 marks the connection as locked (see
    # lock_connection, which raises once the counter reaches 2).
    if connection.get('lock', 0) > 1:
        raise ConnectionLock(dsn)
    connection['name'] = dsn
    return connection
def get_value(self, key, default={}, nested=True, decrypt=True):
    """
    Retrieve a value from the configuration based on its key. The key
    may be nested, with levels joined by '.'.

    :param str key: A path to the value, with nested levels joined by '.'
    :param default: Value to return if the key does not exist (defaults to :code:`dict()`)
    :param bool nested: If :code:`True`, treat :code:`key` as a '.'-joined path into
        nested levels of the settings; otherwise look :code:`key` up as a single
        flat key.
    :param bool decrypt: If :code:`True`, decrypt an encrypted value before returning
        (if encrypted). Defaults to :code:`True`.
    """
    # NOTE(review): the mutable default `{}` is shared across calls; it is
    # never mutated here, but callers mutating the returned default would
    # leak state.  Kept for interface compatibility.
    # Normalize the key: drop leading whitespace and a single trailing '.'.
    key = key.lstrip()
    if key.endswith("."):
        key = key[:-1]
    if not nested:
        # Flat lookups historically bypass decryption; preserved as-is.
        return self.settings.get(key, default)
    path = key.split(".")
    curr = self.settings
    for p in path[:-1]:
        curr = curr.get(p, {})
    try:
        value = curr[path[-1]]
    except KeyError:
        return default
    # Fix: the `decrypt` flag was previously ignored and nested values were
    # always decrypted; honor it so callers can fetch the raw stored value.
    if decrypt:
        value = self.decrypt(value, path)
    return value
def list_value(self, decrypt=False):
    """
    Serialize the configuration settings to a YAML string. Depending on
    the structure of the settings, the output may contain nested mappings.

    :param bool decrypt: If :code:`True`, decrypt the contents before serializing.
    :return: the configuration settings rendered as YAML text
    :rtype: str
    """
    settings = self.decrypt(self.settings) if decrypt else self.settings
    # Block style (default_flow_style=False) keeps the output human-readable.
    return yaml.dump(settings, default_flow_style=False)
def lock_connection(cls, conf, dsn, key=None):
    """
    A class method to lock a connection (given by :code:`dsn`) in the specified
    configuration file. Automatically opens the file and writes to it before
    closing.

    :param str conf: The configuration file to modify
    :param str dsn: The name of the connection to lock (:code:`None` locks the
        default connection)
    :param key: Encryption key (file) used when opening the configuration
    :raises `giraffez.errors.ConfigurationError`: if the connection does not exist
    :raises `giraffez.errors.ConnectionLock`: if the connection is already locked
    """
    with Config(conf, "w", key) as c:
        connection = c.get_connection(dsn)
        if not connection:
            raise ConfigurationError("Unable to lock connection")
        if dsn is None:
            # get_connection(None) resolved the default; resolve it here too
            # so the lock is written under the concrete connection name.
            dsn = c.settings["connections"]["default"]
        # Fix: the original assigned this path to an unused local (`value`)
        # and then rebuilt the identical string twice; build it once.
        lock_key = "connections.{}.lock".format(dsn)
        lock = c.get_value(lock_key, default=0)
        if lock >= 2:
            raise ConnectionLock(dsn)
        c.set_value(lock_key, lock + 1)
        c.write()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.