import os
# Absolute path of the directory containing this project file
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
if __name__ == '__main__':
print(BASE_PATH)
|
"""Plot page shell and instantiate word-plotting class."""
import numpy as np
import xmlPlotWords
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
class xmlPlotPage(object):
def __init__(self, data, page_line_data, manual_firms):
self.lines_dict = page_line_data[0]
self.words_excluded = page_line_data[1]
self.page_data = data
if manual_firms:
self.manual_firms = manual_firms
self.page_plot = self.plot_page()
self.company_titles_words = self.page_company_names()
self.plot_lines()
self.plot_words()
def plot_page(self):
"""Initialise canvas on which to plot word locations."""
page_figure = plt.figure(figsize=(8.5, 11), dpi=150)
page_plot = page_figure.add_subplot(111)
page_figure.tight_layout(pad=5.5)
page_plot.set_xlim([0, 1.01])
page_plot.set_ylim([0, 1.01])
page_plot.set_xticks(np.arange(0.0, 1.1, 0.1))
page_plot.set_yticks(np.arange(0.0, 1.1, 0.1))
page_plot.set_title(self.page_data.page, fontsize=13, fontweight='bold', y=1.025)
page_plot.set_xlabel('sheet width (norm. 0-1)')
page_plot.set_ylabel('sheet height (norm. 0-1)')
page_plot.xaxis.set_label_coords(.5, -.05)
page_plot.yaxis.set_label_coords(-.11, .5)
page_plot.add_patch(Rectangle((self.page_data.page_left, self.page_data.page_bottom),
(self.page_data.page_right - self.page_data.page_left),
(self.page_data.page_top - self.page_data.page_bottom),
fill=None, edgecolor='b', alpha=1))
return page_plot
def plot_lines(self):
"""Plot blue lines ID'ing lines of text"""
for index, line_dict in self.lines_dict.items():
for line_index in line_dict.keys():
if len(self.lines_dict) == 2:
if index == 0:
xmax_value = self.page_data.center - .005
xmin_value = 0
elif index == 1:
xmax_value = 1
xmin_value = self.page_data.center - .005
elif len(self.lines_dict) > 2:
if index == 0:
xmax_value = self.page_data.third_first - .005
xmin_value = 0
elif index == 1:
xmax_value = self.page_data.third_second - .005
xmin_value = self.page_data.third_first - .005
elif index == 2:
xmax_value = 1
xmin_value = self.page_data.third_second - .005
else:
xmin_value = 0
xmax_value = 1
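# Note: axhline's xmin/xmax are axes-fraction coordinates (0 = left edge,
# 1 = right edge); since this axis spans roughly 0-1 they coincide with the
# normalised page coordinates used above.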
self.page_plot.axhline(y=line_index, xmin=xmin_value, xmax=xmax_value)
def page_company_names(self):
"""Define list of firm names on page."""
company_titles_words = []
if hasattr(self, 'manual_firms'):
for column in self.manual_firms.values():
column_dict = dict(column)
for company_word_list in column_dict.values():
for word in company_word_list:
company_titles_words.append(word)
return company_titles_words
def plot_words(self):
"""Trigger xmlPlotWords class."""
for word in self.page_data.word_data:
colour = 'k'
if (word[2] > self.page_data.page_left and word[4] < self.page_data.page_right and
word[1] > self.page_data.page_bottom and word[3] < self.page_data.page_top):
if word in self.company_titles_words:
colour = 'm'
xmlPlotWords.xmlPlotWords(self.page_plot, word, colour)
|
import os
from ..custom.video_dataset_adapter import VideoDatasetAdapter
from .skoltech_cameras_calibration_factory import SkoltechCamerasCalibrationFactory
from ..data_transform_manager import DataTransformManager
from ..unsupervised_depth_data_module import UnsupervisedDepthDataModule
from ..video_dataset import VideoDataset
class SkoltechDataModuleFactory():
def __init__(self, directory="datasets"):
self._left_directory = os.path.join(directory, 'sequences', '01', 'image_2')
self._right_directory = os.path.join(directory, 'sequences', '01', 'image_3')
def make_dataset_manager(self,
final_size,
transform_manager_parameters,
batch_size=64,
split=(80, 10, 10),
num_workers=4,
device="cpu"):
left_dataset = VideoDatasetAdapter(self._left_directory)
right_dataset = VideoDatasetAdapter(self._right_directory)
original_image_size = left_dataset.get_image_size()
transform_manager = DataTransformManager(
original_image_size,
final_size,
transform_manager_parameters
)
dataset = VideoDataset(
left_dataset,
right_dataset
)
cameras_calibration = SkoltechCamerasCalibrationFactory().make_cameras_calibration(original_image_size,
final_size, device)
return UnsupervisedDepthDataModule(dataset,
transform_manager,
cameras_calibration,
batch_size,
num_workers=num_workers,
split=split)
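# A minimal usage sketch; the final_size and transform_manager_parameters
# values below are assumptions for illustration, not the project's real config:
#
#   factory = SkoltechDataModuleFactory(directory="datasets")
#   data_module = factory.make_dataset_manager(
#       final_size=(128, 384),
#       transform_manager_parameters={"filters": True},
#       batch_size=8,
#       split=(80, 10, 10),
#       num_workers=2,
#       device="cpu",
#   )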
|
"""
Python code that will be transpiled to JS to implement the client side.
"""
from pscript.stubs import window, document, undefined, Math, Date  # JS globals
from pscript.stubs import data_per_db, text_color  # made available by the server page
panels = []
# %% Button callbacks
def toggle_utc():
info = get_hash_info()
if info.get("utc", False):
info.pop("utc")
else:
info["utc"] = True
return refresh(None, info)
def toggle_columns():
info = get_hash_info()
columns = info.get("columns", 0)
if not columns:
if window.document.body.clientWidth >= 1200:
info["columns"] = 2
else:
info["columns"] = 1
else:
info.pop("columns")
return refresh(None, info)
def update_range(action=""):
ndays = window.ndays
daysago = window.daysago
if action == "zoomout":
if ndays < 4:
ndays += 1
elif ndays < 10:
ndays += 2
elif ndays < 30:
ndays += 5
else:
ndays += 10
elif action == "zoomin":
if ndays <= 4:
ndays -= 1
elif ndays <= 10:
ndays -= 2
elif ndays <= 30:
ndays -= 5
else:
ndays -= 10
ndays = max(1, ndays)
elif action == "older":
daysago += ndays
elif action == "newer":
daysago -= ndays
info = get_query_info()
info["ndays"] = ndays
if daysago > 0:
info["daysago"] = daysago
else:
info.pop("daysago", None)
return refresh(info, None)
def refresh(query_info=None, hash_info=None):
if query_info is None:
query_info = get_query_info()
if hash_info is None:
hash_info = get_hash_info()
url = window.location.origin + window.location.pathname
encode_uri_component = window.encodeURIComponent
if query_info:
url += "?" + "&".join(
[key + "=" + encode_uri_component(val) for key, val in query_info.items()]
)
if True:
url += "#" + "&".join(
[key + "=" + encode_uri_component(val) for key, val in hash_info.items()]
)
if url == window.location.href:
window.location.reload()
else:
window.location.href = url
return undefined
def panel_sort_func(x):
t = x.split("|")[1]
if t:
t = {"num": "anum", "cat": "zcat"}.get(t, t)
return (t + "|" + x).lower()
# %%
def on_init():
for dbname, data in data_per_db.items():
# Create panel container (and a title)
title_el = document.createElement("div")
container_el = document.createElement("div")
title_el.innerText = dbname # .replace("_", " ")
title_el.classList.add("panelcontainertitle")
container_el.classList.add("panelcontainer")
document.body.appendChild(title_el)
document.body.appendChild(container_el)
if dbname == "system" and window.info:
panels.append(InfoPanel(container_el, dbname, "info", "system info"))
# Collect panel types
panel_kinds = {}
for i in range(len(data)):
aggr = data[i]
for key in aggr.keys():
panel_kinds[key] = True
# Sort the panel types - count, dcount, num, cat
panel_kinds = panel_kinds.keys()
panel_kinds.sort(key=panel_sort_func)
# Create panels
for i in range(len(panel_kinds)):
key = panel_kinds[i]
# Select panel class
key_parts = key.split("|")
if len(key_parts) == 2:
name, type = key_parts
unit = ""
elif len(key_parts) == 3:
name, type, unit = key_parts
else:
continue
if type == "time":
continue # skip time info
elif type == "count":
title = "# " + name
Cls = CountPanel # noqa: N806
elif type == "dcount":
title = "# daily " + name
Cls = DailyCountPanel # noqa: N806
elif type == "mcount":
title = "# monthly " + name
Cls = MonthlyCountPanel # noqa: N806
elif type == "cat":
title = name + "'s"
Cls = CategoricalPanel # noqa: N806
elif type == "num":
title = name
Cls = NumericalPanel # noqa: N806
else:
window.console.warn(f"Don't know what to do with {key}")
continue
if unit:
title = title + " " + unit
# Create panel
panel = Cls(container_el, dbname, key, title, unit)
panels.append(panel)
on_hash_change() # calls on_resize()
def on_resize():
window.setTimeout(_on_resize, 1)
def get_query_info():
url = window.location.href
q = ""
if "?" in url:
q = window.location.href.split("?", 1)[-1].split("#")[0]
return get_dict_from_hash_or_query(q)
def get_hash_info():
return get_dict_from_hash_or_query(window.location.hash.lstrip("#"))
def get_dict_from_hash_or_query(s):
info = {}
for s in s.split("&"):
key, _, val = s.partition("=")
if key and val:
val = window.decodeURIComponent(val)
if val.lower() == "true":
val = True
elif val.lower() == "false":
val = False
elif val in "0123456789":
val = int(val)
info[key] = val
elif s:
info[s] = True
return info
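# For example (a sketch of the intended behaviour):
#   get_dict_from_hash_or_query("utc=true&columns=2&foo")
#   -> {"utc": True, "columns": 2, "foo": True}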
def on_hash_change():
info = get_hash_info()
containers = document.getElementsByClassName("panelcontainer")
columns = int(info.get("columns", "")) or 0
if columns > 0:
grid_template_columns = "auto ".repeat(columns)
else:
grid_template_columns = None
height = int(info.get("height", "")) or 0
if height > 0:
grid_auto_rows = height + "px"
else:
grid_auto_rows = None
for i in range(len(containers)):
containers[i].style.gridAutoRows = grid_auto_rows
containers[i].style.gridTemplateColumns = grid_template_columns
on_resize()
def _on_resize():
for panel in panels:
if panel.canvas:
# Get dimensions
w = panel.node.clientWidth - 10
h = panel.node.clientHeight - 35
pixel_ratio = get_pixel_ratio(panel.canvas.getContext("2d"))
# Set dimensions
panel.canvas.style.width = w + "px"
panel.canvas.style.height = h + "px"
panel.canvas.width = w * pixel_ratio
panel.canvas.height = h * pixel_ratio
# Set some info on the object
panel.pixel_ratio = pixel_ratio
panel.width = w
panel.height = h
if panel.draw:
panel.draw()
def get_pixel_ratio(ctx):
"""Get the ratio of logical pixel to screen pixel."""
PSCRIPT_OVERLOAD = False # noqa
dpr = window.devicePixelRatio or 1
bsr = (
ctx.webkitBackingStorePixelRatio
or ctx.mozBackingStorePixelRatio
or ctx.msBackingStorePixelRatio
or ctx.oBackingStorePixelRatio
or ctx.backingStorePixelRatio
or 1
)
return dpr / bsr
def _create_tick_units():
# Create tick units
tick_units = []
for e in range(-14, 14):
for i in [10, 20, 25, 50]:
tick_units.append(i * 10**e)
return tick_units
_tick_units = _create_tick_units()
# def split_group(s, sep):
# group, _, sub = s.partition(sep)
# if len(sub) == 0:
# return "", group
# else:
# return group, sub
class BasePanel:
def __init__(self, container, dbname, key, title, unit):
self.dbname = dbname
self.key = key
self.title = title
self.unit = unit
self.node = document.createElement("div")
self.node.classList.add("panel")
container.appendChild(self.node)
self.titlenode = document.createElement("div")
self.titlenode.classList.add("title")
self.titlenode.innerText = title
self.node.appendChild(self.titlenode)
class InfoPanel(BasePanel):
def __init__(self, *args):
super().__init__(*args)
self.content = document.createElement("div")
self.content.classList.add("content")
self.node.appendChild(self.content)
hider = document.createElement("div")
hider.classList.add("scrollhider")
self.node.appendChild(hider)
self._create()
def _create(self):
PSCRIPT_OVERLOAD = False # noqa
if not window.info:
return
lines = []
lines.append("<table>")
for key, value in window.info.items():
lines.append(f"<tr> <td>{key}</td> <td>{value}</td> </tr>")
lines.append("</table>")
self.content.innerHTML = "\n".join(lines)
class CategoricalPanel(InfoPanel):
def _create(self):
PSCRIPT_OVERLOAD = False # noqa
key = self.key
# First aggregate
data = data_per_db[self.dbname]
totalcount = 0
rows = {}
for i in range(len(data)):
aggr = data[i]
meas = aggr.get(key, {})
for k, v in meas.items():
rows[k] = rows.get(k, 0) + v
totalcount += v
# Group so we can sort in a grouped fashion
groups = {}
group_counts = {}
for key, count in rows.items():
group, _, subkey = key.partition(" - ")
groups.setdefault(group, []).append((subkey, count))
group_counts[group] = group_counts.get(group, 0) + count
group_counts = [(k, v) for k, v in group_counts.items()]
# Sort groups and the items inside each group
group_counts.sort(key=lambda x: -x[1])
for subs in groups.values():
subs.sort(key=lambda x: -x[1])
lines = []
lines.append("<table>")
for group, _ in group_counts:
for sub, count in groups[group]:
key = group + " - " + sub
key = key.strip(" -")
pct = 100 * count / totalcount
lines.append(
f"<tr> <td>{pct:0.0f}%</td> <td>{count}</td> <td>{key}</td> </tr>"
)
lines.append("</table>")
self.content.innerHTML = "\n".join(lines)
class PlotPanel(BasePanel):
_values_are_integer = False
def __init__(self, *args):
super().__init__(*args)
self.canvas = document.createElement("canvas")
self.node.appendChild(self.canvas)
def _draw_text(self, ctx, text, x, y, angle=0):
PSCRIPT_OVERLOAD = False # noqa
ctx.save()
ctx.translate(x, y)
ctx.scale(1, -1)
ctx.rotate(angle)
ctx.fillText(text, 0, 0)
ctx.restore()
def _get_min_max(self):
return 0, 1
def _get_ticks(self, scale, mi, ma, min_tick_dist=40):
PSCRIPT_OVERLOAD = False # noqa
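# When a usable tick unit is found, this returns a dict mapping tick positions
# in data space to formatted label strings (rescaled with a K/M/G or m/u
# prefix where appropriate).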
# Inspired by flexx' PlotWidget, which took inspiration from visvis
# Get tick multipliers and unit modifier
if self.unit == "iB":
if ma >= 2**30:
mult, unit = 1 / 2**30, "G"
elif ma >= 2**20:
mult, unit = 1 / 2**20, "M"
elif ma >= 2**10:
mult, unit = 1 / 2**10, "K"
else:
mult, unit = 1, ""
else:
if ma >= 10_000_000_000:
mult, unit = 1 / 1_000_000_000, "G"
elif ma >= 10_000_000:
mult, unit = 1 / 1_000_000, "M"
elif ma >= 10000:
mult, unit = 1 / 1000, "K"
elif ma < 0.0001:
mult, unit = 1_000_000, "u"
elif ma < 0.1:
mult, unit = 1000, "m"
else:
mult, unit = 1, ""
if self.unit in ("iB", "s"):
title = self.title.replace(" " + self.unit, " " + unit + self.unit)
self.titlenode.innerText = title
unit = ""
# Get tick unit
is_int = self._values_are_integer
for tick_unit in _tick_units:
if is_int and str(tick_unit).indexOf(".") >= 0:
continue
if tick_unit * scale / mult >= min_tick_dist:
break
else:
return []
# Calculate tick values
first_tick = Math.ceil(mi * mult / tick_unit) * tick_unit
last_tick = Math.floor(ma * mult / tick_unit) * tick_unit
ticks = {}
t = first_tick # t does not mean time here!
while t <= last_tick:
ticks[t / mult] = t
t += tick_unit
# Stringify
for realt, t in ticks.items():
if t == 0:
s = "0"
elif mult == 1 and is_int:
s = str(int(t))
else:
s = t.toPrecision(4) # t is already multiplied
if "." in s:
while len(s) > 5 and s.endsWith("0"):
s = s[:-1]
ticks[realt] = s + unit
return ticks
def draw(self):
PSCRIPT_OVERLOAD = False # noqa
ctx = self.canvas.getContext("2d")
# Prepare hidpi mode for canvas (flush state just in case)
for i in range(4):
ctx.restore()
ctx.save()
ctx.scale(self.pixel_ratio, self.pixel_ratio)
# Flip y-axis
ctx.scale(1, -1)
ctx.translate(0, -self.height)
# Clear bg
ctx.clearRect(0, 0, self.width, self.height)
# Determine drawing area
x0 = 45
y0 = 35
width = self.width - x0 - 15
height = self.height - y0 - 5
data = data_per_db[self.dbname]
if len(data) == 0:
return
# Get bounding box
t1 = data[0].time_start
t2 = data[-1].time_stop
mi, ma = self._get_min_max()
if ma <= mi:
return
hscale = width / (t2 - t1)
vscale = height / (ma - mi)
unix_from_utc_tuple = Date.UTC # avoid triggering new
utc = get_hash_info().get("utc", False)
xticks = {}
# Prepare x ticks for hours (one hour is the smallest granularity)
hourly_tick_units = (1, 3600), (2, 7200), (6, 21600)
min_tick_dist = 60
for nhours, tick_unit in hourly_tick_units:
if tick_unit * hscale >= min_tick_dist:
break
else:
tick_unit = 0
#
if tick_unit > 0:
d = Date(t1 * 1000)
if utc:
tup = [
d.getUTCFullYear(),
d.getUTCMonth(),
d.getUTCDate(),
d.getUTCHours(),
]
tup[-1] = nhours * int(tup[-1] / nhours)
t = unix_from_utc_tuple(tup[0], tup[1], tup[2], tup[3]) / 1000
else:
tup = [d.getFullYear(), d.getMonth(), d.getDate(), d.getHours()]
tup[-1] = nhours * int(tup[-1] / nhours)
t = Date(tup[0], tup[1], tup[2], tup[3]).getTime() / 1000
while t <= t2:
if t >= t1:
d = Date(t * 1000)
if utc:
xticks[t] = f"{d.getUTCHours():02i}:{d.getUTCMinutes():02i}"
else:
xticks[t] = f"{d.getHours():02i}:{d.getMinutes():02i}"
t += tick_unit
# Prepare x ticks for days/months
day_tick_units = (2, 1), (2, 2), (2, 5), (1, 1), (1, 2), (1, 3), (0, 365)
min_tick_dist = 60
for dindex, nsomething in day_tick_units:
tick_unit = nsomething * [365 * 86400, 30 * 86400, 86400][dindex]
if tick_unit * hscale >= min_tick_dist:
break
else:
tick_unit = nsomething = 0
#
n_date_ticks = 0
if nsomething > 0:
d = Date(t1 * 1000)
if utc:
tup = [d.getUTCFullYear(), d.getUTCMonth(), d.getUTCDate()]
tup[dindex] = nsomething * int(tup[dindex] / nsomething)
t = unix_from_utc_tuple(tup[0], tup[1], tup[2]) / 1000
else:
tup = [d.getFullYear(), d.getMonth(), d.getDate()]
tup[dindex] = nsomething * int(tup[dindex] / nsomething)
t = Date(tup[0], tup[1], tup[2]).getTime() / 1000
while t <= t2:
if t >= t1:
n_date_ticks += 1
d = Date(t * 1000)
if utc:
dd = f"{d.getUTCDate():02i}"
mm = f"{d.getUTCMonth()+1:02i}"
yy = f"{d.getUTCFullYear()}"
xticks[t] = f"{dd}-{mm}-{yy}"
else:
dd = f"{d.getDate():02i}"
mm = f"{d.getMonth()+1:02i}"
yy = f"{d.getFullYear()}"
xticks[t] = f"{dd}-{mm}-{yy}"
tup[dindex] += nsomething
if utc:
t = unix_from_utc_tuple(tup[0], tup[1], tup[2]) / 1000
else:
t = Date(tup[0], tup[1], tup[2]).getTime() / 1000
#
extra_x_tick = ""
if n_date_ticks < 2:
xtickskeys = xticks.keys()
if len(xtickskeys) > 0 and hscale * (xtickskeys[0] - t1) < 30:
xticks.pop(xtickskeys[0])
d = Date(t1 * 1000)
if utc:
extra_x_tick = (
f"{d.getUTCFullYear()}-{d.getUTCMonth()+1:02i}-{d.getUTCDate():02i}"
)
else:
extra_x_tick = (
f"{d.getFullYear()}-{d.getMonth()+1:02i}-{d.getDate():02i}"
)
# Prepare y ticks
yticks = self._get_ticks(vscale, mi, ma, 25) # text -> value
# Prepare drawing
ctx.lineWidth = 1
# Draw grid lines
ctx.strokeStyle = "rgba(128, 128, 128, 0.3)"
ctx.beginPath()
for v, text in yticks.items():
y = y0 + (float(v) - mi) * vscale
ctx.moveTo(x0, y)
ctx.lineTo(x0 + width, y)
ctx.stroke()
# Draw x ticks
ctx.strokeStyle = text_color
ctx.fillStyle = text_color
ctx.textAlign = "center"
ctx.textBaseline = "top" # middle
ctx.beginPath()
for t, text in xticks.items():
x = x0 + (float(t) - t1) * hscale
ctx.moveTo(x, y0)
ctx.lineTo(x, y0 - 4)
ctx.stroke()
for t, text in xticks.items():
x = x0 + (float(t) - t1) * hscale
angle = 0 # -0.15 * Math.PI
x = min(x, x0 + width - 15)
self._draw_text(ctx, text, x, y0 - 10, angle)
if extra_x_tick:
ctx.textAlign = "left"
ctx.textBaseline = "bottom"
self._draw_text(ctx, extra_x_tick, 0, 0)
# Draw y ticks
ctx.textAlign = "right"
ctx.textBaseline = "middle"
ctx.beginPath()
for v, text in yticks.items():
y = y0 + (float(v) - mi) * vscale
ctx.moveTo(x0 - 4, y)
ctx.lineTo(x0, y)
ctx.stroke()
for v, text in yticks.items():
y = y0 + (float(v) - mi) * vscale
self._draw_text(ctx, text, x0 - 8, y)
# Draw axis
ctx.strokeStyle = text_color
ctx.beginPath()
ctx.moveTo(x0, y0)
ctx.lineTo(x0 + width, y0)
ctx.moveTo(x0, y0)
ctx.lineTo(x0, y0 + height)
ctx.stroke()
# Draw content
self._draw_content(ctx, mi, ma, t1, t2, x0, y0, hscale, vscale)
# Draw local / UTC
ctx.fillStyle = "rgba(128, 128, 128, 0.5)"
ctx.textAlign = "right"
ctx.textBaseline = "bottom"
self._draw_text(ctx, "UTC" if utc else "Local time", self.width, 0)
class CountPanel(PlotPanel):
_values_are_integer = True
clr = 50, 250, 50
def _get_min_max(self):
PSCRIPT_OVERLOAD = False # noqa
key = self.key
mi = 0
ma = -9_999_999
data = data_per_db[self.dbname]
for i in range(len(data)):
aggr = data[i]
v = aggr[key]
if v is undefined:
continue
ma = max(ma, v)
return mi, ma
def _draw_content(self, ctx, mi, ma, t1, t2, x0, y0, hscale, vscale):
PSCRIPT_OVERLOAD = False # noqa
key = self.key
clr = self.clr
ctx.fillStyle = f"rgba({clr[0]}, {clr[1]}, {clr[2]}, 0.8)"
data = data_per_db[self.dbname]
for i in range(len(data)):
aggr = data[i]
v = aggr[key]
if v is undefined:
continue
if aggr.time_start > t2:
continue
x = x0 + (aggr.time_start - t1) * hscale
w = (aggr.time_stop - aggr.time_start) * hscale
w = max(w - 1, 1)
ctx.fillRect(x, y0, w, v * vscale)
class DailyCountPanel(CountPanel):
clr = 220, 250, 0
def _get_min_max(self):
PSCRIPT_OVERLOAD = False # noqa
key = self.key
mi = 0
ma = -9_999_999
self.daily = daily = []
prev_day = ""
data = data_per_db[self.dbname]
for i in range(len(data)):
aggr = data[i]
v = aggr[key]
if v is undefined:
continue
day = aggr.time_key[:10]
if day != prev_day:
if len(daily) > 0:
ma = max(ma, daily[-1][key])
new_aggr = {"time_start": aggr.time_start, "time_stop": aggr.time_stop}
new_aggr[key] = aggr[key]
daily.append(new_aggr)
prev_day = day
else:
daily[-1][key] += v
daily[-1].time_stop = aggr.time_stop
if len(daily) > 0:
ma = max(ma, daily[-1][key])
return mi, ma
def _draw_content(self, ctx, mi, ma, t1, t2, x0, y0, hscale, vscale):
PSCRIPT_OVERLOAD = False # noqa
# Draw daily
key = self.key
clr = self.clr
ctx.fillStyle = f"rgba({clr[0]}, {clr[1]}, {clr[2]}, 0.4)"
for i in range(len(self.daily)):
aggr = self.daily[i]
v = aggr[key]
if aggr.time_start > t2:
continue
x = x0 + (aggr.time_start - t1) * hscale
w = (aggr.time_stop - aggr.time_start) * hscale
w = max(w - 1, 1)
ctx.fillRect(x, y0, w, v * vscale)
# Draw per unit
super()._draw_content(ctx, mi, ma, t1, t2, x0, y0, hscale, vscale)
class MonthlyCountPanel(CountPanel):
clr = 250, 200, 0
def _get_min_max(self):
PSCRIPT_OVERLOAD = False # noqa
key = self.key
mi = 0
ma = -9_999_999
self.monthly = monthly = []
prev_month = ""
data = data_per_db[self.dbname]
for i in range(len(data)):
aggr = data[i]
v = aggr[key]
if v is undefined:
continue
month = aggr.time_key[:7]
if month != prev_month:
if len(monthly) > 0:
ma = max(ma, monthly[-1][key])
new_aggr = {"time_start": aggr.time_start, "time_stop": aggr.time_stop}
new_aggr[key] = aggr[key]
monthly.append(new_aggr)
prev_month = month
else:
monthly[-1][key] += v
monthly[-1].time_stop = aggr.time_stop
if len(monthly) > 0:
ma = max(ma, monthly[-1][key])
return mi, ma
def _draw_content(self, ctx, mi, ma, t1, t2, x0, y0, hscale, vscale):
PSCRIPT_OVERLOAD = False # noqa
# Draw monthly
key = self.key
clr = self.clr
ctx.fillStyle = f"rgba({clr[0]}, {clr[1]}, {clr[2]}, 0.4)"
for i in range(len(self.monthly)):
aggr = self.monthly[i]
v = aggr[key]
if aggr.time_start > t2:
continue
x = x0 + (aggr.time_start - t1) * hscale
w = (aggr.time_stop - aggr.time_start) * hscale
w = max(w - 1, 1)
ctx.fillRect(x, y0, w, v * vscale)
# Draw per unit
super()._draw_content(ctx, mi, ma, t1, t2, x0, y0, hscale, vscale)
class NumericalPanel(PlotPanel):
clr = 0, 220, 250
def __init__(self, *args):
super().__init__(*args)
def _get_min_max(self):
PSCRIPT_OVERLOAD = False # noqa
key = self.key
mi = +1e20
ma = -1e20
data = data_per_db[self.dbname]
for i in range(len(data)):
aggr = data[i]
meas = aggr[key]
if meas is undefined or meas.n == 0:
continue
mi = min(mi, meas.min)
ma = max(ma, meas.max)
if ma >= mi:
mi = min(0.8 * ma, mi) # Select a good min point
mi = 0
if self.unit == "%":
mi = 0
ma = max(ma, 100) # percentages can be larger than 100
return mi, ma
def _draw_content(self, ctx, mi, ma, t1, t2, x0, y0, hscale, vscale):
PSCRIPT_OVERLOAD = False # noqa
key = self.key
clr = self.clr
ctx.fillStyle = f"rgba({clr[0]}, {clr[1]}, {clr[2]}, 0.2)"
ctx.strokeStyle = f"rgba({clr[0]}, {clr[1]}, {clr[2]}, 1.0)"
data = data_per_db[self.dbname]
mean_points = []
for i in range(len(data)):
aggr = data[i]
meas = aggr[key]
if meas is undefined or meas.n == 0:
continue
if aggr.time_start > t2:
continue
x = x0 + (aggr.time_start - t1) * hscale
w = (aggr.time_stop - aggr.time_start) * hscale
w = max(w, 1)
# Draw rectangle for min max
y = y0 + (meas.min - mi) * vscale
h = (meas.max - meas.min) * vscale
ctx.fillRect(x, y, w, h)
# Draw rectangle for std
mean = meas.mean
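# 'magic' is assumed to hold the Welford M2 aggregate (sum of squared
# deviations from the mean), so the standard deviation is sqrt(M2 / n).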
std = (meas.magic / meas.n) ** 0.5 # Welford
st1 = max(meas.min, mean - std)
st2 = min(meas.max, mean + std)
y = y0 + (st1 - mi) * vscale
h = (st2 - st1) * vscale
ctx.fillRect(x, y, w, h)
y = y0 + (mean - mi) * vscale
mean_points.append((x + 0.3333 * w, y))
mean_points.append((x + 0.6666 * w, y))
# Draw mean
if len(mean_points) > 0:
ctx.beginPath()
ctx.moveTo(mean_points[0][0], mean_points[0][1])
for x, y in mean_points:
ctx.lineTo(x, y)
ctx.stroke()
window.addEventListener("load", on_init)
window.addEventListener("resize", on_resize)
window.addEventListener("hashchange", on_hash_change)
|
<<<<<<< HEAD
print("Hello Git!")
=======
print ("hello")
print ("Bye")
print ("Step 14 of task one")
>>>>>>> master
|
from rest_framework import permissions, serializers, routers
from rest_framework import generics, mixins, viewsets
from rest_framework.response import Response
from rest_framework import status
from rest_framework.decorators import action
from ..my_models import poetry
import logging
logger = logging.getLogger(__name__)
class PoetryViewSet(viewsets.ModelViewSet):
permission_classes = (permissions.IsAuthenticated,)
queryset = poetry.Poetry.objects.all().order_by('-id')
serializer_class = poetry.PoetrySerializers
class PoetryCommentViewSet(viewsets.ModelViewSet):
permission_classes = (permissions.IsAuthenticated,)
#queryset = poetry.PoetryComment.objects.all().filter(owner=self.request.user).order_by('-id')
serializer_class = poetry.PoetryCommentSerializers
@action(methods=['GET'],detail=False)
def poetry_comments(self, request):
user = request.user
logger.info("username : "+ user.username + str(user.id) )
id = request.query_params["poetry_id"]
self.queryset = poetry.PoetryComment.objects.all().filter(poetry_id=id,owner=user).order_by('-id')
#serializer = self.get_serializer(self.queryset, many=True)
page = self.paginate_queryset(self.queryset)
if page is not None:
logger.info(page)
serializer = self.get_serializer(page, many=True)
logger.debug("章节序列化结果"+str(serializer.data) )
return self.get_paginated_response(serializer.data)
else:
serializer = self.get_serializer(self.queryset, many=True)
logger.debug("Chapter serialization result: " + str(serializer.data))
return Response(serializer.data)
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
def get_queryset(self) :
user = self.request.user
logger.info("Current User: " + user)
return poetry.PoetryComment.objects.all().filter(owner=user).order_by('-id')
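# Typical registration of these viewsets with a DRF router, e.g. in urls.py
# (a sketch; the route prefixes are assumptions). PoetryCommentViewSet defines
# no queryset attribute, so a basename must be given explicitly:
#
#   router = routers.DefaultRouter()
#   router.register(r'poetry', PoetryViewSet)
#   router.register(r'poetry-comments', PoetryCommentViewSet, basename='poetrycomment')
#   urlpatterns = [path('api/', include(router.urls))]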
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 31 10:17:39 2019
@author: elif.ayvali
"""
import scipy.io as sio
import numpy as np
import vtk
delta_t=0.5
class VTK_Tools:
@staticmethod
def addSTL(filename, color, opacity):
STL_reader = vtk.vtkSTLReader()
STL_reader.SetFileName(filename)
STL_normals = vtk.vtkPolyDataNormals()
STL_normals.SetFeatureAngle(160)
STL_normals.SetInputConnection(STL_reader.GetOutputPort())
mapperSTLscope = vtk.vtkPolyDataMapper()
mapperSTLscope.SetInputConnection(STL_normals.GetOutputPort())
STLActor = vtk.vtkActor()
STLActor.SetMapper(mapperSTLscope)
STLActor.GetProperty().SetColor(color) # (R,G,B)
STLActor.GetProperty().SetOpacity(opacity)
STLActor.GetProperty().SetInterpolationToGouraud()
return STLActor
@staticmethod
def addAxesMarker(xfm, scalex, scaley, scalez):
"""Place an axes orientation marker in the scene, transformed by the given vtkTransform."""
axesActor = vtk.vtkAxesActor()
axesActor.SetTotalLength(scalex,scaley,scalez)
axesActor.SetShaftTypeToCylinder()
axesActor.AxisLabelsOff()
axesActor.SetUserTransform(xfm)
return axesActor
@staticmethod
def get_text(text, position_view_port, font_size, color, opacity, bold_flag):
text_actor = vtk.vtkTextActor()
text_actor.SetInput(text)
text_actor.GetTextProperty().SetColor(color)
text_actor.GetTextProperty().SetBold(bold_flag)
text_actor.GetTextProperty().SetFontSize(font_size)
text_actor.GetTextProperty().SetOpacity(opacity)
text_actor.GetActualPositionCoordinate().SetCoordinateSystemToNormalizedViewport()
text_actor.SetPosition(position_view_port)
return text_actor
class CameraData(object):
__slots__ = ('position', 'focal_point', 'view_up')
def __init__(self, position, focal_point, view_up):
self.position = position
self.focal_point = focal_point
self.view_up = view_up
def rotmat_to_vtk(mat):
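# 'mat' is a flat 9-element rotation matrix; its entries are written column
# by column (column-major order) into the 3x3 block of the vtkMatrix4x4.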
xfm = vtk.vtkTransform()
xfm.PostMultiply()
xfm.Identity()
cnt=0
Rot = vtk.vtkMatrix4x4()
Rot.Identity()
for j in range(3):
for i in range(3):
Rot.SetElement(i, j, mat[cnt])
cnt+=1
xfm.SetMatrix(Rot)
return xfm
class vtkTimerCallback():
def __init__(self):
self.counter = 0 #start from data index
self.play_rate=1
print('Started Rendering')
def execute(self,obj,event):
#get actor handles
STLActor_mario = self.actors[0]
STLActor_mario2 = self.actors[1]
STLActor_mario3 = self.actors[2]
STLActor_mario4 = self.actors[3]
display_time = self.actors[4]
#get renderer handles
ren = self.renderers[0]
ren2 = self.renderers[1]
ren3 = self.renderers[2]
ren4 = self.renderers[3]
#update rotation
rot = self.data[0][self.counter,:]
rot2 = self.data[1][self.counter,:]
rot3 = self.data[2][self.counter,:]
rot4 = self.data[3][self.counter,:]
xfm=VTK_Tools.rotmat_to_vtk(rot)
# print('rot1',xfm.GetMatrix())
STLActor_mario.SetUserTransform(xfm)
xfm2=VTK_Tools.rotmat_to_vtk(rot2)
# print('rot2',xfm2.GetMatrix())
STLActor_mario2.SetUserTransform(xfm2)
xfm3=VTK_Tools.rotmat_to_vtk(rot3)
# print('rot3',xfm3.GetMatrix())
STLActor_mario3.SetUserTransform(xfm3)
xfm4=VTK_Tools.rotmat_to_vtk(rot4)
# print('rot4',xfm4.GetMatrix())
STLActor_mario4.SetUserTransform(xfm4)
display_text=' Time elapsed(s): {0:1.1f}\n '.format(float(delta_t*self.counter))
display_time.SetInput(display_text)
ren.ResetCameraClippingRange()
ren2.ResetCameraClippingRange()
ren3.ResetCameraClippingRange()
ren4.ResetCameraClippingRange()
iren = obj
iren.GetRenderWindow().Render()
self.counter += self.play_rate
def main(data_file_name, stl_name):
Data_mat = sio.loadmat(data_file_name)
R_exp=Data_mat['R_exp']#nx4
R_from_nonunit_q_RK4=Data_mat['R_from_nonunit_q_RK4']#nx4
R_from_q_exp=Data_mat['R_from_q_exp']#nx9
R_rk4=Data_mat['R_rk4']#nx9
rot_seq=R_rk4
rot_seq2=R_exp
rot_seq3=R_from_nonunit_q_RK4
rot_seq4=R_from_q_exp
xfm_mario = vtk.vtkTransform()
xfm_mario.Identity()
xfm_origin= vtk.vtkTransform()
xfm_origin.Identity()
axes_origin_actor=VTK_Tools.addAxesMarker(xfm_origin,100,100,100)
#Import Mario STL
STLActor_mario=VTK_Tools.addSTL(filename= stl_name,color=[1,1,1],opacity=1)
STLActor_mario.SetUserTransform(xfm_mario)
display_flag1 = VTK_Tools.get_text( 'R RK4 integration', position_view_port=[0.40, 0.10], font_size=18, color=[1, 1, 0], opacity=1, bold_flag=1)
STLActor_mario2=VTK_Tools.addSTL(filename= stl_name,color=[1,1,1],opacity=1)
STLActor_mario2.SetUserTransform(xfm_mario)
display_flag2 = VTK_Tools.get_text( 'Exponential matrix update', position_view_port=[0.40, 0.10], font_size=18, color=[1, 1, 0], opacity=1, bold_flag=1)
STLActor_mario3=VTK_Tools.addSTL(filename= stl_name,color=[1,1,1],opacity=1)
STLActor_mario3.SetUserTransform(xfm_mario)
display_flag3 = VTK_Tools.get_text( 'Nonunit q RK4 integration', position_view_port=[0.40, 0.10], font_size=18, color=[1, 1, 0], opacity=1, bold_flag=1)
STLActor_mario4=VTK_Tools.addSTL(filename= stl_name,color=[1,1,1],opacity=1)
STLActor_mario4.SetUserTransform(xfm_mario)
display_flag4 = VTK_Tools.get_text( 'Exponential quaternion update', position_view_port=[0.40, 0.10], font_size=18, color=[1, 1, 0], opacity=1, bold_flag=1)
display_time = VTK_Tools.get_text( '', position_view_port=[0.40, 0.90], font_size=18, color=[1, 1, 0], opacity=1, bold_flag=1)
#--------------Initialize World Renderer------------------#
ren = vtk.vtkRenderer()
ren.SetBackground(0.14,0.14,0.14)
ren.SetViewport(0.0, 0.0, 0.5, 0.5) # left bottom quadrant
ren.GradientBackgroundOn();
ren.AddActor(STLActor_mario)
ren.AddActor(axes_origin_actor)
ren.AddActor(display_flag1)
ren.AddActor(display_time)
ren2 = vtk.vtkRenderer()
ren2.SetBackground(0.14,0.14,0.14)
ren2.SetViewport(0.5, 0.0, 1.0, 0.5) # right bottom quadrant
ren2.GradientBackgroundOn();
ren2.AddActor(STLActor_mario2)
ren2.AddActor(axes_origin_actor)
ren2.AddActor(display_flag2)
ren3 = vtk.vtkRenderer()
ren3.SetBackground(0.14,0.14,0.14)
ren3.SetViewport(0.0, 0.5, 0.5, 1.0) # left top quadrant
ren3.GradientBackgroundOn();
ren3.AddActor(STLActor_mario3)
ren3.AddActor(axes_origin_actor)
ren3.AddActor(display_flag3)
ren4 = vtk.vtkRenderer()
ren4.SetBackground(0.14,0.14,0.14)
ren4.SetViewport(0.5, 0.5, 1.0, 1.0) # right top quadrant
ren4.GradientBackgroundOn();
ren4.AddActor(STLActor_mario4)
ren4.AddActor(axes_origin_actor)
ren4.AddActor(display_flag4)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.AddRenderer(ren2)
renWin.AddRenderer(ren3)
renWin.AddRenderer(ren4)
# Create a renderwindowinteractor#
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
#Enable user interface interactor
iren.Initialize()
#variables to pass into the callback class cb
actorCollection = []
actorCollection.append(STLActor_mario)
actorCollection.append(STLActor_mario2)
actorCollection.append(STLActor_mario3)
actorCollection.append(STLActor_mario4)
actorCollection.append(display_time)
rendererCollection=[]
rendererCollection.append(ren)
rendererCollection.append(ren2)
rendererCollection.append(ren3)
rendererCollection.append(ren4)
dataCollection=[]
dataCollection.append(rot_seq)
dataCollection.append(rot_seq2)
dataCollection.append(rot_seq3)
dataCollection.append(rot_seq4)
# Sign up to receive TimerEvent
cb = vtkTimerCallback()
cb.actors=actorCollection;
cb.data=dataCollection;
#cb.cameras=cameraCollection;
cb.renderers=rendererCollection;
iren.AddObserver('TimerEvent', cb.execute)
iren.CreateRepeatingTimer(500);
#start the interaction and timer
iren.Start()
iren.DestroyTimer()
if __name__ == '__main__':
main(data_file_name='test_data.mat', stl_name = 'mario.stl')
|
from selenium import webdriver
from time import sleep
from smtplib import SMTP
from credentials import email, password
class Coronavirus():
def __init__(self):
self.driver = webdriver.Chrome()
def get_data(self):
country_element = "India"
sleep(4)
search_field = self.driver.find_element_by_xpath('//*[@id="main_table_countries_today_filter"]/label/input')
sleep(2)
search_field.send_keys(country_element)
row = self.driver.find_element_by_xpath('//*[@id="main_table_countries_today"]/tbody[1]/tr')
data = row.text.split(" ")
total_cases = data[1]
new_cases = data[2]
total_deaths = data[3]
new_deaths = data[4]
total_recovered = data[5]
active_cases = data[6]
server = SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.ehlo()
server.login(email, password)
subject = 'Coronavirus stats in your country today!'
body = ('Today in ' + country_element +
'\nThere is new data on coronavirus:' +
'\nTotal cases: ' + total_cases +
'\nNew cases: ' + new_cases +
'\nTotal deaths: ' + total_deaths +
'\nNew deaths: ' + new_deaths +
'\nActive cases: ' + active_cases +
'\nTotal recovered: ' + total_recovered +
'\nCheck the link: https://www.worldometers.info/coronavirus/')
msg = f"Subject: {subject}\n\n{body}"
server.sendmail('Coronavirus',email,msg)
print('Hey Email has been sent!')
server.quit()
bot = Coronavirus()
bot.driver.get('https://www.worldometers.info/coronavirus/')
bot.get_data()
|
# import sys
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import ConfigParser
import time
import datetime
import os
# import requests
# import re
# from contextlib import closing
import subprocess
# from selenium.common.exceptions import NoSuchElementException
class episodeList:
def main(self):
print "Started at "+datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
section = "Kissanime"
config = self.getConfig()
user = config.get(section, "username")
passwrd = config.get(section, "password")
length = int(config.get(section, "length"))  # cast: ConfigParser returns strings
qlty = config.get(section, "quality")
IDM_path = config.get(section, "IDM_location")
chromeDriverLoc = config.get(section, "chromedriver_location")
#os.environ["webdriver.chrome.driver"] = chromeDriverLoc
browser = webdriver.Chrome(chromeDriverLoc) # Get local session of chrome
browser.get("https://kissanime.to/Login") # Load page
os.chdir(IDM_path)
time.sleep(10) # Let the page load, will be added to the API
assert "Login" in browser.title
userName = browser.find_element_by_name("username") # Find the username box
userName.send_keys(user)
passWord = browser.find_element_by_name("password")
passWord.send_keys(passwrd + Keys.RETURN)
print datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S') + " Logged in"
found = "Not found"
while found == "Not found":
keyword = raw_input("Enter search string: ")
search = browser.find_element_by_id("keyword")
search.send_keys(keyword + Keys.RETURN)
time.sleep(5)
dictUrl = {}
if "Find anime" in browser.title:
i = 3
found = browser.find_elements_by_xpath('//*[@id="leftside"]/div/div[2]/div[2]')[0].text
while i > 2 and found != "Not found":
try:
table = browser.find_element_by_xpath('//*[@id="leftside"]/div/div[2]/div[2]/table/tbody/tr['+str(i)+']/td[1]/a')
dictUrl[i-2] = table.get_attribute("href")
i += 1
except:
break
if found != "Not found":
for k in dictUrl:
print str(k)+" : "+dictUrl[k]
k = raw_input("Enter Selection: ")
url = dictUrl[int(k)]
browser.get(url)
time.sleep(3)
else:
print found
else:
break
browser.get(browser.find_element_by_xpath("/html/body/div[1]/div[4]/div[1]/div[2]/div[2]/div[2]/table/tbody/tr[last()]/td[1]/a").get_attribute("href"))
linksList = []
print datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S') + " Started retrieving links"
j = 1
while j <= length:
try:
HD = browser.find_element_by_xpath(".//div[@id='divDownload']/a["+qlty+"]")
except:
print "Can't find the required quality for "+browser.title()
continue
name = browser.title.replace(" ", "%20")
link = HD.get_attribute("href")+"&title="+name
linksList.append(link)
subprocess.call(["IDMan.exe", '/d', link, '/a'])
if j == 1:
subprocess.call(["IDMan.exe", '/s'])
j += 1
try:
next = browser.find_element_by_id("btnNext")
next.click()
except:
print "Reached end of series."
break
browser.close()
print datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S') + " Done retrieving links and adding to IDM queue."
def getConfig(self):
configUtil = ConfigUtil('./script.cfg')
return configUtil.getAllProperties()
class ConfigUtil:
configFile = "script.cfg"
def __init__(self, file):
self.configFile = file
def getProperty(self, section, key):
config = self.getAllProperties()
val = config.get(section, key)
return val
def getAllProperties(self):
config = ConfigParser.ConfigParser()
config.read(self.configFile)
return config
if __name__ == '__main__':
episodeList().main()
'''with open(fileLoc, 'a') as links:
for i in linksList:
links.write(i)
links.write('\n')
print i
name = re.search(r"&title=(.*)", "%s" % i).group(1)
name = name.replace("%20", " ")
#print name
#print "get url"
with closing(requests.get(i, stream=True)) as r:
#r = requests.get(i,stream=True)
#print r
video = open(name+".mp4", 'wb')
#print "start"
k = 1
for chunk in r.iter_content(chunk_size=1024*1024):
if chunk:
print k
video.write(chunk)
k += 1
print "end"
video.close()'''
|
"""
WRITTEN BY : Mahnatse, Yanga, Allen, Sibusiso, Thabo and Zweli
DATE : 13 November 2019
COHORT : C17 Data Science
COHORT MANAGER : Singi
GROUP LEADER : Zweli
"""
#------------------------------------------------------------------------------------
"""
PROBLEM
Given two lists a and b, write a function that returns true if any
of the numbers in list a is divisible by all numbers in list b. Divisible in
for an example, given a = [1, 2, 3] and b = [2 , 1], the function would return True
since one of the elements of list a, which is 2 is divisible by all elements in list b.
"""
def divides_each_element(a,b):
for i in range(len(a)):
#the truth list stores True and False values based on whether the number we are currently
# dividing in list a is divisible or not divisible by the number in list b
truth_list = []
n = a[i]
for j in range(len(b)):
if n % b[j] == 0:
truth_list.append(True)
else:
truth_list.append(False)
# Here we are checking if the truth list contains only True values
# if that is the case, we return True immediately because it means
# the number n in a is divisible by all numbers in b.
if truth_list.count(False) == 0:
return True
#if this line gets executed or reached, it means
# none of the numbers in a are divisible by all the numbers in b
return False
# You are more than welcome to modify the two lists we created
# to do your own tests.
ls1 = [3,5,7,9,12]
ls2 = [2, 4, 6, 12]
print(divides_each_element(ls1, ls2))
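# A more compact equivalent, as a sketch, using the built-in any()/all():
def divides_each_element_compact(a, b):
    # True if some n in a is divisible by every d in b
    return any(all(n % d == 0 for d in b) for n in a)

print(divides_each_element_compact(ls1, ls2))  # prints True, same as above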
|
"""
Parsing with PEGs.
"""
import re
# Glossary:
# peg object representing a parsing expression
# p, q peg
# s subject sequence. Usually a string, but only match() assumes that.
# i position in subject sequence
# far box holding the rightmost i reached so far
# (except during negative matching with invert())
# vals values tuple
# st the state: an (i, vals) pair
# fn function (not a peg)
# A peg's run() function does the work. It takes (s, far, st) and
# returns a list of states, of length 0 or 1: i.e. either [] or
# [st]. (A more general kind of parser could return a list of any
# number of states, enumerated lazily; but that'd change our model
# both semantically (in (P|Q), Q can assume P didn't match) and in
# performance. We use a list anyway because it's convenient to code
# with list comprehensions.)
def Peg(x):
"""Make a peg from a Python value as appropriate for its type. For
a string, that's a literal matcher; for a function it's a feed
action (transform the current values tuple)."""
if isinstance(x, _Peg): return x
if isinstance(x, (str, unicode)): return literal(x)
if callable(x): return feed(x)
raise ValueError("Not a Peg", x)
def maybe(p):
"Return a peg matching 0 or 1 of what p matches."
return label(either(p, empty),
'(%r)?', p)
def plus(p):
"Return a peg matching 1 or more of what p matches."
return label(chain(p, star(p)),
'(%r)+', p)
def star(p):
"Return a peg matching 0 or more of what p matches."
return label(recur(lambda p_star: maybe(chain(p, p_star))),
'(%r)*', p)
def invert(p):
"Return a peg that succeeds just when p fails."
return _Peg(('~(%r)', p),
lambda s, far, st: [] if p.run(s, [0], st) else [st])
class _Peg(object):
"""A parsing expression. It can match a prefix of a sequence,
updating a values tuple in the process, or fail."""
def __init__(self, face, run):
self.face = face
self.run = run
def __repr__(self):
if isinstance(self.face, (str, unicode)): return self.face
if isinstance(self.face, tuple): return self.face[0] % self.face[1:]
assert False, "Bad face"
def __call__(self, sequence):
"""Parse a prefix of sequence and return a tuple of values, or
raise Unparsable."""
far = [0]
for _, vals in self.run(sequence, far, (0, ())):
return vals
raise Unparsable(self, sequence[:far[0]], sequence[far[0]:])
def attempt(self, sequence):
"Parse a prefix of sequence and return a tuple of values or None."
try: return self(sequence)
except Unparsable: return None
def __add__(self, other): return chain(self, Peg(other))
def __radd__(self, other): return chain(Peg(other), self)
def __or__(self, other): return either(self, Peg(other))
def __ror__(self, other): return either(Peg(other), self)
def __rshift__(self, fn): return label(seclude(chain(self, Peg(fn))),
'(%r>>%s)', self, _fn_name(fn))
__invert__ = invert
maybe = maybe
plus = plus
star = star
def _fn_name(fn):
return fn.func_name if hasattr(fn, 'func_name') else repr(fn)
class Unparsable(Exception):
"A parsing failure."
@property
def position(self):
"The rightmost position positively reached in the parse attempt."
return len(self.args[1])
@property
def failure(self): # XXX rename?
"Return slices of the input before and after the parse failure."
return self.args[1], self.args[2]
def label(p, string, *args):
"""Return an equivalent peg whose repr is (string % args), or just
string if no args."""
return _Peg(((string,) + args if args else string),
p.run)
def recur(fn):
"Return a peg p such that p = fn(p). This is like the Y combinator."
p = delay(lambda: fn(p), 'recur(%s)', _fn_name(fn))
return p
def delay(thunk, *face): # XXX document face
"""Precondition: thunk() will return a peg p. We immediately
return a peg q equivalent to that future p, but we'll call thunk()
only once, and not until the first use of q. Use this for
recursive grammars."""
def run(s, far, st):
q.run = Peg(thunk()).run
return q.run(s, far, st)
q = _Peg(face or ('delay(%s)', _fn_name(thunk)),
run)
return q
# TODO: need doc comments or something
fail = _Peg('fail', lambda s, far, st: [])
empty = label(~fail, 'empty')
position = _Peg('position', lambda s, far, (i, vals): [(i, vals + (i,))])
def literal(string):
"Return a peg that matches string exactly."
return label(match(re.escape(string)),
'literal(%r)', string)
def match(regex):
"""Return a peg that matches what regex does, adding any captures
to the values tuple."""
compiled = re.compile(regex)
return _Peg(('/%s/', regex),
lambda s, far, (i, vals):
[(_step(far, m.end()), vals + m.groups())
for m in [compiled.match(s, i)] if m])
def _step(far, i):
"Update far with a new position."
far[0] = max(far[0], i)
return i
def capture(p):
"""Return a peg that acts like p, except it adds to the values
tuple the text that p matched."""
return _Peg(('capture(%r)', p),
lambda s, far, (i, vals):
[(i2, vals2 + (s[i:i2],))
for i2, vals2 in p.run(s, far, (i, vals))])
def seclude(p):
"""Return a peg like p, but where p doesn't get to see or alter
the incoming values tuple."""
return _Peg(('[%r]', p),
lambda s, far, (i, vals):
[(i2, vals + vals2)
for i2, vals2 in p.run(s, far, (i, ()))])
def either(p, q):
"""Return a peg that succeeds just when one of p or q does, trying
them in that order."""
return _Peg(('(%r|%r)', p, q),
lambda s, far, st:
p.run(s, far, st) or q.run(s, far, st))
def chain(p, q):
"""Return a peg that succeeds when p and q both do, with q
starting where p left off."""
return _Peg(('(%r %r)', p, q),
lambda s, far, st:
[st3
for st2 in p.run(s, far, st)
for st3 in q.run(s, far, st2)])
def alter(fn): # XXX better name
"""Return a peg that always succeeds, changing the values tuple
from xs to fn(*xs)."""
return _Peg(('alter(%s)', _fn_name(fn)),
lambda s, far, (i, vals): [(i, fn(*vals))]) # XXX check that result is tuple?
def feed(fn):
"""Return a peg that always succeeds, changing the values tuple
from xs to (fn(*xs),). (We're feeding fn with the values.)"""
return label(alter(lambda *vals: (fn(*vals),)),
':%s', _fn_name(fn))
def push(c):
return label(alter(lambda *xs: xs + (c,)),
'push(%r)' % (c,),)
# Some often-useful actions for feed().
def hug(*vals):
"Make one tuple out of any number of arguments."
return vals
def join(*strs):
"Make one string out of any number of string arguments."
return ''.join(strs)
# Alternative: non-regex basic matchers, good for non-string inputs.
def one_that(ok):
"""Return a peg that eats the first element x of the input, if it
exists and if ok(x). It leaves the values tuple unchanged.
(N.B. the input can be a non-string: anything accessible by
index.)"""
def run(s, far, (i, vals)):
try: item = s[i]
except IndexError: return []
return [(_step(far, i+1), vals)] if ok(item) else []
return _Peg(('one_that(%s)', _fn_name(ok)), run)
def one_of(item):
"Return a peg that eats one element equal to the argument."
return label(one_that(lambda x: item == x),
'one_of(%r)', item)
anyone = label(one_that(lambda x: True), 'anyone')
# Non-strings can include nested sequences:
def nest(p):
"Return a peg that eats one item, a sequence that p eats a prefix of."
def run(s, far, (i, vals)):
try: item = s[i]
except IndexError: return []
if not _is_indexable(item): return []
return [(_step(far, i+1), vals1)
for _, vals1 in p.run(item, [0], (0, vals))]
return _Peg(('nest(%r)', p), run)
def _is_indexable(x):
try: x[0]
except TypeError: return False
except KeyError: return False
except IndexError: return True
return True
## (nest(one_of(1)) + one_of(5)).attempt([1, 5])
## (nest(one_of(1)) + one_of(5)).attempt([[1], 5])
#. ()
# Build pegs from a string representation of a grammar.
def Grammar(string):
"""XXX doc comment
Contrived example:
>>> g = Grammar(r"a = 'x'|b. b = !:p /regex/. # comment")(p=fail)
>>> g.a('x')
()
"""
skeletons = _parse_grammar(string)
def bind(**subs): # subs = substitutions
rules = {}
for rule, (_,f) in skeletons:
rules[rule] = label(f(rules, subs), rule)
# XXX warn about unresolved :foo interpolations at this point?
return _Struct(**rules)
return bind
class _Struct(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def _parse_grammar(string):
try:
skeletons = _grammar_grammar(string)
except Unparsable, e:
raise GrammarError("Bad grammar", e.failure)
lhses = [L for L, R in skeletons]
all_refs = set().union(*[refs for L, (refs,_) in skeletons])
undefined = sorted(all_refs - set(lhses))
if undefined:
raise GrammarError("Undefined rules: %s" % ', '.join(undefined))
dups = sorted(L for L in set(lhses) if 1 != lhses.count(L))
if dups:
raise GrammarError("Multiply-defined rules: %s" % ', '.join(dups))
return skeletons
class GrammarError(Exception): pass
_builtins = __builtins__ if isinstance(__builtins__, dict) else __builtins__.__dict__
_default_subs = dict((k, feed(v))
for k, v in _builtins.items() if callable(v))
_default_subs.update(dict(hug=feed(hug), join=feed(join), position=position))
def _make_grammar_grammar():
def mk_rule_ref(name):
return (set([name]),
lambda rules, subs: delay(lambda: rules[name], name))
def constant(peg): return (set(), lambda rules, subs: peg)
def lift(peg_op):
return lambda *lifted: (
set().union(*[refs for refs,_ in lifted]),
lambda rules, subs: peg_op(*[f(rules, subs) for _,f in lifted])
)
unquote = lambda name: (set(), lambda rules, subs: Peg(subs.get(name)
or _default_subs[name]))
mk_literal = lambda string: constant(literal(string))
mk_push_lit = lambda string: constant(push(string))
mk_match = lambda *cs: constant(match(''.join(cs)))
_ = match(r'(?:\s|#[^\n]*\n?)*') # Whitespace and comments
name = match(r'([A-Za-z_]\w*)') +_
word = match(r'(\w+)') +_
regex_char = match(r'(\\.|[^/])')
quoted_char = match(r'\\(.)') | match(r"([^'])")
qstring = "'" + quoted_char.star() + "'" +_ >> join
pe = seclude(delay(lambda:
term + ('|' +_+ pe + lift(either)).maybe()
| lift(lambda: empty)))
term = seclude(delay(lambda:
factor + (term + lift(chain)).maybe()))
factor = seclude(delay(lambda:
'!' +_+ factor + lift(invert)
| primary + ( '*' +_+ lift(star)
| '+' +_+ lift(plus)
| '?' +_+ lift(maybe)
).maybe()))
primary = ('(' +_+ pe + ')' +_
| '[' +_+ pe + ']' +_ >> lift(seclude)
| '{' +_+ pe + '}' +_ >> lift(capture)
| qstring >> mk_literal
| '/' + regex_char.star() + '/' +_ >> mk_match
| ':' +_+ ( word >> unquote
| qstring >> mk_push_lit)
| name >> mk_rule_ref)
rule = seclude(
name + ('=' +_+ pe
| ':' +_+ (pe >> lift(seclude)))
+ '.' +_ + hug)
grammar = _+ rule.plus() + ~anyone
return grammar
_grammar_grammar = _make_grammar_grammar()
# Smoke test: combinators
## empty
#. empty
## fail.attempt('hello')
## empty('hello')
#. ()
## match(r'(x)').attempt('hello')
## match(r'(h)')('hello')
#. ('h',)
## (match(r'(H)') | match('(.)'))('hello')
#. ('h',)
## (match(r'(h)') + match('(.)'))('hello')
#. ('h', 'e')
## (match(r'h(e)') + match(r'(.)'))('hello')
#. ('e', 'l')
## (~match(r'h(e)') + match(r'(.)'))('xhello')
#. ('x',)
## empty.run('', [0], (0, ()))
#. [(0, ())]
## chain(empty, empty)('')
#. ()
## (match(r'(.)') >> hug)('hello')
#. (('h',),)
## match(r'(.)').star()('')
#. ()
## (match(r'(.)').star())('hello')
#. ('h', 'e', 'l', 'l', 'o')
## (match(r'(.)').star() >> join)('hello')
#. ('hello',)
# Example
def make_var(v): return v
def make_lam(v, e): return '(lambda (%s) %s)' % (v, e)
def make_app(e1, e2): return '(%s %s)' % (e1, e2)
def make_let(v, e1, e2): return '(let ((%s %s)) %s)' % (v, e1, e2)
eof = match(r'$')
_ = match(r'\s*')
identifier = match(r'([A-Za-z_]\w*)\s*')
def test1():
V = identifier
E = delay(lambda:
V >> make_var
| '\\' +_+ V + '.' +_+ E >> make_lam
| '(' +_+ E + E + ')' +_ >> make_app)
start = _+ E #+ eof
return lambda s: start(s)[0]
## test1()('x y')
#. 'x'
## test1()(r'\x.x')
#. '(lambda (x) x)'
## test1()('(x x)')
#. '(x x)'
def test2(string):
V = identifier
F = delay(lambda:
V >> make_var
| '\\' +_+ V.plus() + hug + '.' +_+ E >> fold_lam
| '(' +_+ E + ')' +_)
E = F + F.star() >> fold_app
start = _+ E
vals = start.attempt(string)
return vals and vals[0]
def fold_app(f, *fs): return reduce(make_app, fs, f)
def fold_lam(vp, e): return foldr(make_lam, e, vp)
def foldr(f, z, xs):
for x in reversed(xs):
z = f(x, z)
return z
## test2('x')
#. 'x'
## test2('\\x.x')
#. '(lambda (x) x)'
## test2('(x x)')
#. '(x x)'
## test2('hello')
#. 'hello'
## test2(' x')
#. 'x'
## test2('\\x . y ')
#. '(lambda (x) y)'
## test2('((hello world))')
#. '(hello world)'
## test2(' hello ')
#. 'hello'
## test2('hello there hi')
#. '((hello there) hi)'
## test2('a b c d e')
#. '((((a b) c) d) e)'
## test2('')
## test2('x x . y')
#. '(x x)'
## test2('\\.x')
## test2('(when (in the)')
## test2('((when (in the)))')
#. '(when (in the))'
## test2('\\a.a')
#. '(lambda (a) a)'
## test2(' \\hello . (hello)x \t')
#. '(lambda (hello) (hello x))'
## test2('\\M . (\\f . M (f f)) (\\f . M (f f))')
#. '(lambda (M) ((lambda (f) (M (f f))) (lambda (f) (M (f f)))))'
## test2('\\a b.a')
#. '(lambda (a) (lambda (b) a))'
## test2('\\a b c . a b')
#. '(lambda (a) (lambda (b) (lambda (c) (a b))))'
# Smoke test: grammars
def exceptionally(thunk):
try: return thunk()
except Exception, e: return e
## exceptionally(lambda: Grammar(r"a = . b = a. a = .")())
#. GrammarError('Multiply-defined rules: a',)
## exceptionally(lambda: Grammar(r"a = b|c|d. c = .")())
#. GrammarError('Undefined rules: b, d',)
## exceptionally(lambda: Grammar(r"a = ")())
#. GrammarError('Bad grammar', ('a = ', ''))
pushy = Grammar(r"""
main: :'x'.
""")()
## pushy.main('')
#. ('x',)
nums = Grammar(r"""
# This is a comment.
main : nums !/./. # So's this.
nums : (num (',' num)*)?.
num : /([0-9]+)/ :int.
""")()
sum_nums = lambda s: sum(nums.main(s))
## sum_nums('10,30,43')
#. 83
## nums.nums('10,30,43')
#. (10, 30, 43)
## nums.nums('')
#. ()
## nums.num('10,30,43')
#. (10,)
## nums.main('10,30,43')
#. (10, 30, 43)
## nums.main.attempt('10,30,43 xxx')
gsub_grammar = Grammar(r"""
gsub = [:p :replace | /(.)/]*.
""")
def gsub(text, p, replacement):
g = gsub_grammar(p=p, replace=lambda: replacement)
return ''.join(g.gsub(text))
## gsub('hi there WHEEWHEE to you WHEEEE', 'WHEE', 'GLARG')
#. 'hi there GLARGGLARG to you GLARGEE'
|
import game_framework
from game_object import *
class Ball(GameObject):
PPS = 100
def __init__(self, x, y, dx, dy):
super(Ball, self).__init__()
self.x, self.y = x, y
self.dx, self.dy = dx, dy
self.size = 22
self.w, self.h = 22, 22
self.angle = 1.0
self.speed = 3.0
self.image = self.init_image(Ball, 'ball.png', 3, 4)
def update(self):
self.update_frame()
distance = game_framework.frame_time * Ball.PPS * self.speed
dx = distance * math.cos(self.angle)
dy = distance * math.sin(self.angle)
self.x += dx
self.y += dy
def draw(self):
# print(self.frame, self._count)
self.draw_frame()
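# The bounce helpers below treat self.angle as the heading in radians:
# q = self.angle // (math.pi / 2) is the quadrant index (0..3) of that heading,
# and a bounce reflects the heading off a vertical or horizontal surface.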
def bounceLeft(self):
q = self.angle // (math.pi / 2)
ret = False
if q == 0:
self.angle = math.pi - self.angle
ret = True
if q == 3:
a = self.angle
self.angle = 3 * math.pi - self.angle
ret = True
# print('bl,q3', a, '->', self.angle)
return ret
def bounceRight(self):
q = self.angle // (math.pi / 2)
ret = False
if q == 1:
self.angle = math.pi - self.angle
ret = True
if q == 2:
self.angle = 3 * math.pi - self.angle
ret = True
return ret
def bounceUp(self):
# print('bup', self.angle)
q = self.angle // (math.pi / 2)
ret = False
if q == 2 or q == 3:
self.bounceVert()
ret = True
return ret
def bounceDown(self):
q = self.angle // (math.pi / 2)
ret = False
if q == 0 or q == 1:
self.bounceVert()
ret = True
return ret
def bounceVert(self):
a = self.angle
self.angle = 2 * math.pi - self.angle
# print(a, '->', self.angle)
def bounceHorz(self):
c = self.angle // (math.pi)
m = 1 if c == 0 else 3
self.angle = m * math.pi - self.angle
|
import json
import random
import os
'''a=os.path.isfile("arr.json")
if a:
myfile = open("arr.json")
data = myfile.read()
with open("arr.json","r") as f:
data = json.load(f)
length = len(data)
for key in data:
data[key] = key + str(random.uniform(0,20))
with open("arr.json","w") as f:
json.dump(data,f)
else:
print("not find file")
'''
def notPrime(n):
# Despite its name, this returns True for numbers that are NOT composite
# (i.e. primes and 1): L lists every product i*j with 2 <= i, j <= 49,
# which covers all composite numbers up to 100.
L = [i * j for i in range(2, 50) for j in range(2, 50)]
if n in L:
return False
return True
print filter(notPrime,range(1,101))
import math
'''def notPr(n):
if n<=1:
return False
m = int(math.sqrt(n))+1
for i in range(2,m):
if n%i==0:
return False
return True
# Note the indentation level of the return
print filter(notPr,range(2,101))'''
|
#!/usr/bin/env python
import os
import pycurl
import sys
import time
class LCGDM974(object):
"""
Test for https://its.cern.ch/jira/browse/LCGDM-974
"""
def _header(self, header):
splitted = header.split(':', 1)
if len(splitted) == 2:
self.received_headers[splitted[0]] = splitted[1].strip()
return len(header)
def _setupCurl(self):
curl = pycurl.Curl()
curl.setopt(pycurl.SSLCERT, self.ucert)
curl.setopt(pycurl.SSLKEY, self.ukey)
curl.setopt(pycurl.SSL_VERIFYPEER, False)
curl.setopt(pycurl.CAINFO, self.ucert)
curl.setopt(pycurl.CAPATH, self.capath)
curl.setopt(pycurl.FOLLOWLOCATION, 1)
curl.setopt(pycurl.WRITEDATA, sys.stderr)
curl.setopt(pycurl.HEADERFUNCTION, self._header)
return curl
def _fillTestFile(self):
f = open(self.test_file, 'w')
print >>f, "LCGDM974 " * 10
        f.close()
def __init__(self, dpm, home, ucert, ukey, capath):
self.dpm_host = dpm
self.home = home
self.ucert = ucert
self.ukey = ukey
self.capath = capath
self.received_headers = {}
self.curl = self._setupCurl()
self.test_file = '/tmp/LCGDM974'
self._fillTestFile()
def _verifyKeepAlive(self):
# The idea here is: when doing a HEAD/GET on a directory
# without the last slash, DAV implementations will redirect to
# the same file with the slash.
# If Keep-Alive is disabled, that will be two connections
url = "https://%s/%s" % (self.dpm_host, self.home)
self.curl.setopt(pycurl.URL, url)
self.curl.setopt(pycurl.NOBODY, True)
status = self.curl.perform()
if status is not None:
            return (self.curl.getinfo(pycurl.RESPONSE_CODE),
                    "Could not verify Keep-Alive: " + str(status))
nredirects = self.curl.getinfo(pycurl.REDIRECT_COUNT)
nconnects = self.curl.getinfo(pycurl.NUM_CONNECTS)
if nredirects < 1:
return (0, "The server didn't redirect us. It should have.")
if nconnects > 1:
return (0, "Keep-alive seems to be disabled in the server")
return None
def _put(self, url, handle):
self.curl.setopt(pycurl.URL, url)
self.curl.setopt(pycurl.UPLOAD, True)
self.curl.setopt(pycurl.READDATA, handle)
status = self.curl.perform()
if status is None:
return (self.curl.getinfo(pycurl.RESPONSE_CODE), 'Could not PUT')
else:
return (0, status[1])
def _delete(self, url):
self.curl.setopt(pycurl.URL, url)
self.curl.setopt(pycurl.CUSTOMREQUEST, 'DELETE')
status = self.curl.perform()
if status is None:
return (self.curl.getinfo(pycurl.RESPONSE_CODE), 'Could not DELETE')
else:
return (0, status[1])
def __call__(self):
keepAliveStatus = self._verifyKeepAlive()
if keepAliveStatus is not None:
print "[%03d] %s" % keepAliveStatus
return 1
base_url = "https://%s/%s/" % (self.dpm_host, self.home)
file_put = "%s/LCGDM974.%d" % (base_url, time.time())
finalStatus = 0
try:
print "PUT on %s" % file_put
handle = open(self.test_file, 'r')
putStatus = self._put(file_put, handle)
if putStatus[0] != 201:
raise Exception("[%03d] %s" % putStatus)
if 'Location' not in self.received_headers:
raise Exception('Did not get a Location header after the PUT')
locationHeader = self.received_headers['Location']
print "Second PUT on %s" % locationHeader
handle.seek(0)
secondPutStatus = self._put(locationHeader, handle)
if secondPutStatus[0] != 403:
raise Exception('The second PUT did not fail with 403 (got %d)'
% secondPutStatus[0])
print "Second PUT rejected. Test passed!"
except Exception, e:
print str(e)
finalStatus = 1
finally:
print "DELETE %s" % file_put
delStatus = self._delete(file_put)
if delStatus[0] != 204:
print "[%03d] %s" % delStatus
finalStatus = 1
return finalStatus
def check_environ(envs):
for e in envs:
if e not in os.environ:
print "%s must be defined in the environment" % e
sys.exit(1)
if __name__ == '__main__':
check_environ(['DPM_HOST', 'DPNS_HOME', 'X509_USER_PROXY', 'X509_CERT_DIR'])
test = LCGDM974(os.environ['DPM_HOST'], os.environ['DPNS_HOME'],
os.environ['X509_USER_PROXY'], os.environ['X509_USER_PROXY'],
os.environ['X509_CERT_DIR'])
if test() == 0:
print '- TEST PASSED -'
sys.exit(0)
else:
print '- TEST FAILED -'
|
import argparse
from gooey import Gooey
from imclassify import ImClassifier
def try_int(x):
try:
return int(x)
except ValueError:
return x
@Gooey(program_name='imclassify')
def main():
ap = argparse.ArgumentParser()
    ap.add_argument('-l', '--labels', default=['class_1', 'class_2', 'class_3'], nargs='+',
                    help='List of class labels to be used (separated by space).')
ap.add_argument('-t', '--train_input', type=try_int, default='0',
help='Path to input video to capture training data from '
'(can be number to indicate webcam; see cv2.VideoCapture() docs).')
ap.add_argument('-o', '--train_output', default='images',
help='Main dir for training images to be saved to '
'(they will be saved to a subdir named by the class label).')
ap.add_argument('-d', '--feature_db', default='features.hdf5',
help='Path to save HDF5 file of features to.')
ap.add_argument('-m', '--model_output', default='model.pickle',
help='Path to save pickled sklearn model to.')
args = vars(ap.parse_args())
im_classifier = ImClassifier(labels=args['labels'],
images_path=args['train_output'],
features_path=args['feature_db'],
model_path=args['model_output'])
im_classifier.gather_images(video_path=args['train_input'])
im_classifier.extract_features()
im_classifier.train_model(percent_train=1.0)
im_classifier.classify_video(video_path=args['train_input'], output_path='test_output.avi')
if __name__ == '__main__':
main()
|
from flask import Flask
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
import os
project_dir = os.path.dirname(os.path.abspath(__file__))
database_file = "sqlite:///{}".format(os.path.join(project_dir, "db/research.db"))
app = Flask(__name__)
CORS(app)
app.config["SQLALCHEMY_DATABASE_URI"] = database_file
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
app.config['JSON_SORT_KEYS'] = False
db = SQLAlchemy(app)
migrate = Migrate(app, db)
from app.controller.AppController import *
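# A minimal way to serve this app directly for local development. This is a sketch and assumes
# the controller module imported above registers its routes on the shared `app` object; in
# production a WSGI server such as gunicorn would be used instead of the built-in server.
if __name__ == "__main__":
    app.run(debug=True)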
|
"""the user manager for the server."""
import hashlib
from twisted.python.filepath import FilePath
class UserManager(object):
"""
The UserManager manages the user paths.
:param path: path where the files will be stored.
:type path: str or FilePath
"""
def __init__(self, path):
if isinstance(path, FilePath):
self.path = path
else:
self.path = FilePath(path)
def get_userid(self, username):
"""
Return the userid for the username.
:param username: the username of the user
:type username: str
:return: the userid of the user
:rtype: str
"""
return "u_" + hashlib.sha256(username).hexdigest()
def get_user_path(self, userid):
"""
Return the path of the user.
:param userid: the id of the user
:type userid: str
:return: the path of the user
:rtype: FilePath
"""
return self.path.child(userid)
def user_exists(self, userid):
"""
        Check if the user exists.
:param userid: the userid of the user
:type userid: str
:return: whether the user exists or not
:rtype: bool
"""
return self.get_user_path(userid).exists()
def user_is_setup(self, userid):
"""
Check if the user account is setup.
:param userid: the userid of the user
:type userid: str
:return: whether the user has been setup or not
:rtype: bool
"""
up = self.get_user_path(userid)
abe = up.child("authblock.bin").exists()
hfe = up.child("hash.bin").exists()
return (abe and hfe)
def get_file_path(self, userid, filename):
"""
Return the path of the file of the user.
:param userid: userid of the user
:type userid: str
:param filename: name of the file
:type filename: str
:return: the path of the file.
:rtype: FilePath
"""
up = self.get_user_path(userid)
fp = up.child(filename)
return fp
def get_authblock_path(self, userid):
"""
Return the path of the authblock file of the user.
:param userid: userid of the user
:type userid: str
:return: the path of the authblock file.
:rtype: FilePath
"""
return self.get_file_path(userid, "authblock.bin")
def get_hash_path(self, userid):
"""
Return the path of the hash file of the user.
:param userid: userid of the user
:type userid: str
:return: the path of the hash file.
:rtype: FilePath
"""
return self.get_file_path(userid, "hash.bin")
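# A minimal usage sketch, assuming a scratch directory such as /tmp/um-demo is acceptable for
# local experimentation. Nothing is created on disk here; only paths are derived and checked.
if __name__ == "__main__":
    manager = UserManager("/tmp/um-demo")
    userid = manager.get_userid("alice")
    print(userid)
    print(manager.user_exists(userid))
    print(manager.get_authblock_path(userid).path)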
|
from aiogram.utils.callback_data import CallbackData
from .mixins import ActionsMixin
from app.utils.singleton import singleton_class
@singleton_class
class MainMenu(ActionsMixin):
"""This class consist of text buttons of main menu"""
def __init__(self):
self.parent = None
self.data = (
('englishword', 'English word'),
('russianword', 'Russian word'),
('myvocabulary', 'My vocabulary'),
)
self.prefix = 'MainMenu'
self.callback_data = CallbackData(self.prefix, 'action')
@singleton_class
class EnglishWordMenu(ActionsMixin):
"""This class consist of text buttons of englishword menu"""
def __init__(self):
self.parent = MainMenu()
self.data = (
('back', 'back'),
)
self.prefix = 'EnglishWordMenu'
self.callback_data = CallbackData(self.prefix, 'action')
@singleton_class
class EnglishWordMenuEnterEnglishWord(ActionsMixin):
"""This class consist of text buttons of englishword menu when writing english word"""
def __init__(self):
self.parent = MainMenu()
self.data = (
('back', 'back'),
)
self.prefix = 'EnglishWordMenuEnterEnglishWord'
self.callback_data = CallbackData(self.prefix, 'action')
@singleton_class
class EnglishWordMenuEnterRussianWord(ActionsMixin):
"""This class consist of text buttons of englishword menu when writing russian word"""
def __init__(self):
self.parent = EnglishWordMenu()
self.data = (
('back', 'back'),
('ready', 'ready'),
)
self.prefix = 'EnglishWordMenuEnterRussianWord'
self.callback_data = CallbackData(self.prefix, 'action')
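# A rough usage sketch, kept commented out (assumption: aiogram 2.x, where CallbackData.new()
# builds the callback payload string from the declared prefix and parts):
# menu = MainMenu()
# payload = menu.callback_data.new(action='englishword')  # roughly "MainMenu:englishword"
# data = menu.callback_data.parse(payload)                # dict containing the 'action' value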
|
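# On LeetCode, TreeNode and Optional are provided by the judge environment; the stub below is
# an assumption added only so this snippet can be run stand-alone.
from typing import Optional
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right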
class Solution:
def minDepth(self, root: Optional[TreeNode]) -> int:
def recur(cur):
if not cur.left and not cur.right:
return 1
mini = float('inf')
if cur.left:
mini = min(mini, 1+recur(cur.left))
if cur.right:
mini = min(mini, 1+recur(cur.right))
return mini
return recur(root) if root else 0
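# Quick local check of the recursion above: the tree 3 -> (9, 20) has minimum depth 2.
if __name__ == "__main__":
    root = TreeNode(3, TreeNode(9), TreeNode(20))
    print(Solution().minDepth(root))  # expected output: 2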
|
#!/usr/bin/env python
import json
import hashlib
import re
try:
from Tkinter import * # PYTHON 2
import tkFont
from urllib2 import urlopen
except ImportError:
from tkinter import * # PYTHON 3
import tkinter.font as tkFont
from urllib.request import urlopen
class Window(Frame):
'''An api GUI template that is modified for the Marvel api. Use your own keys, these are fake.'''
def __init__(self, master = None):
Frame.__init__(self, master)
self.master = master
self.init_window()
def init_window(self):
self.master.title("Api Checker")
self.pack(fill=BOTH, expand=1)
# SET FONT FOR TEXT HEADERS
verd12 = tkFont.Font(family='verdana', size=12, weight='bold')
helv10 = tkFont.Font(family='Helvetica', size=10)
# ADD MENU
menu = Menu(self.master, bd=0.5)
self.master.config(menu=menu)
file = Menu(menu, tearoff=0)
file.add_command(label='Exit', command=self.client_exit)
menu.add_cascade(label='Exit', menu=file)
help = Menu(menu, tearoff=0)
help.add_command(label='Info', command=self.client_exit)
help.add_command(label='Set up', command=self.client_exit)
menu.add_cascade(label='Help', menu=help)
# SET UP FRAMES
lf = Frame(root)
rf = Frame(root)
lf.pack(side=LEFT, expand=True)
rf.pack(padx=(18,18), side=LEFT, expand=True)
rf.pack(pady=(18,12))
# TOP LEFT PANEL LISTBOX
self.lb_tasks = Listbox(lf, bg="white", font=helv10, width=65, height=16)
self.lb_tasks.grid(row=0, column=0)
# BOTTOM LEFT PANEL LISTBOX
self.lb_tasksb = Listbox(lf, bg="white", font=helv10, width=65, height=17)
self.lb_tasksb.grid(row=1, column=0)
# LEFT PANEL TOP SCROLLBAR
self.sb1 = Scrollbar(lf)
self.sb1.grid(row=0,column=1, ipady=164, sticky=N)
self.lb_tasks.configure(yscrollcommand=self.sb1.set)
self.sb1.configure(command=self.lb_tasks.yview)
# LEFT PANEL BOTTOM SCROLLBAR
self.sb2 = Scrollbar(lf)
self.sb2.grid(row=1,column=1, ipady=174, sticky=N)
self.lb_tasksb.configure(yscrollcommand=self.sb2.set)
self.sb2.configure(command=self.lb_tasksb.yview)
# TEXT HEADER
labelT = Label(rf, text="APPLICATION/JSON", font=verd12, fg='blue').pack(pady=(25,15)) #justify=CENTER,
# ENTRY LABELS AND FIELDS
label1 = Label(rf, text="Address").pack(fill=X)
self.entry_1 = Entry(rf) #Never add stuff on the end of here
self.entry_1.pack(fill=X)
label2 = Label(rf, text="API-Key").pack(fill=X)
self.entry_2 = Entry(rf)
self.entry_2.pack(fill=X)
label3 = Label(rf, text="Key-Hash").pack(fill=X)
self.entry_3 = Entry(rf)
self.entry_3.pack(fill=X, pady=(0,30))
# BUTTONS
subbutton = Button(rf, text="Submit", width=43, padx=2, pady=6, command=self.jsonify)
subbutton.pack(fill=X)
savebutton = Button(rf, text="Quit", padx=2, pady=6, command=self.client_exit)
savebutton.pack(fill=X)
# URL TEXT
labelB = Label(rf, text="CHECK FOR OTHER URLS IN API", font=verd12, fg='blue').pack(pady=(45,20))
# BUTTONS
urlbutton = Button(rf, text="URL-Check", padx=2, pady=6, command=self.findurls)
urlbutton.pack(fill=X)
saveurlbutton = Button(rf, text="Submit URL", padx=2, pady=6, command=self.show_url)
saveurlbutton.pack(fill=X)
# RIGHT PANEL BOTTOM LISTBOX
self.lb_tasksbt = Listbox(rf, bg="white", font=helv10, width=64, height=9, selectmode=SINGLE)
self.lb_tasksbt.pack(side=LEFT, fill=X)
# RIGHT PANEL BOTTOM SCROLLBAR
sb3 = Scrollbar(rf)
sb3.pack(side=RIGHT, ipady=86)
self.lb_tasksbt.configure(yscrollcommand=sb3.set)
sb3.configure(command=self.lb_tasksbt.yview)
# GET API AND WRITE TO FILE
def jsonify(self):
try:
addr = self.entry_1.get()
api_key = self.entry_2.get()
auth_hash = self.entry_3.get()
# CONCAT FIELD STRING ENTRIES
url = addr + '?ts=1&apikey=' + api_key + '&hash=' + auth_hash
with urlopen(url) as u:
source = u.read()
data = json.loads(source.decode('utf-8'))
datb = json.dumps(data, indent=2)
with open('last_api.json', 'w') as data:
data.write(datb)
self.jsonfile()
except ValueError as e:
print(e)
# SHOW IN TOP LEFT WINDOW
def jsonfile(self):
with open('last_api.json') as fo:
for line in fo:
self.write_window_a(str(line) + '\n')
# FIND URLS AND SHOW IN BOTTOM RIGHT WINDOW
def findurls(self):
with open('last_api.json') as data:
datu = data.read()
datbu = re.split(r'[,;{}]', datu)
for line in datbu:
if "http" in line:
x = re.sub(r'.*http', 'http', line).strip('"')
self.write_window_b(str(x))
if "ftp" in line:
v = re.sub(r'.*ftp', 'ftp', line).strip('"')
self.write_window_b(v)
# QUIT PROGRAM
def client_exit(self):
exit()
# WRITE TO TOP LEFT WINDOW
def write_window_a(self, text):
self.lb_tasks.insert(END,str(text))
self.update_idletasks()
# WRITE TO BOTTOM RIGHT WINDOW
def write_window_b(self, text):
self.lb_tasksbt.insert(END,str(text))
self.update_idletasks()
# WRITE TO BOTTOM LEFT WINDOW
def write_window_c(self, text):
self.lb_tasksb.insert(END,str(text))
self.update_idletasks()
# SELECT URL FROM SEARCH AND WRITE TO FILE AND SHOW
def show_url(self):
try:
url = self.lb_tasksbt.get(self.lb_tasksbt.curselection())
api_key = self.entry_2.get()
auth_hash = self.entry_3.get()
url = url + '?ts=1&apikey=' + api_key + '&hash=' + auth_hash
with urlopen(url) as u:
source = u.read()
data = json.loads(source.decode('utf-8'))
datb = json.dumps(data, indent=2)
with open('url_last_api.json', 'w') as datan:
datan.write(datb)
with open('url_last_api.json') as fo:
for line in fo:
self.write_window_c(str(line) + '\n')
except ValueError as e:
print(e)
root = Tk()
#root.configure(background='black')
root.option_add('*font', ('verdana', 10))
#root.geometry("1236x720")
#root.resizable(0, 0)
app = Window(root)
root.mainloop()
|
import unittest
import subprocess
import os
import diskspace
from diskspace import subprocess_check_output
from diskspace import bytes_to_readable
from diskspace import show_space_list
class TestDiskspace(unittest.TestCase):
def setUp(self):
        self.command = 'du -d 1 '
self.abs_directory = os.path.abspath('.')
self.command += self.abs_directory
self.path = 'home/teste'
def test_subprocess_check_output(self):
result = diskspace.subprocess_check_output(self.command)
results = subprocess_check_output('du -d 1 {}'.format(self.abs_directory))
self.assertEqual(result, results)
def test_bytes_to_readable(self):
blocks = 100
result = "50.00Kb"
self.assertEqual(bytes_to_readable(blocks), result)
def test_show_space_list(self):
self.assertIsNone(show_space_list(directory='.', depth=-1, order=True))
suite = unittest.TestLoader().loadTestsFromTestCase(TestDiskspace)
unittest.TextTestRunner(verbosity=2).run(suite)
#if __name__ == '__main__':
# unittest.main()
|
import os
import json
from database.database import DataBase, DBLoader
from prepare.structure import Project
def load_local_json(path, fn):
    fp = os.path.join(path, fn + ".data")
    data = None
    if os.path.isfile(fp):
        with open(fp, "r") as f:
            data = json.load(f)
    else:
        print("file path {} is wrong".format(fp))
    return data
def load_local_data(path, project):
print("load local data")
file_list = ["color_abs_coverage", "color_code", "color_diff_coverage", "time_data", "animation_dict"]
color_abs_coverage = load_local_json(path, file_list[0])
color_code = load_local_json(path, file_list[1])
color_diff_coverage = load_local_json(path, file_list[2])
action_data = load_local_json(path, file_list[3])
animation_dict = load_local_json(path, file_list[4])
project.load_color_code(color_code)
project.load_action_data(action_data)
project.load_animation_dict(animation_dict)
project.load_color_diff_coverage(color_diff_coverage)
project.load_color_abs_coverage(color_abs_coverage)
def data_preparation_main(project_id):
db_address = "mysql.minestoryboard.com"
database = DataBase(db_address, "minestory", "2870", "minestory")
database.db_connect()
dl = DBLoader(database, project_id)
project_data = Project(dl, project_id)
# TODO: change local data load to mysql
path = "../../TCL_MineTool/UnityProject/MineStudioPrototype/Assets/StreamingAssets/camera_data_58"
if not os.path.isdir(path):
print("local path is wrong")
else:
load_local_data(path, project_data)
print(project_data.project_id)
return project_data
if __name__ == "__main__":
data_preparation_main(32)
|
def add(*num):
total = 0
for n in num:
total += n
print(total)
add(2,3,4,5,6)
def calculate(**kwargs):
print(type(kwargs))
calculate(add=3, multiply=5)
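# A small follow-up sketch: **kwargs arrives as a plain dict, so the collected keyword
# arguments can be iterated like any other mapping.
def calculate_verbose(**kwargs):
    for name, value in kwargs.items():
        print(name, "->", value)
calculate_verbose(add=3, multiply=5)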
|
from itertools import groupby
def encode(s):
return ''.join(k + str(sum(1 for _ in g)) for k, g in groupby(s))
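# A quick check of the convention used above: every maximal run becomes "<char><count>",
# including runs of length 1.
if __name__ == "__main__":
    print(encode("aaabcc"))  # -> a3b1c2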
|
from rest_framework import serializers
from announcements.models import Announcement
from datetime import datetime
class AnnouncementSerializer(serializers.ModelSerializer):
    is_new = serializers.SerializerMethodField()
    def get_is_new(self, obj):
        # obj is already the Announcement instance, so no extra query is needed
        return obj.publish_datetime.date() == datetime.today().date()
class Meta:
model = Announcement
fields=(
'id',
'title',
'thumbnail',
'body',
'publish_datetime',
'is_new',
)
|
from tweepy import OAuthHandler
from tweepy import API
from tweepy import Cursor
from datetime import datetime, date, time, timedelta
from collections import Counter
import sys
consumer_key=""
consumer_secret=""
access_token=""
access_token_secret=""
consumer_key = "P5wTozEUuNOAJCXMajGnRcDs2"
consumer_secret = "RB7p2JVEZxbodmRT3eaA32caonxpo5fS5DOKXcoTxEKJelTZys"
access_token = "997065391644917761-mSZZ6gkTdLEOdDSOAFfu7clvJO4vQPq"
access_token_secret = "MoAMNPZeAmYMwtjaopDrAs1njCwmx9pdCmC7JBP0A1uxF"
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
auth_api = API(auth)
account_list = []
if (len(sys.argv) > 1):
account_list = sys.argv[1:]
else:
print("Please provide a list of usernames at the command line.")
sys.exit(0)
if len(account_list) > 0:
for target in account_list:
print("Getting data for " + target)
item = auth_api.get_user(target)
print("name: " + item.name)
print("screen_name: " + item.screen_name)
print("description: " + item.description)
print("statuses_count: " + str(item.statuses_count))
print("friends_count: " + str(item.friends_count))
print("followers_count: " + str(item.followers_count))
tweets = item.statuses_count
account_created_date = item.created_at
delta = datetime.utcnow() - account_created_date
account_age_days = delta.days
print("Account age (in days): " + str(account_age_days))
if account_age_days > 0:
print("Average tweets per day: " + "%.2f"%(float(tweets)/float(account_age_days)))
hashtags = []
mentions = []
tweet_count = 0
end_date = datetime.utcnow() - timedelta(days=30)
for status in Cursor(auth_api.user_timeline, id=target).items():
tweet_count += 1
if hasattr(status, "entities"):
entities = status.entities
if "hashtags" in entities:
for ent in entities["hashtags"]:
if ent is not None:
if "text" in ent:
hashtag = ent["text"]
if hashtag is not None:
hashtags.append(hashtag)
if "user_mentions" in entities:
for ent in entities["user_mentions"]:
if ent is not None:
if "screen_name" in ent:
name = ent["screen_name"]
if name is not None:
mentions.append(name)
if status.created_at < end_date:
break
        print()
print("Most mentioned Twitter users:")
for item, count in Counter(mentions).most_common(10):
print(item + "\t" + str(count))
        print()
print("Most used hashtags:")
for item, count in Counter(hashtags).most_common(10):
print(item + "\t" + str(count))
        print()
print ("All done. Processed " + str(tweet_count) + " tweets.")
        print()
|
from configparser import ConfigParser
parser = ConfigParser()
CONF='/Users/edmelnik/Library/CloudStorage/iCloud Drive/Documents/GitHub/inserttest/inserttest.config'
parser.read(CONF)
insertnum = parser['inserttest_config']['insert_number']
if insertnum == '6':
print(insertnum)
|
from nfd_router_client import RouterClient
import subprocess
import time
import socket
import json
class FirewallEM(object):
def __init__(self, logger, vnfm_host, vnfm_port):
#TODO: keep trace of configuration
self.configuration = {"append-drop":[]}
self.logger = logger
print ("creating vnf client with: %s, %s", vnfm_host, vnfm_port)
def enforce_initial_configuration(self, config):
print ("enforcing firewall initial configuration")
print (str(config))
str_command = "/home/NDN/ndnfirewall/bin/ndnfirewall "+config["next_router"]+" &"
subprocess.call(args=str_command, shell=True)
time.sleep(2)
configuration = {}
configuration["post"] = {}
configuration["post"]["mode"] = [config["firewall_rules"]["mode"]]
firewall_rules = config["firewall_rules"]["rules"]
if len(firewall_rules) > 0:
for rule in firewall_rules:
configuration["post"][rule["action"]] = rule["prefix"]
json_config = json.dumps(configuration)
UDP_IP = "127.0.0.1"
UDP_PORT = 6362
COMMAND = json_config
print ("UDP target IP: "+UDP_IP)
print ("UDP target port: "+str(UDP_PORT))
print ("message: "+str(COMMAND))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.sendto(COMMAND.encode("utf-8"), (UDP_IP, UDP_PORT))
def update_configuration(self, new_configuration):
#print ("*** enforcing firewall new configuration ***")
configuration = {}
configuration["post"] = {}
configuration["post"]["append-drop"] = new_configuration
print (configuration)
json_config = json.dumps(configuration)
UDP_IP = "127.0.0.1"
UDP_PORT = 6362
COMMAND = json_config
#print ("UDP target IP: "+UDP_IP)
#print ("UDP target port: "+str(UDP_PORT))
#print ("message: "+str(COMMAND))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.sendto(COMMAND.encode("utf-8"), (UDP_IP, UDP_PORT))
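# A minimal local receiver for manual testing, kept commented out. It assumes the real
# ndnfirewall process is not running, since that binary normally listens on 127.0.0.1:6362
# and consumes the JSON commands sent above.
# recv_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# recv_sock.bind(("127.0.0.1", 6362))
# payload, addr = recv_sock.recvfrom(65535)
# print(json.loads(payload.decode("utf-8")))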
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure msvs_large_pdb works correctly.
"""
import TestGyp
import struct
import sys
if sys.platform == 'win32':
print "This test is currently disabled: https://crbug.com/483696."
sys.exit(0)
CHDIR = 'large-pdb'
def CheckImageAndPdb(test, image_basename, expected_page_size,
pdb_basename=None):
if not pdb_basename:
pdb_basename = image_basename + '.pdb'
test.built_file_must_exist(image_basename, chdir=CHDIR)
test.built_file_must_exist(pdb_basename, chdir=CHDIR)
# We expect the PDB to have the given page size. For full details of the
# header look here: https://code.google.com/p/pdbparser/wiki/MSF_Format
# We read the little-endian 4-byte unsigned integer at position 32 of the
# file.
pdb_path = test.built_file_path(pdb_basename, chdir=CHDIR)
pdb_file = open(pdb_path, 'rb')
pdb_file.seek(32, 0)
page_size = struct.unpack('<I', pdb_file.read(4))[0]
if page_size != expected_page_size:
print "Expected page size of %d, got %d for PDB file `%s'." % (
expected_page_size, page_size, pdb_path)
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
test.run_gyp('large-pdb.gyp', chdir=CHDIR)
test.build('large-pdb.gyp', 'large_pdb_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'large_pdb_exe.exe', 4096)
test.build('large-pdb.gyp', 'small_pdb_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'small_pdb_exe.exe', 1024)
test.build('large-pdb.gyp', 'large_pdb_dll', chdir=CHDIR)
CheckImageAndPdb(test, 'large_pdb_dll.dll', 4096)
test.build('large-pdb.gyp', 'small_pdb_dll', chdir=CHDIR)
CheckImageAndPdb(test, 'small_pdb_dll.dll', 1024)
test.build('large-pdb.gyp', 'large_pdb_implicit_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'large_pdb_implicit_exe.exe', 4096)
# This target has a different PDB name because it uses an
# 'msvs_large_pdb_path' variable.
test.build('large-pdb.gyp', 'large_pdb_variable_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'large_pdb_variable_exe.exe', 4096,
pdb_basename='foo.pdb')
# This target has a different output name because it uses 'product_name'.
test.build('large-pdb.gyp', 'large_pdb_product_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'bar.exe', 4096)
test.pass_test()
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
from dataclasses import dataclass
from enum import Enum
from typing import Any, Optional, Union
from pants.backend.project_info.peek import _PeekJsonEncoder
from pants.backend.python.dependency_inference.module_mapper import ResolveName
from pants.backend.python.dependency_inference.parse_python_dependencies import (
ParsedPythonDependencies,
ParsedPythonImportInfo,
)
from pants.backend.python.dependency_inference.rules import (
ImportResolveResult,
PythonImportDependenciesInferenceFieldSet,
ResolvedParsedPythonDependencies,
ResolvedParsedPythonDependenciesRequest,
UnownedImportsPossibleOwners,
UnownedImportsPossibleOwnersRequest,
_collect_imports_info,
_exec_parse_deps,
_find_other_owners_for_unowned_imports,
import_rules,
)
from pants.backend.python.goals.run_python_source import PythonSourceFieldSet
from pants.backend.python.subsystems.setup import PythonSetup
from pants.build_graph.address import Address
from pants.engine.console import Console
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.rules import collect_rules, goal_rule, rule
from pants.engine.target import Targets
from pants.option.option_types import EnumOption
from pants.util.strutil import softwrap
class AnalysisFlavor(Enum):
raw_dependency_inference = "raw_dependency_inference"
dependency_inference = "dependency_inference"
class DumpPythonSourceAnalysisSubsystem(GoalSubsystem):
name = "python-dump-source-analysis"
help = "Dump source analysis for python_source targets."
flavor = EnumOption(
"--analysis-flavor",
default=AnalysisFlavor.dependency_inference,
help=softwrap(
f"""\
The type of information that should be returned.\n
* `{AnalysisFlavor.dependency_inference.value}`: The results of dependency inference, for every detected import in every file.\n
* `{AnalysisFlavor.raw_dependency_inference.value}`: The raw intermediate results of the dependency inference process,
at every stage they're available.
Potentially useful for debugging the dependency inference process.\n
"""
),
)
class DumpPythonSourceAnalysis(Goal):
subsystem_cls = DumpPythonSourceAnalysisSubsystem
environment_behavior = Goal.EnvironmentBehavior.LOCAL_ONLY # TODO(#17129) — Migrate this.
@dataclass(frozen=True)
class PythonSourceAnalysis:
"""Information on the inferred imports for a Python file, including all raw intermediate
results."""
fs: PythonImportDependenciesInferenceFieldSet
identified: ParsedPythonDependencies
resolved: ResolvedParsedPythonDependencies
possible_owners: UnownedImportsPossibleOwners
@rule
async def dump_python_source_analysis_single(
fs: PythonImportDependenciesInferenceFieldSet,
python_setup: PythonSetup,
) -> PythonSourceAnalysis:
"""Infer the dependencies for a single python fieldset, keeping all the intermediate results."""
parsed_dependencies = await _exec_parse_deps(fs, python_setup)
resolve = fs.resolve.normalized_value(python_setup)
resolved_dependencies = await Get(
ResolvedParsedPythonDependencies,
ResolvedParsedPythonDependenciesRequest(fs, parsed_dependencies, resolve),
)
import_deps, unowned_imports = _collect_imports_info(resolved_dependencies.resolve_results)
imports_to_other_owners = await _find_other_owners_for_unowned_imports(
UnownedImportsPossibleOwnersRequest(unowned_imports, resolve),
)
return PythonSourceAnalysis(
fs, parsed_dependencies, resolved_dependencies, imports_to_other_owners
)
@dataclass(frozen=True)
class ImportAnalysis:
"""Information on the inferred imports for a Python file."""
name: str
reference: Union[ParsedPythonImportInfo, str]
resolved: ImportResolveResult
possible_resolve: Optional[list[tuple[Address, ResolveName]]]
@dataclass(frozen=True)
class CollectedImportAnalysis:
"""Collected information on all Python files."""
imports: list[ImportAnalysis]
assets: list[ImportAnalysis]
def collect_analysis(raw: PythonSourceAnalysis) -> CollectedImportAnalysis:
"""Collect raw analysis and present it in a helpful per-import format."""
imports = []
resolved_results = raw.resolved.resolve_results
for name, info in raw.identified.imports.items():
possible_resolve = raw.possible_owners.value.get(name)
imports.append(
ImportAnalysis(
name=name,
reference=info,
resolved=resolved_results[name],
possible_resolve=possible_resolve,
)
)
assets = []
resolved_assets = raw.resolved.assets
for name in raw.identified.assets:
possible_resolve = raw.possible_owners.value.get(name)
assets.append(
ImportAnalysis(
name=name,
reference=name, # currently assets don't keep track of their line numbers
resolved=resolved_assets[name],
possible_resolve=possible_resolve,
)
)
return CollectedImportAnalysis(imports, assets)
@goal_rule
async def dump_python_source_analysis(
request: DumpPythonSourceAnalysisSubsystem,
targets: Targets,
console: Console,
) -> DumpPythonSourceAnalysis:
source_field_sets = [
PythonImportDependenciesInferenceFieldSet.create(tgt)
for tgt in targets
if PythonSourceFieldSet.is_applicable(tgt)
]
source_analysis = await MultiGet(
Get(
PythonSourceAnalysis,
PythonImportDependenciesInferenceFieldSet,
fs,
)
for fs in source_field_sets
)
output: Any
if request.flavor == AnalysisFlavor.raw_dependency_inference:
output = source_analysis
else:
output = {str(a.fs.address): collect_analysis(a) for a in source_analysis}
console.print_stdout(json.dumps(output, cls=_PeekJsonEncoder))
return DumpPythonSourceAnalysis(exit_code=0)
def rules():
return [
*import_rules(),
*collect_rules(),
]
|
from user_input import prompt
def intro():
print("Welcome to Shmucksburg: A knightless town in a dangerous world.")
prompt("")
print("Your name is Cecil. Cecil Farmer.\n")
prompt("You're a farmer.\n")
prompt("You leave your cabin one morning and walk to the townsquare. \
You can hear the beautiful sound of birds chirping.\n")
prompt("\ttweet!\a")
prompt("\ttweet tweet!\a\a")
prompt("\ttweet tweet tweet!\a\a\a\n")
print("You see your fellow villagers standing in the townsquare, \
mourning, as they've grown accustomed to doing:\n")
prompt("\"Oh, that the town would be safe! \
Oh, that our goods would be returned! Oh, that the robbers would be put to \
justice! Oh, that we would be granted federal funding for our municipal \
improvements!\n \
\"But who? Who will be our knight!?\"\n")
prompt("You silently agree. A hero is needed! In the past, Shmucksburg \
has been able to defend itself because of its armory. But since that was \
robbed last year, they've had little luck defending the village. If only \
someone could find and retake the contents of your armory....\n")
prompt("You return home. You go into your barn to begin your work. \
As you reach for a shovel, you find an old mask from your childhood.\n")
prompt("\"Why this makes me look twice as dapper and three times as \
valiant!\"\n")
prompt("'Tis fate, perhaps, but the mask inspires you.\n")
prompt("\"A hero needs two things: heart, and a sweet mask! \
Now that I have the latter, I can be our village knight!\"\n")
name = ""
while name == "":
name = prompt("\tWhat shall you call yourself, sir knight?\n").title()
catchphrase = ""
while catchphrase == "":
catchphrase = prompt("\tWhat shall your battle cry be?\n")
prompt("\"When I wear this mask, I shall be known as " + name + "! \
And when I march into battle, I will shout, '" + catchphrase + "!'\"\n")
prompt("After some thought, you decide a hero may need some other things \
as well, such as weapons and armor and perhaps a shield. You head to \
the townsquare to pawn your gold ring and buy some supplies.")
prompt("\n\n\t\t\tYOUR JOURNEY BEGINS.\n\n")
intro_data = (name, catchphrase)
return intro_data
if __name__ == "__main__":
print("This is a module for 'Oh Great Knight'")
prompt("Press enter to exit.")
|
#!/usr/bin/env python
'''
Test_sniper_logic.py
'''
#############
# IMPORTS #
#############
# standard python packages
import copy, inspect, logging, os, shutil, sqlite3, sys, time, unittest
# ------------------------------------------------------ #
# import sibling packages HERE!!!
if not os.path.abspath( __file__ + "/../../src" ) in sys.path :
sys.path.append( os.path.abspath( __file__ + "/../../src" ) )
from solvers import sniper_logic
# orik library
if not os.path.abspath( __file__ + "/../../lib/orik/src" ) in sys.path :
sys.path.append( os.path.abspath( __file__ + "/../../lib/orik/src" ) )
from derivation import ProvTree
# iapyx library
if not os.path.abspath( __file__ + "/../../lib/orik/lib/iapyx/src" ) in sys.path :
sys.path.append( os.path.abspath( __file__ + "/../../lib/orik/lib/iapyx/src" ) )
from dedt import dedt
from evaluators import c4_evaluator
from utils import globalCounters, tools
# ------------------------------------------------------ #
#######################
# TEST SNIPER LOGIC #
#######################
class Test_sniper_logic( unittest.TestCase ) :
logging.basicConfig( format='%(levelname)s:%(message)s', level=logging.DEBUG )
#logging.basicConfig( format='%(levelname)s:%(message)s', level=logging.INFO )
#logging.basicConfig( format='%(levelname)s:%(message)s', level=logging.WARNING )
PRINT_STOP = False
#########################
# DO ABSORPTION LAW 4 #
#########################
def test_do_absorption_law_4( self ) :
fmla = "(A&(B|A))|(A|B)"
expected_fmla = "A|(A|B)"
self.assertEqual( sniper_logic.do_absorption_law( fmla ), expected_fmla )
#########################
# DO ABSORPTION LAW 3 #
#########################
def test_do_absorption_law_3( self ) :
fmla = "(A&(B|A))|(A&B)"
expected_fmla = "A"
self.assertEqual( sniper_logic.do_absorption_law( fmla ), expected_fmla )
#########################
# DO ABSORPTION LAW 2 #
#########################
def test_do_absorption_law_2( self ) :
fmla = "A|(A&B)"
expected_fmla = "A"
self.assertEqual( sniper_logic.do_absorption_law( fmla ), expected_fmla )
#########################
# DO ABSORPTION LAW 1 #
#########################
def test_do_absorption_law_1( self ) :
fmla = "A|(B&A)"
expected_fmla = "A"
self.assertEqual( sniper_logic.do_absorption_law( fmla ), expected_fmla )
#########################
# DO IDEMPOTENT LAW 4 #
#########################
# demonstrates deficiency in idempotence simplification wrt identical expressions
# around | and & operators.
@unittest.skip( "need to expand idempotence simplification to expressions." )
def test_do_idempotent_law_4( self ) :
fmla = "(((clock_B)|(clock_B&clock_B))&((clock_A)|clock_A|(clock_A)))"
fmla = fmla + "|" + fmla
expected_fmla = "(clock_B&clock_A)"
self.assertEqual( sniper_logic.do_idempotent_law( fmla ), expected_fmla )
#########################
# DO IDEMPOTENT LAW 3 #
#########################
def test_do_idempotent_law_3( self ) :
fmla = "((A&A)&A)|(A|A|A)|(A&B)"
expected_fmla = "A|(A&B)"
self.assertEqual( sniper_logic.do_idempotent_law( fmla ), expected_fmla )
#########################
# DO IDEMPOTENT LAW 2 #
#########################
def test_do_idempotent_law_2( self ) :
fmla = "(((clock_B)|(clock_B&clock_B&clock_B))&((clock_B)|clock_B|(clock_B)))"
expected_fmla = "clock_B"
self.assertEqual( sniper_logic.do_idempotent_law( fmla ), expected_fmla )
#########################
# DO IDEMPOTENT LAW 1 #
#########################
def test_do_idempotent_law_1( self ) :
fmla = "(((clock_B)|(clock_B&clock_B))&((clock_A)|clock_A|(clock_A)))"
expected_fmla = "(clock_B&clock_A)"
self.assertEqual( sniper_logic.do_idempotent_law( fmla ), expected_fmla )
if __name__ == "__main__":
unittest.main()
#########
# EOF #
#########
|
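# Expected standard input, inferred from the code below:
#   line 1: a JSON object mapping item names to prices, e.g. {"pan": 2, "leche": 3}
#   line 2: a space-separated list of desired items, e.g. pan leche queso
# The script prints the total price of the items found in the dictionary, then the items themselves.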
import json
diccionario=input()
miDiccionario=json.loads(diccionario)
lista=input().split()
listaComprables=[]
#print(miDiccionario)
suma=0
for i in lista:
if i in miDiccionario.keys():
listaComprables.append(i)
suma+=miDiccionario[i]
print(suma)
for j in listaComprables:
print(j,end=" ")
print("")
|
import functools
import json
import os
import random
import shutil
from abc import ABC, abstractmethod
from glob import glob
from pathlib import Path
from typing import Callable, cast, List, Optional, Tuple, Union
import numpy as np
from PIL import Image
from .utils import _read_pfm, download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
T1 = Tuple[Image.Image, Image.Image, Optional[np.ndarray], np.ndarray]
T2 = Tuple[Image.Image, Image.Image, Optional[np.ndarray]]
__all__ = ()
_read_pfm_file = functools.partial(_read_pfm, slice_channels=1)
class StereoMatchingDataset(ABC, VisionDataset):
"""Base interface for Stereo matching datasets"""
_has_built_in_disparity_mask = False
def __init__(self, root: str, transforms: Optional[Callable] = None) -> None:
"""
Args:
root(str): Root directory of the dataset.
transforms(callable, optional): A function/transform that takes in Tuples of
(images, disparities, valid_masks) and returns a transformed version of each of them.
images is a Tuple of (``PIL.Image``, ``PIL.Image``)
disparities is a Tuple of (``np.ndarray``, ``np.ndarray``) with shape (1, H, W)
valid_masks is a Tuple of (``np.ndarray``, ``np.ndarray``) with shape (H, W)
In some cases, when a dataset does not provide disparities, the ``disparities`` and
``valid_masks`` can be Tuples containing None values.
For training splits generally the datasets provide a minimal guarantee of
images: (``PIL.Image``, ``PIL.Image``)
disparities: (``np.ndarray``, ``None``) with shape (1, H, W)
Optionally, based on the dataset, it can return a ``mask`` as well:
valid_masks: (``np.ndarray | None``, ``None``) with shape (H, W)
                For some test splits, the datasets provide outputs that look like:
                    images: (``PIL.Image``, ``PIL.Image``)
disparities: (``None``, ``None``)
Optionally, based on the dataset, it can return a ``mask`` as well:
valid_masks: (``None``, ``None``)
"""
super().__init__(root=root)
self.transforms = transforms
self._images = [] # type: ignore
self._disparities = [] # type: ignore
def _read_img(self, file_path: Union[str, Path]) -> Image.Image:
img = Image.open(file_path)
if img.mode != "RGB":
img = img.convert("RGB")
return img
def _scan_pairs(
self,
paths_left_pattern: str,
paths_right_pattern: Optional[str] = None,
) -> List[Tuple[str, Optional[str]]]:
left_paths = list(sorted(glob(paths_left_pattern)))
right_paths: List[Union[None, str]]
if paths_right_pattern:
right_paths = list(sorted(glob(paths_right_pattern)))
else:
right_paths = list(None for _ in left_paths)
if not left_paths:
raise FileNotFoundError(f"Could not find any files matching the patterns: {paths_left_pattern}")
if not right_paths:
raise FileNotFoundError(f"Could not find any files matching the patterns: {paths_right_pattern}")
if len(left_paths) != len(right_paths):
raise ValueError(
f"Found {len(left_paths)} left files but {len(right_paths)} right files using:\n "
f"left pattern: {paths_left_pattern}\n"
f"right pattern: {paths_right_pattern}\n"
)
paths = list((left, right) for left, right in zip(left_paths, right_paths))
return paths
@abstractmethod
def _read_disparity(self, file_path: str) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
# function that returns a disparity map and an occlusion map
pass
def __getitem__(self, index: int) -> Union[T1, T2]:
"""Return example at given index.
Args:
index(int): The index of the example to retrieve
Returns:
tuple: A 3 or 4-tuple with ``(img_left, img_right, disparity, Optional[valid_mask])`` where ``valid_mask``
can be a numpy boolean mask of shape (H, W) if the dataset provides a file
indicating which disparity pixels are valid. The disparity is a numpy array of
shape (1, H, W) and the images are PIL images. ``disparity`` is None for
datasets on which for ``split="test"`` the authors did not provide annotations.
"""
img_left = self._read_img(self._images[index][0])
img_right = self._read_img(self._images[index][1])
dsp_map_left, valid_mask_left = self._read_disparity(self._disparities[index][0])
dsp_map_right, valid_mask_right = self._read_disparity(self._disparities[index][1])
imgs = (img_left, img_right)
dsp_maps = (dsp_map_left, dsp_map_right)
valid_masks = (valid_mask_left, valid_mask_right)
if self.transforms is not None:
(
imgs,
dsp_maps,
valid_masks,
) = self.transforms(imgs, dsp_maps, valid_masks)
if self._has_built_in_disparity_mask or valid_masks[0] is not None:
return imgs[0], imgs[1], dsp_maps[0], cast(np.ndarray, valid_masks[0])
else:
return imgs[0], imgs[1], dsp_maps[0]
def __len__(self) -> int:
return len(self._images)
class CarlaStereo(StereoMatchingDataset):
"""
Carla simulator data linked in the `CREStereo github repo <https://github.com/megvii-research/CREStereo>`_.
The dataset is expected to have the following structure: ::
root
carla-highres
trainingF
scene1
img0.png
img1.png
disp0GT.pfm
disp1GT.pfm
calib.txt
scene2
img0.png
img1.png
disp0GT.pfm
disp1GT.pfm
calib.txt
...
Args:
root (string): Root directory where `carla-highres` is located.
transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
"""
def __init__(self, root: str, transforms: Optional[Callable] = None) -> None:
super().__init__(root, transforms)
root = Path(root) / "carla-highres"
left_image_pattern = str(root / "trainingF" / "*" / "im0.png")
right_image_pattern = str(root / "trainingF" / "*" / "im1.png")
imgs = self._scan_pairs(left_image_pattern, right_image_pattern)
self._images = imgs
left_disparity_pattern = str(root / "trainingF" / "*" / "disp0GT.pfm")
right_disparity_pattern = str(root / "trainingF" / "*" / "disp1GT.pfm")
disparities = self._scan_pairs(left_disparity_pattern, right_disparity_pattern)
self._disparities = disparities
def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
disparity_map = _read_pfm_file(file_path)
disparity_map = np.abs(disparity_map) # ensure that the disparity is positive
valid_mask = None
return disparity_map, valid_mask
def __getitem__(self, index: int) -> T1:
"""Return example at given index.
Args:
index(int): The index of the example to retrieve
Returns:
tuple: A 3-tuple with ``(img_left, img_right, disparity)``.
The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
If a ``valid_mask`` is generated within the ``transforms`` parameter,
a 4-tuple with ``(img_left, img_right, disparity, valid_mask)`` is returned.
"""
return cast(T1, super().__getitem__(index))
class Kitti2012Stereo(StereoMatchingDataset):
"""
KITTI dataset from the `2012 stereo evaluation benchmark <http://www.cvlibs.net/datasets/kitti/eval_stereo_flow.php>`_.
Uses the RGB images for consistency with KITTI 2015.
The dataset is expected to have the following structure: ::
root
Kitti2012
testing
colored_0
1_10.png
2_10.png
...
colored_1
1_10.png
2_10.png
...
training
colored_0
1_10.png
2_10.png
...
colored_1
1_10.png
2_10.png
...
disp_noc
1.png
2.png
...
calib
Args:
root (string): Root directory where `Kitti2012` is located.
split (string, optional): The dataset split of scenes, either "train" (default) or "test".
transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
"""
_has_built_in_disparity_mask = True
def __init__(self, root: str, split: str = "train", transforms: Optional[Callable] = None) -> None:
super().__init__(root, transforms)
verify_str_arg(split, "split", valid_values=("train", "test"))
root = Path(root) / "Kitti2012" / (split + "ing")
left_img_pattern = str(root / "colored_0" / "*_10.png")
right_img_pattern = str(root / "colored_1" / "*_10.png")
self._images = self._scan_pairs(left_img_pattern, right_img_pattern)
if split == "train":
disparity_pattern = str(root / "disp_noc" / "*.png")
self._disparities = self._scan_pairs(disparity_pattern, None)
else:
self._disparities = list((None, None) for _ in self._images)
def _read_disparity(self, file_path: str) -> Tuple[Optional[np.ndarray], None]:
# test split has no disparity maps
if file_path is None:
return None, None
disparity_map = np.asarray(Image.open(file_path)) / 256.0
# unsqueeze the disparity map into (C, H, W) format
disparity_map = disparity_map[None, :, :]
valid_mask = None
return disparity_map, valid_mask
def __getitem__(self, index: int) -> T1:
"""Return example at given index.
Args:
index(int): The index of the example to retrieve
Returns:
tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
``valid_mask`` is implicitly ``None`` if the ``transforms`` parameter does not
generate a valid mask.
Both ``disparity`` and ``valid_mask`` are ``None`` if the dataset split is test.
"""
return cast(T1, super().__getitem__(index))
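# A minimal usage sketch, kept commented out; it assumes the KITTI 2012 stereo archive has been
# extracted so that <root>/Kitti2012/training and <root>/Kitti2012/testing exist.
# dataset = Kitti2012Stereo(root="datasets", split="train")
# img_left, img_right, disparity, valid_mask = dataset[0]  # valid_mask is None without transforms
# print(img_left.size, disparity.shape)  # disparity has shape (1, H, W)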
class Kitti2015Stereo(StereoMatchingDataset):
"""
KITTI dataset from the `2015 stereo evaluation benchmark <http://www.cvlibs.net/datasets/kitti/eval_scene_flow.php>`_.
The dataset is expected to have the following structure: ::
root
Kitti2015
testing
image_2
img1.png
img2.png
...
image_3
img1.png
img2.png
...
training
image_2
img1.png
img2.png
...
image_3
img1.png
img2.png
...
disp_occ_0
img1.png
img2.png
...
disp_occ_1
img1.png
img2.png
...
calib
Args:
root (string): Root directory where `Kitti2015` is located.
split (string, optional): The dataset split of scenes, either "train" (default) or "test".
transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
"""
_has_built_in_disparity_mask = True
def __init__(self, root: str, split: str = "train", transforms: Optional[Callable] = None) -> None:
super().__init__(root, transforms)
verify_str_arg(split, "split", valid_values=("train", "test"))
root = Path(root) / "Kitti2015" / (split + "ing")
left_img_pattern = str(root / "image_2" / "*.png")
right_img_pattern = str(root / "image_3" / "*.png")
self._images = self._scan_pairs(left_img_pattern, right_img_pattern)
if split == "train":
left_disparity_pattern = str(root / "disp_occ_0" / "*.png")
right_disparity_pattern = str(root / "disp_occ_1" / "*.png")
self._disparities = self._scan_pairs(left_disparity_pattern, right_disparity_pattern)
else:
self._disparities = list((None, None) for _ in self._images)
def _read_disparity(self, file_path: str) -> Tuple[Optional[np.ndarray], None]:
# test split has no disparity maps
if file_path is None:
return None, None
disparity_map = np.asarray(Image.open(file_path)) / 256.0
# unsqueeze the disparity map into (C, H, W) format
disparity_map = disparity_map[None, :, :]
valid_mask = None
return disparity_map, valid_mask
def __getitem__(self, index: int) -> T1:
"""Return example at given index.
Args:
index(int): The index of the example to retrieve
Returns:
tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
``valid_mask`` is implicitly ``None`` if the ``transforms`` parameter does not
generate a valid mask.
Both ``disparity`` and ``valid_mask`` are ``None`` if the dataset split is test.
"""
return cast(T1, super().__getitem__(index))
class Middlebury2014Stereo(StereoMatchingDataset):
"""Publicly available scenes from the Middlebury dataset `2014 version <https://vision.middlebury.edu/stereo/data/scenes2014/>`.
The dataset mostly follows the original format, without containing the ambient subdirectories. : ::
root
Middlebury2014
train
scene1-{perfect,imperfect}
calib.txt
im{0,1}.png
im1E.png
im1L.png
disp{0,1}.pfm
disp{0,1}-n.png
disp{0,1}-sd.pfm
disp{0,1}y.pfm
scene2-{perfect,imperfect}
calib.txt
im{0,1}.png
im1E.png
im1L.png
disp{0,1}.pfm
disp{0,1}-n.png
disp{0,1}-sd.pfm
disp{0,1}y.pfm
...
additional
scene1-{perfect,imperfect}
calib.txt
im{0,1}.png
im1E.png
im1L.png
disp{0,1}.pfm
disp{0,1}-n.png
disp{0,1}-sd.pfm
disp{0,1}y.pfm
...
test
scene1
calib.txt
im{0,1}.png
scene2
calib.txt
im{0,1}.png
...
Args:
        root (string): Root directory of the Middlebury 2014 Dataset.
split (string, optional): The dataset split of scenes, either "train" (default), "test", or "additional"
        use_ambient_views (boolean, optional): Whether to use different exposure or lighting views when possible.
The dataset samples with equal probability between ``[im1.png, im1E.png, im1L.png]``.
calibration (string, optional): Whether or not to use the calibrated (default) or uncalibrated scenes.
transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
download (boolean, optional): Whether or not to download the dataset in the ``root`` directory.
"""
splits = {
"train": [
"Adirondack",
"Jadeplant",
"Motorcycle",
"Piano",
"Pipes",
"Playroom",
"Playtable",
"Recycle",
"Shelves",
"Vintage",
],
"additional": [
"Backpack",
"Bicycle1",
"Cable",
"Classroom1",
"Couch",
"Flowers",
"Mask",
"Shopvac",
"Sticks",
"Storage",
"Sword1",
"Sword2",
"Umbrella",
],
"test": [
"Plants",
"Classroom2E",
"Classroom2",
"Australia",
"DjembeL",
"CrusadeP",
"Crusade",
"Hoops",
"Bicycle2",
"Staircase",
"Newkuba",
"AustraliaP",
"Djembe",
"Livingroom",
"Computer",
],
}
_has_built_in_disparity_mask = True
def __init__(
self,
root: str,
split: str = "train",
calibration: Optional[str] = "perfect",
use_ambient_views: bool = False,
transforms: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transforms)
verify_str_arg(split, "split", valid_values=("train", "test", "additional"))
self.split = split
if calibration:
verify_str_arg(calibration, "calibration", valid_values=("perfect", "imperfect", "both", None)) # type: ignore
if split == "test":
raise ValueError("Split 'test' has only no calibration settings, please set `calibration=None`.")
else:
if split != "test":
raise ValueError(
f"Split '{split}' has calibration settings, however None was provided as an argument."
f"\nSetting calibration to 'perfect' for split '{split}'. Available calibration settings are: 'perfect', 'imperfect', 'both'.",
)
if download:
self._download_dataset(root)
root = Path(root) / "Middlebury2014"
if not os.path.exists(root / split):
raise FileNotFoundError(f"The {split} directory was not found in the provided root directory")
split_scenes = self.splits[split]
# check that the provided root folder contains the scene splits
if not any(
            # using startswith to account for perfect / imperfect calibration
scene.startswith(s)
for scene in os.listdir(root / split)
for s in split_scenes
):
raise FileNotFoundError(f"Provided root folder does not contain any scenes from the {split} split.")
calibrartion_suffixes = {
None: [""],
"perfect": ["-perfect"],
"imperfect": ["-imperfect"],
"both": ["-perfect", "-imperfect"],
}[calibration]
for calibration_suffix in calibrartion_suffixes:
scene_pattern = "*" + calibration_suffix
left_img_pattern = str(root / split / scene_pattern / "im0.png")
right_img_pattern = str(root / split / scene_pattern / "im1.png")
self._images += self._scan_pairs(left_img_pattern, right_img_pattern)
if split == "test":
self._disparities = list((None, None) for _ in self._images)
else:
left_dispartity_pattern = str(root / split / scene_pattern / "disp0.pfm")
right_dispartity_pattern = str(root / split / scene_pattern / "disp1.pfm")
self._disparities += self._scan_pairs(left_dispartity_pattern, right_dispartity_pattern)
self.use_ambient_views = use_ambient_views
def _read_img(self, file_path: Union[str, Path]) -> Image.Image:
"""
Function that reads either the original right image or an augmented view when ``use_ambient_views`` is True.
When ``use_ambient_views`` is True, the dataset will return at random one of ``[im1.png, im1E.png, im1L.png]``
as the right image.
"""
ambient_file_paths: List[Union[str, Path]] # make mypy happy
if not isinstance(file_path, Path):
file_path = Path(file_path)
if file_path.name == "im1.png" and self.use_ambient_views:
base_path = file_path.parent
# initialize sampleable container
ambient_file_paths = list(base_path / view_name for view_name in ["im1E.png", "im1L.png"])
# double check that we're not going to try to read from an invalid file path
ambient_file_paths = list(filter(lambda p: os.path.exists(p), ambient_file_paths))
# keep the original image as an option as well for uniform sampling between base views
ambient_file_paths.append(file_path)
file_path = random.choice(ambient_file_paths) # type: ignore
return super()._read_img(file_path)
def _read_disparity(self, file_path: str) -> Union[Tuple[None, None], Tuple[np.ndarray, np.ndarray]]:
        # test split has no disparity maps
if file_path is None:
return None, None
disparity_map = _read_pfm_file(file_path)
disparity_map = np.abs(disparity_map) # ensure that the disparity is positive
disparity_map[disparity_map == np.inf] = 0 # remove infinite disparities
valid_mask = (disparity_map > 0).squeeze(0) # mask out invalid disparities
return disparity_map, valid_mask
def _download_dataset(self, root: str) -> None:
base_url = "https://vision.middlebury.edu/stereo/data/scenes2014/zip"
# train and additional splits have 2 different calibration settings
root = Path(root) / "Middlebury2014"
split_name = self.split
if split_name != "test":
for split_scene in self.splits[split_name]:
split_root = root / split_name
for calibration in ["perfect", "imperfect"]:
scene_name = f"{split_scene}-{calibration}"
scene_url = f"{base_url}/{scene_name}.zip"
print(f"Downloading {scene_url}")
# download the scene only if it doesn't exist
if not (split_root / scene_name).exists():
download_and_extract_archive(
url=scene_url,
filename=f"{scene_name}.zip",
download_root=str(split_root),
remove_finished=True,
)
else:
os.makedirs(root / "test")
if any(s not in os.listdir(root / "test") for s in self.splits["test"]):
# test split is downloaded from a different location
test_set_url = "https://vision.middlebury.edu/stereo/submit3/zip/MiddEval3-data-F.zip"
# the unzip is going to produce a directory MiddEval3 with two subdirectories trainingF and testF
# we want to move the contents from testF into the directory
download_and_extract_archive(url=test_set_url, download_root=str(root), remove_finished=True)
for scene_dir, scene_names, _ in os.walk(str(root / "MiddEval3/testF")):
for scene in scene_names:
scene_dst_dir = root / "test"
scene_src_dir = Path(scene_dir) / scene
os.makedirs(scene_dst_dir, exist_ok=True)
shutil.move(str(scene_src_dir), str(scene_dst_dir))
# cleanup MiddEval3 directory
shutil.rmtree(str(root / "MiddEval3"))
def __getitem__(self, index: int) -> T2:
"""Return example at given index.
Args:
index(int): The index of the example to retrieve
Returns:
tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
``valid_mask`` is implicitly ``None`` for `split=test`.
"""
return cast(T2, super().__getitem__(index))
class CREStereo(StereoMatchingDataset):
"""Synthetic dataset used in training the `CREStereo <https://arxiv.org/pdf/2203.11483.pdf>`_ architecture.
Dataset details on the official paper `repo <https://github.com/megvii-research/CREStereo>`_.
The dataset is expected to have the following structure: ::
root
CREStereo
tree
img1_left.jpg
img1_right.jpg
img1_left.disp.jpg
img1_right.disp.jpg
img2_left.jpg
img2_right.jpg
img2_left.disp.jpg
img2_right.disp.jpg
...
shapenet
img1_left.jpg
img1_right.jpg
img1_left.disp.jpg
img1_right.disp.jpg
...
reflective
img1_left.jpg
img1_right.jpg
img1_left.disp.jpg
img1_right.disp.jpg
...
hole
img1_left.jpg
img1_right.jpg
img1_left.disp.jpg
img1_right.disp.jpg
...
Args:
root (str): Root directory of the dataset.
transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
"""
_has_built_in_disparity_mask = True
def __init__(
self,
root: str,
transforms: Optional[Callable] = None,
) -> None:
super().__init__(root, transforms)
root = Path(root) / "CREStereo"
dirs = ["shapenet", "reflective", "tree", "hole"]
for s in dirs:
left_image_pattern = str(root / s / "*_left.jpg")
right_image_pattern = str(root / s / "*_right.jpg")
imgs = self._scan_pairs(left_image_pattern, right_image_pattern)
self._images += imgs
left_disparity_pattern = str(root / s / "*_left.disp.png")
right_disparity_pattern = str(root / s / "*_right.disp.png")
disparities = self._scan_pairs(left_disparity_pattern, right_disparity_pattern)
self._disparities += disparities
def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
disparity_map = np.asarray(Image.open(file_path), dtype=np.float32)
# unsqueeze the disparity map into (C, H, W) format
disparity_map = disparity_map[None, :, :] / 32.0
valid_mask = None
return disparity_map, valid_mask
def __getitem__(self, index: int) -> T1:
"""Return example at given index.
Args:
index(int): The index of the example to retrieve
Returns:
tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
``valid_mask`` is implicitly ``None`` if the ``transforms`` parameter does not
generate a valid mask.
"""
return cast(T1, super().__getitem__(index))
class FallingThingsStereo(StereoMatchingDataset):
"""`FallingThings <https://research.nvidia.com/publication/2018-06_falling-things-synthetic-dataset-3d-object-detection-and-pose-estimation>`_ dataset.
The dataset is expected to have the following structure: ::
root
FallingThings
single
dir1
scene1
_object_settings.json
_camera_settings.json
image1.left.depth.png
image1.right.depth.png
image1.left.jpg
image1.right.jpg
image2.left.depth.png
image2.right.depth.png
image2.left.jpg
                            image2.right.jpg
...
scene2
...
mixed
scene1
_object_settings.json
_camera_settings.json
image1.left.depth.png
image1.right.depth.png
image1.left.jpg
image1.right.jpg
image2.left.depth.png
image2.right.depth.png
image2.left.jpg
                        image2.right.jpg
...
scene2
...
Args:
root (string): Root directory where FallingThings is located.
variant (string): Which variant to use. Either "single", "mixed", or "both".
transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
"""
def __init__(self, root: str, variant: str = "single", transforms: Optional[Callable] = None) -> None:
super().__init__(root, transforms)
root = Path(root) / "FallingThings"
verify_str_arg(variant, "variant", valid_values=("single", "mixed", "both"))
variants = {
"single": ["single"],
"mixed": ["mixed"],
"both": ["single", "mixed"],
}[variant]
split_prefix = {
"single": Path("*") / "*",
"mixed": Path("*"),
}
for s in variants:
left_img_pattern = str(root / s / split_prefix[s] / "*.left.jpg")
right_img_pattern = str(root / s / split_prefix[s] / "*.right.jpg")
self._images += self._scan_pairs(left_img_pattern, right_img_pattern)
left_disparity_pattern = str(root / s / split_prefix[s] / "*.left.depth.png")
right_disparity_pattern = str(root / s / split_prefix[s] / "*.right.depth.png")
self._disparities += self._scan_pairs(left_disparity_pattern, right_disparity_pattern)
def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
# (H, W) image
depth = np.asarray(Image.open(file_path))
# as per https://research.nvidia.com/sites/default/files/pubs/2018-06_Falling-Things/readme_0.txt
# in order to extract disparity from depth maps
camera_settings_path = Path(file_path).parent / "_camera_settings.json"
with open(camera_settings_path, "r") as f:
# inverse of depth-from-disparity equation: depth = (baseline * focal) / (disparity * pixel_constant)
intrinsics = json.load(f)
focal = intrinsics["camera_settings"][0]["intrinsic_settings"]["fx"]
baseline, pixel_constant = 6, 100 # pixel constant is inverted
disparity_map = (baseline * focal * pixel_constant) / depth.astype(np.float32)
# unsqueeze disparity to (C, H, W)
disparity_map = disparity_map[None, :, :]
valid_mask = None
return disparity_map, valid_mask
def __getitem__(self, index: int) -> T1:
"""Return example at given index.
Args:
index(int): The index of the example to retrieve
Returns:
tuple: A 3-tuple with ``(img_left, img_right, disparity)``.
The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
If a ``valid_mask`` is generated within the ``transforms`` parameter,
a 4-tuple with ``(img_left, img_right, disparity, valid_mask)`` is returned.
"""
return cast(T1, super().__getitem__(index))
class SceneFlowStereo(StereoMatchingDataset):
"""Dataset interface for `Scene Flow <https://lmb.informatik.uni-freiburg.de/resources/datasets/SceneFlowDatasets.en.html>`_ datasets.
    This interface provides access to the `FlyingThings3D`, `Monkaa` and `Driving` datasets.
The dataset is expected to have the following structure: ::
root
SceneFlow
Monkaa
frames_cleanpass
scene1
left
img1.png
img2.png
right
img1.png
img2.png
scene2
left
img1.png
img2.png
right
img1.png
img2.png
frames_finalpass
scene1
left
img1.png
img2.png
right
img1.png
img2.png
...
...
disparity
scene1
left
img1.pfm
img2.pfm
right
img1.pfm
img2.pfm
FlyingThings3D
...
...
Args:
root (string): Root directory where SceneFlow is located.
        variant (string): Which dataset variant to use, "FlyingThings3D" (default), "Monkaa" or "Driving".
pass_name (string): Which pass to use, "clean" (default), "final" or "both".
transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
"""
def __init__(
self,
root: str,
variant: str = "FlyingThings3D",
pass_name: str = "clean",
transforms: Optional[Callable] = None,
) -> None:
super().__init__(root, transforms)
root = Path(root) / "SceneFlow"
verify_str_arg(variant, "variant", valid_values=("FlyingThings3D", "Driving", "Monkaa"))
verify_str_arg(pass_name, "pass_name", valid_values=("clean", "final", "both"))
passes = {
"clean": ["frames_cleanpass"],
"final": ["frames_finalpass"],
"both": ["frames_cleanpass", "frames_finalpass"],
}[pass_name]
root = root / variant
prefix_directories = {
"Monkaa": Path("*"),
"FlyingThings3D": Path("*") / "*" / "*",
"Driving": Path("*") / "*" / "*",
}
for p in passes:
left_image_pattern = str(root / p / prefix_directories[variant] / "left" / "*.png")
right_image_pattern = str(root / p / prefix_directories[variant] / "right" / "*.png")
self._images += self._scan_pairs(left_image_pattern, right_image_pattern)
left_disparity_pattern = str(root / "disparity" / prefix_directories[variant] / "left" / "*.pfm")
right_disparity_pattern = str(root / "disparity" / prefix_directories[variant] / "right" / "*.pfm")
self._disparities += self._scan_pairs(left_disparity_pattern, right_disparity_pattern)
def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
disparity_map = _read_pfm_file(file_path)
disparity_map = np.abs(disparity_map) # ensure that the disparity is positive
valid_mask = None
return disparity_map, valid_mask
def __getitem__(self, index: int) -> T1:
"""Return example at given index.
Args:
index(int): The index of the example to retrieve
Returns:
tuple: A 3-tuple with ``(img_left, img_right, disparity)``.
The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
If a ``valid_mask`` is generated within the ``transforms`` parameter,
a 4-tuple with ``(img_left, img_right, disparity, valid_mask)`` is returned.
"""
return cast(T1, super().__getitem__(index))
class SintelStereo(StereoMatchingDataset):
"""Sintel `Stereo Dataset <http://sintel.is.tue.mpg.de/stereo>`_.
The dataset is expected to have the following structure: ::
root
Sintel
training
final_left
scene1
img1.png
img2.png
...
...
final_right
scene2
img1.png
img2.png
...
...
disparities
scene1
img1.png
img2.png
...
...
occlusions
scene1
img1.png
img2.png
...
...
outofframe
scene1
img1.png
img2.png
...
...
Args:
root (string): Root directory where Sintel Stereo is located.
pass_name (string): The name of the pass to use, either "final", "clean" or "both".
transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
"""
_has_built_in_disparity_mask = True
def __init__(self, root: str, pass_name: str = "final", transforms: Optional[Callable] = None) -> None:
super().__init__(root, transforms)
verify_str_arg(pass_name, "pass_name", valid_values=("final", "clean", "both"))
root = Path(root) / "Sintel"
pass_names = {
"final": ["final"],
"clean": ["clean"],
"both": ["final", "clean"],
}[pass_name]
for p in pass_names:
left_img_pattern = str(root / "training" / f"{p}_left" / "*" / "*.png")
right_img_pattern = str(root / "training" / f"{p}_right" / "*" / "*.png")
self._images += self._scan_pairs(left_img_pattern, right_img_pattern)
disparity_pattern = str(root / "training" / "disparities" / "*" / "*.png")
self._disparities += self._scan_pairs(disparity_pattern, None)
    def _get_occlusion_mask_paths(self, file_path: str) -> Tuple[str, str]:
# helper function to get the occlusion mask paths
# a path will look like .../.../.../training/disparities/scene1/img1.png
# we want to get something like .../.../.../training/occlusions/scene1/img1.png
fpath = Path(file_path)
basename = fpath.name
scenedir = fpath.parent
# the parent of the scenedir is actually the disparity dir
sampledir = scenedir.parent.parent
occlusion_path = str(sampledir / "occlusions" / scenedir.name / basename)
outofframe_path = str(sampledir / "outofframe" / scenedir.name / basename)
if not os.path.exists(occlusion_path):
raise FileNotFoundError(f"Occlusion mask {occlusion_path} does not exist")
if not os.path.exists(outofframe_path):
raise FileNotFoundError(f"Out of frame mask {outofframe_path} does not exist")
return occlusion_path, outofframe_path
def _read_disparity(self, file_path: str) -> Union[Tuple[None, None], Tuple[np.ndarray, np.ndarray]]:
if file_path is None:
return None, None
# disparity decoding as per Sintel instructions in the README provided with the dataset
disparity_map = np.asarray(Image.open(file_path), dtype=np.float32)
r, g, b = np.split(disparity_map, 3, axis=-1)
disparity_map = r * 4 + g / (2**6) + b / (2**14)
# reshape into (C, H, W) format
disparity_map = np.transpose(disparity_map, (2, 0, 1))
        # find the appropriate file paths
        occluded_mask_path, out_of_frame_mask_path = self._get_occlusion_mask_paths(file_path)
        # occlusion masks
        valid_mask = np.asarray(Image.open(occluded_mask_path)) == 0
# out of frame masks
off_mask = np.asarray(Image.open(out_of_frame_mask_path)) == 0
# combine the masks together
valid_mask = np.logical_and(off_mask, valid_mask)
return disparity_map, valid_mask
def __getitem__(self, index: int) -> T2:
"""Return example at given index.
Args:
index(int): The index of the example to retrieve
Returns:
tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)`` is returned.
The disparity is a numpy array of shape (1, H, W) and the images are PIL images whilst
the valid_mask is a numpy array of shape (H, W).
"""
return cast(T2, super().__getitem__(index))
class InStereo2k(StereoMatchingDataset):
"""`InStereo2k <https://github.com/YuhuaXu/StereoDataset>`_ dataset.
The dataset is expected to have the following structure: ::
root
InStereo2k
train
scene1
left.png
right.png
left_disp.png
right_disp.png
...
scene2
...
test
scene1
left.png
right.png
left_disp.png
right_disp.png
...
scene2
...
Args:
root (string): Root directory where InStereo2k is located.
split (string): Either "train" or "test".
transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
"""
def __init__(self, root: str, split: str = "train", transforms: Optional[Callable] = None) -> None:
super().__init__(root, transforms)
root = Path(root) / "InStereo2k" / split
verify_str_arg(split, "split", valid_values=("train", "test"))
left_img_pattern = str(root / "*" / "left.png")
right_img_pattern = str(root / "*" / "right.png")
self._images = self._scan_pairs(left_img_pattern, right_img_pattern)
left_disparity_pattern = str(root / "*" / "left_disp.png")
right_disparity_pattern = str(root / "*" / "right_disp.png")
self._disparities = self._scan_pairs(left_disparity_pattern, right_disparity_pattern)
def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
disparity_map = np.asarray(Image.open(file_path), dtype=np.float32)
# unsqueeze disparity to (C, H, W)
disparity_map = disparity_map[None, :, :] / 1024.0
valid_mask = None
return disparity_map, valid_mask
def __getitem__(self, index: int) -> T1:
"""Return example at given index.
Args:
index(int): The index of the example to retrieve
Returns:
tuple: A 3-tuple with ``(img_left, img_right, disparity)``.
The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
If a ``valid_mask`` is generated within the ``transforms`` parameter,
a 4-tuple with ``(img_left, img_right, disparity, valid_mask)`` is returned.
"""
return cast(T1, super().__getitem__(index))
class ETH3DStereo(StereoMatchingDataset):
"""ETH3D `Low-Res Two-View <https://www.eth3d.net/datasets>`_ dataset.
The dataset is expected to have the following structure: ::
root
ETH3D
two_view_training
scene1
im1.png
im0.png
images.txt
cameras.txt
calib.txt
scene2
im1.png
im0.png
images.txt
cameras.txt
calib.txt
...
two_view_training_gt
scene1
disp0GT.pfm
mask0nocc.png
scene2
disp0GT.pfm
mask0nocc.png
...
two_view_testing
scene1
im1.png
im0.png
images.txt
cameras.txt
calib.txt
scene2
im1.png
im0.png
images.txt
cameras.txt
calib.txt
...
Args:
root (string): Root directory of the ETH3D Dataset.
split (string, optional): The dataset split of scenes, either "train" (default) or "test".
transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
"""
_has_built_in_disparity_mask = True
def __init__(self, root: str, split: str = "train", transforms: Optional[Callable] = None) -> None:
super().__init__(root, transforms)
verify_str_arg(split, "split", valid_values=("train", "test"))
root = Path(root) / "ETH3D"
img_dir = "two_view_training" if split == "train" else "two_view_test"
anot_dir = "two_view_training_gt"
left_img_pattern = str(root / img_dir / "*" / "im0.png")
right_img_pattern = str(root / img_dir / "*" / "im1.png")
self._images = self._scan_pairs(left_img_pattern, right_img_pattern)
if split == "test":
self._disparities = list((None, None) for _ in self._images)
else:
disparity_pattern = str(root / anot_dir / "*" / "disp0GT.pfm")
self._disparities = self._scan_pairs(disparity_pattern, None)
def _read_disparity(self, file_path: str) -> Union[Tuple[None, None], Tuple[np.ndarray, np.ndarray]]:
# test split has no disparity maps
if file_path is None:
return None, None
disparity_map = _read_pfm_file(file_path)
disparity_map = np.abs(disparity_map) # ensure that the disparity is positive
mask_path = Path(file_path).parent / "mask0nocc.png"
valid_mask = Image.open(mask_path)
valid_mask = np.asarray(valid_mask).astype(bool)
return disparity_map, valid_mask
def __getitem__(self, index: int) -> T2:
"""Return example at given index.
Args:
index(int): The index of the example to retrieve
Returns:
tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
``valid_mask`` is implicitly ``None`` if the ``transforms`` parameter does not
generate a valid mask.
Both ``disparity`` and ``valid_mask`` are ``None`` if the dataset split is test.
"""
return cast(T2, super().__getitem__(index))
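# Minimal usage sketch for the dataset classes above (illustrative only; it assumes
# the CREStereo data has already been downloaded and extracted under "./data").
if __name__ == "__main__":
    dataset = CREStereo(root="./data")
    img_left, img_right, disparity, valid_mask = dataset[0]
    # disparity is a (1, H, W) float32 numpy array; valid_mask is None without transforms
    print(len(dataset), disparity.shape)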
|
from json import loads
from flask import (render_template, Blueprint,
request, jsonify, redirect, url_for, abort)
# module imports
from manticora.controllers.modules.register import (register_user,
register_rest,
update_user)
from manticora.models.database.tables import db
from flask_login import current_user
app = Blueprint('register', __name__)
@app.route('/new_user/', methods=['POST'])
def new_user():
data = loads(request.data)
name = data['name']
email = data['email']
pwd = data['pwd']
neigh = data['neigh']
city = data['city']
street = data['street']
num = data['num']
complement = data['complement']
return jsonify({
'result': register_user(name, pwd, email, neigh,
city, street, num,
complement)
})
@app.route('/new_adm/', methods=['GET'])
def new_adm():
return render_template('admin_register.html')
@app.route('/new_adm/', methods=['POST'])
def new_adm_post():
name = request.form['name']
email = request.form['email']
phone = request.form['phone']
num_phone = request.form['num']
pwd = request.form['pwd2']
neigh = request.form['neigh']
city = request.form['city']
street = request.form['street']
num_street = request.form['num_street']
comp = request.form['comp']
open = request.form['hora_aber']
closed = request.form['hora_fech']
img = request.files['img']
if open == closed:
return redirect(url_for('register.new_adm',
new_adm='Desculpe, não é possivel utilizar um estabelecimento 24hrs')) # NOQA
adm = register_user(name, pwd, email, neigh,
city, street, num_street,
comp, is_adm=True,
return_entity=True)
    if not isinstance(adm, str):
rest = register_rest(phone, num_phone, img, open, closed, adm)
return redirect(url_for('register.new_adm', new_adm=rest))
return redirect(url_for('register.new_adm', new_adm=adm))
@app.route('/alter_user/', methods=['POST'])
def alter_user():
data = request.form
neigh = data.get('neigh_in')
city = data.get('city_in')
street = data.get('street_in')
num = data.get('number_in')
complement = data.get('comp_in')
upd_user = update_user(city, neigh, street, num, complement, current_user)
return redirect(url_for('login.login_template', upd=upd_user))
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import glob
import time
import numpy as np
import os
import pandas as pd
paths = os.getenv('ENSEMBLING', None)
if paths is None:
paths = [
# glob.glob(os.getenv('PREDICTING') + '.simple')[0],
glob.glob(os.getenv('PREDICTING') + '.lr')[0],
glob.glob(os.getenv('PREDICTING') + '.fm')[0],
# glob.glob(os.getenv('PREDICTING') + '.tf_pairwise')[0],
# glob.glob(os.getenv('PREDICTING') + '.tf_classifier')[0],
glob.glob(os.getenv('PREDICTING') + '.gbt')[0],
glob.glob(os.getenv('PREDICTING') + '.pairwise')[0]
]
else:
paths = paths.split(':')
def main():
t_id = []
probs = []
for path in paths:
df = pd.read_csv(path)
t_id = df['id'].values
probs.append(df['probability'].values)
probability = np.power(np.prod(probs, axis=0), 1.0 / len(paths))
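    # Note: the line above blends the models with a geometric mean. For example, if two
    # models assign probabilities 0.8 and 0.2 to the same id, the blended value is
    # (0.8 * 0.2) ** 0.5 = 0.4, which penalises disagreement more strongly than the
    # arithmetic mean (0.5) would.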
assert(len(probability) == len(t_id))
df_pred = pd.DataFrame({
'id': t_id,
'probability': probability,
})
csv_path = os.getenv('PREDICTING')
df_pred.to_csv(csv_path, columns=('id', 'probability'), index=None)
print('Saved: {}'.format(csv_path))
if __name__ == '__main__':
main()
|
import unittest
from katas.kyu_8.function_within_function import always
class AlwaysTestCase(unittest.TestCase):
def setUp(self):
self.three = always(3)
def test_equals(self):
self.assertEqual(self.three(), 3)
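# A minimal sketch of the `always` function under test (an assumed implementation,
# not part of this test module): it returns a closure that always yields the value
# it was created with, e.g.
#
#     def always(n):
#         return lambda: n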
|
from typing import Any, cast, Dict, List, Optional, Tuple, Union
import PIL.Image
import torch
from torch.utils._pytree import tree_flatten, tree_unflatten
from torchvision import tv_tensors
from torchvision.ops import masks_to_boxes
from torchvision.prototype import tv_tensors as proto_tv_tensors
from torchvision.transforms.v2 import functional as F, InterpolationMode, Transform
from torchvision.transforms.v2._utils import is_pure_tensor
from torchvision.transforms.v2.functional._geometry import _check_interpolation
class SimpleCopyPaste(Transform):
def __init__(
self,
blending: bool = True,
resize_interpolation: Union[int, InterpolationMode] = F.InterpolationMode.BILINEAR,
antialias: Optional[bool] = None,
) -> None:
super().__init__()
self.resize_interpolation = _check_interpolation(resize_interpolation)
self.blending = blending
self.antialias = antialias
def _copy_paste(
self,
image: Union[torch.Tensor, tv_tensors.Image],
target: Dict[str, Any],
paste_image: Union[torch.Tensor, tv_tensors.Image],
paste_target: Dict[str, Any],
random_selection: torch.Tensor,
blending: bool,
resize_interpolation: F.InterpolationMode,
antialias: Optional[bool],
) -> Tuple[torch.Tensor, Dict[str, Any]]:
paste_masks = tv_tensors.wrap(paste_target["masks"][random_selection], like=paste_target["masks"])
paste_boxes = tv_tensors.wrap(paste_target["boxes"][random_selection], like=paste_target["boxes"])
paste_labels = tv_tensors.wrap(paste_target["labels"][random_selection], like=paste_target["labels"])
masks = target["masks"]
# We resize source and paste data if they have different sizes
        # This differs from the TF implementation and was introduced here because
        # the original algorithm assumes equal-sized data
# (for example, coming from LSJ data augmentations)
size1 = cast(List[int], image.shape[-2:])
size2 = paste_image.shape[-2:]
if size1 != size2:
paste_image = F.resize(paste_image, size=size1, interpolation=resize_interpolation, antialias=antialias)
paste_masks = F.resize(paste_masks, size=size1)
paste_boxes = F.resize(paste_boxes, size=size1)
paste_alpha_mask = paste_masks.sum(dim=0) > 0
if blending:
paste_alpha_mask = F.gaussian_blur(paste_alpha_mask.unsqueeze(0), kernel_size=[5, 5], sigma=[2.0])
inverse_paste_alpha_mask = paste_alpha_mask.logical_not()
# Copy-paste images:
image = image.mul(inverse_paste_alpha_mask).add_(paste_image.mul(paste_alpha_mask))
# Copy-paste masks:
masks = masks * inverse_paste_alpha_mask
non_all_zero_masks = masks.sum((-1, -2)) > 0
masks = masks[non_all_zero_masks]
# Do a shallow copy of the target dict
out_target = {k: v for k, v in target.items()}
out_target["masks"] = torch.cat([masks, paste_masks])
# Copy-paste boxes and labels
bbox_format = target["boxes"].format
xyxy_boxes = masks_to_boxes(masks)
# masks_to_boxes produces bboxes with x2y2 inclusive but x2y2 should be exclusive
# we need to add +1 to x2y2.
# There is a similar +1 in other reference implementations:
# https://github.com/pytorch/vision/blob/b6feccbc4387766b76a3e22b13815dbbbfa87c0f/torchvision/models/detection/roi_heads.py#L418-L422
xyxy_boxes[:, 2:] += 1
boxes = F.convert_bounding_box_format(
xyxy_boxes, old_format=tv_tensors.BoundingBoxFormat.XYXY, new_format=bbox_format, inplace=True
)
out_target["boxes"] = torch.cat([boxes, paste_boxes])
labels = target["labels"][non_all_zero_masks]
out_target["labels"] = torch.cat([labels, paste_labels])
# Check for degenerated boxes and remove them
boxes = F.convert_bounding_box_format(
out_target["boxes"], old_format=bbox_format, new_format=tv_tensors.BoundingBoxFormat.XYXY
)
degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
if degenerate_boxes.any():
valid_targets = ~degenerate_boxes.any(dim=1)
out_target["boxes"] = boxes[valid_targets]
out_target["masks"] = out_target["masks"][valid_targets]
out_target["labels"] = out_target["labels"][valid_targets]
return image, out_target
def _extract_image_targets(
self, flat_sample: List[Any]
) -> Tuple[List[Union[torch.Tensor, tv_tensors.Image]], List[Dict[str, Any]]]:
# fetch all images, bboxes, masks and labels from unstructured input
# with List[image], List[BoundingBoxes], List[Mask], List[Label]
images, bboxes, masks, labels = [], [], [], []
for obj in flat_sample:
if isinstance(obj, tv_tensors.Image) or is_pure_tensor(obj):
images.append(obj)
elif isinstance(obj, PIL.Image.Image):
images.append(F.to_image(obj))
elif isinstance(obj, tv_tensors.BoundingBoxes):
bboxes.append(obj)
elif isinstance(obj, tv_tensors.Mask):
masks.append(obj)
elif isinstance(obj, (proto_tv_tensors.Label, proto_tv_tensors.OneHotLabel)):
labels.append(obj)
if not (len(images) == len(bboxes) == len(masks) == len(labels)):
            raise TypeError(
                f"{type(self).__name__}() requires input sample to contain equal-sized lists of Images, "
                "BoundingBoxes, Masks and Labels or OneHotLabels."
            )
targets = []
for bbox, mask, label in zip(bboxes, masks, labels):
targets.append({"boxes": bbox, "masks": mask, "labels": label})
return images, targets
def _insert_outputs(
self,
flat_sample: List[Any],
output_images: List[torch.Tensor],
output_targets: List[Dict[str, Any]],
) -> None:
c0, c1, c2, c3 = 0, 0, 0, 0
for i, obj in enumerate(flat_sample):
if isinstance(obj, tv_tensors.Image):
flat_sample[i] = tv_tensors.wrap(output_images[c0], like=obj)
c0 += 1
elif isinstance(obj, PIL.Image.Image):
flat_sample[i] = F.to_pil_image(output_images[c0])
c0 += 1
elif is_pure_tensor(obj):
flat_sample[i] = output_images[c0]
c0 += 1
elif isinstance(obj, tv_tensors.BoundingBoxes):
flat_sample[i] = tv_tensors.wrap(output_targets[c1]["boxes"], like=obj)
c1 += 1
elif isinstance(obj, tv_tensors.Mask):
flat_sample[i] = tv_tensors.wrap(output_targets[c2]["masks"], like=obj)
c2 += 1
elif isinstance(obj, (proto_tv_tensors.Label, proto_tv_tensors.OneHotLabel)):
flat_sample[i] = tv_tensors.wrap(output_targets[c3]["labels"], like=obj)
c3 += 1
def forward(self, *inputs: Any) -> Any:
flat_inputs, spec = tree_flatten(inputs if len(inputs) > 1 else inputs[0])
images, targets = self._extract_image_targets(flat_inputs)
# images = [t1, t2, ..., tN]
# Let's define paste_images as shifted list of input images
# paste_images = [t2, t3, ..., tN, t1]
# FYI: in TF they mix data on the dataset level
images_rolled = images[-1:] + images[:-1]
targets_rolled = targets[-1:] + targets[:-1]
output_images, output_targets = [], []
for image, target, paste_image, paste_target in zip(images, targets, images_rolled, targets_rolled):
# Random paste targets selection:
num_masks = len(paste_target["masks"])
if num_masks < 1:
                # Such a degenerate case with num_masks=0 can happen with LSJ
# Let's just return (image, target)
output_image, output_target = image, target
else:
random_selection = torch.randint(0, num_masks, (num_masks,), device=paste_image.device)
random_selection = torch.unique(random_selection)
output_image, output_target = self._copy_paste(
image,
target,
paste_image,
paste_target,
random_selection=random_selection,
blending=self.blending,
resize_interpolation=self.resize_interpolation,
antialias=self.antialias,
)
output_images.append(output_image)
output_targets.append(output_target)
# Insert updated images and targets into input flat_sample
self._insert_outputs(flat_inputs, output_images, output_targets)
return tree_unflatten(flat_inputs, spec)
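# Usage sketch (illustrative only; the sample structure below is an assumption based on
# what _extract_image_targets collects): the transform expects one Image/tensor, one
# BoundingBoxes, one Mask and one Label entry per sample, for example
#
#     copy_paste = SimpleCopyPaste(blending=True)
#     batch = [
#         (image_a, {"boxes": boxes_a, "masks": masks_a, "labels": labels_a}),
#         (image_b, {"boxes": boxes_b, "masks": masks_b, "labels": labels_b}),
#     ]
#     batch = copy_paste(batch)
#
# Each sample then receives randomly selected instances pasted from the next sample in
# the (rolled) list.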
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
import os
import logging
from colorlog import ColoredFormatter
import argparse
import uvloop
import asyncio
from signal import signal, SIGINT
# Import KafkaProducer / KafkaConsumer
from tonga.services.consumer.kafka_consumer import KafkaConsumer
from tonga.services.producer.kafka_producer import KafkaProducer
# Import serializer
from tonga.services.serializer.avro import AvroSerializer
# Import key partitioner
from tonga.services.coordinator.partitioner.key_partitioner import KeyPartitioner
# Import coffee-maker events
from examples.coffee_bar.coffeemaker.models.results.make_coffee_result import MakeCoffeeResult
from examples.coffee_bar.coffeemaker.models.commands.make_coffee import MakeCoffee
from examples.coffee_bar.coffeemaker.models.events.coffee_started import CoffeeStarted
# Import coffee-maker handlers
from examples.coffee_bar.coffeemaker.models.handlers.coffee_started_handler import CoffeeStartedHandler
from examples.coffee_bar.coffeemaker.models.handlers.make_coffee_result_handler import MakeCoffeeResultHandler
from examples.coffee_bar.coffeemaker.models.handlers.make_coffee_handler import MakeCoffeeHandler
def setup_logger():
"""Return a logger with a default ColoredFormatter."""
formatter = ColoredFormatter(
"%(log_color)s[%(asctime)s]%(levelname)s: %(name)s/%(module)s/%(funcName)s:%(lineno)d"
" (%(thread)d) %(blue)s%(message)s",
datefmt=None,
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
)
logger = logging.getLogger('tonga')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
if __name__ == '__main__':
    # Argument parser, used to start the coffee-maker with a given instance number
parser = argparse.ArgumentParser(description='Coffee-maker Parser')
parser.add_argument('instance', metavar='--instance', type=int, help='Service current instance')
parser.add_argument('nb_replica', metavar='--replica', type=int, help='Replica number')
args = parser.parse_args()
cur_instance = args.instance
nb_replica = args.nb_replica
try:
cur_instance = int(cur_instance)
except ValueError:
print('Bad instance !')
exit(-1)
# Creates coffee-maker dict app
coffee_maker_app = dict()
# Register coffee-maker info
coffee_maker_app['instance'] = cur_instance
coffee_maker_app['nb_replica'] = nb_replica
# Registers logger
coffee_maker_app['logger'] = setup_logger()
coffee_maker_app['logger'].info(f'Coffee-maker current instance : {cur_instance}')
# Creates & registers event loop
coffee_maker_app['loop'] = uvloop.new_event_loop()
asyncio.set_event_loop(coffee_maker_app['loop'])
# Creates & registers Avro serializer
coffee_maker_app['serializer'] = AvroSerializer(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'examples/coffee_bar/avro_schemas'))
# Creates & register KafkaProducer
coffee_maker_app['producer'] = KafkaProducer(name=f'coffee-maker-{cur_instance}',
bootstrap_servers='localhost:9092',
client_id=f'coffee-maker-{cur_instance}',
serializer=coffee_maker_app['serializer'],
loop=coffee_maker_app['loop'], partitioner=KeyPartitioner(),
acks='all', transactional_id=f'coffee-maker')
# Initializes coffee-maker handlers
make_coffee_handler = MakeCoffeeHandler(coffee_maker_app['producer'])
make_coffee_result_handler = MakeCoffeeResultHandler()
coffee_started_handler = CoffeeStartedHandler()
# Registers events / handlers in serializer
coffee_maker_app['serializer'].register_class('tonga.coffeemaker.command.MakeCoffee', MakeCoffee,
make_coffee_handler)
coffee_maker_app['serializer'].register_class('tonga.coffeemaker.result.MakeCoffeeResult', MakeCoffeeResult,
make_coffee_result_handler)
coffee_maker_app['serializer'].register_class('tonga.coffeemaker.event.CoffeeStarted', CoffeeStarted,
coffee_started_handler)
# Creates & registers KafkaConsumer
coffee_maker_app['consumer'] = KafkaConsumer(name=f'coffee-maker-{cur_instance}',
serializer=coffee_maker_app['serializer'],
bootstrap_servers='localhost:9092',
client_id=f'coffee-maker-{cur_instance}',
topics=['coffee-maker-commands'],
loop=coffee_maker_app['loop'], group_id='coffee-maker',
assignors_data={'instance': cur_instance,
'nb_replica': nb_replica,
'assignor_policy': 'only_own'},
isolation_level='read_committed')
# Ensures future of KafkaConsumer
asyncio.ensure_future(coffee_maker_app['consumer'].listen_event('committed'), loop=coffee_maker_app['loop'])
# Catch SIGINT
signal(SIGINT, lambda s, f: coffee_maker_app['loop'].stop())
try:
# Runs forever
coffee_maker_app['loop'].run_forever()
except Exception:
        # If an exception was raised, stop the loop
coffee_maker_app['loop'].stop()
|
# Check If Adjacent Cells Contain Consecutive Numbers
# var0 is checked with all the variables in argv
# argv may contain non-fixed number of variables
def adjacency(var0, *argv):
passed = False
for var in argv:
passed = passed or abs(var0 - var) == 1
if passed: break
return passed
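# Example usage (illustrative): the check passes as soon as any value in argv differs
# from var0 by exactly 1.
if __name__ == '__main__':
    assert adjacency(5, 4, 9) is True   # |5 - 4| == 1
    assert adjacency(5, 3, 7) is False  # both neighbours differ by 2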
|
#!/usr/bin/python
import socket
import time
import threading
import json
import random
import argparse
import sys
from Queue import Queue
from concurrent import futures
def get_args():
"""
Get command line args from the user.
"""
parser = argparse.ArgumentParser(
description='Standard Arguments for talking to Central Index Server')
parser.add_argument('-p', '--port',
type=int,
required=True,
action='store',
help='Server Port Number')
parser.add_argument('-r', '--replica',
type=int,
default=1,
action='store',
help='Data Replication Factor')
args = parser.parse_args()
return args
class ServerOperations(threading.Thread):
def __init__(self, threadid, name, rep_factor, server_port):
"""
Constructor used to initialize class object.
@param threadid: Thread ID.
@param name: Name of the thread.
"""
threading.Thread.__init__(self)
self.threadID = threadid
self.name = name
self.replication_factor = rep_factor
self.server_port = server_port
self.hash_table_ports_peers = {}
self.hash_table_files = {}
self.hash_table_replica_files = {}
self.hash_table_peer_files = {}
self.listener_queue = Queue()
def server_listener(self):
"""
        Server Listener Method is used to start the Central Index Server listening
        on its configured port for incoming connections.
"""
try:
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_host = socket.gethostname()
server_socket.bind((server_host, self.server_port))
server_socket.listen(10)
while True:
conn, addr = server_socket.accept()
self.listener_queue.put((conn,addr))
except Exception as e:
print "Server Listener on port Failed: %s" % e
sys.exit(1)
def registry(self, addr, files, peer_port):
"""
This method is invoked by the peer trying to register itself
with the Indexing Server.
@param addr: Address of the incoming connection.
@param files: File list sent by the peer.
@param peer_port: Peer's server port.
        @return: True if registration succeeded, False otherwise.
"""
try:
self.hash_table_ports_peers[peer_port] = addr[0]
peer_id = addr[0] + ":" + str(peer_port)
self.hash_table_peer_files[peer_id] = files
for f in files:
if self.hash_table_files.has_key(f):
self.hash_table_files[f].append(peer_id)
else:
self.hash_table_files[f] = [peer_id]
return True
except Exception as e:
print "Peer registration failure: %s" % e
return False
def update(self, peer_update):
"""
This method is invoked by the peer's file handler to update the files
in the Index Server. Peer file handler invokes this method upon addition
of new file or removal of existing file.
@param peer_update: Peer File Update details.
"""
try:
if peer_update['task'] == 'add':
for f in peer_update['files']:
self.hash_table_peer_files[peer_update['peer_id']].append(f)
if self.hash_table_files.has_key(f):
self.hash_table_files[f].append(str(peer_update['peer_id']))
else:
self.hash_table_files[f] = [str(peer_update['peer_id'])]
if peer_update['task'] == 'rm':
for f in peer_update['files']:
self.hash_table_peer_files[peer_update['peer_id']].remove(f)
if self.hash_table_files.has_key(f):
for peer_id in self.hash_table_files[f]:
if peer_id == peer_update['peer_id']:
self.hash_table_files[f].remove(peer_id)
if len(self.hash_table_files[f]) == 0:
self.hash_table_files.pop(f, None)
return True
except Exception as e:
print "Peer File Update failure: %s" % e
return False
def list_files_index_server(self):
"""
        This method is used to display the list of files registered with
the Central Index Server.
@return files_list: List of files present in the server.
"""
try:
files_list = self.hash_table_files.keys()
return files_list
except Exception as e:
print "Listing Files Error, %s" % e
def search(self, file_name):
"""
This method is used to search for a particular file.
@param file_name: File name to be searched.
@return: List of Peers associated with the file.
"""
try:
if self.hash_table_files.has_key(file_name):
peer_list = self.hash_table_files[file_name]
else:
peer_list = []
return peer_list
except Exception as e:
print "Listing Files Error, %s" % e
def deregistry(self, peer_data):
"""
        This method is invoked when the Peer dies or shuts down, to
        remove all its entries from the Central Index Server.
@param peer_data: Peer data containing Peer details.
@return True/False: Return success or failure.
"""
try:
if self.hash_table_ports_peers.has_key(peer_data['hosting_port']):
self.hash_table_ports_peers.pop(peer_data['hosting_port'], None)
if self.hash_table_peer_files.has_key(peer_data['peer_id']):
self.hash_table_peer_files.pop(peer_data['peer_id'], None)
for f in peer_data['files']:
if self.hash_table_files.has_key(f):
for peer_id in self.hash_table_files[f]:
if peer_id == peer_data['peer_id']:
self.hash_table_files[f].remove(peer_id)
if len(self.hash_table_files[f]) == 0:
self.hash_table_files.pop(f, None)
return True
except Exception as e:
print "Peer deregistration failure: %s" % e
return False
def random_peer(self, f, peer_id_list, file_peer_list):
"""
        This method is used to obtain a random peer for the file to be replicated.
@return choise: Random Peer to be returned.
"""
try:
choise = random.choice(peer_id_list)
if (choise in file_peer_list) or \
(choise in self.hash_table_replica_files[f]):
choise = self.random_peer(f, peer_id_list, file_peer_list)
return choise
except Exception as e:
            print "Choosing random peer for replica error, %s" % e
def data_resilience(self):
"""
This method is used to ensure data resilience system wide.
"""
try:
while True:
perform_replication = False
peer_id_list = self.hash_table_peer_files.keys()
if len(peer_id_list) > self.replication_factor:
for key,val in self.hash_table_files.items():
if not self.hash_table_replica_files.has_key(key):
self.hash_table_replica_files[key] = []
perform_replication = True
elif len(self.hash_table_replica_files[key]) < \
self.replication_factor:
perform_replication = True
if perform_replication:
random_peer = self.random_peer(
key, peer_id_list, val)
print "file %s to be replicated to %s" % \
(key,random_peer)
peer_issue_addr, peer_issue_port = \
random_peer.split(':')
peer_issue_socket = \
socket.socket(socket.AF_INET, socket.SOCK_STREAM)
peer_issue_socket.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
peer_issue_socket.connect(
(socket.gethostname(), int(peer_issue_port)))
cmd_issue = {
'command' : 'obtain_replica',
'file_name' : key,
'peer_server' : random.choice(
self.hash_table_files[key]),
}
peer_issue_socket.sendall(json.dumps(cmd_issue))
rcv_data = json.loads(peer_issue_socket.recv(1024))
peer_issue_socket.close()
if rcv_data:
self.hash_table_replica_files[key].append(
random_peer)
elif not self.hash_table_replica_files:
print "Waiting for more active peers to start with " \
"data replication..."
time.sleep(10)
except Exception as e:
print "Data Resilience Error, %s" % e
def run(self):
"""
Starting thread to carry out server operations.
"""
try:
            print "Starting Server Listener Daemon Thread..."
listener_thread = threading.Thread(target=self.server_listener)
listener_thread.setDaemon(True)
listener_thread.start()
            print "Starting Data Resilience Daemon Thread with " \
"replication factor = %s" % self.replication_factor
dr_thread = threading.Thread(target=self.data_resilience)
dr_thread.setDaemon(True)
dr_thread.start()
while True:
while not self.listener_queue.empty():
with futures.ThreadPoolExecutor(max_workers=8) as executor:
conn, addr = self.listener_queue.get()
data_received = json.loads(conn.recv(1024))
print "Got connection from %s on port %s, requesting " \
"for: %s" % (addr[0], addr[1], data_received['command'])
if data_received['command'] == 'register':
fut = executor.submit(self.registry, addr,
data_received['files'],
data_received['peer_port'])
success = fut.result(timeout= None)
if success:
                                print "registration successful, Peer ID: %s:%s" \
% (addr[0], data_received['peer_port'])
conn.send(json.dumps([addr[0], success]))
else:
                                print "registration unsuccessful, Peer ID: %s:%s" \
% (addr[0], data_received['peer_port'])
conn.send(json.dumps([addr[0], success]))
elif data_received['command'] == 'update':
fut = executor.submit(self.update, data_received)
success = fut.result(timeout= None)
if success:
print "Update of Peer ID: %s successful" \
% (data_received['peer_id'])
conn.send(json.dumps(success))
else:
print "Update of Peer ID: %s unsuccessful" \
% (data_received['peer_id'])
conn.send(json.dumps(success))
elif data_received['command'] == 'list':
fut = executor.submit(self.list_files_index_server)
file_list = fut.result(timeout= None)
print "File list generated, %s" % file_list
conn.send(json.dumps(file_list))
elif data_received['command'] == 'search':
fut = executor.submit(self.search,
data_received['file_name'])
peer_list = fut.result(timeout= None)
print "Peer list generated, %s" % peer_list
conn.send(json.dumps(peer_list))
elif data_received['command'] == 'deregister':
fut = executor.submit(self.deregistry, data_received)
success = fut.result(timeout= None)
if success:
print "deregistration of Peer ID: %s successful" \
% (data_received['peer_id'])
conn.send(json.dumps(success))
else:
print "deregistration of Peer ID: %s unsuccessful" \
% (data_received['peer_id'])
conn.send(json.dumps(success))
print "hash table: Files || %s" % \
self.hash_table_files
print "hash table: Port-Peers || %s" % \
self.hash_table_ports_peers
print "hash table: Peer-Files || %s" % \
self.hash_table_peer_files
conn.close()
except Exception as e:
print "Server Operations error, %s " % e
sys.exit(1)
if __name__ == '__main__':
"""
    Main method to start daemon threads for listener and operations.
"""
try:
args = get_args()
print "Starting Central Indexing Server..."
print "Starting Server Operations Thread..."
operations_thread = ServerOperations(1, "ServerOperations",
args.replica, args.port)
operations_thread.start()
except Exception as e:
print e
sys.exit(1)
except (KeyboardInterrupt, SystemExit):
print "Central Index Server Shutting down..."
time.sleep(1)
sys.exit(1)
__author__ = 'arihant'
|
../mysqlconnection.py
|
__author__ = 'Elisabetta Ronchieri'
def get_longest_string(elements):
max_length,longest_element = max([(len(element),element) for element in elements])
return max_length,longest_element
def add_empty_space(current_len, longest_len):
number = longest_len - current_len + 4
return ' '.ljust(number)
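# Example usage (illustrative): pad shorter entries so that a second column starts four
# spaces past the longest entry.
if __name__ == '__main__':
    names = ['alpha', 'bb', 'gamma12']
    longest_len, _ = get_longest_string(names)
    for name in names:
        print(name + add_empty_space(len(name), longest_len) + 'ok')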
|
# Import the modules
import cv2
from sklearn.externals import joblib
from skimage.feature import hog
import numpy as np
import matplotlib.pyplot as plt
grid_line_x = 9
grid_line_y = 8
################################
###############################
#
#
#
#
def areacon(contours,area,sub):
count=0
for i in range(len(contours)):
ar = cv2.contourArea(contours[i])
# print ar,area, sub
if ar>area-sub and ar<area+sub:#detecting provision marker
contours[count]=contours[i]
#print ar,area, sub, count, i
count=count+1
#print count
return contours,count
##############################
########################################
###########################################
# calculate contours coordinates
#
#
#
#
def ccoor(contour):
M = cv2.moments(contour)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
return cx,cy
###########################################
##################################
##############################################
##################
# grid draw
#
#
#
def grid_draw(image,m,n): ## image is the input image array; m and n are the numbers of grid lines
h,k,l=image.shape
#print h,k
line_widthm=h/(m-1)
line_widthn=k/(n-1) ##calculating width between 2 consecutive parallel lines
for x in range(0, m): ##drawing lines
X=x*line_widthm
cv2.line(image,(0,X),(k,X),(0,0,255), 2)#lines is red color, bgr format
for y in range(0, n): ##drawing lines
Y=y*line_widthn
        cv2.line(image,(Y,0),(Y,h),(255,0,0), 2)#line is blue color, bgr format
return (image)
##########################
# returning grid coordinate from pixels coordinates
#
#
#
#
def getcoor(x,y,m,n):
'''
cx=x/n#(int)(round(x/m))
cy=y/n#(int)(round(y/n))
return cx,cy
'''
#img=cv2.imread(filename) ##getting input image
X=0
Y=0
for i in range(0, grid_line_x): ##
X=X+m
Y=0
for j in range(0, grid_line_y): ##
Y=Y+n
#print X,Y
if x<=X and y<=Y:
return i,j
break
##########################
# converting grid coordinates into pixels
#
#
#
#
def gridtopixel(x,y,m,n):
X=x*m+m/2
Y=y*n+n/2
return X,Y
################################
# Load the classifier
clf = joblib.load("digits_cls.pkl")
# Read the input image
im = cv2.imread("clippedtest4s.jpg")
# Convert to grayscale and apply Gaussian filtering
im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
im_gray = cv2.GaussianBlur(im_gray, (5, 5), 0)
# Threshold the image
ret, im_th = cv2.threshold(im_gray, 160, 255, cv2.THRESH_BINARY_INV)
# Find contours in the image
ctrs, hier = cv2.findContours(im_th.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
ctrs, count=areacon(ctrs,110,100)
ctrs=ctrs[:count]
#ctrs=sorted(ctrs, key = cv2.contourArea, reverse = False)[:count] ##bot
# print count
# ctrs=sorted(ctrs, key = cv2.contourArea, reverse = True)[:6] ##bot
#print ctrs
#print ctrs
#cv2.drawContours(im, ctrs, -1, (255,100,0), 2)
# Get rectangles contains each contour
h,k,l=im.shape
m=h/(grid_line_x-1)
n=k/(grid_line_y-1)
print h,k
print "m=",m," n=",n
rects = [cv2.boundingRect(ctr) for ctr in ctrs]
# # For each rectangular region, calculate HOG features and predict
# #the digit using Linear SVM.
i=0
# print len(rects)
# for rect in rects:
# cv2.rectangle(im, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (0, 255, 0), 2)
node={}
for rect in rects:
# Draw the rectangles
# cv2.rectangle(im, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (0, 255, 0), 2)
# Make the rectangular region around the digit
leng = int(rect[3] * 1.6)
pt1 = int(rect[1] + rect[3] // 2 - leng // 2)
pt2 = int(rect[0] + rect[2] // 2 - leng // 2)
roi = im_th[pt1:pt1+leng, pt2:pt2+leng]
    # Resize the image
roi = cv2.resize(roi, (28, 28), interpolation=cv2.INTER_AREA)
roi = cv2.dilate(roi, (3, 3))
# Calculate the HOG features
roi_hog_fd = hog(roi, orientations=9, pixels_per_cell=(14, 14), cells_per_block=(1, 1), visualise=False)
nbr = clf.predict(np.array([roi_hog_fd], 'float64'))
print nbr
M = cv2.moments(ctrs[i])
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
print cx,cy
cX, cY=getcoor(cy,cx, m,n)
print "grid cell",cX,cY
#print "grid cell to pixels",gridtopixel(cY,cX, m,n)
# if
# node['A']=nbr[0]
cv2.circle(im,(int(cx),int(cy)),3,(255,255,0),-11)
#node[]=
print "Area", cv2.contourArea(ctrs[i])
cv2.putText(im, str(int(nbr[0])), (rect[0], rect[1]),cv2.FONT_HERSHEY_DUPLEX, 1, (255, 100, 0), 2)
i=i+1
# print int(nbr[0])
img2=grid_draw(im,grid_line_x,grid_line_y)
cv2.imshow("Resulting Image with Rectangular ROIs", im)
cv2.imshow("Thresholded", im_th)
cv2.waitKey()
|
from django.db import models
# Create your models here.
# Define a model class; each model class maps to a database table
class Student(models.Model):
stu_name = models.CharField(max_length=16)
stu_age = models.IntegerField(default=1)
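# Minimal usage sketch (assumed, not part of the original app): after running
# `python manage.py makemigrations` and `python manage.py migrate`, rows can be
# created and queried through the ORM, e.g.
#
#     Student.objects.create(stu_name='Alice', stu_age=20)
#     Student.objects.filter(stu_age__gte=18)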
|
# Copyright (c) 2017-2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
from typing import Sequence
def unmangle_name(s: str) -> str:
"""
    Convert a name with ``$uXXXX`` character runs into its original form, which may not
    necessarily be a safe DAML identifier.
:param s: The string to interpret.
:return: A new string with the appropriate substitutions.
"""
return (
s.replace("$u002b", "+")
.replace("$u005b", "[")
.replace("$u005d", "]")
.replace("$u003c", "<")
.replace("$u003e", ">")
.replace("$u003a", ":")
.replace("$u0022", '"')
.replace("$u0028", "(")
.replace("$u0029", ")")
.replace("$u002f", "/")
.replace("$u002c", ",")
.replace("$u003d", "=")
)
def is_hidden_module_name(name: "Sequence[str]") -> bool:
if len(name) == 1 and name[0] in ("GhcPrim", "Prelude"):
return True
if len(name) >= 2:
if name[0] == "GHC":
return True
elif name[0] == "DA":
return True
elif name[0] == "Control" and name[1] == "Exception":
return True
return False
def maybe_parentheses(obj, operator: str = " ") -> str:
"""
Wrap the string (or object's string representation) in parentheses, but only if required.
:param obj:
The object to (potentially) wrap up in parentheses. If it is not a string, it is converted
to a string first.
:param operator:
The operator that needs precedence rules clarified. The default is ``' '`` (a space).
:return: The (possibly) parenthesized string.
"""
s = str(obj)
if operator not in s:
return s
# if there are already wrapping punctuation marks, there may not be a need to add additional
# ones
open_mark = s[0]
if open_mark in "([{":
groupings = [s[0]]
for c in s[1:-1]:
if c in "([{":
groupings.append(c)
elif c == "}":
if groupings[-1] == "{":
groupings.pop()
else:
groupings.clear()
elif c == "]":
if groupings[-1] == "[":
groupings.pop()
else:
groupings.clear()
elif c == ")":
if groupings[-1] == "(":
groupings.pop()
else:
groupings.clear()
if not groupings:
            # we balanced out all groupings (or just hit a syntax error in unmatched groupings);
# add clarifying parentheses
return "(" + s + ")"
if (
(groupings[0] == "(" and groupings[-1] == ")")
or (groupings[0] == "[" and groupings[-1] == "]")
or (groupings[0] == "{" and groupings[-1] == "}")
):
return s
return "(" + s + ")" if operator in s else s
def indent(text: str, spaces: int):
"""
Prepend every line of the specified text with a set number of spaces. Line endings are
preserved.
"""
prefix = " " * spaces
return "".join(prefix + t for t in text.splitlines(keepends=True))
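# Minimal self-check sketch (illustrative, not part of the library):
if __name__ == "__main__":
    assert unmangle_name("Map$u005bInt$u005d") == "Map[Int]"
    assert is_hidden_module_name(["DA", "Internal"])
    assert maybe_parentheses("a -> b", "->") == "(a -> b)"
    assert indent("x\ny\n", 2) == "  x\n  y\n"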
|
"""
Write a program that reads two integers and prints their sum.
"""
|
# -*- coding: utf-8 -*-
import sys,os,time
class cls:
t= None
def __init__(self):
print 'cls'
def p2(self):
pass
|
def combine(a, b, a_len, b_len):
i = a_len - 1
j = b_len - 1
full_length = len(a)
while (i >= 0 and j >= 0):
if (a[i] > b[j]):
a[full_length - 1] = a[i]
i -= 1
else:
a[full_length - 1] = b[j]
j -= 1
full_length -= 1
if (i < 0):
for k in range(0, full_length):
a[k] = b[k]
def main():
a = [2, 7, 9, 10, 12, 0, 0, 0, 0, 0]
b = [1, 3, 4, 8, 11]
combine(a, b, 5, 5)
print a
if __name__ == '__main__':
main()
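# Note (illustrative): combine() merges b into a in place by writing from the back of a,
# so no extra buffer is needed; with the arrays in main() the result is
# [1, 2, 3, 4, 7, 8, 9, 10, 11, 12].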
|
def fact(x):
if x==1 or x==0:
return 1
else:
return x*fact(x-1)
t=input()
for i in range(t):
n=input()
print fact(n)
|
#!/usr/bin/env python
# -*- coding=utf-8 -*-
__author__ = 'Man Li'
import os
import re
import sys
import time
import json
import random
import requests
from requests.exceptions import ReadTimeout, ConnectionError, RequestException
import csv
import hashlib
from lxml import etree
import redis
import urllib.request
from multiprocessing import Process
from itertools import chain
defaultencoding = 'utf-8'
if sys.getdefaultencoding() != defaultencoding:
reload(sys)
sys.setdefaultencoding(defaultencoding)
# USER_AGENTS: pool of User-Agent strings to pick from at random
USER_AGENTS = [
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
"Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
"Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10"
]
# Build the request headers
HEADER = {
'User-Agent': random.choice(USER_AGENTS),
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate',
}
def get_proxy():
r = redis.StrictRedis(host='192.168.1.79',port='6379',db=1)
ipnumber = r.zcard('proxy:ips')
if ipnumber <= 1:
number = 0
else:
number = random.randint(0, ipnumber-1)
a = r.zrange('proxy:ips',0,-1,desc=True)
print(a)
print(a[number].decode("utf-8"))
return a[number].decode("utf-8")
ip = get_proxy()
print(ip)
proxies = {"http":ip}
# GET a page and return its content
def get_html( url_path, proxies = proxies, payload = '', cookies = ''):
try:
s = requests.Session()
r = s.get(
            url_path,  # URL
            headers=HEADER,  # request headers
            params=payload,  # parameters; @payload is a dict or json
            cookies=cookies,  # cookies
            verify=False,  # SSL verification; False skips it, True enables it
            proxies=proxies,  # proxies
            timeout=20)  # @timeout in seconds
        r.raise_for_status()
        # prevent garbled Chinese text
        r.encoding = 'gb2312'
        return r.text
except ReadTimeout:
print('Timeout')
time.sleep(5)
return get_html(url_path, {"http":get_proxy()})
except ConnectionError:
print('Connection error')
time.sleep(5)
return get_html(url_path, {"http":get_proxy()})
except RequestException:
print('RequestException')
time.sleep(5)
return get_html(url_path, {"http":get_proxy()})
def get_headers( url_path, payload = '', cookies = '',proxies = ''):
try:
s = requests.Session()
r = s.get(
            url_path,  # URL
            headers=HEADER,  # request headers
            params=payload,  # parameters; @payload is a dict or json
            cookies=cookies,  # cookies
            verify=True,  # SSL verification; False skips it, True enables it
            proxies=proxies,  # proxies
            timeout=30)  # @timeout in seconds
        r.raise_for_status()
        #print r.headers  # response headers
        #print r.cookies  # cookies
return r.headers
except ReadTimeout:
print('Timeout')
except ConnectionError:
print('Connection error')
except RequestException:
print('RequestException')
def get_now_Location( url_path, payload = '', cookies = '',proxies = ''):
try:
s = requests.Session()
r = s.get(
            url_path,  # URL
            headers=HEADER,  # request headers
            params=payload,  # parameters; @payload is a dict or json
            cookies=cookies,  # cookies
            verify=True,  # SSL verification; False skips it, True enables it
            proxies=proxies,  # proxies
            timeout=30)  # @timeout in seconds
        r.raise_for_status()
        #print r.headers  # response headers
        #print r.cookies  # cookies
return r.url
except ReadTimeout:
print('Timeout')
except ConnectionError:
print('Connection error')
except RequestException:
print('RequestException')
#Post
def post_html( url_path, datas, payload = '', cookies = '',proxies = ''):
try:
s = requests.Session()
r = s.post(
            url_path,  # URL
            headers=HEADER,  # request headers
            data = datas,  # POST body
            params=payload,  # parameters; @payload is a dict or json
            cookies=cookies,  # cookies
            verify=True,  # SSL verification; False skips it, True enables it
            proxies=proxies,  # proxies
            timeout=30)  # @timeout in seconds
        #r.raise_for_status()
        #print r.headers  # response headers
        #print r.cookies  # cookies
return r.text
except ReadTimeout:
print('Timeout')
except ConnectionError:
print('Connection error')
except RequestException:
print('RequestException')
#testurl1 = "http://college.gaokao.com/schlist/"
#datas1 = get_html(testurl1)
#print(datas1)
def get_dl_data(html):
reg = r"<dl.+?</dl>"
reger = re.compile(reg, re.S)
data = re.findall(reger, str(html))
return data
def filter_href(html):
html = str(html)
reg = r"(?<=href=\").+?(?=\")|(?<=href=\').+?(?=\')"
reger = re.compile(reg)
data = re.findall(reger, html)
return data
def get_college_names(html):
#/html/body/div[5]/div[4]/h2
html = str(html)
all_list_datas = []
datas = etree.HTML(html)
info = datas.xpath('/html/body//div[@class="bg_sez"]/h2/text()')
#print(info)
#t = etree.tostring(info[0], encoding="utf-8", pretty_print=True)
return info[0]
# Get the college info div
def get_college_info_div(html):
try:
html = str(html)
all_list_datas = []
datas = etree.HTML(html)
info = datas.xpath('/html/body//div[@class="college_msg bk"]')
print(info)
t = etree.tostring(info[0], encoding="utf-8", pretty_print=True)
return t.decode("utf-8")
except Exception as e:
return ""
#Generate a college ID
def set_college_id(src):
src+=str(time.time())
m2 = hashlib.md5()
m2.update(src.encode('utf-8'))
useruuid = m2.hexdigest()
return useruuid
#Extract the college logo URL
def filter_src(html):
reg = r"(?<=src=\").+?(?=\")|(?<=src=\').+?(?=\')"
reger = re.compile(reg)
data = re.findall(reger, html)
if data!=[]:
return data[0]
else:
return 0
#Extract <ul> blocks
def get_ul_data(html):
reg = r"<ul.+?</ul>"
reger = re.compile(reg, re.S)
data = re.findall(reger, str(html))
return data
#Extract <li> blocks
def get_li_data(html):
reg = r"<li.+?</li>"
reger = re.compile(reg, re.S)
data = re.findall(reger, str(html))
return data
#Extract <span> contents
def get_span_data(html):
reg = r"<span.+?>(.+?)</span>"
reger = re.compile(reg, re.S)
data = re.findall(reger, str(html))
return data
#gxlvshu: clean the "affiliated with" field
def gxlvy_qinxi(html):
reg = r"高校隶属于:(.+?)</li>"
reger = re.compile(reg, re.S)
data = re.findall(reger, str(html))
if data == []:
return ""
else:
return data[0]
#gxszd: clean the "located in" field
def gxszd_qinxi(html):
reg = r"高校所在地:(.+?)</li>"
reger = re.compile(reg, re.S)
data = re.findall(reger, str(html))
if data == []:
return ""
else:
return data[0]
#Clean the academician count
def gx_ys_qinxi(html):
reg = r"院士:(.+?) "
reger = re.compile(reg, re.S)
data = re.findall(reger, str(html))
if data == []:
return ""
else:
return data[0]
#Clean the doctoral program count
def gx_bs_qinxi(html):
reg = r"博士点:(.+?) "
reger = re.compile(reg, re.S)
data = re.findall(reger, str(html))
if data == []:
return ""
else:
return data[0]
#Clean the master's program count
def gx_ss_qinxi(html):
reg = r"硕士点:(.+?)</"
reger = re.compile(reg, re.S)
data = re.findall(reger, str(html))
if data == []:
return ""
else:
return data[0]
#Mailing address
def txdz(html):
reg = r"通讯地址:(.+?)<br"
reger = re.compile(reg, re.S)
data = re.findall(reger, str(html))
if data == []:
return ""
else:
return data[0]
#Contact phone
def lxdh(html):
reg = r"联系电话:(.+?)<br"
reger = re.compile(reg, re.S)
data = re.findall(reger, str(html))
if data == []:
return ""
else:
return data[0]
#Email address
def dzyx(html):
reg = r"电子邮箱:(.+?)<br"
reger = re.compile(reg, re.S)
data = re.findall(reger, str(html))
if data == []:
return ""
else:
return data[0]
#School website
def xxwz(html):
reg = r"学校网址:(.+?)</p>"
reger = re.compile(reg, re.S)
data = re.findall(reger, str(html))
if data == []:
return ""
else:
return data[0]
#Download the logo image
def downld_img(url,inputid):
'''
dex = url.split(".")[-1]
Path_img = "F:/ttys/"+inputid+"."+dex
dbimg = "/ttys/"+inputid+"."+dex
urllib.request.urlretrieve(url,Path_img)
add_sql(ysid,dbimg)
print("download succees ====> "+Path_img)
'''
try:
#dex = url.split(".")[-1]
Path_img = "F:/college_img/"+inputid+"_img.png"
#dbimg = "/ttys/"+inputid+"."+dex
urllib.request.urlretrieve(url,Path_img)
#add_sql(ysid,dbimg)
print("download succees ====> "+Path_img)
return inputid+"_img.png"
except Exception as e:
print(e)
return ""
'''
#Universities 1 to 2667 http://college.gaokao.com/school/3/
testurl2 = "http://college.gaokao.com/school/1/"
datas2 = get_html(testurl2)
#print(datas2)
#College name
college_names = get_college_names(datas2).strip()
print(college_names)
college_ids = set_college_id(testurl2)
print(college_ids)
#College info
college_info_div = get_college_info_div(datas2)
print(college_info_div)
#College logo URL
collegeimgs = filter_src(college_info_div)
print(collegeimgs)
if collegeimgs != False:
college_img_name = downld_img(collegeimgs,college_names)
else:
college_img_name = ""
ulinfos = get_ul_data(college_info_div)
uls1 = ulinfos[0]
uls2 = ulinfos[1]
print(uls1)
print(uls2)
lidatas = get_li_data(uls1)
print(lidatas)
#College type
gxtype = lidatas[0]
#Affiliation
gxlvshu = lidatas[1]
#Location
gxchengshi = lidatas[2]
#Academicians, doctoral and master's programs
gx_y_b_s = lidatas[3]
print("【高校名称】 : "+college_names)
print("【高校图标】 : "+college_img_name)
college_type_val = "|".join(get_span_data(gxtype))
print("【高校类型】 : "+college_type_val)
college_lsy_val = gxlvy_qinxi(gxlvshu)
print("【高校隶属于】 : "+college_lsy_val)
college_szd_val = gxszd_qinxi(gxchengshi)
print("【高校所在地】 : "+college_szd_val)
college_ys_number = gx_ys_qinxi(gx_y_b_s)
print("【院士】 : "+college_ys_number)
college_bs_number = gx_bs_qinxi(gx_y_b_s)
print("【博士点】 : "+college_bs_number)
college_ss_number = gx_ss_qinxi(gx_y_b_s)
print("【硕士点】 : "+college_ss_number)
college_txdz_val = txdz(uls2)
print("【通讯地址】 : "+college_txdz_val)
college_lxdh_val = lxdh(uls2)
print("【联系电话】 : "+college_lxdh_val)
college_dzyx_val = dzyx(uls2)
print("【电子邮箱】 : "+college_dzyx_val)
college_xxwz_val = xxwz(uls2)
print("【学校网址】 : "+college_xxwz_val)
'''
#College introduction http://college.gaokao.com/school/tinfo/1/intro/
#yuanxiao_jianjie_urls = "http://college.gaokao.com/school/tinfo/1/intro/"
#yuanxiao_jianjie_datas = get_html(yuanxiao_jianjie_urls)
#Extract the college introduction HTML
def yuanxiao_jianjie_qinxi(html):
html = str(html)
all_list_datas = []
datas = etree.HTML(html)
info = datas.xpath('/html/body//div[@class="jj"]')
print(info)
t = etree.tostring(info[0], encoding="utf-8", pretty_print=True)
return t.decode("utf-8")
#Extract the introduction text
def yuanxiao_jianjie_getdatas(html):
reg = r"<p> (.+?)</p> "
reger = re.compile(reg, re.S)
data = re.findall(reger, str(html))
if data == []:
return ""
else:
return data[0]
#Final cleanup of the introduction text
def yuanxiao_jianjie_endqinxi(html):
qinxitxt = ['\n','\t','\u3000',' ']
for qxtxt in qinxitxt:
html = html.replace(qxtxt,"")
return html
'''
yuanxiao_jianjie_val = yuanxiao_jianjie_qinxi(yuanxiao_jianjie_datas)
#print("【院校简介】 : "+yuanxiao_jianjie_val)
yuanxiao_jianjie_val2 = yuanxiao_jianjie_getdatas(yuanxiao_jianjie_val)
#print(yuanxiao_jianjie_val2)
print()
yuanxiao_jianjie_val3 = yuanxiao_jianjie_endqinxi(yuanxiao_jianjie_val2)
print("【院校简介】 : "+yuanxiao_jianjie_val3)
'''
# Faculty strength http://college.gaokao.com/school/tinfo/1/shizi/
#yuanxiao_sizililang_urls = "http://college.gaokao.com/school/tinfo/1/shizi/"
#yuanxiao_sizililang_datas = get_html(yuanxiao_sizililang_urls)
'''
yuanxiao_sizililang_val = yuanxiao_jianjie_qinxi(yuanxiao_sizililang_datas)
print("【师资力量】 : "+yuanxiao_sizililang_val)
'''
# Departments and schools http://college.gaokao.com/school/tinfo/1/yuanxi/
#yuanxiao_yuanxisz_urls = "http://college.gaokao.com/school/tinfo/1/yuanxi/"
#yuanxiao_yuanxisz_datas = get_html(yuanxiao_yuanxisz_urls)
'''
yuanxiao_yuanxisz_val = yuanxiao_jianjie_qinxi(yuanxiao_yuanxisz_datas)
print("【院系设置】 : "+yuanxiao_yuanxisz_val)
'''
# Majors offered http://college.gaokao.com/school/tinfo/1/schspe/
#yuanxiao_zhuanye_urls = "http://college.gaokao.com/school/tinfo/1/schspe/"
#yuanxiao_zhuanye_datas = get_html(yuanxiao_zhuanye_urls)
#Clean the majors HTML
def yuanxiao_zhuanye_qinxi(html):
html = str(html)
all_list_datas = []
datas = etree.HTML(html)
info = datas.xpath('/html/body//div[@class="plan_con"]')
print(info)
t = etree.tostring(info[0], encoding="utf-8", pretty_print=True)
return t.decode("utf-8")
'''
yuanxiao_zhuanye_val = yuanxiao_zhuanye_qinxi(yuanxiao_zhuanye_datas)
print("【专业设置】 : "+yuanxiao_zhuanye_val)
'''
#Contact information http://college.gaokao.com/school/tinfo/1/lianxi/
#yuanxiao_lianxifs_urls = "http://college.gaokao.com/school/tinfo/1/lianxi/"
#yuanxiao_lianxifs_datas = get_html(yuanxiao_lianxifs_urls)
'''
yuanxiao_lianxifs_val = yuanxiao_jianjie_qinxi(yuanxiao_lianxifs_datas)
print("【联系方式】 : "+yuanxiao_lianxifs_val)
yuanxiao_lianxifs_val2 = yuanxiao_jianjie_getdatas(yuanxiao_lianxifs_val)
print(yuanxiao_lianxifs_val2)
'''
#Admission scores http://college.gaokao.com/school/tinfo/1/result/1/1/
#yuanxiao_luqu_urls = "http://college.gaokao.com/school/tinfo/1/result/1/1/"
# [crawled by a separate spider]
#http://college.gaokao.com/school/tinfo/<school>/result/<region>/<track>/
'''
Beijing=1 Tianjin=2 Liaoning=3 Jilin=4 Heilongjiang=5 Shanghai=6 Jiangsu=7 Zhejiang=8 Anhui=9 Fujian=10 Shandong=11 Hubei=12 Hunan=13
Guangdong=14 Chongqing=15 Sichuan=16 Shaanxi=17 Gansu=18 Hebei=19 Shanxi=20 Inner Mongolia=21 Henan=22 Hainan=23 Guangxi=24 Guizhou=25
Yunnan=26 Tibet=27 Qinghai=28 Ningxia=29 Xinjiang=30 Jiangxi=31 Hong Kong=33 Macau=38 Taiwan=39
Science=1 Humanities=2 Comprehensive=3 Other=4 Arts (science)=8 Arts (humanities)=9 Comprehensive reform=10
'''
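# A minimal sketch (not part of the original crawler) showing how the score-page URLs
# could be built from the region/track code tables above; the dictionaries and the helper
# name build_score_url below are illustrative assumptions only.
REGION_CODES = {"Beijing": 1, "Tianjin": 2, "Shanghai": 6, "Guangdong": 14}
TRACK_CODES = {"science": 1, "humanities": 2}

def build_score_url(school_id, region, track):
    """Return the admission-score page for one school/region/track combination."""
    return ("http://college.gaokao.com/school/tinfo/"
            + str(school_id) + "/result/"
            + str(REGION_CODES[region]) + "/"
            + str(TRACK_CODES[track]) + "/")

# build_score_url(1, "Beijing", "science") -> "http://college.gaokao.com/school/tinfo/1/result/1/1/"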
def run1(urls_number):
#College home page URL
testurl2 = "http://college.gaokao.com/school/"+str(urls_number)+"/"
datas2 = get_html(testurl2)
#print(datas2)
#College introduction
yuanxiao_jianjie_urls = "http://college.gaokao.com/school/tinfo/"+str(urls_number)+"/intro/"
yuanxiao_jianjie_datas = get_html(yuanxiao_jianjie_urls)
#Faculty strength
yuanxiao_sizililang_urls = "http://college.gaokao.com/school/tinfo/"+str(urls_number)+"/shizi/"
yuanxiao_sizililang_datas = get_html(yuanxiao_sizililang_urls)
#Departments and schools
yuanxiao_yuanxisz_urls = "http://college.gaokao.com/school/tinfo/"+str(urls_number)+"/yuanxi/"
yuanxiao_yuanxisz_datas = get_html(yuanxiao_yuanxisz_urls)
#Majors offered
yuanxiao_zhuanye_urls = "http://college.gaokao.com/school/tinfo/"+str(urls_number)+"/schspe/"
yuanxiao_zhuanye_datas = get_html(yuanxiao_zhuanye_urls)
#Contact information
yuanxiao_lianxifs_urls = "http://college.gaokao.com/school/tinfo/"+str(urls_number)+"/lianxi/"
yuanxiao_lianxifs_datas = get_html(yuanxiao_lianxifs_urls)
#Admission scores
yuanxiao_luqu_urls = "http://college.gaokao.com/school/tinfo/"+str(urls_number)+"/result/1/1/"
#print(datas2)
#College name
college_names = get_college_names(datas2).strip()
print(college_names)
college_ids = set_college_id(testurl2)
#print(college_ids)
#College info
college_info_div = get_college_info_div(datas2)
print(college_info_div)
#College logo URL
collegeimgs = filter_src(college_info_div)
#print(collegeimgs)
if collegeimgs != False :
college_img_name = downld_img(collegeimgs,college_names)
else:
college_img_name = "isnull"
ulinfos = get_ul_data(college_info_div)
if ulinfos !=[]:
uls1 = ulinfos[0]
uls2 = ulinfos[1]
else:
uls1 = ""
uls2 = ""
#print(uls1)
#print(uls2)
lidatas = get_li_data(uls1)
#print(lidatas)
#College type
if lidatas!=[]:
gxtype = lidatas[0]
#Affiliation
gxlvshu = lidatas[1]
#Location
gxchengshi = lidatas[2]
#Academicians, doctoral and master's programs
gx_y_b_s = lidatas[3]
else:
gxtype = ""
#Affiliation
gxlvshu = ""
#Location
gxchengshi = ""
#Academicians, doctoral and master's programs
gx_y_b_s = ""
#print("【高校名称】 : "+college_names)
#print("【高校图标】 : "+college_img_name)
college_type_val = "|".join(get_span_data(gxtype))
#print("【高校类型】 : "+college_type_val)
college_lsy_val = gxlvy_qinxi(gxlvshu)
#print("【高校隶属于】 : "+college_lsy_val)
college_szd_val = gxszd_qinxi(gxchengshi)
#print("【高校所在地】 : "+college_szd_val)
college_ys_number = gx_ys_qinxi(gx_y_b_s)
#print("【院士】 : "+college_ys_number)
college_bs_number = gx_bs_qinxi(gx_y_b_s)
#print("【博士点】 : "+college_bs_number)
college_ss_number = gx_ss_qinxi(gx_y_b_s)
#print("【硕士点】 : "+college_ss_number)
college_txdz_val = txdz(uls2)
#print("【通讯地址】 : "+college_txdz_val)
college_lxdh_val = lxdh(uls2)
#print("【联系电话】 : "+college_lxdh_val)
college_dzyx_val = dzyx(uls2)
#print("【电子邮箱】 : "+college_dzyx_val)
college_xxwz_val = xxwz(uls2)
#print("【学校网址】 : "+college_xxwz_val)
yuanxiao_jianjie_val = yuanxiao_jianjie_qinxi(yuanxiao_jianjie_datas)
#print("【院校简介】 : "+yuanxiao_jianjie_val)
yuanxiao_jianjie_val2 = yuanxiao_jianjie_getdatas(yuanxiao_jianjie_val)
#print(yuanxiao_jianjie_val2)
#print()
yuanxiao_jianjie_val3 = yuanxiao_jianjie_endqinxi(yuanxiao_jianjie_val2)
#print("【院校简介】 : "+yuanxiao_jianjie_val3)
yuanxiao_sizililang_val = yuanxiao_jianjie_qinxi(yuanxiao_sizililang_datas)
#print("【师资力量】 : "+yuanxiao_sizililang_val)
yuanxiao_yuanxisz_val = yuanxiao_jianjie_qinxi(yuanxiao_yuanxisz_datas)
#print("【院系设置】 : "+yuanxiao_yuanxisz_val)
yuanxiao_zhuanye_val = yuanxiao_zhuanye_qinxi(yuanxiao_zhuanye_datas)
#print("【专业设置】 : "+yuanxiao_zhuanye_val)
yuanxiao_lianxifs_val = yuanxiao_jianjie_qinxi(yuanxiao_lianxifs_datas)
print("【联系方式】 : "+yuanxiao_lianxifs_val)
yuanxiao_lianxifs_val2 = yuanxiao_jianjie_getdatas(yuanxiao_lianxifs_val)
print(yuanxiao_lianxifs_val2)
#Save data set 1 (college info table): name, ID, logo, type, affiliation, location, academicians, doctoral programs, master's programs, mailing address, phone, email, website,
# introduction, contact information, source URL
print("【高校名称】 : "+college_names)
print("【高校ID】 : "+college_ids)
print("【高校图标】 : "+str(college_img_name))
print("【高校类型】 : "+college_type_val)
print("【高校隶属于】 : "+college_lsy_val)
print("【高校所在地】 : "+college_szd_val)
print("【院士】 : "+college_ys_number)
print("【博士点】 : "+college_bs_number)
print("【硕士点】 : "+college_ss_number)
print("【通讯地址】 : "+college_txdz_val)
print("【联系电话】 : "+college_lxdh_val)
print("【电子邮箱】 : "+college_dzyx_val)
print("【学校网址】 : "+college_xxwz_val)
print("【院校简介】 : "+yuanxiao_jianjie_val3)
print("【联系方式】 : "+yuanxiao_lianxifs_val2)
print("【源数据地址】 : "+testurl2)
#Write the row to CSV
addlist_1 = [college_names, college_ids, college_img_name, college_type_val, college_lsy_val, college_szd_val, college_ys_number, college_bs_number,
college_ss_number, college_txdz_val, college_lxdh_val, college_dzyx_val, college_xxwz_val, yuanxiao_jianjie_val3, yuanxiao_lianxifs_val2,
testurl2]
print(addlist_1)
with open("D:/xuexuao_info_5.csv", 'a', newline='', encoding='utf-8') as f:
print(" ===> add ok !!!")
csv_write = csv.writer(f,dialect='excel')
csv_write.writerow(addlist_1)
print("__________________________________\n")
#Save data set 2 (department crawl table): name, ID, departments, departments source URL, faculty, faculty source URL, majors, majors source URL,
# admission-scores source URL
print("【高校名称】 : "+college_names)
print("【高校ID】 : "+college_ids)
print("【院系设置】 : "+yuanxiao_yuanxisz_val)
print("【院系设置-源数据地址】 : "+yuanxiao_yuanxisz_urls)
print("【师资力量】 : "+yuanxiao_sizililang_val)
print("【师资力量-源数据地址】 : "+yuanxiao_sizililang_urls)
print("【专业设置】 : "+yuanxiao_zhuanye_val)
print("【专业设置-源数据地址】 : "+yuanxiao_zhuanye_urls)
print("【录取分数-源数据地址】 : "+yuanxiao_luqu_urls)
addlist_2 = [college_names, college_ids, yuanxiao_yuanxisz_val, yuanxiao_yuanxisz_urls, yuanxiao_sizililang_val, yuanxiao_sizililang_urls,
yuanxiao_zhuanye_val, yuanxiao_zhuanye_urls, yuanxiao_luqu_urls]
print(addlist_2)
with open("D:/xuexuao_pachong_5.csv", 'a', newline='', encoding='utf-8') as f:
print(" ===> add ok !!!")
csv_write = csv.writer(f,dialect='excel')
csv_write.writerow(addlist_2)
print("__________________________________\n")
n=1
while n<=2667:
#these IDs have no school page and return 404s
if n in [709,747,1823,2402,2403,2404,2405,2406,2407]:
n+=1
continue
print("____________________***** "+str(n)+" *****____________________________")
run1(n)
time.sleep(1)
n+=1
#known issues: 154 and 693 (added manually)
|
# -*- encoding: utf-8 -*-
#########################################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-TODAY Probuse Consulting Service Pvt. Ltd. (<http://probuse.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
from openerp import pooler
class account_voucher_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(account_voucher_report, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'getLines': self._voucher_line_get,
})
self.context = context
def _voucher_line_get(self, voucher):
domain = []
voucherline_obj = pooler.get_pool(self.cr.dbname).get('account.voucher.line')
domain.append(('voucher_id','=',voucher.id))
if voucher.type in ('receipt', 'sale'):
domain.append(('type', '=', 'cr'))
else:#for supplier payment and supplier receipts.
domain.append(('type', '=', 'dr'))
voucherlines = voucherline_obj.search(self.cr, self.uid, domain, context=self.context)
return voucherline_obj.browse(self.cr, self.uid, voucherlines, context=self.context)
class report_test(osv.AbstractModel):
_name = "report.print_account_voucher.print_voucher_report_all"
_inherit = "report.abstract_report"
_template = "print_account_voucher.print_voucher_report_all"
_wrapped_report_class = account_voucher_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
# encoding=utf8
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy
import sys
try:
reload(sys)
sys.setdefaultencoding('utf-8')
except NameError:
pass
extensions = [
Extension('im2col_cython', ['im2col_cython.pyx'],
include_dirs = [numpy.get_include()]
),
]
setup(
ext_modules = cythonize(extensions),
)
|
from newsletter.models import User
from django.forms import ModelForm
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
class SubscribeForm(ModelForm):
"""
Form that allows a user to input his or her email address and select a city
from a list of the top 100 U.S. cities by population. Both fields from the
User model are used - no exclusions.
"""
class Meta:
model = User
exclude = []
def __init__(self, *args, **kwargs):
"""
Add crispy_forms helper for adding a submit button and override a
couple field properties to better fit the provided screenshot.
"""
self.helper = FormHelper()
self.helper.add_input(Submit('submit', 'Subscribe', css_class="btn-block"))
super(SubscribeForm, self).__init__(*args, **kwargs)
self.fields['location'].empty_label = "Where do you live?"
self.fields['email'].label = "Email Address"
|
# This is used for generating the Keybox index file used when applying for the Widevine/Attestation keybox.
from sys import argv
script, first, second, third, fourth = argv
# fileName: Keybox index file which contains the device IDs.
# prefix: prefix containing the project name for the device ID, such as: MERCURY_CN_.
# start: start ID; if start is 1, then the first device ID is: MERCURY_CN_00000001.
# keyboxNum: the number of keybox IDs for this file; it should be 25000 as BBRY requested.
def GenKeyboxFile(fileName, prefix, start, keyboxNum):
fp = open(fileName, 'w')
try:
for num in range(int(start), int(keyboxNum)):
# temp = "%08d" % num # format the tring to 8 digitals with '0'.
temp = str(num).zfill(8) # fill with 0, untill to 8 digitals.
buf = prefix + temp + '\n'
fp.writelines(buf);
except Exception, e:
print Exception, ":", e
finally:
fp.close()
GenKeyboxFile(first, second, third, fourth)
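# Hypothetical invocation (the script name, file name and count below are examples only):
#   python gen_keybox.py keybox_index.txt MERCURY_CN_ 1 25001
# which writes MERCURY_CN_00000001 ... MERCURY_CN_00025000, one ID per line.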
|
class PeopleService(object):
def __init__(self, people_repository):
self.repository = people_repository
def get_all(self):
people = self.repository.get_all()
return [person['name'] for person in people]
def get_friends(self, name):
raw_friends = self.repository.get_friends(name)
if len(raw_friends) == 0:
return raw_friends
people = [
*raw_friends.get('knows', []),
*raw_friends.get('~knows', [])
]
return [person['name'] for person in people]
def get_unknown_people(self, name):
people = self.get_friends(name)
unkown_people = []
for person in people:
friends = self.get_friends(person)
if len(friends) == 0:
return friends
for friend in friends:
if friend not in people and friend != name:
unkown_people.append(friend)
return unkown_people
def add_people(self, payload):
people = self.get_all()
for key, values in payload.items():
for value in values:
if value not in people:
return None
uids = self.repository.get_uids(values)
if self.repository.add_person(key, uids) is None:
return None
return True
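# A minimal usage sketch (not part of the original module), assuming a stub repository
# that answers the calls PeopleService makes above; the sample data is invented.
class _StubRepository:
    def get_all(self):
        # repository rows are dicts with at least a 'name' key, as consumed above
        return [{'name': 'alice'}, {'name': 'bob'}]

    def get_friends(self, name):
        # 'knows' holds outgoing edges and '~knows' incoming ones, as read above
        return {'knows': [{'name': 'bob'}]} if name == 'alice' else {}

if __name__ == '__main__':
    service = PeopleService(_StubRepository())
    print(service.get_all())             # ['alice', 'bob']
    print(service.get_friends('alice'))  # ['bob']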
|
# -*- coding: utf-8 -*-
import ECDSA
from queue import Queue
import random
# Initialization
def init():
print("확장된 스택 프로세서 입니다.")
print("메시지 소유권을 지정하고 확인합니다.\n")
stack = []
cnt = 5
return stack, cnt
# Print the remaining attempt count
def print_cnt(cnt):
if cnt <= 0:
print("남은 기회가 없습니다.")
else:
print("남은 기회 : ", cnt)
return
# Print the stack contents
def print_stack(stack):
if len(stack) == 0:
print("stack is empty")
else:
print("========== stack ===========")
if type(stack[-1]) is tuple:
print("top ----> ", (hex(stack[-1][0])[2:], hex(stack[-1][1])[2:]))
else:
print("top ----> ", stack[-1])
for i in range(len(stack) - 2, -1, -1):
print("============================")
if type(stack[i]) is tuple:
print(" ", (hex(stack[i][0])[2:], hex(stack[i][1])[2:]))
else:
print(" ", stack[i])
print('============================\n')
return
# Build the script
# parameter - signature, pubkey : the recipient's signature and public key
# msg, mode : the encrypted message and the test mode
def make_script(cnt, sig, pubkey, msg, mode):
print_cnt(cnt)
print("================================================")
print("명령어는 다음과 같이 입력됩니다.")
print("<서명(hex)> <공개키(hex)> 복사명령어 해쉬명령어 <공개키 해쉬값(hex)> 동일검증명령어 서명검증명령어")
print("<r,s> <Qx,Qy> OP_DUP OP_HASH <HashX,HashY> OP_EQUALVERIFY OP_CHECKSIG")
print("ex) <abcd,0123> <01234,abcde> OP_DUP OP_HASH <0000,ffff> OP_EQUALVERIFY OP_CHECKSIG")
print("================================================")
if mode == 5:
scripts = input("명령어를 입력하세요 : ")
else:
if mode == 1:
(Qx, Qy) = pubkey
if mode == 2:
AttackerKey = random.randint(1, ECDSA.n)
(Qx, Qy) = ECDSA.multiple(ECDSA.Gx, ECDSA.Gy, AttackerKey, ECDSA.p, ECDSA.a)
if mode == 3:
AttackerKey = random.randint(1, ECDSA.n)
(Qx, Qy) = ECDSA.multiple(ECDSA.Gx, ECDSA.Gy, AttackerKey, ECDSA.p, ECDSA.a)
pubkey = (Qx, Qy)
if mode == 4:
AttackerKey = random.randint(1, ECDSA.n)
AttackerSig = ECDSA.make_signature(msg, AttackerKey)
sig = AttackerSig[0]
(Qx, Qy) = pubkey
Qx_bytes = Qx.to_bytes(32, 'big')
Qy_bytes = Qy.to_bytes(32, 'big')
Hash_Qx = ECDSA.Hashing(Qx_bytes)
Hash_Qy = ECDSA.Hashing(Qy_bytes)
Hash_Qx = ''.join(format(x, '02x') for x in Hash_Qx)
Hash_Qy = ''.join(format(x, '02x') for x in Hash_Qy)
scripts = ""
scripts += "<" + hex(sig[0])[2:] + "," + hex(sig[1])[2:] + "> "
scripts += "<" + hex(pubkey[0])[2:] + "," + hex(pubkey[1])[2:] + "> "
scripts += "OP_DUP OP_HASH "
scripts += "<" + str(Hash_Qx) + "," + str(Hash_Qy) + "> "
scripts += "OP_EQUALVERIFY OP_CHECKSIG "
scriptPubKey = []
scriptSig = []
for i in range(len(scripts.split())):
if i == 0 or i == 1:
scriptSig.append(scripts.split()[i])
else:
scriptPubKey.append(scripts.split()[i])
return scripts, scriptPubKey, scriptSig, cnt
# Convert the script string into a queue
def scripts_to_queue(scripts, cnt):
queue = Queue()
scripts = scripts.split()
OperDict = {'OP_DUP' : 0x76, 'OP_HASH' : 0xAA, 'OP_EQUALVERIFY' : 0x88, 'OP_CHECKSIG' : 0xAC}
valid_script = True
for oper in scripts:
if oper[0:3] == "OP_":
if oper in OperDict.keys():
queue.put(OperDict[oper])
else:
print("잘못된 명령어입니다.\n")
cnt -= 1
valid_script = False
break
else:
if oper[0] == '<':
try:
numbers = oper.split(',')
num1 = int('0x' + numbers[0][1:], 16)
num2 = int('0x' + numbers[1][:-1], 16)
num_byte = (len(numbers[0][1:]) // 2) + (len(numbers[1][:-1]) // 2)
queue.put(num_byte)
queue.put((num1, num2))
except:
print("잘못된 값입니다.\n")
cnt -= 1
valid_script = False
break
else:
print("잘못된 입력입니다.\n")
cnt -= 1
valid_script = False
break
return queue, cnt, valid_script
# Execute one operation
# op_code : the operation code to run
# msg : the message
# scripts : the full command sequence, type = Queue
def oper(stack, msg, scripts):
# decide the op_code from the front of the scripts queue
op_code = scripts.get()
# track whether execution is still valid
valid = True
# push a value
# for a valid length op_code, push the value made of that many bytes
# ex. op_code = 0x02, scripts = 0x01 0x02 0x03
# pushes 0x0203 = 515 onto the stack
if op_code >= 0x01 and op_code <=0x4B:
print("push 입니다.")
num = scripts.get()
stack.append(num)
print_stack(stack)
return stack, valid
# OP_DUP
# duplicate the top value of the stack
elif op_code == 0x76:
print("duplicate 입니다.")
st_top = stack[-1]
stack.append(st_top)
print_stack(stack)
return stack, valid
# OP_HASH
# apply LSH256 to the top value of the stack
# the LSH library expects the input as 32 bytes
# ex. 1 = b'\x00\x00\.....\x01'
elif op_code == 0xAA:
print("hash 입니다.")
pubkey = stack.pop()
Qx = pubkey[0]
Qy = pubkey[1]
Qx_bytes = Qx.to_bytes(32, 'big')
Qy_bytes = Qy.to_bytes(32, 'big')
Hash_Qx = ECDSA.Hashing(Qx_bytes)
Hash_Qy = ECDSA.Hashing(Qy_bytes)
Hash_Qx = ''.join(format(x, '02x') for x in Hash_Qx)
Hash_Qy = ''.join(format(x, '02x') for x in Hash_Qy)
Hash_Qx = int('0x' + Hash_Qx, 16)
Hash_Qy = int('0x' + Hash_Qy, 16)
pubkeyHash = (Hash_Qx, Hash_Qy)
stack.append(pubkeyHash)
print_stack(stack)
return stack, valid
# OP_EQUALVERIFY
# verify the hash values match
elif op_code == 0x88:
print("equal and verify 입니다.")
hash1 = stack.pop()
hash2 = stack.pop()
if hash1 == hash2:
print("hash 값이 일치합니다.")
else:
valid = False
print("hash 값이 일치하지 않습니다.")
print_stack(stack)
return stack, valid
# OP_CHECKSIG
# verify the signature
elif op_code == 0xAC:
print("check signature 입니다.")
pubkey = stack.pop()
sig = stack.pop()
verifying = ECDSA.sig_verification(sig, pubkey, msg)
if verifying == True:
print("서명 검증에 성공하였습니다.")
stack.append("True")
else:
valid = False
print("서명 검증에 실패하였습니다.")
stack.append("False")
print_stack(stack)
return stack, valid
else:
print("올바르지 않은 명령어(op_code)입니다.")
valid = False
print_stack(stack)
return stack, valid
|
# -*- coding: utf-8 -*-
from flask import request, Blueprint
from flask import g
import logging
from libs.crossdomain import crossdomain
from libs.util import make_response
from libs.util import create_access_token
from libs.response_meta import ResponseMeta
from .authorization import require_client_auth
from models.user import User
from models.app import App
from models.customer import Customer
app = Blueprint('customer', __name__)
@app.route("/customer/register", methods=["POST"])
@crossdomain(origin='*', headers=['Authorization'])
@require_client_auth
def customer_register():
rds = g.rds
db = g._db
obj = request.get_json(force=True, silent=True, cache=False)
if obj is None:
logging.debug("json decode err")
raise ResponseMeta(400, "json decode error")
appid = obj.get("appid", 0)
uid = obj.get("customer_id", "")
name = obj.get("name", "")
avatar = obj.get("avatar", "")
if not appid:
raise ResponseMeta(400, "invalid param")
store_id = App.get_store_id(db, appid)
if not store_id:
raise ResponseMeta(400, "app do not support customer")
if not uid:
client_id = Customer.generate_client_id(rds)
else:
client_id = Customer.get_client_id(rds, appid, uid)
if not client_id:
client_id = Customer.generate_client_id(rds)
Customer.set_client_id(rds, appid, uid, client_id)
token = User.get_user_access_token(rds, appid, client_id)
if not token:
token = create_access_token()
User.add_user_count(rds, appid, client_id)
User.save_user(rds, appid, client_id, name, avatar, token)
User.save_token(rds, appid, client_id, token)
resp = {
"token":token,
"store_id":store_id,
"client_id":client_id,
}
data = {"data":resp}
return make_response(200, data)
|
def convertWeapon(weapon):
blademaster = __isBlademaster(weapon)
newWeapon = {}
newWeapon['affinity'] = __intOrNone(weapon.get('Affinity'))
newWeapon['attack'] = __intOrNone(weapon.get('Attack'))
newWeapon['create_price'] = __filterString(weapon.get('Create_Price'))
newWeapon['defense'] = __intOrNone(weapon.get('Defense'))
newWeapon['glaive_type'] = __filterString(weapon.get('Glaive_Type'))
newWeapon['name'] = __filterString(weapon.get('Name'))
newWeapon['phial'] = __filterString(weapon.get('Phial'))
newWeapon['rarity'] = __intOrNone(weapon.get('Rarity'))
newWeapon['shelling'] = __filterString(weapon.get('Shelling'))
newWeapon['slot'] = __intOrNone(weapon.get('Slot'))
newWeapon['true_attack'] = __intOrNone(weapon.get('True_Attack'))
newWeapon['upgrade_price'] = __filterString(weapon.get('Upgrade_Price'))
newWeapon['weapon_family'] = __filterString(weapon.get('Weapon_Family'))
newWeapon['id'] = __intOrNone(weapon.get('id'))
if blademaster:
newWeapon['class'] = 'Blademaster'
else:
newWeapon['class'] = 'Gunner'
return newWeapon
def __isBlademaster(weapon):
weapon_family = weapon.get('Weapon_Family')
gun = ["Bow","Light Bowgun", "Heavy Bowgun"]
return weapon_family not in gun
def convertCreateItems(weapon):
items = []
wep_id = __intOrNone(weapon.get('id'))
for create_item in weapon.get('Create_Items'):
item_map = {}
item_map['id'] = wep_id
item_map['name'] = __filterString(create_item['Name'])
item_map['quantity'] = __intOrNone(create_item['Quantity'])
item_map['item_id'] = __intOrNone(create_item['id'])
items.append(item_map)
return items
def convertUpgradeItems(weapon):
items = []
wep_id = __intOrNone(weapon.get('id'))
for upgrade_item in weapon.get('Upgrade_Items'):
item_map = {}
item_map['id'] = wep_id
item_map['name'] = __filterString(upgrade_item['Name'])
item_map['quantity'] = __intOrNone(upgrade_item['Quantity'])
item_map['item_id'] = __intOrNone(upgrade_item['id'])
items.append(item_map)
return items
def convertUpgradesTo(weapon):
items = []
wep_id = __intOrNone(weapon.get('id'))
for upgrade_item in weapon.get('Upgrades_To'):
item_map = {}
item_map['id'] = wep_id
item_map['name'] = __filterString(upgrade_item['Name'])
item_map['item_id'] = __intOrNone(upgrade_item['id'])
items.append(item_map)
return items
def __filterString(someString):
if someString:
return someString.replace("'", "''")
else:
return None
def __intOrNone(possibleInt):
# despite its name, this falls back to -1 when the value is missing or not an integer
try:
return int(possibleInt)
except:
return -1
|
import json
import os
from discord.ext.commands import Bot
from database import DB
from google_search import fetch_search_results
### Load Discord Token from environment on PROD
### Otherwise load it from local file
DISCORD_TOKEN = os.environ.get('DISCORD_TOKEN')
if DISCORD_TOKEN is None:
env_config = json.loads(open('dev.json', 'r').read())
DISCORD_TOKEN = env_config.get('DISCORD_TOKEN')
### Initialise Discord BOT API Client
bot = Bot(command_prefix='!')
### Specify commands
@bot.command(name='hi', help='Responds with Greeting')
async def greet(ctx):
await ctx.send(f"Hey {ctx.message.author.display_name}")
@bot.command(name='google', help='Responds with top 5 results of Google Search with specified search term(s)')
async def googlesearch(ctx, *terms):
### Combines multiple search terms into space separated string
search_term = " ".join(list(terms)).strip()
### Check if there was no search term(s) provided
if not search_term:
await ctx.send("Usage: !google <search-term(s)>")
else:
### Connect to DB
db_instance = DB()
### Dict of data to be stored in DB as search history entry
### containing basic information associated with the request
search_request_data = {
'guild': ctx.guild.name,
'guild_id': ctx.guild.id,
'channel': ctx.channel.name,
'author': ctx.message.author.display_name,
'term': search_term
}
### Store the data inside MongoDB
db_instance.store(**search_request_data)
### Search google for the provided search term(s)
search_results = fetch_search_results(search_term)
### Check whether fetching the search results was successful
if search_results.get('success', False):
### Combine the search results into new line separated strings
response_links = '\n'.join([_ for _ in search_results.get('results', [])])
### Return response with search results
await ctx.send(f"Top Results from Google for '{search_term}' are:\n{response_links}")
else:
### Return response in case of failure in searching
await ctx.send(f"Error while fetching search results from Google for {search_term}")
@bot.command(name='recent', help='Returns recent search terms matching the provided term')
async def recents(ctx, term=None):
### Check if there was no search term provided
if term is None:
### Return response in case no search term was provided
await ctx.send("Usage: !recent <term>")
else:
### Connect to DB
db_instance = DB()
### Fetch Guild ID to only query search history of that Guild
guild_id = ctx.guild.id
### Fetch previous search results for particular Guild matching with provided search term
search_result = db_instance.search_term(term=term, guild_id=guild_id)
### Check whether fetching the search history was successful
if search_result.get('success', False):
if search_result.get('results', []):
### Combine search results into new line separated strings
results = '\n'.join([result.get('term', '') for result in search_result.get('results', [])])
### Return response with search results
await ctx.send(f"Recent Similar searches to '{term}' are:\n{results}")
else:
### Return response in case no results matching provided input were found
await ctx.send(f"No recent searches match the provided term {term}")
else:
### Return response in case of failure in searching
await ctx.send(f"Error while querying search history for {term}")
bot.run(DISCORD_TOKEN)
|
# Generated by Django 2.1 on 2018-08-12 20:58
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('campaigns', '0002_auto_20180812_2358'),
('lists', '0001_initial'),
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Activity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('activity_type', models.PositiveSmallIntegerField(choices=[(1, 'Subscribed'), (2, 'Unsubscribed'), (3, 'Was sent'), (4, 'Opened'), (5, 'Clicked'), (6, 'Imported')], verbose_name='type')),
('date', models.DateTimeField(auto_now_add=True, verbose_name='date')),
('description', models.TextField(blank=True, verbose_name='description')),
('ip_address', models.GenericIPAddressField(blank=True, null=True, unpack_ipv4=True, verbose_name='confirm IP address')),
('campaign', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='activities', to='campaigns.Campaign')),
('email', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='activities', to='campaigns.Email')),
('link', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='activities', to='campaigns.Link')),
('location', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='activities', to='core.City', verbose_name='location')),
],
options={
'verbose_name': 'activity',
'verbose_name_plural': 'activities',
'db_table': 'colossus_activities',
},
),
migrations.CreateModel(
name='Subscriber',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('email', models.EmailField(max_length=255, verbose_name='email address')),
('name', models.CharField(blank=True, max_length=150, verbose_name='name')),
('open_rate', models.FloatField(default=0.0, editable=False, verbose_name='opens')),
('click_rate', models.FloatField(default=0.0, editable=False, verbose_name='clicks')),
('update_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='updated')),
('status', models.PositiveSmallIntegerField(choices=[(1, 'Pending'), (2, 'Subscribed'), (3, 'Unsubscribed'), (4, 'Cleaned')], default=1, verbose_name='status')),
('optin_ip_address', models.GenericIPAddressField(blank=True, null=True, unpack_ipv4=True, verbose_name='opt-in IP address')),
('optin_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='opt-in date')),
('confirm_ip_address', models.GenericIPAddressField(blank=True, null=True, unpack_ipv4=True, verbose_name='confirm IP address')),
('confirm_date', models.DateTimeField(blank=True, null=True, verbose_name='confirm date')),
('last_seen_ip_address', models.GenericIPAddressField(blank=True, null=True, unpack_ipv4=True, verbose_name='last seen IP address')),
('last_seen_date', models.DateTimeField(blank=True, null=True, verbose_name='last seen date')),
('location', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='subscribers', to='core.City', verbose_name='location')),
('mailing_list', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='subscribers', to='lists.MailingList')),
],
options={
'verbose_name': 'subscriber',
'verbose_name_plural': 'subscribers',
'db_table': 'colossus_subscribers',
},
),
migrations.CreateModel(
name='SubscriptionFormTemplate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(choices=[('subscribe', 'Subscribe page'), ('subscribe-thank-you', 'Subscribe thank you page'), ('confirm-email', 'Opt-in confirm email'), ('confirm-thank-you', 'Opt-in confirm thank you page'), ('welcome-email', 'Final welcome email'), ('unsubscribe', 'Unsubscribe page'), ('unsubscribe-success', 'Unsubscribe success page'), ('goodbye-email', 'Goodbye email')], db_index=True, max_length=30, verbose_name='key')),
('redirect_url', models.URLField(blank=True, help_text='Instead of showing this page, redirect to URL.', verbose_name='redirect URL')),
('send_email', models.BooleanField(default=True, verbose_name='send final confirmation email?')),
('from_email', models.EmailField(max_length=254, verbose_name='from email address')),
('from_name', models.CharField(blank=True, max_length=100, verbose_name='from name')),
('subject', models.CharField(blank=True, max_length=150, verbose_name='email subject')),
('content_html', models.TextField(blank=True, verbose_name='content HTML')),
('content_text', models.TextField(blank=True, verbose_name='content plain text')),
('mailing_list', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='forms_templates', to='lists.MailingList', verbose_name='mailing list')),
],
options={
'verbose_name': 'form template',
'verbose_name_plural': 'form templates',
'db_table': 'colossus_form_templates',
},
),
migrations.AddField(
model_name='activity',
name='subscriber',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='activities', to='subscribers.Subscriber'),
),
migrations.AlterUniqueTogether(
name='subscriber',
unique_together={('email', 'mailing_list')},
),
]
|
import mysql.connector
database = "production"
inputDb = mysql.connector.connect(
host="localhost",
user="root",
passwd="password",
database=database
)
print(inputDb)
# Rename every column to <table>_<column>; TEXT columns become CHAR(255)
mycursor = inputDb.cursor()
mycursor.execute("SHOW TABLES")
db = mycursor.fetchall() # Fetches all the Table Names
print(db)
mycursor = inputDb.cursor()
for T in db:
print(T[0] + " -----------------------") # Name of the Table
mycursor.execute("DESCRIBE %s" % T[0])
row = mycursor.fetchall() # column definitions of this table
#print(row)
for A in row:
print(T[0]+"_"+A[0] + " "+A[1]) # Attribute name and AttributeType
newColumnName=""
if A[1]=='text':
newColumnName=T[0] + "_" + A[0] + " CHAR(255)"
else:
newColumnName = T[0] + "_" + A[0] + " " + A[1]
Q="ALTER TABLE "+T[0]+" CHANGE "+A[0]+" "+newColumnName
print(Q)
mycursor.execute(Q)
mycursor.close()
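# For illustration only: given a hypothetical table `users` with a TEXT column `name`,
# the loop above would issue:
#   ALTER TABLE users CHANGE name users_name CHAR(255)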
|
import csv
import pprint
import matplotlib.pyplot as plt
import math
path = '/Users/yutaro/research/2020/src/build/'
with open(path+'ground_truth.csv') as f:
#print(f.read())
readf = list(csv.reader(f))
with open(path+'resultZ=0.1115.csv') as g:
readg = list(csv.reader(g))
l = []
lr = []
x = []
N = 30
for i in range(2,17):
x.append((i+7) *180./ N)
diff = 0
r = 0
for j in range(1,4):
diff += math.sqrt((float(readf[i][j] )- float(readg[i][j])) * (float(readf[i][j]) - float(readg[i][j])))
r += float(readf[i][j]) * float(readf[i][j])
r = math.sqrt(r)
l.append(diff)
lr.append(diff/r)
fig = plt.figure()
plt.plot(x,l)
plt.savefig(path+"diff.png") # save before show(), otherwise the saved figure is blank
plt.show()
fig = plt.figure()
plt.plot(x,lr)
plt.savefig(path+"diff_r.png")
plt.show()
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import contextlib
import json
import os.path
from dataclasses import dataclass
from typing import Any, Mapping
import yaml
from pants.backend.openapi.target_types import (
OPENAPI_FILE_EXTENSIONS,
OpenApiDocumentDependenciesField,
OpenApiDocumentField,
OpenApiSourceDependenciesField,
OpenApiSourceField,
)
from pants.base.glob_match_error_behavior import GlobMatchErrorBehavior
from pants.base.specs import FileLiteralSpec, RawSpecs
from pants.engine.fs import Digest, DigestContents
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.rules import collect_rules, rule
from pants.engine.target import (
DependenciesRequest,
ExplicitlyProvidedDependencies,
FieldSet,
HydratedSources,
HydrateSourcesRequest,
InferDependenciesRequest,
InferredDependencies,
Targets,
)
from pants.engine.unions import UnionRule
from pants.util.frozendict import FrozenDict
@dataclass(frozen=True)
class ParseOpenApiSources:
sources_digest: Digest
paths: tuple[str, ...]
@dataclass(frozen=True)
class OpenApiDependencies:
dependencies: FrozenDict[str, frozenset[str]]
@rule
async def parse_openapi_sources(request: ParseOpenApiSources) -> OpenApiDependencies:
digest_contents = await Get(DigestContents, Digest, request.sources_digest)
dependencies: dict[str, frozenset[str]] = {}
for digest_content in digest_contents:
spec = None
if digest_content.path.endswith(".json"):
with contextlib.suppress(json.JSONDecodeError):
spec = json.loads(digest_content.content)
elif digest_content.path.endswith(".yaml") or digest_content.path.endswith(".yml"):
with contextlib.suppress(yaml.YAMLError):
spec = yaml.safe_load(digest_content.content)
if not spec or not isinstance(spec, dict):
dependencies[digest_content.path] = frozenset()
continue
dependencies[digest_content.path] = _find_local_refs(digest_content.path, spec)
return OpenApiDependencies(dependencies=FrozenDict(dependencies))
def _find_local_refs(path: str, d: Mapping[str, Any]) -> frozenset[str]:
local_refs: set[str] = set()
for k, v in d.items():
if isinstance(v, dict):
local_refs.update(_find_local_refs(path, v))
elif k == "$ref" and isinstance(v, str):
# https://swagger.io/specification/#reference-object
# https://datatracker.ietf.org/doc/html/draft-pbryan-zyp-json-ref-03
v = v.split("#", 1)[0]
if any(v.endswith(ext) for ext in OPENAPI_FILE_EXTENSIONS) and "://" not in v:
# Resolution is performed relative to the referring document.
normalized = os.path.normpath(os.path.join(os.path.dirname(path), v))
if not normalized.startswith("../"):
local_refs.add(normalized)
return frozenset(local_refs)
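# Illustrative note (not part of the upstream comments): for a referring document at
# "src/api/openapi.yaml", a "$ref" of "./common.yaml#/components/schemas/Pet" is stripped
# to "./common.yaml" above and normalised to "src/api/common.yaml", assuming ".yaml" is
# listed in OPENAPI_FILE_EXTENSIONS.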
# -----------------------------------------------------------------------------------------------
# `openapi_document` dependency inference
# -----------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class OpenApiDocumentDependenciesInferenceFieldSet(FieldSet):
required_fields = (OpenApiDocumentField, OpenApiDocumentDependenciesField)
sources: OpenApiDocumentField
dependencies: OpenApiDocumentDependenciesField
class InferOpenApiDocumentDependenciesRequest(InferDependenciesRequest):
infer_from = OpenApiDocumentDependenciesInferenceFieldSet
@rule
async def infer_openapi_document_dependencies(
request: InferOpenApiDocumentDependenciesRequest,
) -> InferredDependencies:
explicitly_provided_deps, hydrated_sources = await MultiGet(
Get(ExplicitlyProvidedDependencies, DependenciesRequest(request.field_set.dependencies)),
Get(HydratedSources, HydrateSourcesRequest(request.field_set.sources)),
)
candidate_targets = await Get(
Targets,
RawSpecs(
file_literals=(FileLiteralSpec(*hydrated_sources.snapshot.files),),
description_of_origin="the `openapi_document` dependency inference",
),
)
addresses = frozenset(
[target.address for target in candidate_targets if target.has_field(OpenApiSourceField)]
)
dependencies = explicitly_provided_deps.remaining_after_disambiguation(
addresses.union(explicitly_provided_deps.includes),
owners_must_be_ancestors=False,
)
return InferredDependencies(dependencies)
# -----------------------------------------------------------------------------------------------
# `openapi_source` dependency inference
# -----------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class OpenApiSourceDependenciesInferenceFieldSet(FieldSet):
required_fields = (OpenApiSourceField, OpenApiSourceDependenciesField)
sources: OpenApiSourceField
dependencies: OpenApiSourceDependenciesField
class InferOpenApiSourceDependenciesRequest(InferDependenciesRequest):
infer_from = OpenApiSourceDependenciesInferenceFieldSet
@rule
async def infer_openapi_module_dependencies(
request: InferOpenApiSourceDependenciesRequest,
) -> InferredDependencies:
explicitly_provided_deps, hydrated_sources = await MultiGet(
Get(ExplicitlyProvidedDependencies, DependenciesRequest(request.field_set.dependencies)),
Get(HydratedSources, HydrateSourcesRequest(request.field_set.sources)),
)
result = await Get(
OpenApiDependencies,
ParseOpenApiSources(
sources_digest=hydrated_sources.snapshot.digest,
paths=hydrated_sources.snapshot.files,
),
)
paths: set[str] = set()
for source_file in hydrated_sources.snapshot.files:
paths.update(result.dependencies[source_file])
candidate_targets = await Get(
Targets,
RawSpecs(
file_literals=tuple(FileLiteralSpec(path) for path in paths),
unmatched_glob_behavior=GlobMatchErrorBehavior.ignore,
description_of_origin="the `openapi_source` dependency inference",
),
)
addresses = frozenset(
[target.address for target in candidate_targets if target.has_field(OpenApiSourceField)]
)
dependencies = explicitly_provided_deps.remaining_after_disambiguation(
addresses.union(explicitly_provided_deps.includes),
owners_must_be_ancestors=False,
)
return InferredDependencies(dependencies)
def rules():
return [
*collect_rules(),
UnionRule(InferDependenciesRequest, InferOpenApiDocumentDependenciesRequest),
UnionRule(InferDependenciesRequest, InferOpenApiSourceDependenciesRequest),
]
|
from django.contrib import admin
from django.conf.urls import url, include
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.static import static
urlpatterns = [
url('^admin/', admin.site.urls, name='admin'),
url('',include('inventory.urls'), name='inventory'),
]
urlpatterns+=static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
|
import tkinter as tk
from tkinter import filedialog
def open_handler():
name = tk.filedialog.askopenfilename(filetypes=(("file","*.jpg"),("All Files","*.*") ))
print(name)
def button_hanadler(r_v):
print(r_v)
mainwindow=tk.Tk()
mainwindow.geometry("640x340")
mainwindow.title("sensor data ")
menubar=tk.Menu(mainwindow)
sub_menu=tk.Menu(menubar, tearoff=0)
sub_menu.add_command(label="Open File ", command=open_handler)
sub_menu.add_separator()
menubar.add_cascade(label="File", menu=sub_menu)
r_v=tk.IntVar()
r=tk.Radiobutton(mainwindow, text="Keep me Logged in",variable=r_v, value=1, command=lambda:button_hanadler(r_v.get()))
r.pack()
r1=tk.Radiobutton(mainwindow, text="Save Password",variable=r_v, value=2, command=lambda:button_hanadler(r_v.get()))
r1.pack()
mainwindow.config(menu=menubar)
mainwindow.mainloop()
|
import config
import time
from network import WLAN
from network import Server
# Connect to the WiFi network
wlan = WLAN(mode=WLAN.STA) # WiFi station (client) adapter mode
wlan.connect(config.wifi_ssid, auth=(None, config.wifi_pass)) # connection call and parameters for the WiFi network
if config.REPL:
while not wlan.isconnected():
print('No conectado a WiFi')
time.sleep(5)
if wlan.isconnected():
print('Conectado a WiFi: ' + config.wifi_ssid)
# Telnet service
server = Server(login=(config.user, config.password), timeout=60)
server.timeout(300) # change the timeout
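# A possible config.py for this board (a sketch only; every value below is a placeholder
# and must be replaced with the real credentials expected by the import above):
#   wifi_ssid = "my-network"
#   wifi_pass = "my-password"
#   REPL = True
#   user = "micro"
#   password = "python"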
|
####sss666
# read_csv - p.177
# sys.stdout ### np.sys.stdout
# to_csv(cols) ## columns
# Delimiters, p.182 - still don't really understand this.
# Same for the class example in that section.
# The web-scraping part is also fuzzy (mainly could not strip the whitespace).
from lxml.html import parse
from urllib.request import urlopen
parsed=parse(urlopen('http://finance.yahoo.com/q/op?s=AAPL+Options'))
doc=parsed.getroot()
tables=doc.findall('.//table')
#print(tables)
rows=[]
for i in range(len(tables)):
rows.append(tables[i].findall('.//tr'))
#print(rows[i],'\n')
def _unpack(row,kind='td'):
elts=row.findall('.//%s' %kind)
return [val.text_content() for val in elts]
#print(_unpack(rows[2][1],kind='td'))##
for i in range(len(rows)):
print(_unpack(rows[i][0],kind='th'))### still need to strip the whitespace on this line.
if len(rows[i])>1:
print(_unpack(rows[i][1],kind='td'))### not sure why this fails inside print; without print it works.
#p.189: DataFrame has no save method.
###p.191: data=json.loads(resp.text) always raises an error for me.
###p.194: don't understand zip here; same on p.181.
#Also p.194: the SQL example's read_frame method does not exist; read_sql works instead.
#Never got to the MongoDB section.
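# A small sketch (not from the original notes) of the pandas.read_sql call mentioned above;
# the in-memory database and the `test` table are made up purely for illustration.
import sqlite3
import pandas as pd

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE test (a INTEGER, b TEXT)")
conn.execute("INSERT INTO test VALUES (1, 'x')")
frame = pd.read_sql("SELECT * FROM test", conn)  # stands in for the book's removed read_frame
print(frame)
conn.close()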
|
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from django.shortcuts import render
from django.views.decorators.csrf import csrf_protect
from django.template import Context
import json
from .models import ParkingSlots
from django.http import QueryDict
from .parking_utils import allocate_slot,vacant_slot,create_new_parking,add_slot_to_parking,slot_status_inparking
#Django templates render the frontend; the .html templates being rendered live in the templates folder
# Create your views here.
# index view for the first page; takes as input the number of slots we want in our parking lot
@csrf_protect
def index(request):
#Todo add slot to parking lot
if request.method == 'POST':
data = QueryDict(request.body).dict()
total_num_slot=data.get('slot_numbers','')
add_num=data.get('add_slot','')
if add_num:
add_slot_to_parking(add_num)
if total_num_slot:
create_new_parking(total_num_slot)
template = loader.get_template('carparking/index.html')
return render(request, 'carparking/index.html')
# adds a car to a parking slot, raises an error when no more slots are available, and can also vacate a given slot id
# uses a pg_advisory lock when two people make a request at the same time
# returns an error when the slot number to be vacated is not present
@csrf_protect
def dashboard(request):
error_message=""
slot_status=""
if request.method == 'POST':
data = QueryDict(request.body).dict()
car_registration_num = data.get('car_registration_num','')
car_color = data.get('car_color','')
slot_id = data.get('slot_number','')
get_slot_status = data.get('slot_status','')
if car_registration_num and car_color:
error_message = allocate_slot(car_registration_num,car_color)
if slot_id:
error_message = vacant_slot(slot_id)
if get_slot_status:
slot_status=slot_status_inparking(get_slot_status)
template = loader.get_template('carparking/dashboard.html')
return render(request, 'carparking/dashboard.html',{"error_message":error_message,"status":str(slot_status)})
# show the slot id, car number and color for each occupied slot in the parking lot
@csrf_protect
def get_all_slots(request):
all_slots=ParkingSlots.objects.filter(availibility_status=False)
error_message=""
if not all_slots:
error_message="There are No cars in Parking slots"
return render(request, 'carparking/all_slot.html',{"all_slots": all_slots,"error_message":error_message})
|
from django.db import models
from apps.main.models import *
from apps.resources.models import *
import csv
with open("EXP_REENTRY.csv",'rb') as f:
reader = csv.reader(f)
your_list = list(reader)
all_resource_types = ResourceType.objects.all()
for resource_type in all_resource_types:
print resource_type
input_resource = raw_input("Type of Resource to Import===>")
resource_type = ResourceType.objects.get(name=input_resource)
if resource_type:
print "Got Resource Type %s" % resource_type
delete = None
while delete not in ['y','N']:
delete= raw_input("Replace all resources...Delete other '%s' resources? y or N....." % resource_type)
if delete == 'y':
Resource.objects.filter(resource_types=resource_type).delete()
admin=User.objects.get(username='bpadmin')
# count =0
# for x in your_list[0]:
# print count, x
# count+=1
print "test"
for resource in your_list[1:]:
# print resource
if resource[1]:
new_resource=Resource(created_by=admin, name=resource[1],approved=True)
new_resource.save()
# new_resource.created_by=admin
# new_resource.name=resource[1]
print "Created Resource for %s" % resource[1]
if resource[2]:
new_resource.city=resource[2]
new_resource.state='IL'
print "\tAdded City State"
if resource[3]:
new_resource.address=resource[3]
print "\tAdded Address"
if resource[4]:
new_resource.phone=resource[4]
print "\tAdded Phone"
if resource[5]:
new_resource.contact_name=resource[5]
print "\tAdded Contact Name"
if resource[6]:
new_resource.email=resource[6]
print "\tAdded Email"
if resource[7]:
new_resource.website=resource[7]
print "\tAdded Website"
if resource[0]:
notes=resource[0]
if resource[8]:
notes+=resource[8]
new_resource.notes=notes
print "\tAdded notes"
elif resource[8]:
new_resource.notes=resource[8]
print "\tAdded notes"
restrictions = resource[9]
if resource[10]:
restrictions+=" LGBTQ friendly? %s" % resource[10]
if resource[12]:
restrictions+=" Sex Offenses Ok?: %s" % resource[12]
if restrictions:
new_resource.restrictions=restrictions
print "\tAdded notes"
new_resource.save()
new_resource.resource_types.add(resource_type)
# dedicated_to = x[1]
# print "\t"+dedicated_to
# name = x[2]
# print "\t"+ name
# county = x[3]
# print "\t"+county
# address = x[4]
# print "\t"+address
# city =x[5]
# print "\t"+city
# state = x[6]
# print "\t"+state
# zip_code = x[7]
# print "\t"+zip_code
# phone=x[8]
# print "\t"+phone,
# contact_name=x[9]
# email=x[10]
# print "\t"+email,
# website = x[11]
# print "\t"+website,
# notes=x[12]
# print "\t"+notes,
# restrictions=x[13]
# print "\t"+restrictions,
# bp_contact=x[14]
# print "\t"+bp_contact
# bp_supported_note=x[15]
# print "\t"+bp_supported_note
# org_bool = OrganizationType.objects.filter(name=x[0])
# if org_bool:
# org_type = OrganizationType.objects.get(name=x[0])
# org_type.save()
# this_org=Organization.objects.create(dedicated_to=dedicated_to,name=name,county=county,address=address,city=city,state=state,zip_code=zip_code, phone=phone,contact_name=contact_name,email=email,website=website,notes=notes,restrictions=restrictions,bp_contact=bp_contact,bp_supported_note=bp_supported_note,created_by=admin)
# this_org.save()
# this_org.org_type.add(org_type)
# print "Created Resource for " + name
# else:
# print "This Organiztaion Type doesn't exist...skipping..."
|
"""
Pygameは日本語の表示が苦手。日本語を表示するには、print(pygame.font.get_fonts())で
各パソコンで使用できる日本語フォントを調べ、それを指定する方法があるが、
Pygameで使える日本語フォントはPCごとに違ううえ、種類が限られている。
そこでIPAフォントを用いて日本語を表示する方法がある。
"""
import pygame
import sys
# 白
WHITE = (255, 255, 255)
# 黒
BLACK = (0, 0, 0)
def main():
# initialise the pygame modules
pygame.init()
# set the title shown on the window
pygame.display.set_caption("初めてのPygameで日本語を表示する")
# initialise the screen
screen = pygame.display.set_mode((800, 600))
clock = pygame.time.Clock()
font = pygame.font.Font("ipam00303/ipam.ttf", 80)
# timer / frame counter
tmr = 0
while True:
tmr = tmr + 1
# process pygame events in a loop
for event in pygame.event.get():
if event.type == pygame.QUIT:
# the window close button was clicked
# shut the pygame modules down
pygame.quit()
# exit the program
sys.exit()
txt = font.render("日本語表示 "+str(tmr), True, WHITE)
# clear the whole screen with the given colour
screen.fill(BLACK)
# blit the Surface with the rendered text onto the screen
screen.blit(txt, [100, 100])
# update the display
pygame.display.update()
# set the frame rate
clock.tick(10)
if __name__ == "__main__":
main()
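# Possible fallback (not part of the original): if the bundled IPA font file is missing,
# a system Japanese font can be requested instead, e.g.
#   font = pygame.font.SysFont("msgothic", 80)  # available font names differ per machine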
|
from mlmicrophysics.models import DenseNeuralNetwork
from mlmicrophysics.data import subset_data_files_by_date, assemble_data_files
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler, RobustScaler, OneHotEncoder
from sklearn.metrics import confusion_matrix, accuracy_score, mean_absolute_error, mean_squared_error
from mlmicrophysics.metrics import heidke_skill_score, peirce_skill_score, hellinger_distance, root_mean_squared_error, r2_corr
import argparse
import yaml
from os.path import join, exists
import os
from datetime import datetime
import logging
from tensorflow.keras.losses import huber
# from memory_profiler import profile
import optuna
from aimlutils.echo.src.trial_suggest import *
from aimlutils.echo.src.base_objective import *
import tensorflow as tf
logger = logging.getLogger(__name__)
scalers = {"MinMaxScaler": MinMaxScaler,
"MaxAbsScaler": MaxAbsScaler,
"StandardScaler": StandardScaler,
"RobustScaler": RobustScaler}
class_metrics = {"accuracy": accuracy_score,
"heidke": heidke_skill_score,
"peirce": peirce_skill_score,
"confusion": confusion_matrix}
reg_metrics = {"rmse": root_mean_squared_error,
"mae": mean_absolute_error,
"r2": r2_corr,
"hellinger": hellinger_distance,
"mse": mean_squared_error,
"huber": huber}
def leaky(x):
return tf.nn.leaky_relu(x, alpha=0.01)
def ranked_probability_score(y_true_discrete, y_pred_discrete):
y_pred_cumulative = np.cumsum(y_pred_discrete)
y_true_cumulative = np.cumsum(y_true_discrete)
return np.mean((y_pred_cumulative - y_true_cumulative) ** 2) / float(y_pred_discrete.shape[1] - 1)
# @profile(precision=4)
def objective(trial, config):
tf.config.threading.set_inter_op_parallelism_threads(2)
tf.config.threading.set_intra_op_parallelism_threads(2)
# Get list of hyperparameters from the config
hyperparameters = config["optuna"]["parameters"]
# Now update some hyperparameters via custom rules
trial_hyperparameters = {}
for param_name in hyperparameters.keys():
trial_hyperparameters[param_name] = trial_suggest_loader(trial, hyperparameters[param_name])
data_path = config["data_path"]
out_path = config["out_path"]
input_cols = config["input_cols"]
output_cols = config["output_cols"]
input_transforms = config["input_transforms"]
output_transforms = config["output_transforms"]
np.random.seed(config["random_seed"])
input_scaler = scalers[config["input_scaler"]]()
subsample = config["subsample"]
if not exists(out_path):
os.makedirs(out_path)
start = datetime.now()
logger.info(f"Loading training data for trial: {trial.number}")
train_files, val_files, test_files = subset_data_files_by_date(data_path, **config["subset_data"])
scaled_input_train, \
labels_train, \
transformed_out_train, \
scaled_out_train, \
output_scalers, \
meta_train = assemble_data_files(train_files, input_cols, output_cols, input_transforms,
output_transforms, input_scaler, subsample=subsample)
logger.info("Loading testing data")
scaled_input_test, \
labels_test, \
transformed_out_test, \
scaled_out_test, \
output_scalers_test, \
meta_test = assemble_data_files(test_files, input_cols, output_cols, input_transforms,
output_transforms, input_scaler, output_scalers=output_scalers,
train=False, subsample=subsample)
logger.info(f"Finished loading data took: {datetime.now() - start}")
start = datetime.now()
input_scaler_df = pd.DataFrame({"mean": input_scaler.mean_, "scale": input_scaler.scale_},
index=input_cols)
out_scales_list = []
for var in output_scalers.keys():
for out_class in output_scalers[var].keys():
if output_scalers[var][out_class] is not None:
out_scales_list.append(pd.DataFrame({"mean": output_scalers[var][out_class].mean_,
"scale": output_scalers[var][out_class].scale_},
index=[var + "_" + str(out_class)]))
out_scales_df = pd.concat(out_scales_list)
out_scales_df.to_csv(join(out_path, "output_scale_values.csv"),
index_label="output")
logger.info(f"Finished scaling data took: {datetime.now() - start}")
beginning = datetime.now()
logger.info(f"BEGINNING model training: {beginning}")
with tf.device("/CPU:0"):
# initialize neural networks that will only be defined once and trained in epoch loop
classifiers = dict()
for output_col in output_cols:
classifiers[output_col] = DenseNeuralNetwork(hidden_layers=trial_hyperparameters["class_hidden_layers"],
hidden_neurons=trial_hyperparameters["class_hidden_neurons"],
lr=trial_hyperparameters["class_lr"],
l2_weight=trial_hyperparameters["class_l2_weight"],
activation=trial_hyperparameters["class_activation"],
batch_size=trial_hyperparameters["class_batch_size"],
**config["classifier_networks"])
regressors = dict()
for output_col in output_cols:
regressors[output_col] = dict()
for label in [l for l in list(output_transforms[output_col].keys()) if l != 0]:
regressors[output_col][label] = DenseNeuralNetwork(hidden_layers=trial_hyperparameters["reg_hidden_layers"],
hidden_neurons=trial_hyperparameters["reg_hidden_neurons"],
lr=trial_hyperparameters["reg_lr"],
l2_weight=trial_hyperparameters["reg_l2_weight"],
activation=trial_hyperparameters["reg_activation"],
batch_size=trial_hyperparameters["reg_batch_size"],
**config["regressor_networks"])
reg_index = []
for output_col in output_cols:
for label in list(output_transforms[output_col].keys()):
if label != 0:
reg_index.append(output_col + f"_{label:d}")
test_prediction_values = np.zeros((scaled_out_test.shape[0], len(reg_index)))
test_prediction_labels = np.zeros(scaled_out_test.shape)
logger.info(f"Finished initializing models took: {datetime.now() - beginning}")
for epoch in range(config["epochs"]):
logger.info(f"Training epoch: {epoch}")
start = datetime.now()
score = 0
for o, output_col in enumerate(output_cols):
logger.info(f"Train {output_col} Classifer - epoch: {epoch}")
hist = classifiers[output_col].fit(scaled_input_train,
labels_train[output_col])
logger.info(f"Evaluate Classifier: {output_col}")
test_prediction_labels[:, o] = classifiers[output_col].predict(scaled_input_test)
logger.info(f"test_prediction_labels[:, o] min: {np.min(test_prediction_labels[:, o])} max: {np.max(test_prediction_labels[:, o])}")
true = OneHotEncoder(sparse=False).fit_transform(labels_test[output_col].to_numpy().reshape(-1, 1))
pred = OneHotEncoder(sparse=False).fit_transform(pd.DataFrame(test_prediction_labels[:, o]))
score += ranked_probability_score(true, pred)
logger.info(f"Finished training epoch {epoch} of classifier {output_col} in: {datetime.now() - start}")
for l, label in enumerate(list(output_transforms[output_col].keys())):
start = datetime.now()
if label != 0:
logger.info(f"Train {output_col} - {label} Regressor - epoch: {epoch}")
hist = regressors[output_col][label].fit(scaled_input_train.loc[labels_train[output_col] == label],
scaled_out_train.loc[labels_train[output_col] == label, output_col])
if label > 0:
out_label = "pos"
else:
out_label = "neg"
test_prediction_values[:, l] = output_scalers[output_col][label].inverse_transform(regressors[output_col][label].predict(scaled_input_test))
score += mean_squared_error(transformed_out_test.loc[labels_test[output_col] == label, output_col],
test_prediction_values[labels_test[output_col] == label, l])
logger.info(f"Finished training epoch {epoch} of regressor {output_col} and label {label} in: {datetime.now() - start}")
trial.report(score, step = epoch)
if trial.should_prune():
raise optuna.TrialPruned()
logger.info(f"Running entire model took: {datetime.now() - beginning}")
return score
class Objective(BaseObjective):
def __init__(self, config, metric = "val_loss", device = "cpu"):
# Initialize the base class
BaseObjective.__init__(self, config, metric, device)
def train(self, trial, conf):
result = objective(trial, conf)
results_dictionary = {
"val_loss": result
}
return results_dictionary
|
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
import time
from datetime import datetime
class Server:
def __init__(self):
self.__clients = {}
self.__addresses = {}
self.__HOST = ''
self.__PORT = 33001
self.__BUFSIZ = 1024
self.__SERVER = socket(AF_INET, SOCK_STREAM)
self.__SERVER.bind((self.__HOST, self.__PORT))
def run(self):
self.__SERVER.listen(2)
print('Esperando para conexão...')
client_thread = Thread(target=self.__cliente_conectando_chat)
client_thread.start()
client_thread.join()
self.__SERVER.close()
    # Client connecting to the chat
def __cliente_conectando_chat(self):
while True:
connection, client = self.__SERVER.accept()
print(f'{client} se conectou ao chat')
connection.send(bytes('Bem vindo ao chat com TCP!', 'utf8'))
self.__addresses[connection] = client
Thread(target=self.__comunicacao_mensagem, args=(connection, client)).start()
    # Client communication in the chat
def __comunicacao_mensagem(self, conn_client, client):
name = conn_client.recv(self.__BUFSIZ).decode('utf8')
conn_client.send(bytes(f'Bem vindo {name}!', 'utf8'))
time.sleep(0.3)
conn_client.send(bytes('Se você quer sair, escreva \'{quit}\' para sair.', 'utf8'))
self.__enviar_mensagem(bytes(f'{name} se juntou ao chat!', 'utf8'))
self.__clients[conn_client] = name
while True:
msg = conn_client.recv(self.__BUFSIZ)
if msg != bytes('{quit}', 'utf8'):
self.__enviar_mensagem(msg, datetime.now().strftime('%H:%M:%S') + ' ' + name + ': ')
else:
conn_client.send(bytes('{quit}', 'utf8'))
print(f'{client} se desconectou do chat')
conn_client.close()
del self.__clients[conn_client]
self.__enviar_mensagem(bytes(f'{name} saiu do chat.', 'utf8'))
break
    # Send a message to every client currently active in the chat
def __enviar_mensagem(self, msg, prefix=''):
for sock in self.__clients:
sock.send(bytes(prefix, 'utf8') + msg)
server = Server()
server.run()
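
# A minimal companion client sketch (added for illustration, not part of the original script;
# in practice it would live in its own file and run in a separate process). It follows the
# protocol implemented above: receive the welcome text, send a name, then exchange messages,
# sending '{quit}' to leave the chat.
def run_example_client(host='127.0.0.1', port=33001):
    client = socket(AF_INET, SOCK_STREAM)
    client.connect((host, port))

    def receive_loop():
        # Print everything the server sends until it echoes '{quit}' back
        while True:
            msg = client.recv(1024).decode('utf8')
            print(msg)
            if msg == '{quit}':
                break

    Thread(target=receive_loop, daemon=True).start()
    client.send(bytes(input('Your name: '), 'utf8'))
    while True:
        text = input()
        client.send(bytes(text, 'utf8'))
        if text == '{quit}':
            break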
|
# List Class
class ListNode:
def __init__(self, data=0, next=None):
self.data = data
self.next = next
# Search for a key, O(n)
def search_list(L: ListNode, key: int) -> ListNode:
while L and L.data != key:
L = L.next
# If key was not present in the list, L will have become null
return L
# Insert new node after node O(1)
def insert_after(node: ListNode, new_node: ListNode) -> None:
new_node.next = node.next
node.next = new_node
# Delete a node past this one. Assume node is not a tail O(1)
def delete_after(node: ListNode) -> None:
node.next = node.next.next
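
# A small usage sketch (added for illustration) exercising the three helpers above:
# build 1 -> 2 -> 3, search for 2, insert 9 after it, then delete the node that follows 2.
if __name__ == '__main__':
    head = ListNode(1, ListNode(2, ListNode(3)))
    node = search_list(head, 2)       # O(n) scan; returns the node holding 2
    insert_after(node, ListNode(9))   # list is now 1 -> 2 -> 9 -> 3
    delete_after(node)                # removes 9; list is 1 -> 2 -> 3 again
    while head:
        print(head.data, end=' ')
        head = head.next
    print()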
|
from django.urls import path
from django.http import HttpResponse
def view(request):
return HttpResponse()
urlpatterns = [
path('', view),
]
|
"""
# Linked-list example
"""
import os
class Node(object):
def __init__(self, value, next=None):
self.value = value
self.next = next
def rotate_right(head: Node, k: int) -> Node:
"""给定一个链表,循环向右移动k个节点。
如给定链表1->2->3->4->5,k=2,则返回循环右移2个节点后的链表:4->5->1->2->3。
思路:设置2个指针,前一个先向前移动k个节点,然后两个节点同步向前移,直到前一个指针到达链表末端,这时两个指针之间的部分就是需要循环移动到链表头的部分。
注意:这里k可以是任何值,有可能会比整个链表长度还大,所以可能会出现循环好几次的情况。可以先计算出链表的长度l,然后取余 k%l 就是实际需要移动的节点。
虽然计算链表长度需要多循环一次,但实际上,在k比较大时直接移动反而会循环更多次。
例如: 1->2->3, k=5, 结果2->3->1
"""
if head is None or head.next is None:
return head
temp = head
count = 0
    # Compute the length of the list
while temp:
count += 1
temp = temp.next
    k = k % count  # when k > count, only k % count nodes actually need to move
if k == 0:
        # a whole number of full rotations brings the list back to its original order
return head
    # two pointers (fast and slow) record the split position
fast = head
slow = head
for i in range(k):
fast = fast.next
    # move fast to the tail; slow ends up at the k-th node from the end
while fast.next:
fast = fast.next
slow = slow.next
result = slow.next
    slow.next = None  # break the list at the split point
fast.next = head
return result
if __name__ == '__main__':
head = Node(1)
temp = head
for i in range(2, 7):
temp.next = Node(i)
temp = temp.next
result = rotate_right(head, 2)
while result:
print(result.value)
result = result.next
|
# -*- coding: utf-8 -*-
# @Time : 2019-12-21
# @Author : mizxc
# @Email : xiangxianjiao@163.com
from werkzeug.security import generate_password_hash, check_password_hash
from flask import current_app, request, flash, render_template, redirect, url_for, session
from flask_login import login_user, logout_user, login_required, current_user
from . import bpAuth
from project.model.user import User
from project.common.regular import reEmail
@bpAuth.route("/register", methods=['GET', 'POST'])
def userRegister():
"""
    The first registered user becomes the administrator by default and has all permissions.
    Users who register later become partners, each with knowledge-management permissions for a branch.
"""
if current_user.is_authenticated:
return redirect(url_for('admin.adminIndex'))
if request.method == 'GET':
return render_template('admin/register.html')
if request.method == 'POST':
if User.objects.count() != 0:
flash(u"你已经注册过了,请登陆!")
return redirect(url_for('auth.userLogin'))
userName = request.form['userName']
email = request.form['email']
password = request.form['password']
if len(userName) < 2 or len(userName) > 20:
flash(u"请输入2-20个字符的用户名!")
return redirect(url_for('auth.userRegister'))
if not reEmail(email):
flash(u"邮箱【%s】格式不对,请重新输入!" % email)
return redirect(url_for('auth.userRegister'))
if User.objects(userName=userName).first():
flash(u"用户名【%s】已经存在,请重新输入!" % userName)
return redirect(url_for('auth.userRegister'))
if len(password) < 6 or len(password) > 18:
flash(u"请输入6-18位密码!")
return redirect(url_for('auth.userRegister'))
user = User()
user.userName = userName
user.email = email
user.password = generate_password_hash(password)
user.save()
flash(u"注册成功,请登陆!")
return redirect(url_for('auth.userLogin'))
@bpAuth.route("/login", methods=['GET', 'POST'])
def userLogin():
    # Check whether the user is already logged in
if current_user.is_authenticated:
return redirect(url_for('admin.adminIndex'))
if request.method == 'GET':
return render_template('admin/login.html')
if request.method == 'POST':
userName = request.form['userName']
password = request.form['password']
user = User.objects(userName=userName).first()
if not user:
flash(u"用户【%s】不存在!" % userName)
return redirect(url_for('auth.userLogin'))
if check_password_hash(user.password, password):
login_user(user)
session.permanent = True
return redirect(url_for('admin.adminIndex'))
else:
flash(u"密码输入错误!")
return redirect(url_for('auth.userLogin'))
@bpAuth.route("/loginOut", methods=['GET', 'POST'])
@login_required
def userLoginOut():
logout_user()
flash('你已经退出登陆!')
return redirect(url_for('auth.userLogin'))
|
# -*- coding: utf-8 -*-
from math import prod
from typing import List
class Solution:
def subtractProductAndSum(self, n: int) -> int:
digits = self.getDigits(n)
return prod(digits) - sum(digits)
def getDigits(self, n: int) -> List[int]:
digits = []
while n:
digits.append(n % 10)
n //= 10
return digits
if __name__ == "__main__":
solution = Solution()
assert 15 == solution.subtractProductAndSum(234)
assert 21 == solution.subtractProductAndSum(4421)
|
# Generated by Django 2.2.4 on 2019-09-11 01:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('job', '0004_jobopening_send_cv_directly'),
]
operations = [
migrations.AlterField(
model_name='jobopening',
name='method_of_application',
field=models.TextField(blank=True, null=True),
),
]
|
"""
Beamline element positions
Author: 赵润晓
Date: 2021-04-24
"""
# Append the parent directory to sys.path so that cctpy can be imported
from os import error, path
import sys
sys.path.append(path.dirname(path.abspath(path.dirname(__file__))))
from hust_sc_gantry import HUST_SC_GANTRY
from cctpy import *
g = HUST_SC_GANTRY(
DL1=900.78*MM,
GAP1=430.15*MM,
GAP2=370.82*MM,
qs1_length=234.01*MM,
qs1_aperture_radius=60 * MM,
qs1_gradient=0.0,
qs1_second_gradient=0.0,
qs2_length=200.14*MM,
qs2_aperture_radius=60 * MM,
qs2_gradient=0.0,
qs2_second_gradient=0.0,
DL2=2350.11*MM,
GAP3=431.88*MM,
qs3_length=243.79*MM,
)
traj = (
Trajectory
.set_start_point(P2.origin())
.first_line(direct=P2.x_direct(), length=g.DL1))
# AGCT12
traj=traj.add_arc_line(radius=g.cct12_big_r, clockwise=False, angle_deg=22.5).as_aperture_objrct_on_last(140.5 * MM - 20 * MM)
Plot2.plot_p2(traj.get_last_line2().point_at_middle(),'k.')
print('CCT1',traj.get_last_line2().point_at_middle()/MM)
traj=traj.add_strait_line(length=g.GAP1)
# qs1
traj=traj.add_strait_line(length=g.qs1_length).as_aperture_objrct_on_last(60*MM)
Plot2.plot_p2(traj.get_last_line2().point_at_middle(),'k.')
print('qs1',traj.get_last_line2().point_at_middle()/MM)
traj=traj.add_strait_line(length=g.GAP2)
# qs2
traj=traj.add_strait_line(length=g.qs2_length).as_aperture_objrct_on_last(60*MM)
Plot2.plot_p2(traj.get_last_line2().point_at_middle(),'k.')
print('qs2',traj.get_last_line2().point_at_middle()/MM)
traj=traj.add_strait_line(length=g.GAP2)
# qs1
traj=traj.add_strait_line(length=g.qs1_length).as_aperture_objrct_on_last(60*MM)
Plot2.plot_p2(traj.get_last_line2().point_at_middle(),'k.')
print('qs1',traj.get_last_line2().point_at_middle()/MM)
traj=traj.add_strait_line(length=g.GAP1)
# cct12
traj=traj.add_arc_line(radius=g.cct12_big_r, clockwise=False, angle_deg=22.5).as_aperture_objrct_on_last(140.5 * MM - 20 * MM)
Plot2.plot_p2(traj.get_last_line2().point_at_middle(),'k.')
print('CCT1',traj.get_last_line2().point_at_middle()/MM)
traj=traj.add_strait_line(length=g.DL1)
traj=traj.add_strait_line(length=g.DL2)
# cct345
traj=traj.add_arc_line(radius=g.cct345_big_r, clockwise=True, angle_deg=67.5).as_aperture_objrct_on_last(140.5 * MM)
Plot2.plot_p2(traj.get_last_line2().point_at_middle(),'k.')
print('CCT2',traj.get_last_line2().point_at_middle()/MM)
traj=traj.add_strait_line(length=g.GAP3)
# qs3
traj=traj.add_strait_line(length=g.qs3_length).as_aperture_objrct_on_last(60*MM)
Plot2.plot_p2(traj.get_last_line2().point_at_middle(),'k.')
print('qs3',traj.get_last_line2().point_at_middle()/MM)
traj=traj.add_strait_line(length=g.GAP3)
# cct345
traj=traj.add_arc_line(radius=g.cct345_big_r, clockwise=True, angle_deg=67.5).as_aperture_objrct_on_last(140.5 * MM)
Plot2.plot_p2(traj.get_last_line2().point_at_middle(),'k.')
print('CCT2',traj.get_last_line2().point_at_middle()/MM)
traj=traj.add_strait_line(length=g.DL2)
print(traj.point_at_end()/MM)
Plot2.plot(traj)
for line2 in traj.get_line2_list():
if isinstance(line2, ArcLine2):
arc = ArcLine2.as_arc_line2(line2)
Plot2.plot_p2s([
arc.point_at_end(), arc.center, arc.point_at_start()
], describe='r--')
Plot2.equal()
Plot2.show()
|
# from base_case import base_case
import os

import base_case

# NOTE: ServerException is assumed to be provided by the server module that uses these case
# classes; it is raised below to report "not found" and other internal errors.
class case_existing_file(base_case.base_case):
'''File exists.'''
def test(self, handler):
return os.path.isfile(handler.full_path)
def act(self, handler):
self.handle_file(handler, handler.full_path)
class case_no_file(base_case.base_case):
def test(self,handler):
return not os.path.exists(handler.full_path)
def act(self,handler):
raise ServerException("'{0}' not found".format(handler.full_path))
class case_always_fail(base_case.base_case):
def test(self,handler):
return True
def act(self,handler):
raise ServerException("'{0}' unknown object".format(handler.path))
class case_directory_index_file(base_case.base_case):
def index_path(self,handler):
return os.path.join(handler.full_path,'index.html')
def test(self,handler):
return os.path.isdir(handler.full_path) and \
os.path.isfile(self.index_path(handler))
def act(self,handler):
handler.handle_file(self.index_path(handler))
class case_directory_no_index_file(base_case.base_case):
'''Serve listing for a directory without an index.html page.'''
def index_path(self, handler):
return os.path.join(handler.full_path, 'index.html')
def test(self, handler):
return os.path.isdir(handler.full_path) and \
not os.path.isfile(self.index_path(handler))
def act(self, handler):
handler.list_dir(handler.full_path)
class case_cgi_file(base_case.base_case):
def test(self,handler):
return os.path.isfile(handler.full_path) and \
               handler.full_path.endswith(".py")
def act(self,handler):
handler.run_cgi(handler.full_path)
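
# A sketch (added for illustration) of how a request handler typically drives these case
# classes: walk a prioritized list, run the first case whose test() matches, and let its
# act() produce the response. The ordering below is an assumption, not taken from the
# original server module.
Cases = [
    case_no_file(),
    case_cgi_file(),
    case_existing_file(),
    case_directory_index_file(),
    case_directory_no_index_file(),
    case_always_fail(),
]


def dispatch(handler):
    for case in Cases:
        if case.test(handler):
            case.act(handler)
            break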
|
from spack import *
from glob import glob
from string import Template
import re
import os
import fnmatch
import sys
import shutil
class Fwlite(CMakePackage):
"""CMSSW FWLite built with cmake"""
homepage = "http://cms-sw.github.io"
url = "https://github.com/gartung/fwlite/archive/refs/tags/11.3.1.2.tar.gz"
version('master', git='https://github.com/gartung/fwlite.git',tag='master')
version('11.3.1.2', sha256='0b293f6ad704faea3cc9e046542f1f86c9245ab30d39a2b54ba4d7423e6acfe6')
resource(name='cmaketools', git='https://github.com/gartung/cmaketools.git',
placement='cmaketools')
resource(name='upgrade-geometry', url='http://cmsrep.cern.ch/cmssw/download/Fireworks-Geometry/20200401/cmsGeom2026.root',
placement='data', sha256='3077e4d9abd62c57d1d71b30fa968ba52a7c12879a7fc71d90d94c4123e426fa', expand=False)
resource(name='geometry', url='https://github.com/cms-data/Fireworks-Geometry/archive/V07-06-00.tar.gz',
placement='data/Fireworks/Geometry/data', sha256='93312e7c60525c66c09c86fdc36db401c281b95ccb2d9195d735f84506f5868b')
resource(name='patcandidates', url='https://github.com/cms-data/DataFormats-PatCandidates/archive/V01-00-01.tar.gz',
placement='data/DataFormats/PatCandidates/data', sha256='5a0941df5a191d0f942e26838103659970ba29cb9cd4ab3d0cc45bcc01b408df')
resource(name='miniaod', url='https://cmsshow-rels.web.cern.ch/cmsShow-rels/samples/11_2/RelValZTTminiaod.root',
placement='data/samples/11_2/miniaod', sha256='4c4ddc418c7131f6eea16ea4bfefa36c2dba8ac2161640bf93b1d7889ce8fa2c', expand=False)
resource(name='aod', url='https://cmsshow-rels.web.cern.ch/cmsShow-rels/samples/11_2/RelVallZTTGenSimReco.root',
placement='data/samples/11_2/aod', sha256='209e6bda0496892d33137bf67ecb583b233e4c962154b6ca574aa33f824ea532', expand=False)
if sys.platform != 'darwin':
patch('patch')
if sys.platform == 'darwin':
depends_on('libuuid')
depends_on('cmake', type='build')
depends_on('root+aqua+opengl~x~tbb')
depends_on('intel-tbb-oneapi')
depends_on('clhep')
depends_on('md5-cms')
depends_on('python')
depends_on('vdt')
depends_on('boost')
depends_on('py-pybind11', type=('link', 'run', 'test'))
depends_on('hepmc')
depends_on('pcre')
depends_on('davix')
depends_on('libsigcpp@2.10.3')
depends_on('tinyxml2@6.2.0')
depends_on('jpeg')
depends_on('cppunit')
depends_on('xerces-c')
depends_on('fmt')
depends_on('eigen')
depends_on('openssl')
def cmake_args(self):
cxxstd = self.spec['root'].variants['cxxstd'].value
args = ['-DCMakeTools_DIR=%s/cmaketools' % self.stage.source_path,
'-DCLHEP_ROOT_DIR=%s' % self.spec['clhep'].prefix,
'-DBOOST_ROOT=%s' % self.spec['boost'].prefix,
'-DTBB_ROOT_DIR=%s' % self.spec['intel-tbb-oneapi'].prefix,
'-DCMSMD5ROOT=%s' % self.spec['md5-cms'].prefix,
'-DDAVIXROOT=%s' % self.spec['davix'].prefix,
'-DSIGCPPROOT=%s' % self.spec['libsigcpp'].prefix,
'-DSIGCPP_INCLUDE_DIR=%s/sigc++-2.0' % self.spec['libsigcpp'].prefix.include,
'-DSIGCPP_LIB_INCLUDE_DIR=%s/sigc++-2.0/include' % self.spec['libsigcpp'].prefix.lib,
'-DTINYXML2ROOT=%s' % self.spec['tinyxml2'].prefix,
'-DCPPUNITROOT=%s' % self.spec['cppunit'].prefix,
'-DXERCESC_ROOT_DIR=%s' % self.spec['xerces-c'].prefix]
args.append('-DFMT_INCLUDE_DIR=%s' % self.spec['fmt'].prefix.include)
args.append('-DOPENSSL_INCLUDE_DIR=%s' % self.spec['openssl'].prefix.include)
args.append('-DEIGEN_INCLUDE_DIR=%s' % self.spec['eigen'].prefix.include)
args.append('-DVDT_ROOT_DIR=%s' % self.spec['vdt'].prefix)
args.append('-DCMAKE_CXX_STANDARD=%s' % cxxstd)
if sys.platform == 'darwin':
args.append('-DUUID_INCLUDE_DIR=%s/include' %
self.spec['libuuid'].prefix)
args.append('-DUUID_ROOT_DIR=%s' %
self.spec['libuuid'].prefix)
        return args
@run_before('cmake')
def move_data(self):
cmssw_version = 'CMSSW.' + str(self.version[:-1])
cmssw_u_version = cmssw_version.replace('.', '_')
mkdirp(join_path(self.stage.source_path,'Fireworks/Core/data'))
with open(join_path(self.stage.source_path,'Fireworks/Core/data/version.txt'), 'w') as f:
f.write('%s' % cmssw_u_version)
shutil.move(join_path(self.stage.source_path, 'data', 'cmsGeom2026.root'), join_path(self.stage.source_path, 'data/Fireworks/Geometry/data', 'cmsGeom2026.root'))
@run_before('install')
def install_data(self):
install_tree(join_path(self.stage.source_path,'data'), join_path(self.prefix,'data'))
def setup_run_environment(self, env):
cmssw_version = 'CMSSW.' + str(self.version[:-1])
cmssw_u_version = cmssw_version.replace('.', '_')
env.set('CMSSW_VERSION', cmssw_u_version)
env.set('ROOT_INCLUDE_PATH', self.prefix.src)
env.set('CMSSW_RELEASE_BASE', self.prefix)
env.set('CMSSW_BASE', '%s' % self.prefix)
env.set('CMSSW_DATA_PATH', '%s/data' % self.prefix)
env.set('CMSSW_SEARCH_PATH', '%s/data/Fireworks/Geometry/data' % self.prefix)
if sys.platform == 'darwin':
env.set('DYLD_FALLBACK_LIBRARY_PATH', self.prefix.lib)
else:
env.set('LD_LIBRARY_PATH', self.prefix.lib)
|
from typing import Tuple, Optional
import torch
from torch import nn, Tensor
from parseridge.parser.modules.attention.soft_attention import Attention
from parseridge.parser.modules.utils import initialize_xavier_dynet_, mask_
class UniversalAttention(Attention):
def __init__(
self,
query_dim: int,
query_output_dim: Optional[int] = None,
key_output_dim: Optional[int] = None,
value_output_dim: Optional[int] = None,
**kwargs,
):
super().__init__(
query_dim=query_dim,
key_dim=query_dim,
query_output_dim=query_output_dim,
key_output_dim=key_output_dim,
value_output_dim=value_output_dim,
**kwargs,
)
self.query_param = nn.Parameter(torch.rand(query_dim))
def forward(
self, keys: Tensor, sequence_lengths: Tensor, values: Tensor = None, **kwargs
) -> Tuple[Tensor, Tensor, Tensor]:
queries = self.query_param.expand(keys.size(0), -1)
return super().forward(queries, keys, sequence_lengths, values)
class LinearAttention(Attention):
def __init__(self, query_dim: int, query_output_dim: Optional[int] = None, **kwargs):
super().__init__(
query_dim=query_dim,
key_dim=query_dim,
query_output_dim=query_output_dim,
key_output_dim=query_output_dim,
**kwargs,
)
self.learn_input = nn.Sequential(
nn.Linear(in_features=query_dim, out_features=query_dim), nn.Tanh()
)
self.similarity_function = nn.Linear(in_features=query_dim, out_features=1)
initialize_xavier_dynet_(self)
def forward(
self, keys: Tensor, sequence_lengths: Tensor, values: Tensor = None, **kwargs
) -> Tuple[Tensor, Tensor, Tensor]:
if values is None:
values = keys
keys = self.learn_input(keys)
# Compare keys to queries
attention_logits = self.similarity_function(keys)
# Mask scores for padding keys
attention_logits = mask_(attention_logits, sequence_lengths, device=self.device)
# Apply normalization function (e.g. softmax)
attention_energies = self.normalize(attention_logits)
# Multiply the values with the attention scores
weighted_values = values * attention_energies
# Compute a weighted average to get a sequence encoding
context_vector = torch.sum(weighted_values, dim=1)
return context_vector, weighted_values, attention_energies
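
# A self-contained sketch (added for illustration, independent of the parseridge Attention base
# class) of the pooling that LinearAttention.forward performs: score each timestep with a linear
# layer, mask the padded positions, softmax over time, and sum the weighted values.
if __name__ == "__main__":
    batch, seq_len, dim = 2, 5, 8
    keys = torch.rand(batch, seq_len, dim)
    lengths = torch.tensor([5, 3])  # the second sequence has two padded timesteps

    scores = nn.Linear(dim, 1)(torch.tanh(nn.Linear(dim, dim)(keys)))     # (batch, seq, 1)
    padding = torch.arange(seq_len).unsqueeze(0) >= lengths.unsqueeze(1)  # (batch, seq)
    scores = scores.masked_fill(padding.unsqueeze(-1), float("-inf"))
    energies = torch.softmax(scores, dim=1)                               # attention weights
    context = (keys * energies).sum(dim=1)                                # (batch, dim)
    print(context.shape)  # torch.Size([2, 8])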
|
# Python Coroutines and Tasks.
# Coroutines declared with async/await syntax are the preferred way of writing asyncio applications.
#
# To actually run a coroutine, asyncio provides three main mechanisms:
#
# > The asyncio.run() function to run the top-level entry point “main()” function.
# > Awaiting on a coroutine.
# > The asyncio.create_task() function to run coroutines concurrently as asyncio Tasks.
import asyncio
import time


# Helper coroutine used below: sleep for `delay` seconds, then print `what`
# (the imports and this helper are added so the snippet runs on its own).
async def say_after(delay, what):
    await asyncio.sleep(delay)
    print(what)


# Running two 'say_after' coroutines concurrently:
async def main():
task1 = asyncio.create_task(
say_after(1, 'hello'))
task2 = asyncio.create_task(
say_after(2, 'world'))
print(f"started at {time.strftime('%X')}")
# Wait until both tasks are completed (should take
# around 2 seconds.)
await task1
await task2
print(f"finished at {time.strftime('%X')}")
|
from waitress import serve
from endpoint import app
if __name__ == "__main__":
    # Serve the Flask app through waitress rather than the Flask dev server
    # (host and port below are illustrative defaults, not taken from the original).
    serve(app, host="0.0.0.0", port=8080)
|
from util import get_image, save_image, main
def download_many(page_number):
for page in range(1, page_number + 1):
image = get_image(page)
save_image(image, '{page:03}.jpg'.format(page=page))
return page_number
if __name__ == '__main__':
main(download_many)
|
import contextlib
import functools
import operator
import os
import pkgutil
import platform
import sys
import warnings
from collections import OrderedDict
from tempfile import TemporaryDirectory
from typing import Any
import pytest
import torch
import torch.fx
import torch.nn as nn
from _utils_internal import get_relative_path
from common_utils import cpu_and_cuda, freeze_rng_state, map_nested_tensor_object, needs_cuda, set_rng_seed
from PIL import Image
from torchvision import models, transforms
from torchvision.models import get_model_builder, list_models
ACCEPT = os.getenv("EXPECTTEST_ACCEPT", "0") == "1"
SKIP_BIG_MODEL = os.getenv("SKIP_BIG_MODEL", "1") == "1"
@contextlib.contextmanager
def disable_tf32():
previous = torch.backends.cudnn.allow_tf32
torch.backends.cudnn.allow_tf32 = False
try:
yield
finally:
torch.backends.cudnn.allow_tf32 = previous
def list_model_fns(module):
return [get_model_builder(name) for name in list_models(module)]
def _get_image(input_shape, real_image, device, dtype=None):
"""This routine loads a real or random image based on `real_image` argument.
Currently, the real image is utilized for the following list of models:
- `retinanet_resnet50_fpn`,
- `retinanet_resnet50_fpn_v2`,
- `keypointrcnn_resnet50_fpn`,
- `fasterrcnn_resnet50_fpn`,
- `fasterrcnn_resnet50_fpn_v2`,
- `fcos_resnet50_fpn`,
- `maskrcnn_resnet50_fpn`,
- `maskrcnn_resnet50_fpn_v2`,
in `test_classification_model` and `test_detection_model`.
    To do so, a keyword argument `real_image` was added to the above-listed models in `_model_params`.
"""
if real_image:
# TODO: Maybe unify file discovery logic with test_image.py
GRACE_HOPPER = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "assets", "encode_jpeg", "grace_hopper_517x606.jpg"
)
img = Image.open(GRACE_HOPPER)
original_width, original_height = img.size
# make the image square
img = img.crop((0, 0, original_width, original_width))
img = img.resize(input_shape[1:3])
convert_tensor = transforms.ToTensor()
image = convert_tensor(img)
assert tuple(image.size()) == input_shape
return image.to(device=device, dtype=dtype)
# RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
return torch.rand(input_shape).to(device=device, dtype=dtype)
@pytest.fixture
def disable_weight_loading(mocker):
"""When testing models, the two slowest operations are the downloading of the weights to a file and loading them
    into the model. Unless you want to test against specific weights, these steps can be disabled without any
drawbacks.
Including this fixture into the signature of your test, i.e. `test_foo(disable_weight_loading)`, will recurse
through all models in `torchvision.models` and will patch all occurrences of the function
`download_state_dict_from_url` as well as the method `load_state_dict` on all subclasses of `nn.Module` to be
no-ops.
    .. warning::
Loaded models are still executable as normal, but will always have random weights. Make sure to not use this
fixture if you want to compare the model output against reference values.
"""
starting_point = models
function_name = "load_state_dict_from_url"
method_name = "load_state_dict"
module_names = {info.name for info in pkgutil.walk_packages(starting_point.__path__, f"{starting_point.__name__}.")}
targets = {f"torchvision._internally_replaced_utils.{function_name}", f"torch.nn.Module.{method_name}"}
for name in module_names:
module = sys.modules.get(name)
if not module:
continue
if function_name in module.__dict__:
targets.add(f"{module.__name__}.{function_name}")
targets.update(
{
f"{module.__name__}.{obj.__name__}.{method_name}"
for obj in module.__dict__.values()
if isinstance(obj, type) and issubclass(obj, nn.Module) and method_name in obj.__dict__
}
)
for target in targets:
# See https://github.com/pytorch/vision/pull/4867#discussion_r743677802 for details
with contextlib.suppress(AttributeError):
mocker.patch(target)
def _get_expected_file(name=None):
# Determine expected file based on environment
expected_file_base = get_relative_path(os.path.realpath(__file__), "expect")
    # Note: for legacy reasons, the reference file names all had "ModelTester.test_" in their names
# We hardcode it here to avoid having to re-generate the reference files
expected_file = os.path.join(expected_file_base, "ModelTester.test_" + name)
expected_file += "_expect.pkl"
if not ACCEPT and not os.path.exists(expected_file):
raise RuntimeError(
f"No expect file exists for {os.path.basename(expected_file)} in {expected_file}; "
"to accept the current output, re-run the failing test after setting the EXPECTTEST_ACCEPT "
"env variable. For example: EXPECTTEST_ACCEPT=1 pytest test/test_models.py -k alexnet"
)
return expected_file
def _assert_expected(output, name, prec=None, atol=None, rtol=None):
"""Test that a python value matches the recorded contents of a file
based on a "check" name. The value must be
pickable with `torch.save`. This file
is placed in the 'expect' directory in the same directory
as the test script. You can automatically update the recorded test
output using an EXPECTTEST_ACCEPT=1 env variable.
"""
expected_file = _get_expected_file(name)
if ACCEPT:
        filename = os.path.basename(expected_file)
print(f"Accepting updated output for {filename}:\n\n{output}")
torch.save(output, expected_file)
MAX_PICKLE_SIZE = 50 * 1000 # 50 KB
binary_size = os.path.getsize(expected_file)
if binary_size > MAX_PICKLE_SIZE:
raise RuntimeError(f"The output for {filename}, is larger than 50kb - got {binary_size}kb")
else:
expected = torch.load(expected_file)
rtol = rtol or prec # keeping prec param for legacy reason, but could be removed ideally
atol = atol or prec
torch.testing.assert_close(output, expected, rtol=rtol, atol=atol, check_dtype=False, check_device=False)
def _check_jit_scriptable(nn_module, args, unwrapper=None, eager_out=None):
"""Check that a nn.Module's results in TorchScript match eager and that it can be exported"""
def get_export_import_copy(m):
"""Save and load a TorchScript model"""
with TemporaryDirectory() as dir:
path = os.path.join(dir, "script.pt")
m.save(path)
imported = torch.jit.load(path)
return imported
sm = torch.jit.script(nn_module)
sm.eval()
if eager_out is None:
with torch.no_grad(), freeze_rng_state():
eager_out = nn_module(*args)
with torch.no_grad(), freeze_rng_state():
script_out = sm(*args)
if unwrapper:
script_out = unwrapper(script_out)
torch.testing.assert_close(eager_out, script_out, atol=1e-4, rtol=1e-4)
m_import = get_export_import_copy(sm)
with torch.no_grad(), freeze_rng_state():
imported_script_out = m_import(*args)
if unwrapper:
imported_script_out = unwrapper(imported_script_out)
torch.testing.assert_close(script_out, imported_script_out, atol=3e-4, rtol=3e-4)
def _check_fx_compatible(model, inputs, eager_out=None):
model_fx = torch.fx.symbolic_trace(model)
if eager_out is None:
eager_out = model(inputs)
with torch.no_grad(), freeze_rng_state():
fx_out = model_fx(inputs)
torch.testing.assert_close(eager_out, fx_out)
def _check_input_backprop(model, inputs):
if isinstance(inputs, list):
requires_grad = list()
for inp in inputs:
requires_grad.append(inp.requires_grad)
inp.requires_grad_(True)
else:
requires_grad = inputs.requires_grad
inputs.requires_grad_(True)
out = model(inputs)
if isinstance(out, dict):
out["out"].sum().backward()
else:
if isinstance(out[0], dict):
out[0]["scores"].sum().backward()
else:
out[0].sum().backward()
if isinstance(inputs, list):
for i, inp in enumerate(inputs):
assert inputs[i].grad is not None
inp.requires_grad_(requires_grad[i])
else:
assert inputs.grad is not None
inputs.requires_grad_(requires_grad)
# If 'unwrapper' is provided it will be called with the script model outputs
# before they are compared to the eager model outputs. This is useful if the
# model outputs are different between TorchScript / Eager mode
script_model_unwrapper = {
"googlenet": lambda x: x.logits,
"inception_v3": lambda x: x.logits,
"fasterrcnn_resnet50_fpn": lambda x: x[1],
"fasterrcnn_resnet50_fpn_v2": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
"maskrcnn_resnet50_fpn": lambda x: x[1],
"maskrcnn_resnet50_fpn_v2": lambda x: x[1],
"keypointrcnn_resnet50_fpn": lambda x: x[1],
"retinanet_resnet50_fpn": lambda x: x[1],
"retinanet_resnet50_fpn_v2": lambda x: x[1],
"ssd300_vgg16": lambda x: x[1],
"ssdlite320_mobilenet_v3_large": lambda x: x[1],
"fcos_resnet50_fpn": lambda x: x[1],
}
# The following models exhibit flaky numerics under autocast in _test_*_model harnesses.
# This may be caused by the harness environment (e.g. num classes, input initialization
# via torch.rand), and does not prove autocast is unsuitable when training with real data
# (autocast has been used successfully with real data for some of these models).
# TODO: investigate why autocast numerics are flaky in the harnesses.
#
# For the following models, _test_*_model harnesses skip numerical checks on outputs when
# trying autocast. However, they still try an autocasted forward pass, so they still ensure
# autocast coverage suffices to prevent dtype errors in each model.
autocast_flaky_numerics = (
"inception_v3",
"resnet101",
"resnet152",
"wide_resnet101_2",
"deeplabv3_resnet50",
"deeplabv3_resnet101",
"deeplabv3_mobilenet_v3_large",
"fcn_resnet50",
"fcn_resnet101",
"lraspp_mobilenet_v3_large",
"maskrcnn_resnet50_fpn",
"maskrcnn_resnet50_fpn_v2",
"keypointrcnn_resnet50_fpn",
)
# The tests for the following quantized models are flaky possibly due to inconsistent
# rounding errors in different platforms. For this reason the input/output consistency
# tests under test_quantized_classification_model will be skipped for the following models.
quantized_flaky_models = ("inception_v3", "resnet50")
# The tests for the following detection models are flaky.
# We run those tests on float64 to avoid floating point errors.
# FIXME: we shouldn't have to do that :'/
detection_flaky_models = ("keypointrcnn_resnet50_fpn", "maskrcnn_resnet50_fpn", "maskrcnn_resnet50_fpn_v2")
# The following contains configuration parameters for all models which are used by
# the _test_*_model methods.
_model_params = {
"inception_v3": {"input_shape": (1, 3, 299, 299), "init_weights": True},
"retinanet_resnet50_fpn": {
"num_classes": 20,
"score_thresh": 0.01,
"min_size": 224,
"max_size": 224,
"input_shape": (3, 224, 224),
"real_image": True,
},
"retinanet_resnet50_fpn_v2": {
"num_classes": 20,
"score_thresh": 0.01,
"min_size": 224,
"max_size": 224,
"input_shape": (3, 224, 224),
"real_image": True,
},
"keypointrcnn_resnet50_fpn": {
"num_classes": 2,
"min_size": 224,
"max_size": 224,
"box_score_thresh": 0.17,
"input_shape": (3, 224, 224),
"real_image": True,
},
"fasterrcnn_resnet50_fpn": {
"num_classes": 20,
"min_size": 224,
"max_size": 224,
"input_shape": (3, 224, 224),
"real_image": True,
},
"fasterrcnn_resnet50_fpn_v2": {
"num_classes": 20,
"min_size": 224,
"max_size": 224,
"input_shape": (3, 224, 224),
"real_image": True,
},
"fcos_resnet50_fpn": {
"num_classes": 2,
"score_thresh": 0.05,
"min_size": 224,
"max_size": 224,
"input_shape": (3, 224, 224),
"real_image": True,
},
"maskrcnn_resnet50_fpn": {
"num_classes": 10,
"min_size": 224,
"max_size": 224,
"input_shape": (3, 224, 224),
"real_image": True,
},
"maskrcnn_resnet50_fpn_v2": {
"num_classes": 10,
"min_size": 224,
"max_size": 224,
"input_shape": (3, 224, 224),
"real_image": True,
},
"fasterrcnn_mobilenet_v3_large_fpn": {
"box_score_thresh": 0.02076,
},
"fasterrcnn_mobilenet_v3_large_320_fpn": {
"box_score_thresh": 0.02076,
"rpn_pre_nms_top_n_test": 1000,
"rpn_post_nms_top_n_test": 1000,
},
"vit_h_14": {
"image_size": 56,
"input_shape": (1, 3, 56, 56),
},
"mvit_v1_b": {
"input_shape": (1, 3, 16, 224, 224),
},
"mvit_v2_s": {
"input_shape": (1, 3, 16, 224, 224),
},
"s3d": {
"input_shape": (1, 3, 16, 224, 224),
},
"googlenet": {"init_weights": True},
}
# speeding up slow models:
slow_models = [
"convnext_base",
"convnext_large",
"resnext101_32x8d",
"resnext101_64x4d",
"wide_resnet101_2",
"efficientnet_b6",
"efficientnet_b7",
"efficientnet_v2_m",
"efficientnet_v2_l",
"regnet_y_16gf",
"regnet_y_32gf",
"regnet_y_128gf",
"regnet_x_16gf",
"regnet_x_32gf",
"swin_t",
"swin_s",
"swin_b",
"swin_v2_t",
"swin_v2_s",
"swin_v2_b",
]
for m in slow_models:
_model_params[m] = {"input_shape": (1, 3, 64, 64)}
# skip big models to reduce memory usage on CI test. We can exclude combinations of (platform-system, device).
skipped_big_models = {
"vit_h_14": {("Windows", "cpu"), ("Windows", "cuda")},
"regnet_y_128gf": {("Windows", "cpu"), ("Windows", "cuda")},
"mvit_v1_b": {("Windows", "cuda"), ("Linux", "cuda")},
"mvit_v2_s": {("Windows", "cuda"), ("Linux", "cuda")},
}
def is_skippable(model_name, device):
if model_name not in skipped_big_models:
return False
platform_system = platform.system()
device_name = str(device).split(":")[0]
return (platform_system, device_name) in skipped_big_models[model_name]
# The following contains configuration and expected values to be used in tests that are model-specific
_model_tests_values = {
"retinanet_resnet50_fpn": {
"max_trainable": 5,
"n_trn_params_per_layer": [36, 46, 65, 78, 88, 89],
},
"retinanet_resnet50_fpn_v2": {
"max_trainable": 5,
"n_trn_params_per_layer": [44, 74, 131, 170, 200, 203],
},
"keypointrcnn_resnet50_fpn": {
"max_trainable": 5,
"n_trn_params_per_layer": [48, 58, 77, 90, 100, 101],
},
"fasterrcnn_resnet50_fpn": {
"max_trainable": 5,
"n_trn_params_per_layer": [30, 40, 59, 72, 82, 83],
},
"fasterrcnn_resnet50_fpn_v2": {
"max_trainable": 5,
"n_trn_params_per_layer": [50, 80, 137, 176, 206, 209],
},
"maskrcnn_resnet50_fpn": {
"max_trainable": 5,
"n_trn_params_per_layer": [42, 52, 71, 84, 94, 95],
},
"maskrcnn_resnet50_fpn_v2": {
"max_trainable": 5,
"n_trn_params_per_layer": [66, 96, 153, 192, 222, 225],
},
"fasterrcnn_mobilenet_v3_large_fpn": {
"max_trainable": 6,
"n_trn_params_per_layer": [22, 23, 44, 70, 91, 97, 100],
},
"fasterrcnn_mobilenet_v3_large_320_fpn": {
"max_trainable": 6,
"n_trn_params_per_layer": [22, 23, 44, 70, 91, 97, 100],
},
"ssd300_vgg16": {
"max_trainable": 5,
"n_trn_params_per_layer": [45, 51, 57, 63, 67, 71],
},
"ssdlite320_mobilenet_v3_large": {
"max_trainable": 6,
"n_trn_params_per_layer": [96, 99, 138, 200, 239, 257, 266],
},
"fcos_resnet50_fpn": {
"max_trainable": 5,
"n_trn_params_per_layer": [54, 64, 83, 96, 106, 107],
},
}
def _make_sliced_model(model, stop_layer):
layers = OrderedDict()
for name, layer in model.named_children():
layers[name] = layer
if name == stop_layer:
break
new_model = torch.nn.Sequential(layers)
return new_model
@pytest.mark.parametrize("model_fn", [models.densenet121, models.densenet169, models.densenet201, models.densenet161])
def test_memory_efficient_densenet(model_fn):
input_shape = (1, 3, 300, 300)
x = torch.rand(input_shape)
model1 = model_fn(num_classes=50, memory_efficient=True)
params = model1.state_dict()
num_params = sum(x.numel() for x in model1.parameters())
model1.eval()
out1 = model1(x)
out1.sum().backward()
num_grad = sum(x.grad.numel() for x in model1.parameters() if x.grad is not None)
model2 = model_fn(num_classes=50, memory_efficient=False)
model2.load_state_dict(params)
model2.eval()
out2 = model2(x)
assert num_params == num_grad
torch.testing.assert_close(out1, out2, rtol=0.0, atol=1e-5)
_check_input_backprop(model1, x)
_check_input_backprop(model2, x)
@pytest.mark.parametrize("dilate_layer_2", (True, False))
@pytest.mark.parametrize("dilate_layer_3", (True, False))
@pytest.mark.parametrize("dilate_layer_4", (True, False))
def test_resnet_dilation(dilate_layer_2, dilate_layer_3, dilate_layer_4):
# TODO improve tests to also check that each layer has the right dimensionality
model = models.resnet50(replace_stride_with_dilation=(dilate_layer_2, dilate_layer_3, dilate_layer_4))
model = _make_sliced_model(model, stop_layer="layer4")
model.eval()
x = torch.rand(1, 3, 224, 224)
out = model(x)
f = 2 ** sum((dilate_layer_2, dilate_layer_3, dilate_layer_4))
assert out.shape == (1, 2048, 7 * f, 7 * f)
def test_mobilenet_v2_residual_setting():
model = models.mobilenet_v2(inverted_residual_setting=[[1, 16, 1, 1], [6, 24, 2, 2]])
model.eval()
x = torch.rand(1, 3, 224, 224)
out = model(x)
assert out.shape[-1] == 1000
@pytest.mark.parametrize("model_fn", [models.mobilenet_v2, models.mobilenet_v3_large, models.mobilenet_v3_small])
def test_mobilenet_norm_layer(model_fn):
model = model_fn()
assert any(isinstance(x, nn.BatchNorm2d) for x in model.modules())
def get_gn(num_channels):
return nn.GroupNorm(1, num_channels)
model = model_fn(norm_layer=get_gn)
assert not (any(isinstance(x, nn.BatchNorm2d) for x in model.modules()))
assert any(isinstance(x, nn.GroupNorm) for x in model.modules())
def test_inception_v3_eval():
kwargs = {}
kwargs["transform_input"] = True
kwargs["aux_logits"] = True
kwargs["init_weights"] = False
name = "inception_v3"
model = models.Inception3(**kwargs)
model.aux_logits = False
model.AuxLogits = None
model = model.eval()
x = torch.rand(1, 3, 299, 299)
_check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(name, None))
_check_input_backprop(model, x)
def test_fasterrcnn_double():
model = models.detection.fasterrcnn_resnet50_fpn(num_classes=50, weights=None, weights_backbone=None)
model.double()
model.eval()
input_shape = (3, 300, 300)
x = torch.rand(input_shape, dtype=torch.float64)
model_input = [x]
out = model(model_input)
assert model_input[0] is x
assert len(out) == 1
assert "boxes" in out[0]
assert "scores" in out[0]
assert "labels" in out[0]
_check_input_backprop(model, model_input)
def test_googlenet_eval():
kwargs = {}
kwargs["transform_input"] = True
kwargs["aux_logits"] = True
kwargs["init_weights"] = False
name = "googlenet"
model = models.GoogLeNet(**kwargs)
model.aux_logits = False
model.aux1 = None
model.aux2 = None
model = model.eval()
x = torch.rand(1, 3, 224, 224)
_check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(name, None))
_check_input_backprop(model, x)
@needs_cuda
def test_fasterrcnn_switch_devices():
def checkOut(out):
assert len(out) == 1
assert "boxes" in out[0]
assert "scores" in out[0]
assert "labels" in out[0]
model = models.detection.fasterrcnn_resnet50_fpn(num_classes=50, weights=None, weights_backbone=None)
model.cuda()
model.eval()
input_shape = (3, 300, 300)
x = torch.rand(input_shape, device="cuda")
model_input = [x]
out = model(model_input)
assert model_input[0] is x
checkOut(out)
with torch.cuda.amp.autocast():
out = model(model_input)
checkOut(out)
_check_input_backprop(model, model_input)
# now switch to cpu and make sure it works
model.cpu()
x = x.cpu()
out_cpu = model([x])
checkOut(out_cpu)
_check_input_backprop(model, [x])
def test_generalizedrcnn_transform_repr():
min_size, max_size = 224, 299
image_mean = [0.485, 0.456, 0.406]
image_std = [0.229, 0.224, 0.225]
t = models.detection.transform.GeneralizedRCNNTransform(
min_size=min_size, max_size=max_size, image_mean=image_mean, image_std=image_std
)
# Check integrity of object __repr__ attribute
expected_string = "GeneralizedRCNNTransform("
_indent = "\n "
expected_string += f"{_indent}Normalize(mean={image_mean}, std={image_std})"
expected_string += f"{_indent}Resize(min_size=({min_size},), max_size={max_size}, "
expected_string += "mode='bilinear')\n)"
assert t.__repr__() == expected_string
test_vit_conv_stem_configs = [
models.vision_transformer.ConvStemConfig(kernel_size=3, stride=2, out_channels=64),
models.vision_transformer.ConvStemConfig(kernel_size=3, stride=2, out_channels=128),
models.vision_transformer.ConvStemConfig(kernel_size=3, stride=1, out_channels=128),
models.vision_transformer.ConvStemConfig(kernel_size=3, stride=2, out_channels=256),
models.vision_transformer.ConvStemConfig(kernel_size=3, stride=1, out_channels=256),
models.vision_transformer.ConvStemConfig(kernel_size=3, stride=2, out_channels=512),
]
def vitc_b_16(**kwargs: Any):
return models.VisionTransformer(
image_size=224,
patch_size=16,
num_layers=12,
num_heads=12,
hidden_dim=768,
mlp_dim=3072,
conv_stem_configs=test_vit_conv_stem_configs,
**kwargs,
)
@pytest.mark.parametrize("model_fn", [vitc_b_16])
@pytest.mark.parametrize("dev", cpu_and_cuda())
def test_vitc_models(model_fn, dev):
test_classification_model(model_fn, dev)
@disable_tf32() # see: https://github.com/pytorch/vision/issues/7618
@pytest.mark.parametrize("model_fn", list_model_fns(models))
@pytest.mark.parametrize("dev", cpu_and_cuda())
def test_classification_model(model_fn, dev):
set_rng_seed(0)
defaults = {
"num_classes": 50,
"input_shape": (1, 3, 224, 224),
}
model_name = model_fn.__name__
if SKIP_BIG_MODEL and is_skippable(model_name, dev):
pytest.skip("Skipped to reduce memory usage. Set env var SKIP_BIG_MODEL=0 to enable test for this model")
kwargs = {**defaults, **_model_params.get(model_name, {})}
num_classes = kwargs.get("num_classes")
input_shape = kwargs.pop("input_shape")
real_image = kwargs.pop("real_image", False)
model = model_fn(**kwargs)
model.eval().to(device=dev)
x = _get_image(input_shape=input_shape, real_image=real_image, device=dev)
out = model(x)
# FIXME: this if/else is nasty and only here to please our CI prior to the
    # release. We should rethink these tests altogether.
if model_name == "resnet101":
prec = 0.2
else:
# FIXME: this is probably still way too high.
prec = 0.1
_assert_expected(out.cpu(), model_name, prec=prec)
assert out.shape[-1] == num_classes
_check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(model_name, None), eager_out=out)
_check_fx_compatible(model, x, eager_out=out)
if dev == "cuda":
with torch.cuda.amp.autocast():
out = model(x)
# See autocast_flaky_numerics comment at top of file.
if model_name not in autocast_flaky_numerics:
_assert_expected(out.cpu(), model_name, prec=0.1)
assert out.shape[-1] == 50
_check_input_backprop(model, x)
@pytest.mark.parametrize("model_fn", list_model_fns(models.segmentation))
@pytest.mark.parametrize("dev", cpu_and_cuda())
def test_segmentation_model(model_fn, dev):
set_rng_seed(0)
defaults = {
"num_classes": 10,
"weights_backbone": None,
"input_shape": (1, 3, 32, 32),
}
model_name = model_fn.__name__
kwargs = {**defaults, **_model_params.get(model_name, {})}
input_shape = kwargs.pop("input_shape")
model = model_fn(**kwargs)
model.eval().to(device=dev)
# RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
x = torch.rand(input_shape).to(device=dev)
with torch.no_grad(), freeze_rng_state():
out = model(x)
def check_out(out):
prec = 0.01
try:
# We first try to assert the entire output if possible. This is not
# only the best way to assert results but also handles the cases
# where we need to create a new expected result.
_assert_expected(out.cpu(), model_name, prec=prec)
except AssertionError:
# Unfortunately some segmentation models are flaky with autocast
# so instead of validating the probability scores, check that the class
# predictions match.
expected_file = _get_expected_file(model_name)
expected = torch.load(expected_file)
torch.testing.assert_close(
out.argmax(dim=1), expected.argmax(dim=1), rtol=prec, atol=prec, check_device=False
)
return False # Partial validation performed
return True # Full validation performed
full_validation = check_out(out["out"])
_check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(model_name, None), eager_out=out)
_check_fx_compatible(model, x, eager_out=out)
if dev == "cuda":
with torch.cuda.amp.autocast(), torch.no_grad(), freeze_rng_state():
out = model(x)
# See autocast_flaky_numerics comment at top of file.
if model_name not in autocast_flaky_numerics:
full_validation &= check_out(out["out"])
if not full_validation:
msg = (
f"The output of {test_segmentation_model.__name__} could only be partially validated. "
"This is likely due to unit-test flakiness, but you may "
"want to do additional manual checks if you made "
"significant changes to the codebase."
)
warnings.warn(msg, RuntimeWarning)
pytest.skip(msg)
_check_input_backprop(model, x)
@pytest.mark.parametrize("model_fn", list_model_fns(models.detection))
@pytest.mark.parametrize("dev", cpu_and_cuda())
def test_detection_model(model_fn, dev):
set_rng_seed(0)
defaults = {
"num_classes": 50,
"weights_backbone": None,
"input_shape": (3, 300, 300),
}
model_name = model_fn.__name__
if model_name in detection_flaky_models:
dtype = torch.float64
else:
dtype = torch.get_default_dtype()
kwargs = {**defaults, **_model_params.get(model_name, {})}
input_shape = kwargs.pop("input_shape")
real_image = kwargs.pop("real_image", False)
model = model_fn(**kwargs)
model.eval().to(device=dev, dtype=dtype)
x = _get_image(input_shape=input_shape, real_image=real_image, device=dev, dtype=dtype)
model_input = [x]
with torch.no_grad(), freeze_rng_state():
out = model(model_input)
assert model_input[0] is x
def check_out(out):
assert len(out) == 1
def compact(tensor):
tensor = tensor.cpu()
size = tensor.size()
elements_per_sample = functools.reduce(operator.mul, size[1:], 1)
if elements_per_sample > 30:
return compute_mean_std(tensor)
else:
return subsample_tensor(tensor)
def subsample_tensor(tensor):
num_elems = tensor.size(0)
num_samples = 20
if num_elems <= num_samples:
return tensor
ith_index = num_elems // num_samples
return tensor[ith_index - 1 :: ith_index]
def compute_mean_std(tensor):
# can't compute mean of integral tensor
tensor = tensor.to(torch.double)
mean = torch.mean(tensor)
std = torch.std(tensor)
return {"mean": mean, "std": std}
output = map_nested_tensor_object(out, tensor_map_fn=compact)
prec = 0.01
try:
# We first try to assert the entire output if possible. This is not
# only the best way to assert results but also handles the cases
# where we need to create a new expected result.
_assert_expected(output, model_name, prec=prec)
except AssertionError:
# Unfortunately detection models are flaky due to the unstable sort
# in NMS. If matching across all outputs fails, use the same approach
# as in NMSTester.test_nms_cuda to see if this is caused by duplicate
# scores.
expected_file = _get_expected_file(model_name)
expected = torch.load(expected_file)
torch.testing.assert_close(
output[0]["scores"], expected[0]["scores"], rtol=prec, atol=prec, check_device=False, check_dtype=False
)
# Note: Fmassa proposed turning off NMS by adapting the threshold
# and then using the Hungarian algorithm as in DETR to find the
# best match between output and expected boxes and eliminate some
# of the flakiness. Worth exploring.
return False # Partial validation performed
return True # Full validation performed
full_validation = check_out(out)
_check_jit_scriptable(model, ([x],), unwrapper=script_model_unwrapper.get(model_name, None), eager_out=out)
if dev == "cuda":
with torch.cuda.amp.autocast(), torch.no_grad(), freeze_rng_state():
out = model(model_input)
# See autocast_flaky_numerics comment at top of file.
if model_name not in autocast_flaky_numerics:
full_validation &= check_out(out)
if not full_validation:
msg = (
f"The output of {test_detection_model.__name__} could only be partially validated. "
"This is likely due to unit-test flakiness, but you may "
"want to do additional manual checks if you made "
"significant changes to the codebase."
)
warnings.warn(msg, RuntimeWarning)
pytest.skip(msg)
_check_input_backprop(model, model_input)
@pytest.mark.parametrize("model_fn", list_model_fns(models.detection))
def test_detection_model_validation(model_fn):
set_rng_seed(0)
model = model_fn(num_classes=50, weights=None, weights_backbone=None)
input_shape = (3, 300, 300)
x = [torch.rand(input_shape)]
# validate that targets are present in training
with pytest.raises(AssertionError):
model(x)
# validate type
targets = [{"boxes": 0.0}]
with pytest.raises(AssertionError):
model(x, targets=targets)
# validate boxes shape
for boxes in (torch.rand((4,)), torch.rand((1, 5))):
targets = [{"boxes": boxes}]
with pytest.raises(AssertionError):
model(x, targets=targets)
# validate that no degenerate boxes are present
boxes = torch.tensor([[1, 3, 1, 4], [2, 4, 3, 4]])
targets = [{"boxes": boxes}]
with pytest.raises(AssertionError):
model(x, targets=targets)
@pytest.mark.parametrize("model_fn", list_model_fns(models.video))
@pytest.mark.parametrize("dev", cpu_and_cuda())
def test_video_model(model_fn, dev):
set_rng_seed(0)
# the default input shape is
    # bs * num_channels * clip_len * h * w
defaults = {
"input_shape": (1, 3, 4, 112, 112),
"num_classes": 50,
}
model_name = model_fn.__name__
if SKIP_BIG_MODEL and is_skippable(model_name, dev):
pytest.skip("Skipped to reduce memory usage. Set env var SKIP_BIG_MODEL=0 to enable test for this model")
kwargs = {**defaults, **_model_params.get(model_name, {})}
num_classes = kwargs.get("num_classes")
input_shape = kwargs.pop("input_shape")
# test both basicblock and Bottleneck
model = model_fn(**kwargs)
model.eval().to(device=dev)
# RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
x = torch.rand(input_shape).to(device=dev)
out = model(x)
_assert_expected(out.cpu(), model_name, prec=0.1)
assert out.shape[-1] == num_classes
_check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(model_name, None), eager_out=out)
_check_fx_compatible(model, x, eager_out=out)
assert out.shape[-1] == num_classes
if dev == "cuda":
with torch.cuda.amp.autocast():
out = model(x)
# See autocast_flaky_numerics comment at top of file.
if model_name not in autocast_flaky_numerics:
_assert_expected(out.cpu(), model_name, prec=0.1)
assert out.shape[-1] == num_classes
_check_input_backprop(model, x)
@pytest.mark.skipif(
not (
"fbgemm" in torch.backends.quantized.supported_engines
and "qnnpack" in torch.backends.quantized.supported_engines
),
reason="This Pytorch Build has not been built with fbgemm and qnnpack",
)
@pytest.mark.parametrize("model_fn", list_model_fns(models.quantization))
def test_quantized_classification_model(model_fn):
set_rng_seed(0)
defaults = {
"num_classes": 5,
"input_shape": (1, 3, 224, 224),
"quantize": True,
}
model_name = model_fn.__name__
kwargs = {**defaults, **_model_params.get(model_name, {})}
input_shape = kwargs.pop("input_shape")
# First check if quantize=True provides models that can run with input data
model = model_fn(**kwargs)
model.eval()
x = torch.rand(input_shape)
out = model(x)
if model_name not in quantized_flaky_models:
_assert_expected(out.cpu(), model_name + "_quantized", prec=2e-2)
assert out.shape[-1] == 5
_check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(model_name, None), eager_out=out)
_check_fx_compatible(model, x, eager_out=out)
else:
try:
torch.jit.script(model)
except Exception as e:
raise AssertionError("model cannot be scripted.") from e
kwargs["quantize"] = False
for eval_mode in [True, False]:
model = model_fn(**kwargs)
if eval_mode:
model.eval()
model.qconfig = torch.ao.quantization.default_qconfig
else:
model.train()
model.qconfig = torch.ao.quantization.default_qat_qconfig
model.fuse_model(is_qat=not eval_mode)
if eval_mode:
torch.ao.quantization.prepare(model, inplace=True)
else:
torch.ao.quantization.prepare_qat(model, inplace=True)
model.eval()
torch.ao.quantization.convert(model, inplace=True)
@pytest.mark.parametrize("model_fn", list_model_fns(models.detection))
def test_detection_model_trainable_backbone_layers(model_fn, disable_weight_loading):
model_name = model_fn.__name__
max_trainable = _model_tests_values[model_name]["max_trainable"]
n_trainable_params = []
for trainable_layers in range(0, max_trainable + 1):
model = model_fn(weights=None, weights_backbone="DEFAULT", trainable_backbone_layers=trainable_layers)
n_trainable_params.append(len([p for p in model.parameters() if p.requires_grad]))
assert n_trainable_params == _model_tests_values[model_name]["n_trn_params_per_layer"]
@needs_cuda
@pytest.mark.parametrize("model_fn", list_model_fns(models.optical_flow))
@pytest.mark.parametrize("scripted", (False, True))
def test_raft(model_fn, scripted):
torch.manual_seed(0)
    # We need very small images, otherwise the pickle size would exceed the 50 KB limit.
# As a result we need to override the correlation pyramid to not downsample
# too much, otherwise we would get nan values (effective H and W would be
# reduced to 1)
corr_block = models.optical_flow.raft.CorrBlock(num_levels=2, radius=2)
model = model_fn(corr_block=corr_block).eval().to("cuda")
if scripted:
model = torch.jit.script(model)
bs = 1
img1 = torch.rand(bs, 3, 80, 72).cuda()
img2 = torch.rand(bs, 3, 80, 72).cuda()
preds = model(img1, img2)
flow_pred = preds[-1]
# Tolerance is fairly high, but there are 2 * H * W outputs to check
    # The .pkl files were generated on the AWS cluster; on the CI the results look slightly different
_assert_expected(flow_pred.cpu(), name=model_fn.__name__, atol=1e-2, rtol=1)
def test_presets_antialias():
img = torch.randint(0, 256, size=(1, 3, 224, 224), dtype=torch.uint8)
match = "The default value of the antialias parameter"
with pytest.warns(UserWarning, match=match):
models.ResNet18_Weights.DEFAULT.transforms()(img)
with pytest.warns(UserWarning, match=match):
models.segmentation.DeepLabV3_ResNet50_Weights.DEFAULT.transforms()(img)
with warnings.catch_warnings():
warnings.simplefilter("error")
models.ResNet18_Weights.DEFAULT.transforms(antialias=True)(img)
models.segmentation.DeepLabV3_ResNet50_Weights.DEFAULT.transforms(antialias=True)(img)
models.detection.FasterRCNN_ResNet50_FPN_Weights.DEFAULT.transforms()(img)
models.video.R3D_18_Weights.DEFAULT.transforms()(img)
models.optical_flow.Raft_Small_Weights.DEFAULT.transforms()(img, img)
if __name__ == "__main__":
pytest.main([__file__])
|
from classifier import NaiveBayseClassifier
from data_helper import load_data
from utils import printResult
import numpy as np
import argparse
"""
This program demonstrates the process of a naive Bayes classifier.
The usage of this from-scratch implementation mirrors sklearn,
and the data-loading style mirrors keras.
Author : SunnerLi
Date : 2017/10/04
"""
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type=int, default=1, dest='mode', help='discrete mode is 0, continuous mode is 1')
args = parser.parse_args()
print('load data...')
(train_x, train_y), (test_x, test_y) = load_data()
clf = NaiveBayseClassifier(mode=args.mode)
print('train...')
clf.fit(train_x, train_y)
print('predict...')
res = clf.predict(test_x)
printResult(res, test_y)
|
from django.shortcuts import redirect
from django.views.generic import CreateView
from todo.models import *
class TaskCreateView(CreateView):
template_name = "task/form.html"
model = Task
success_url = '/story'
fields = ('body', 'end', 'status',)
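    # Attach the parent Story (looked up from the 'fk' URL kwarg) before saving,
    # since the story is not one of the submitted form fields.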
def form_valid(self, form):
fk = self.kwargs.get('fk')
story = Story.objects.get(pk = fk)
self.object = form.save(commit = False)
self.object.story = story
self.object.save()
return redirect(self.success_url)
|
from enum import Enum
from math import floor
from tkinter import messagebox
import random
import pygame
class Maze:
ARENA_HEIGHT = 10
ARENA_WIDTH = 10
ARENA_SIZE = ARENA_WIDTH * ARENA_HEIGHT
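    # The maze is generated on a half-resolution grid: each Node covers a 2x2
    # block of arena cells, so there are (ARENA_WIDTH/2) * (ARENA_HEIGHT/2)
    # nodes. generateTourNumber() later expands the spanning tree over these
    # nodes into a Hamiltonian cycle visiting all ARENA_SIZE cells.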
class Node:
def __init__(self):
self.visited = False
self.canGoRight = False
self.canGoDown = False
def __init__(self):
self.nodes = [self.Node() for i in range(floor(self.ARENA_SIZE / 4))]
self.tourToNumber = [None] * self.ARENA_SIZE
def getPathNumber(self, x, y):
return self.tourToNumber[floor(x + self.ARENA_WIDTH * y)]
def markVisited(self, x, y):
self.nodes[floor(x + y * self.ARENA_WIDTH / 2)].visited = True
def markCanGoRight(self, x, y):
self.nodes[floor(x + y * self.ARENA_WIDTH / 2)].canGoRight = True
def markCanGoDown(self, x, y):
self.nodes[floor(x + y * self.ARENA_WIDTH / 2)].canGoDown = True
def canGoRight(self, x, y):
return self.nodes[floor(x + y * self.ARENA_WIDTH / 2)].canGoRight
def canGoDown(self, x, y):
return self.nodes[floor(x + y * self.ARENA_WIDTH / 2)].canGoDown
def canGoLeft(self, x, y):
if x == 0:
return False
return self.nodes[floor((x - 1) + y * self.ARENA_WIDTH / 2)].canGoRight
def canGoUp(self, x, y):
if (y == 0):
return False
return self.nodes[floor(x + (y - 1) * self.ARENA_WIDTH / 2)].canGoDown
def isVisited(self, x, y):
return self.nodes[floor(x + y * self.ARENA_WIDTH / 2)].visited
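    # Number of tour cells strictly between cell `b` and cell `a` when walking
    # forward along the Hamiltonian cycle (wraps around after ARENA_SIZE cells).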
def calc_dist(self, a, b):
if (a > b):
dist = a - b - 1
else:
dist = a - b - 1 + self.ARENA_SIZE
return dist
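    # Carve a random spanning tree over the half-resolution node grid
    # (generate_r), then derive the cell-by-cell tour numbering from it
    # (generateTourNumber).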
def generate(self):
self.generate_r(-1, -1, 0, 0)
self.generateTourNumber()
def generate_r(self, fromx, fromy, x, y):
if (x < 0 or y < 0 or x >= self.ARENA_WIDTH / 2 or y >= self.ARENA_HEIGHT / 2):
return
if (self.isVisited(x, y)):
return
self.markVisited(x, y)
if (fromx != -1):
if (fromx < x):
self.markCanGoRight(fromx, fromy)
elif (fromx > x):
self.markCanGoRight(x, y)
elif (fromy < y):
self.markCanGoDown(fromx, fromy)
elif (fromy > y):
self.markCanGoDown(x, y)
for i in range(2):
r = random.randint(0, 3)
if r == 0:
self.generate_r(x, y, x - 1, y)
elif r == 1:
self.generate_r(x, y, x + 1, y)
elif r == 2:
self.generate_r(x, y, x, y - 1)
elif r == 3:
self.generate_r(x, y, x, y + 1)
self.generate_r(x, y, x - 1, y)
self.generate_r(x, y, x + 1, y)
self.generate_r(x, y, x, y + 1)
self.generate_r(x, y, x, y - 1)
def findNextDir(self, x, y, dir):
if (dir == Directions.right):
if (self.canGoUp(x, y)):
return Directions.up
if (self.canGoRight(x, y)):
return Directions.right
if (self.canGoDown(x, y)):
return Directions.down
return Directions.left
elif (dir == Directions.down):
if (self.canGoRight(x, y)):
return Directions.right
if (self.canGoDown(x, y)):
return Directions.down
if (self.canGoLeft(x, y)):
return Directions.left
return Directions.up
elif (dir == Directions.left):
if (self.canGoDown(x, y)):
return Directions.down
if (self.canGoLeft(x, y)):
return Directions.left
if (self.canGoUp(x, y)):
return Directions.up
return Directions.right
elif (dir == Directions.up):
if (self.canGoLeft(x, y)):
return Directions.left
if (self.canGoUp(x, y)):
return Directions.up
if (self.canGoRight(x, y)):
return Directions.right
return Directions.down
return Directions.none
def setTourNumber(self, x, y, number):
if (not self.getPathNumber(x, y) == None):
return
self.tourToNumber[x + self.ARENA_WIDTH * y] = number
def debug_print_maze_path(self):
for y in range(self.ARENA_HEIGHT):
a = ""
for x in range(self.ARENA_WIDTH):
if self.getPathNumber(x, y) > 10:
a = a + " " + str(self.getPathNumber(x, y))
else:
a = a + " " + str(self.getPathNumber(x, y)) + " "
print(a + "\n")
def generateTourNumber(self):
start_x = 0
start_y = 0
x = start_x
y = start_y
start_dir = Directions.up if self.canGoDown(x, y) else Directions.left
dir = start_dir
number = 0
while (number != self.ARENA_SIZE):
nextDir = self.findNextDir(x, y, dir)
if dir == Directions.right:
self.setTourNumber(x * 2, y * 2, number)
number = number + 1
if (nextDir == dir or nextDir == Directions.down or nextDir == Directions.left):
self.setTourNumber(x * 2 + 1, y * 2, number)
number = number + 1
if (nextDir == Directions.down or nextDir == Directions.left):
self.setTourNumber(x * 2 + 1, y * 2 + 1, number)
number = number + 1
if (nextDir == Directions.left):
self.setTourNumber(x * 2, y * 2 + 1, number)
number = number + 1
elif dir == Directions.down:
self.setTourNumber(x * 2 + 1, y * 2, number)
number = number + 1
if nextDir == dir or nextDir == Directions.left or nextDir == Directions.up:
self.setTourNumber(x * 2 + 1, y * 2 + 1, number)
number = number + 1
if nextDir == Directions.left or nextDir == Directions.up:
self.setTourNumber(x * 2, y * 2 + 1, number)
number = number + 1
if (nextDir == Directions.up):
self.setTourNumber(x * 2, y * 2, number)
number = number + 1
elif dir == Directions.left:
self.setTourNumber(x * 2 + 1, y * 2 + 1, number)
number = number + 1
if (nextDir == dir or nextDir == Directions.up or nextDir == Directions.right):
self.setTourNumber(x * 2, y * 2 + 1, number)
number = number + 1
if (nextDir == Directions.up or nextDir == Directions.right):
self.setTourNumber(x * 2, y * 2, number)
number = number + 1
if (nextDir == Directions.right):
self.setTourNumber(x * 2 + 1, y * 2, number)
number = number + 1
elif dir == Directions.up:
self.setTourNumber(x * 2, y * 2 + 1, number)
number = number + 1
if (nextDir == dir or nextDir == Directions.right or nextDir == Directions.down):
self.setTourNumber(x * 2, y * 2, number)
number = number + 1
if (nextDir == Directions.right or nextDir == Directions.down):
self.setTourNumber(x * 2 + 1, y * 2, number)
number = number + 1
if (nextDir == Directions.down):
self.setTourNumber(x * 2 + 1, y * 2 + 1, number)
number = number + 1
dir = nextDir
if nextDir == Directions.right:
x += 1
elif nextDir == Directions.left:
x -= 1
if nextDir == Directions.down:
y += 1
if nextDir == Directions.up:
y -= 1
class Directions(Enum):
up = [-1, 0]
down = [1, 0]
left = [0, -1]
right = [0, 1]
none = [-100, -100]
class Snake:
def __init__(self, tail):
# head is first element of tail
self.tail = tail
class Board:
def __init__(self, height, width):
headrow = floor(height / 2)
headcol = floor(width / 2)
tail = [[headrow, headcol]]
self.snake = Snake(tail)
self.height = height
self.width = width
        self.board = [[0 for _ in range(width)] for _ in range(height)]
self.board[headrow][headcol] = 1
for point in tail:
self.board[point[0]][point[1]] = 1
self.apple = self.generate_apple()
def move_snake(self, dir):
prev_head_row, prev_head_col = self.snake.tail[0]
row = prev_head_row + dir.value[0]
col = prev_head_col + dir.value[1]
if not self.check_colisions(row, col):
self.snake.tail.insert(0, [row, col])
if self.board[row][col] == 2:
if self.generate_apple()[0] == -1:
print("You WON !!!")
return 1
else:
deletedpoint = self.snake.tail.pop()
self.board[deletedpoint[0]][deletedpoint[1]] = 0
self.board[row][col] = 3
if len(self.snake.tail) > 1:
self.board[prev_head_row][prev_head_col] = 1
return 0
return -1
def check_colisions(self, row, col):
if row >= self.height or row < 0:
return True
if col >= self.width or col < 0:
return True
if [row, col] in self.snake.tail:
return True
return False
def find_empty(self):
emptyspots = []
for row, sublist in enumerate(self.board):
for col, value in enumerate(sublist):
if value == 0:
emptyspots.append([row, col])
return emptyspots
def generate_apple(self):
emptyspots = self.find_empty()
# End game condition
if len(emptyspots) == 0:
return -1, -1
chosenspot = random.choice(emptyspots)
applerow = chosenspot[0]
applecol = chosenspot[1]
self.board[applerow][applecol] = 2
self.apple = (applerow, applecol)
return applerow, applecol
class Gui:
BACKGROUND_COLOR = (0, 0, 0)
SNAKE_COLOR = (255, 255, 255)
APPLE_COLOR = (255, 0, 0)
HEAD_COLOR = (0, 255, 0)
def __init__(self, width, height, board, maze):
# Create first window
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption("Snake")
self.size = (width, height)
self.screen = screen
self.board = board
self.maze = maze
def paint_board(self):
for row in range(self.board.height):
for col in range(self.board.width):
#if row + self.maze.ARENA_WIDTH * col < self.maze.ARENA_SIZE:
#self.paint_number(row, col, self.maze.getPathNumber(row, col))
if (self.board.board[row][col] == 1):
self.paint_rectangle(row, col, self.SNAKE_COLOR)
elif (self.board.board[row][col] == 2):
self.paint_rectangle(row, col, self.APPLE_COLOR)
elif (self.board.board[row][col] == 3):
self.paint_rectangle(row, col, self.HEAD_COLOR)
def paint_rectangle(self, row, col, color):
width, height = self.size
rectcol = floor(width / self.board.width * col)
rectrow = floor(height / self.board.height * row)
rectwidth = floor(width / self.board.width) - 2
rectheight = floor(height / self.board.height) - 2
rect = pygame.Rect(rectcol, rectrow, rectwidth, rectheight)
pygame.draw.rect(self.screen, color, rect)
def paint_number(self, row, col, number):
width, height = self.size
font = pygame.font.SysFont("ComicSans", height // self.board.height - 4)
number = font.render(str(number), 1, (255, 255, 255))
self.screen.blit(number, ((col * (width // self.board.width)) + (width // 4 // self.board.width),
(row * (height // self.board.height)) + (height // 4 // self.board.height)))
class AI():
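    # Follow the precomputed Hamiltonian cycle: move to the neighbouring cell
    # whose tour number is exactly one more than the head's. The hard-coded 99
    # below is ARENA_SIZE - 1 for the 10x10 arena, i.e. the last cell of the
    # tour, from which the snake wraps back to cell 0.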
def get_next_dir(self, board, maze):
headrow, headcol = board.snake.tail[0]
path_number = maze.getPathNumber(headrow, headcol)
dir_to_zero = Directions.none
if not board.check_colisions(headrow + 1, headcol):
next_path_number = maze.getPathNumber(headrow + 1, headcol)
if next_path_number == 0:
dir_to_zero = Directions.down
if next_path_number - 1 == path_number:
return Directions.down
if not board.check_colisions(headrow - 1, headcol):
next_path_number = maze.getPathNumber(headrow - 1, headcol)
if next_path_number == 0:
dir_to_zero = Directions.up
if next_path_number - 1 == path_number:
return Directions.up
if not board.check_colisions(headrow, headcol + 1):
next_path_number = maze.getPathNumber(headrow, headcol + 1)
if next_path_number == 0:
dir_to_zero = Directions.right
if next_path_number - 1 == path_number:
return Directions.right
if not board.check_colisions(headrow, headcol - 1):
next_path_number = maze.getPathNumber(headrow, headcol - 1)
if next_path_number == 0:
dir_to_zero = Directions.left
if next_path_number - 1 == path_number:
return Directions.left
if path_number == 99:
return dir_to_zero
return Directions.none
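    # Shortcut variant: while the snake is still short (< half the arena), it
    # may cut across the cycle towards the apple, but only to cells whose tour
    # number does not lie between the tail's and the head's numbers (which
    # would risk colliding with the body), preferring the move that skips the
    # most steps towards the apple.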
def get_next_dir_upgraded(self, board, maze):
headrow, headcol = board.snake.tail[0]
lastrow, lastcol = board.snake.tail[len(board.snake.tail) - 1]
applerow, applecol = board.apple
head_path_number = maze.getPathNumber(headrow, headcol)
apple_path_number = maze.getPathNumber(applerow, applecol)
last_path_number = maze.getPathNumber(lastrow, lastcol)
current_dist_to_apple = maze.calc_dist(apple_path_number, head_path_number)
steps_skipped = 0
dir = self.get_next_dir(board, maze)
if len(board.snake.tail) < maze.ARENA_SIZE / 2:
if not board.check_colisions(headrow + 1, headcol):
next_path_number = maze.getPathNumber(headrow + 1, headcol)
if (next_path_number < last_path_number and next_path_number < head_path_number) or (next_path_number > last_path_number and next_path_number > head_path_number):
next_dist_to_apple = maze.calc_dist(apple_path_number, next_path_number)
if steps_skipped < current_dist_to_apple - next_dist_to_apple:
steps_skipped = current_dist_to_apple - next_dist_to_apple
dir = Directions.down
if not board.check_colisions(headrow - 1, headcol):
next_path_number = maze.getPathNumber(headrow - 1, headcol)
if (next_path_number < last_path_number and next_path_number < head_path_number) or (next_path_number > last_path_number and next_path_number > head_path_number):
next_dist_to_apple = maze.calc_dist(apple_path_number, next_path_number)
if steps_skipped < current_dist_to_apple - next_dist_to_apple:
steps_skipped = current_dist_to_apple - next_dist_to_apple
dir = Directions.up
if not board.check_colisions(headrow, headcol + 1):
next_path_number = maze.getPathNumber(headrow, headcol + 1)
if (next_path_number < last_path_number and next_path_number < head_path_number) or (next_path_number > last_path_number and next_path_number > head_path_number):
next_dist_to_apple = maze.calc_dist(apple_path_number, next_path_number)
if steps_skipped < current_dist_to_apple - next_dist_to_apple:
steps_skipped = current_dist_to_apple - next_dist_to_apple
dir = Directions.right
if not board.check_colisions(headrow, headcol - 1):
next_path_number = maze.getPathNumber(headrow, headcol - 1)
if (next_path_number < last_path_number and next_path_number < head_path_number) or (next_path_number > last_path_number and next_path_number > head_path_number):
next_dist_to_apple = maze.calc_dist(apple_path_number, next_path_number)
if steps_skipped < current_dist_to_apple - next_dist_to_apple:
dir = Directions.left
if dir == Directions.none:
if not board.check_colisions(headrow + 1, headcol):
return Directions.down
if not board.check_colisions(headrow - 1, headcol):
return Directions.up
if not board.check_colisions(headrow, headcol + 1):
return Directions.right
if not board.check_colisions(headrow, headcol - 1):
return Directions.left
return dir
def win_game():
messagebox.showinfo("Ok", "You won!")
def main():
maze = Maze()
maze.generate()
ai = AI()
    # size of the board (how long the snake can be)
board = Board(maze.ARENA_HEIGHT, maze.ARENA_WIDTH)
# size of screen in pixels
gui = Gui(1200, 800, board, maze)
# game loop termination condition
running = True
# lose condition tracker
lose = 0
# game clock
clock = pygame.time.Clock()
    # an event is added to the event queue every `time` milliseconds;
    # this event is used to auto-move the snake
AUTOMOVEEVENT = pygame.USEREVENT
time = 250
pygame.time.set_timer(AUTOMOVEEVENT, time)
# direction where snake will move on its own
lastknowndirection = Directions.left
pause = False
while (running):
gui.screen.fill(Gui.BACKGROUND_COLOR)
gui.paint_board()
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
# if event.type == AUTOMOVEEVENT:
# lose = gui.board.move_snake(lastknowndirection)
# if event.type == pygame.KEYDOWN:
# if event.key == pygame.K_UP:
# lose = gui.board.move_snake(Directions.up)
# lastknowndirection = Directions.up
# elif event.key == pygame.K_DOWN:
# lose = gui.board.move_snake(Directions.down)
# lastknowndirection = Directions.down
# elif event.key == pygame.K_LEFT:
# lose = gui.board.move_snake(Directions.left)
# lastknowndirection = Directions.left
# elif event.key == pygame.K_RIGHT:
# lose = gui.board.move_snake(Directions.right)
# lastknowndirection = Directions.right
dir = ai.get_next_dir_upgraded(board, maze)
lose = gui.board.move_snake(dir)
lastknowndirection = dir
if lose == -1:
            board = Board(maze.ARENA_HEIGHT, maze.ARENA_WIDTH)
gui = Gui(1200, 800, board, maze)
lose = 0
lastknowndirection = Directions.left
elif lose == 1:
win_game()
pygame.display.update()
        # cap the game loop at 30 FPS
clock.tick(30)
if __name__ == '__main__':
pygame.init()
main()
|
#!/usr/bin/python3
# functions.py by Bill Weinman [http://bw.org/]
# This is an exercise file from Python 3 Essential Training on lynda.com
# Copyright 2010 The BearHeart Group, LLC
def main():
testfunc(3,5,6,one =1, two = 2, four = 42)
# keyword arguments are also optional and are collected into the kwargs dictionary
def testfunc(this,that,another,**kwargs):
print(this,that,another,kwargs['one'],kwargs['two'],kwargs['four'])
for k in kwargs:
print(k, kwargs[k])
if __name__ == "__main__": main()
|
from kfp.components import OutputPath
def download(dataset_path: OutputPath(str)):
import json
import os
import tempfile
import zipfile
import requests
from tqdm import tqdm
# Download GloVe
print("Downloading glove")
GLOVE_DIR = dataset_path + "/data/glove"
os.makedirs(GLOVE_DIR, exist_ok=True)
r = requests.get("http://nlp.stanford.edu/data/glove.6B.zip", stream=True)
total_size_in_bytes = int(r.headers.get("content-length", 0))
progress_bar = tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True)
with tempfile.TemporaryFile() as tf:
for chunk in r.iter_content(chunk_size=1024):
progress_bar.update(len(chunk))
tf.write(chunk)
with zipfile.ZipFile(tf, "r") as f:
f.extractall(GLOVE_DIR)
progress_bar.close()
if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
print("ERROR, something went wrong")
print("Finished downloading glove")
# Download SQuAD
SQUAD_DIR = dataset_path + "/data/squad"
os.makedirs(SQUAD_DIR, exist_ok=True)
r_train = requests.get(
"https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json"
)
squad_train_json = json.loads(r_train.text)
with open(SQUAD_DIR + "/train-v1.1.json", "w") as f:
json.dump(squad_train_json, f)
print(os.listdir(SQUAD_DIR))
r_dev = requests.get(
"https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json"
)
squad_dev_json = json.loads(r_dev.text)
with open(SQUAD_DIR + "/dev-v1.1.json", "w") as f:
json.dump(squad_dev_json, f)
print(os.listdir(SQUAD_DIR))
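# A minimal sketch (assumption, not part of the original file): with the KFP v1
# SDK, the function above can be wrapped as a reusable pipeline component,
# installing its runtime dependencies into the container. The base image and
# package pins below are illustrative only.
from kfp.components import create_component_from_func

download_op = create_component_from_func(
    download,
    base_image="python:3.9",
    packages_to_install=["requests", "tqdm"],
)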
|
length = 33
iter_ = (x for x in range(length))  # generator yielding 0 .. length-1
c = 0
while 1:
try :
print(next(iter_))
except StopIteration :
break
howkteam = {'name' : 'Kteam', 'kter':69}
# list_values = list(howkteam.items())
# print(list_values)
# print(list_values[0])
# print(list_values[1])
for key, value in howkteam.items():
print(key,'=>', value)
# if (key ="Kteam")
# break
s = 'How Kteam'
for ch in s:
if ch == ' ':
break
else :
print(ch)
for k in (1,2,3) :
print(k)
if k%2 == 0 :
break
else :
print('Done!')
lst = [5,(1,2,2), {'abc', 'xyz'}]
for i in range(len(lst)) :
print(i)
lst = []
for a, b, c in [('how', 'kteam','Education'), ('chia', 'se', 'FREE')]:
a = a.capitalize()
b = b.upper()
c = c.lower()
lst.append('--'.join((a, b + c)))
print(lst)
student_list = ['Long', 'Giau', 'Trung', 'Thanh']
for idx, student in enumerate(student_list, 9):
print(idx, '=>', student)
# working with functions
kter ='kter'
def kteam(age, text = kter) :
print(age)
print(text)
kteam(10,'Hello Kteam')
#Function
def f(kteam= []) :
kteam.append('F')
print(kteam)
f()
f()
f()
f()
#
# def kteam(a,b,c,d):
# pass #
# kteam(3,'Free Eduction', d=1, c=5)
# print(stored([3,4,1], reverse = True))
def Teo(a, b=2, c=3, d =4):
f = (a + d) * (b+c)
print(f)
Teo(2,4,5,6)
def kteam1(k,t,e,*,r='Kter'):
print(k)
print(t,e)
print('end', r)
lst = ['123', 'Kteam', 69.96]
# kteam1(lst[0], lst[1], lst[2], lst[3])
kteam1(*lst, r= 'K9')
# tuples: *args collects extra positional arguments
def kteam2(*args, kter):
# print(args)
# print(type(args))
print(kter)
# kteam2('Kteam', 69.96, 'Henry')
kteam2(*(x for x in range(70)), kter= 'a hi hi')
# dict: unpack and print the keys
def kteam2(a, b):
print(a)
print(b)
dic = {'name' : 'kteam', 'member':69}
kteam2(*dic)
# dict: unpack and print the values
def kteam2(name, member):
print('name =>', name)
print('member =>', member)
dic = {'name' : 'kteam', 'member':69}
kteam2(**dic)
# alternative: print key/value pairs with a for loop
# def kteam3(*kwargs):
# for key, value in kwargs.items():
# print(key, "=>" , value)
# kteam3(name='Kteam', member= 69)
def make_global() :
global x
x = 1
def local():
x = 5
print('x in local', x)
make_global()
print(x)
local()
print()
|
from .accounts import AccountTests
from .collections import CollectionsTests
from .discover import DiscoverTests
from .feed import FeedTests
from .friendships import FriendshipTests
from .live import LiveTests
from .locations import LocationTests
from .media import MediaTests
from .misc import MiscTests
from .tags import TagsTests
from .upload import UploadTests
from .users import UsersTests
from .usertags import UsertagsTests
from .highlights import HighlightsTests
from .igtv import IGTVTests
from .apiutils import ApiUtilsTests
from .client import ClientTests
from .compatpatch import CompatPatchTests
from .common import (
Client,
ClientError,
ClientLoginError,
ClientCookieExpiredError,
__version__,
to_json,
from_json,
)
|
#!/usr/bin/python3
"""
Lists all states from the database hbtn_0e_0_usa where name matches
with the given argument.
"""
import MySQLdb
from sys import argv
if __name__ == '__main__':
try:
db = MySQLdb.connect(
host="localhost",
port=3306,
user=argv[1],
passwd=argv[2],
db=argv[3],
charset="utf8"
)
cursor = db.cursor()
cursor.execute(
"SELECT * FROM states WHERE name LIKE '{:s}' ORDER BY id ASC"
.format(argv[4])
)
rows = cursor.fetchall()
for row in rows:
if row[1] == argv[4]:
print(row)
cursor.close()
db.close()
except Exception as e:
print("Error: {}".format(e))
|
# Generated by Django 2.1.2 on 2018-11-24 15:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogs', '0002_comment_client_commit'),
]
operations = [
migrations.RemoveField(
model_name='comment',
name='commit',
),
migrations.AddField(
model_name='comment',
name='r_commit',
field=models.CharField(blank=True, help_text='Ваш коментар', max_length=256, null=True, verbose_name='Відгук / коментар'),
),
]
|
from django.urls import path
from bye import views
urlpatterns = [
path('index/', views.index),
path('byestu/', views.bye_student),
path('getstu/', views.get_student),
    path('updatestu/', views.update_student),  # root url, include sub-level routes
path('deletestu/', views.delete_student),
]
|
import xml.etree.ElementTree as ET
filename = '{{ .VARIABLE }}'
tree = ET.parse(filename)
root = tree.getroot()
for child in root:
print(child.tag, child.attrib)
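# To walk the entire tree rather than only the root's direct children, iter()
# can be used instead (sketch; the 'item' tag name is hypothetical):
#     for node in root.iter('item'):
#         print(node.tag, node.attrib)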
|
from jsonschema import Draft7Validator
from utils import *
SCHEMA_REMISE_DOMAINE = {
TYPE: OBJECT,
PROPERTIES: {
"trigrammeSIP": STRING_TYPE,
"idCorrelation": STRING_TYPE,
"idUtilisateurRemettant": STRING_TYPE,
"idEntiteRemettante": INTEGER_TYPE,
"idTypeBien": STRING_TYPE,
"idCategorie": STRING_TYPE,
"idSousCategorie": STRING_TYPE,
"nature": {
TYPE: OBJECT,
PROPERTIES: {
"autre": BOOLEAN_TYPE,
"scelle": BOOLEAN_TYPE,
"ot": BOOLEAN_TYPE,
"heberge": BOOLEAN_TYPE,
},
REQUIRED: ["autre", "scelle", "ot", "heberge"],
ADDITIONAL_PROPERTIES: False
},
"fourriereAdministrative": BOOLEAN_TYPE,
"description": STRING_TYPE,
"ministere": STRING_TYPE,
"compteBudgetaire": STRING_TYPE,
"programme": STRING_TYPE,
"fondsConcours": STRING_TYPE,
"marque": STRING_TYPE,
"modele": STRING_TYPE,
"genre": STRING_TYPE,
"carrosserie": STRING_TYPE,
"numeroImmatriculation": STRING_TYPE,
"numeroSerieVin": STRING_TYPE,
"typeMineCnit": STRING_TYPE,
"dateCirculation": DATE_TIME_TYPE,
"dateCT": DATE_TIME_TYPE,
"kilometrage": INTEGER_TYPE,
"compteurChange": BOOLEAN_TYPE,
"commentaireCompteur": STRING_TYPE,
"bva": BOOLEAN_TYPE,
"typeEnergie": STRING_TYPE,
"nombrePlace": INTEGER_TYPE,
"puissanceFiscale": STRING_TYPE,
"controleTechnique": BOOLEAN_TYPE,
"reimmatriculable": BOOLEAN_TYPE,
"certificatImmatriculation": BOOLEAN_TYPE,
"dateFourriere": DATE_TIME_TYPE,
"dateAbandon": DATE_TIME_TYPE,
"idLieuDepot": INTEGER_TYPE,
},
REQUIRED: [
"trigrammeSIP",
"idCorrelation",
"idUtilisateurRemettant",
"idEntiteRemettante",
"idTypeBien",
"idCategorie",
"idSousCategorie",
"nature",
"fourriereAdministrative",
"description",
"ministere",
"compteBudgetaire",
"marque",
"genre",
"carrosserie",
"numeroImmatriculation",
"numeroSerieVin",
"typeMineCnit",
"dateCirculation",
"typeEnergie",
"nombrePlace",
"puissanceFiscale",
"dateFourriere",
"dateAbandon",
"idLieuDepot",
],
ADDITIONAL_PROPERTIES: False
}
Draft7Validator.check_schema(SCHEMA_REMISE_DOMAINE)
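# Illustrative usage sketch (an assumption, not part of the original module):
# collect every violation of the schema for an incoming payload instead of
# stopping at the first error.
def validation_errors(payload: dict) -> list:
    validator = Draft7Validator(SCHEMA_REMISE_DOMAINE)
    # iter_errors yields one ValidationError per violated constraint.
    return [error.message for error in validator.iter_errors(payload)]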
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import itertools
import json
import logging
import os.path
from abc import ABC
from dataclasses import dataclass, field
from typing import Any, ClassVar, Iterable, Mapping, Optional, Tuple
import yaml
from typing_extensions import Literal
from pants.backend.project_info import dependencies
from pants.base.glob_match_error_behavior import GlobMatchErrorBehavior
from pants.base.specs import AncestorGlobSpec, RawSpecs
from pants.build_graph.address import Address
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.core.goals.package import OutputPathField
from pants.core.target_types import (
TargetGeneratorSourcesHelperSourcesField,
TargetGeneratorSourcesHelperTarget,
)
from pants.core.util_rules import stripped_source_files
from pants.engine import fs
from pants.engine.collection import Collection, DeduplicatedCollection
from pants.engine.fs import (
CreateDigest,
DigestContents,
FileContent,
GlobExpansionConjunction,
PathGlobs,
)
from pants.engine.internals import graph
from pants.engine.internals.graph import (
ResolveAllTargetGeneratorRequests,
ResolvedTargetGeneratorRequests,
)
from pants.engine.internals.native_engine import Digest, Snapshot
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.rules import Rule, collect_rules, rule
from pants.engine.target import (
COMMON_TARGET_FIELDS,
AllTargets,
Dependencies,
DependenciesRequest,
GeneratedTargets,
GenerateTargetsRequest,
InvalidFieldException,
ScalarField,
SequenceField,
SingleSourceField,
SourcesField,
StringField,
StringSequenceField,
Target,
TargetGenerator,
Targets,
)
from pants.engine.unions import UnionMembership, UnionRule
from pants.option.global_options import UnmatchedBuildFileGlobs
from pants.util.frozendict import FrozenDict
from pants.util.strutil import help_text, softwrap
_logger = logging.getLogger(__name__)
class NodePackageDependenciesField(Dependencies):
pass
class PackageJsonSourceField(SingleSourceField):
default = "package.json"
required = False
class NodeScript(ABC):
entry_point: str
alias: ClassVar[str]
@dataclass(frozen=True)
class NodeBuildScript(NodeScript):
entry_point: str
output_directories: tuple[str, ...] = ()
output_files: tuple[str, ...] = ()
extra_caches: tuple[str, ...] = ()
extra_env_vars: tuple[str, ...] = ()
alias: ClassVar[str] = "node_build_script"
@classmethod
def create(
cls,
entry_point: str,
output_directories: Iterable[str] = (),
output_files: Iterable[str] = (),
extra_caches: Iterable[str] = (),
extra_env_vars: Iterable[str] = (),
) -> NodeBuildScript:
"""A build script, mapped from the `scripts` section of a package.json file.
Either the `output_directories` or the `output_files` argument has to be set to capture the
output artifacts of the build.
"""
return cls(
entry_point=entry_point,
output_directories=tuple(output_directories),
output_files=tuple(output_files),
extra_caches=tuple(extra_caches),
extra_env_vars=tuple(extra_env_vars),
)
@dataclass(frozen=True)
class NodeTestScript(NodeScript):
entry_point: str = "test"
report_args: tuple[str, ...] = ()
report_output_files: tuple[str, ...] = ()
report_output_directories: tuple[str, ...] = ()
coverage_args: tuple[str, ...] = ()
coverage_output_files: tuple[str, ...] = ()
coverage_output_directories: tuple[str, ...] = ()
coverage_entry_point: str | None = None
extra_caches: tuple[str, ...] = ()
alias: ClassVar[str] = "node_test_script"
def __str__(self) -> str:
return f'{self.alias}(entry_point="{self.entry_point}", ...)'
@classmethod
def create(
cls,
entry_point: str = "test",
report_args: Iterable[str] = (),
report_output_files: Iterable[str] = (),
report_output_directories: Iterable[str] = (),
coverage_args: Iterable[str] = (),
coverage_output_files: Iterable[str] = (),
coverage_output_directories: Iterable[str] = (),
coverage_entry_point: str | None = None,
) -> NodeTestScript:
"""The test script for this package, mapped from the `scripts` section of a package.json
        file. The script pointed to should accept a variadic number of ([ARG]...) path arguments.
This entry point is the "test" script, by default.
"""
return cls(
entry_point=entry_point,
report_args=tuple(report_args),
report_output_files=tuple(report_output_files),
report_output_directories=tuple(report_output_directories),
coverage_args=tuple(coverage_args),
coverage_output_files=tuple(coverage_output_files),
coverage_output_directories=tuple(coverage_output_directories),
coverage_entry_point=coverage_entry_point,
)
def supports_coverage(self) -> bool:
return bool(self.coverage_entry_point) or bool(self.coverage_args)
def coverage_globs(self, working_directory: str) -> PathGlobs:
return self.coverage_globs_for(
working_directory,
self.coverage_output_files,
self.coverage_output_directories,
GlobMatchErrorBehavior.ignore,
)
@classmethod
def coverage_globs_for(
cls,
working_directory: str,
files: tuple[str, ...],
directories: tuple[str, ...],
error_behaviour: GlobMatchErrorBehavior,
conjunction: GlobExpansionConjunction = GlobExpansionConjunction.any_match,
description_of_origin: str | None = None,
) -> PathGlobs:
dir_globs = (os.path.join(directory, "*") for directory in directories)
return PathGlobs(
(os.path.join(working_directory, glob) for glob in itertools.chain(files, dir_globs)),
conjunction=conjunction,
glob_match_error_behavior=error_behaviour,
description_of_origin=description_of_origin,
)
class NodePackageScriptsField(SequenceField[NodeScript]):
alias = "scripts"
expected_element_type = NodeScript
help = help_text(
"""
Custom node package manager scripts that should be known
        and run as part of relevant goals.
Maps the package.json#scripts section to a cacheable pants invocation.
"""
)
expected_type_description = (
'[node_build_script(entry_point="build", output_directories=["./dist/"], ...])'
)
default = ()
@classmethod
def compute_value(
cls, raw_value: Optional[Iterable[Any]], address: Address
) -> Optional[Tuple[NodeScript, ...]]:
values = super().compute_value(raw_value, address)
test_scripts = [value for value in values or () if isinstance(value, NodeTestScript)]
if len(test_scripts) > 1:
entry_points = ", ".join(str(script) for script in test_scripts)
raise InvalidFieldException(
softwrap(
f"""
You can only specify one `{NodeTestScript.alias}` per `{PackageJsonTarget.alias}`,
but the {cls.alias} contains {entry_points}.
"""
)
)
return values
def build_scripts(self) -> Iterable[NodeBuildScript]:
for script in self.value or ():
if isinstance(script, NodeBuildScript):
yield script
def get_test_script(self) -> NodeTestScript:
for script in self.value or ():
if isinstance(script, NodeTestScript):
return script
return NodeTestScript()
class NodePackageTestScriptField(ScalarField[NodeTestScript]):
alias = "_node_test_script"
expected_type = NodeTestScript
expected_type_description = (
'node_test_script(entry_point="test", coverage_args="--coverage=true")'
)
default = NodeTestScript()
value: NodeTestScript
class NodePackageVersionField(StringField):
alias = "version"
help = help_text(
"""
Version of the Node package, as specified in the package.json.
This field should not be overridden; use the value from target generation.
"""
)
required = False
value: str | None
class NodeThirdPartyPackageVersionField(NodePackageVersionField):
alias = "version"
help = help_text(
"""
Version of the Node package, as specified in the package.json.
This field should not be overridden; use the value from target generation.
"""
)
required = True
value: str
class NodePackageNameField(StringField):
alias = "package"
help = help_text(
"""
Name of the Node package, as specified in the package.json.
This field should not be overridden; use the value from target generation.
"""
)
required = True
value: str
class NodeThirdPartyPackageNameField(NodePackageNameField):
pass
class NodeThirdPartyPackageDependenciesField(Dependencies):
pass
class NodeThirdPartyPackageTarget(Target):
alias = "node_third_party_package"
help = "A third party node package."
core_fields = (
*COMMON_TARGET_FIELDS,
NodeThirdPartyPackageNameField,
NodeThirdPartyPackageVersionField,
NodeThirdPartyPackageDependenciesField,
)
class NodePackageTarget(Target):
alias = "node_package"
help = "A first party node package."
core_fields = (
*COMMON_TARGET_FIELDS,
PackageJsonSourceField,
NodePackageNameField,
NodePackageVersionField,
NodePackageDependenciesField,
NodePackageTestScriptField,
)
class NPMDistributionTarget(Target):
alias = "npm_distribution"
help = help_text(
"""
A publishable npm registry distribution, typically a gzipped tarball
of the sources and any resources, but omitting the lockfile.
        Generated using the project's package manager `pack` implementation.
"""
)
core_fields = (
*COMMON_TARGET_FIELDS,
PackageJsonSourceField,
OutputPathField,
)
class PackageJsonTarget(TargetGenerator):
alias = "package_json"
core_fields = (
*COMMON_TARGET_FIELDS,
PackageJsonSourceField,
NodePackageScriptsField,
)
help = help_text(
f"""
A package.json file describing a nodejs package. (https://nodejs.org/api/packages.html#introduction)
Generates a `{NodePackageTarget.alias}` target for the package.
Generates `{NodeThirdPartyPackageTarget.alias}` targets for each specified
3rd party dependency (e.g. in the package.json#devDependencies field).
"""
)
copied_fields = COMMON_TARGET_FIELDS
moved_fields = (NodePackageDependenciesField,)
class NodeBuildScriptEntryPointField(StringField):
alias = "entry_point"
required = True
value: str
class NodeBuildScriptSourcesField(SourcesField):
alias = "_sources"
required = False
default = None
help = "Marker field for node_build_scripts used in export-codegen."
class NodeBuildScriptOutputFilesField(StringSequenceField):
alias = "output_files"
required = False
default = ()
help = help_text(
"""
Specify the build script's output files to capture, relative to the package.json.
For directories, use `output_directories`. At least one of `output_files` and
`output_directories` must be specified.
Relative paths (including `..`) may be used, as long as the path does not ascend further
than the package.json parent directory.
"""
)
class NodeBuildScriptOutputDirectoriesField(StringSequenceField):
alias = "output_directories"
required = False
default = ()
help = help_text(
"""
Specify full directories (including recursive descendants) of output to capture from the
build script, relative to the package.json.
For individual files, use `output_files`. At least one of `output_files` and
`output_directories` must be specified.
Relative paths (including `..`) may be used, as long as the path does not ascend further
than the package.json parent directory.
"""
)
class NodeBuildScriptExtraEnvVarsField(StringSequenceField):
alias = "extra_env_vars"
required = False
default = ()
help = help_text(
"""
        Additional environment variables to include in the environment when running a build script process.
Entries are strings in the form `ENV_VAR=value` to use explicitly; or just
`ENV_VAR` to copy the value of a variable in Pants's own environment.
"""
)
class NodeBuildScriptExtraCaches(StringSequenceField):
alias = "extra_caches"
required = False
default = ()
help = help_text(
f"""
Specify directories that pants should treat as caches for the build script.
These directories will not be available as sources, but are available to
subsequent executions of the build script.
Example usage:
# BUILD
{PackageJsonTarget.alias}(
scripts={NodeBuildScript.alias}(
entry_point="build",
output_directories=["dist"],
extra_caches=[".parcel-cache"],
)
)
# package.json
{{
...
"scripts": {{
"build": "parcel build --dist-dir=dist --cache-dir=.parcel-cache"
...
}}
...
}}
"""
)
class NodeBuildScriptTarget(Target):
core_fields = (
*COMMON_TARGET_FIELDS,
NodeBuildScriptEntryPointField,
NodeBuildScriptOutputDirectoriesField,
NodeBuildScriptOutputFilesField,
NodeBuildScriptSourcesField,
NodeBuildScriptExtraCaches,
NodeBuildScriptExtraEnvVarsField,
NodePackageDependenciesField,
OutputPathField,
)
alias = "_node_build_script"
help = help_text(
"""
A package.json script that is invoked by the configured package manager
to produce `resource` targets or a packaged artifact.
"""
)
@dataclass(frozen=True)
class PackageJsonImports:
"""https://nodejs.org/api/packages.html#subpath-imports."""
imports: FrozenDict[str, tuple[str, ...]]
root_dir: str
@classmethod
def from_package_json(cls, pkg_json: PackageJson) -> PackageJsonImports:
return cls(
imports=cls._import_from_package_json(pkg_json),
root_dir=pkg_json.root_dir,
)
@staticmethod
def _import_from_package_json(
pkg_json: PackageJson,
) -> FrozenDict[str, tuple[str, ...]]:
imports: Mapping[str, Any] | None = pkg_json.content.get("imports")
def get_subpaths(value: str | Mapping[str, Any]) -> Iterable[str]:
if isinstance(value, str):
yield value
elif isinstance(value, Mapping):
for v in value.values():
yield from get_subpaths(v)
if not imports:
return FrozenDict()
return FrozenDict(
{key: tuple(sorted(get_subpaths(subpath))) for key, subpath in imports.items()}
)
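    # Example (illustrative): a package.json containing
    #   "imports": {"#lib/*": {"node": "./src/lib/*.js", "default": "./dist/lib/*.js"}}
    # is flattened to {"#lib/*": ("./dist/lib/*.js", "./src/lib/*.js")}, i.e. one
    # sorted tuple of all nested subpath targets per import key.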
@dataclass(frozen=True)
class PackageJsonEntryPoints:
"""See https://nodejs.org/api/packages.html#package-entry-points and
https://docs.npmjs.com/cli/v9/configuring-npm/package-json#browser."""
exports: FrozenDict[str, str]
bin: FrozenDict[str, str]
root_dir: str
@property
def globs(self) -> Iterable[str]:
for export in self.exports.values():
yield export.replace("*", "**/*")
yield from self.bin.values()
def globs_from_root(self) -> Iterable[str]:
for path in self.globs:
yield os.path.normpath(os.path.join(self.root_dir, path))
@classmethod
def from_package_json(cls, pkg_json: PackageJson) -> PackageJsonEntryPoints:
return cls(
exports=cls._exports_form_package_json(pkg_json),
bin=cls._binaries_from_package_json(pkg_json),
root_dir=pkg_json.root_dir,
)
@staticmethod
def _exports_form_package_json(pkg_json: PackageJson) -> FrozenDict[str, str]:
content = pkg_json.content
exports: str | Mapping[str, str] | None = content.get("exports")
main: str | None = content.get("main")
browser: str | None = content.get("browser")
source: str | None = content.get("source")
if exports:
if isinstance(exports, str):
return FrozenDict({".": exports})
else:
return FrozenDict(exports)
elif browser:
return FrozenDict({".": browser})
elif main:
return FrozenDict({".": main})
elif source:
return FrozenDict({".": source})
return FrozenDict()
@staticmethod
def _binaries_from_package_json(pkg_json: PackageJson) -> FrozenDict[str, str]:
binaries: str | Mapping[str, str] | None = pkg_json.content.get("bin")
if binaries:
if isinstance(binaries, str):
return FrozenDict({pkg_json.name: binaries})
else:
return FrozenDict(binaries)
return FrozenDict()
@dataclass(frozen=True)
class PackageJsonScripts:
scripts: FrozenDict[str, str]
@classmethod
def from_package_json(cls, pkg_json: PackageJson) -> PackageJsonScripts:
return cls(FrozenDict.deep_freeze(pkg_json.content.get("scripts", {})))
@dataclass(frozen=True)
class PackageJson:
content: FrozenDict[str, Any]
name: str
version: str | None
snapshot: Snapshot
workspaces: tuple[str, ...] = ()
module: Literal["commonjs", "module"] | None = None
dependencies: FrozenDict[str, str] = field(default_factory=FrozenDict)
package_manager: str | None = None
def __post_init__(self) -> None:
if self.module not in (None, "commonjs", "module"):
raise ValueError(
f'package.json "type" can only be one of "commonjs", "module", but was "{self.module}".'
)
@property
def digest(self) -> Digest:
return self.snapshot.digest
@property
def file(self) -> str:
return self.snapshot.files[0]
@property
def root_dir(self) -> str:
return os.path.dirname(self.file)
class FirstPartyNodePackageTargets(Targets):
pass
class AllPackageJson(Collection[PackageJson]):
pass
class PackageJsonForGlobs(Collection[PackageJson]):
pass
@rule
async def all_first_party_node_package_targets(targets: AllTargets) -> FirstPartyNodePackageTargets:
return FirstPartyNodePackageTargets(
tgt for tgt in targets if tgt.has_fields((PackageJsonSourceField, NodePackageNameField))
)
@dataclass(frozen=True)
class OwningNodePackageRequest:
address: Address
@dataclass(frozen=True)
class OwningNodePackage:
target: Target | None = None
third_party: tuple[Target, ...] = ()
@classmethod
def no_owner(cls) -> OwningNodePackage:
return cls()
def ensure_owner(self) -> Target:
if self != OwningNodePackage.no_owner():
assert self.target
return self.target
raise ValueError("No owner could be determined.")
@rule
async def find_owning_package(request: OwningNodePackageRequest) -> OwningNodePackage:
candidate_targets = await Get(
Targets,
RawSpecs(
ancestor_globs=(AncestorGlobSpec(request.address.spec_path),),
description_of_origin=f"the `{OwningNodePackage.__name__}` rule",
),
)
package_json_tgts = sorted(
(
tgt
for tgt in candidate_targets
if tgt.has_field(PackageJsonSourceField) and tgt.has_field(NodePackageNameField)
),
key=lambda tgt: tgt.address.spec_path,
reverse=True,
)
tgt = package_json_tgts[0] if package_json_tgts else None
if tgt:
deps = await Get(Targets, DependenciesRequest(tgt[Dependencies]))
return OwningNodePackage(
tgt, tuple(dep for dep in deps if dep.has_field(NodeThirdPartyPackageNameField))
)
return OwningNodePackage()
@rule
async def parse_package_json(content: FileContent) -> PackageJson:
parsed_package_json = FrozenDict.deep_freeze(json.loads(content.content))
return PackageJson(
content=parsed_package_json,
name=parsed_package_json["name"],
version=parsed_package_json.get("version"),
snapshot=await Get(Snapshot, PathGlobs([content.path])),
module=parsed_package_json.get("type"),
workspaces=tuple(parsed_package_json.get("workspaces", ())),
dependencies=FrozenDict.deep_freeze(
{
**parsed_package_json.get("dependencies", {}),
**parsed_package_json.get("devDependencies", {}),
**parsed_package_json.get("peerDependencies", {}),
}
),
package_manager=parsed_package_json.get("packageManager"),
)
@rule
async def read_package_jsons(globs: PathGlobs) -> PackageJsonForGlobs:
snapshot = await Get(Snapshot, PathGlobs, globs)
digest_contents = await Get(DigestContents, Digest, snapshot.digest)
return PackageJsonForGlobs(
await MultiGet(
Get(PackageJson, FileContent, digest_content) for digest_content in digest_contents
)
)
@rule
async def all_package_json() -> AllPackageJson:
# Avoids using `AllTargets` due to a circular rule dependency.
# `generate_node_package_targets` requires knowledge of all
# first party package names.
description_of_origin = "The `AllPackageJson` rule"
requests = await Get(
ResolvedTargetGeneratorRequests,
ResolveAllTargetGeneratorRequests(
description_of_origin=description_of_origin, of_type=PackageJsonTarget
),
)
globs = [
glob
for req in requests.requests
for glob in req.generator[PackageJsonSourceField]
.path_globs(UnmatchedBuildFileGlobs.error())
.globs
]
return AllPackageJson(
await Get(
PackageJsonForGlobs,
PathGlobs(
globs, GlobMatchErrorBehavior.error, description_of_origin=description_of_origin
),
)
)
@dataclass(frozen=True)
class PnpmWorkspaceGlobs:
packages: tuple[str, ...]
digest: Digest
class PnpmWorkspaces(FrozenDict[PackageJson, PnpmWorkspaceGlobs]):
def for_root(self, root_dir: str) -> PnpmWorkspaceGlobs | None:
for pkg, workspaces in self.items():
if pkg.root_dir == root_dir:
return workspaces
return None
@rule
async def pnpm_workspace_files(pkgs: AllPackageJson) -> PnpmWorkspaces:
snapshot = await Get(
Snapshot, PathGlobs(os.path.join(pkg.root_dir, "pnpm-workspace.yaml") for pkg in pkgs)
)
digest_contents = await Get(DigestContents, Digest, snapshot.digest)
async def parse_package_globs(content: FileContent) -> PnpmWorkspaceGlobs:
parsed = yaml.safe_load(content.content) or {"packages": ("**",)}
return PnpmWorkspaceGlobs(
tuple(parsed.get("packages", ("**",)) or ("**",)),
await Get(Digest, CreateDigest([content])),
)
globs_per_root = {
os.path.dirname(digest_content.path): await parse_package_globs(digest_content)
for digest_content in digest_contents
}
return PnpmWorkspaces(
{pkg: globs_per_root[pkg.root_dir] for pkg in pkgs if pkg.root_dir in globs_per_root}
)
class AllPackageJsonNames(DeduplicatedCollection[str]):
"""Used to not invalidate all generated node package targets when any package.json contents are
changed."""
@rule
async def all_package_json_names(all_pkg_jsons: AllPackageJson) -> AllPackageJsonNames:
return AllPackageJsonNames(pkg.name for pkg in all_pkg_jsons)
@rule
async def package_json_for_source(source_field: PackageJsonSourceField) -> PackageJson:
[pkg_json] = await Get(
PackageJsonForGlobs, PathGlobs, source_field.path_globs(UnmatchedBuildFileGlobs.error())
)
return pkg_json
@rule
async def script_entrypoints_for_source(
source_field: PackageJsonSourceField,
) -> PackageJsonEntryPoints:
return PackageJsonEntryPoints.from_package_json(
await Get(PackageJson, PackageJsonSourceField, source_field)
)
@rule
async def subpath_imports_for_source(
source_field: PackageJsonSourceField,
) -> PackageJsonImports:
return PackageJsonImports.from_package_json(
await Get(PackageJson, PackageJsonSourceField, source_field)
)
class GenerateNodePackageTargets(GenerateTargetsRequest):
generate_from = PackageJsonTarget
def _script_missing_error(entry_point: str, scripts: Iterable[str], address: Address) -> ValueError:
return ValueError(
softwrap(
f"""
{entry_point} was not found in package.json#scripts section
of the `{PackageJsonTarget.alias}` target with address {address}.
Available scripts are: {', '.join(scripts)}.
"""
)
)
@rule
async def generate_node_package_targets(
request: GenerateNodePackageTargets,
union_membership: UnionMembership,
first_party_names: AllPackageJsonNames,
) -> GeneratedTargets:
file = request.generator[PackageJsonSourceField].file_path
file_tgt = TargetGeneratorSourcesHelperTarget(
{TargetGeneratorSourcesHelperSourcesField.alias: os.path.basename(file)},
request.generator.address.create_generated(file),
union_membership,
)
pkg_json = await Get(
PackageJson, PackageJsonSourceField, request.generator[PackageJsonSourceField]
)
third_party_tgts = [
NodeThirdPartyPackageTarget(
{
**{
key: value
for key, value in request.template.items()
if key != PackageJsonSourceField.alias
},
NodeThirdPartyPackageNameField.alias: name,
NodeThirdPartyPackageVersionField.alias: version,
NodeThirdPartyPackageDependenciesField.alias: [file_tgt.address.spec],
},
request.generator.address.create_generated(name.replace("@", "__")),
union_membership,
)
for name, version in pkg_json.dependencies.items()
if name not in first_party_names
]
package_target = NodePackageTarget(
{
**request.template,
NodePackageNameField.alias: pkg_json.name,
NodePackageVersionField.alias: pkg_json.version,
NodePackageDependenciesField.alias: [
file_tgt.address.spec,
*(tgt.address.spec for tgt in third_party_tgts),
*request.template.get("dependencies", []),
],
NodePackageTestScriptField.alias: request.generator[
NodePackageScriptsField
].get_test_script(),
},
request.generator.address.create_generated(pkg_json.name.replace("@", "__")),
union_membership,
)
scripts = PackageJsonScripts.from_package_json(pkg_json).scripts
build_script_tgts = []
for build_script in request.generator[NodePackageScriptsField].build_scripts():
if build_script.entry_point in scripts:
build_script_tgts.append(
NodeBuildScriptTarget(
{
**request.template,
NodeBuildScriptEntryPointField.alias: build_script.entry_point,
NodeBuildScriptOutputDirectoriesField.alias: build_script.output_directories,
NodeBuildScriptOutputFilesField.alias: build_script.output_files,
NodeBuildScriptExtraEnvVarsField.alias: build_script.extra_env_vars,
NodeBuildScriptExtraCaches.alias: build_script.extra_caches,
NodePackageDependenciesField.alias: [
file_tgt.address.spec,
*(tgt.address.spec for tgt in third_party_tgts),
*request.template.get("dependencies", []),
package_target.address.spec,
],
},
request.generator.address.create_generated(build_script.entry_point),
union_membership,
)
)
else:
raise _script_missing_error(
build_script.entry_point, scripts, request.generator.address
)
coverage_script = package_target[NodePackageTestScriptField].value.coverage_entry_point
if coverage_script and coverage_script not in scripts:
raise _script_missing_error(coverage_script, scripts, request.generator.address)
return GeneratedTargets(
request.generator, [package_target, file_tgt, *third_party_tgts, *build_script_tgts]
)
def target_types() -> Iterable[type[Target]]:
return [PackageJsonTarget, NodePackageTarget, NodeThirdPartyPackageTarget]
def rules() -> Iterable[Rule | UnionRule]:
return [
*graph.rules(),
*dependencies.rules(),
*stripped_source_files.rules(),
*fs.rules(),
*collect_rules(),
UnionRule(GenerateTargetsRequest, GenerateNodePackageTargets),
]
def build_file_aliases() -> BuildFileAliases:
return BuildFileAliases(
objects={
NodeBuildScript.alias: NodeBuildScript.create,
NodeTestScript.alias: NodeTestScript.create,
}
)
|