import inspect
import textwrap
import shlex
from docopt import docopt
class Cmd3Command (object): pass
def command(func):
'''
A decorator to create a function with docopt arguments.
It also generates a help function
@command
def do_myfunc(self, args):
""" docopts text """
pass
will create
def do_myfunc(self, args, arguments):
""" docopts text """
...
def help_myfunc(self, args, arguments):
... prints the docopt text ...
:param func: the function for the decorator
'''
classname = inspect.getouterframes(inspect.currentframe())[1][3]
name = func.__name__
help_name = name.replace("do_", "help_")
doc = textwrap.dedent(func.__doc__)
def new(instance, args):
# instance.new.__doc__ = doc
try:
argv = shlex.split(args)
arguments = docopt(doc, help=True, argv=argv)
func(instance, args, arguments)
except SystemExit:
if args not in ('-h', '--help'):
print("Could not execute the command.")
print(doc)
new.__doc__ = doc
return new
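# Hedged usage sketch (assumption, not part of the original module): a do_*
# method wrapped by @command. The docstring is used verbatim as the docopt
# usage text, and the wrapper passes the parsed "arguments" dict through.
class ExampleShell(Cmd3Command):
    @command
    def do_greet(self, args, arguments):
        """
        Usage:
          greet NAME
          greet -h | --help
        """
        print("Hello, %s!" % arguments["NAME"])

# e.g. ExampleShell().do_greet("World") prints "Hello, World!"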
|
from collections import defaultdict
import inspect
import os
import os.path
import unittest
from typing import Dict
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.expected_conditions import presence_of_element_located
from selenium.webdriver.support.ui import WebDriverWait
class BaseTestCase(unittest.TestCase):
_shot_ids: Dict[str, int] = defaultdict(int)
@classmethod
def setUpClass(cls):
if cls is BaseTestCase:
raise unittest.SkipTest
def setUp(self):
self.httpbun_base = os.getenv("HTTPBUN_URL", "https://httpbun.com")
self.prestige_base = os.getenv("FRONTEND_URL", "https://prestigemad.com")
browser = os.environ.get("PRESTIGE_SELENIUM_BROWSER", "chrome").lower()
if browser.startswith("f"):
options = webdriver.FirefoxOptions()
options.headless = True
self.driver = webdriver.Firefox(options=options)
else:
options = webdriver.ChromeOptions()
options.headless = True
options.capabilities["goog:loggingPrefs"] = {
"browser": "ALL",
}
self.driver = webdriver.Chrome(options=options)
self.driver.set_window_size(1366, 1200)
self.wait = WebDriverWait(self.driver, 10)
self.driver.get(self.prestige_base)
def tearDown(self):
for entry in self.driver.get_log("browser"):
print(entry)
self.driver.quit()
def shot(self, title: str = None):
fn = inspect.stack()[1].function
self._shot_ids[fn] += 1
title = fn if title is None else (fn + "-" + title)
path = f"shots/{title}-{self._shot_ids[fn]}.png"
os.makedirs(os.path.dirname(path), exist_ok=True)
self.driver.save_screenshot(path)
def set_editor_content(self, content):
textarea = self.wait_for("textarea")
self.query_selector(".CodeMirror .CodeMirror-line").click()
textarea.send_keys(Keys.COMMAND, "a")
textarea.send_keys(Keys.BACKSPACE)
textarea.send_keys(content)
def editor_run(self):
textarea = self.query_selector("textarea")
textarea.send_keys(Keys.CONTROL, Keys.HOME)
textarea.send_keys(Keys.COMMAND, Keys.HOME)
textarea.send_keys(Keys.CONTROL, Keys.ENTER)
def query_selector(self, css: str):
return self.driver.find_element_by_css_selector(css)
def wait_for(self, css: str):
"""
Wait for the presence of an element by the given CSS selector, and return the element.
"""
self.wait.until(presence_of_element_located((By.CSS_SELECTOR, css)))
return self.query_selector(css)
class SearchText(BaseTestCase):
def test_get_200(self):
self.wait.until(presence_of_element_located((By.CSS_SELECTOR, "header h1")))
print(self.driver.find_element_by_css_selector("header h1 + div").text)
self.set_editor_content("GET " + self.httpbun_base + "/get\n\n###\n\n")
self.editor_run()
status_el = self.wait_for(".result-pane .status")
self.shot()
assert status_el.text == "200 Ok"
if __name__ == '__main__':
unittest.main()
|
import logging
import sys
from configargparse import ArgParser
from mowgli_etl.cli.commands.augment_cskg_release_command import AugmentCskgReleaseCommand
from mowgli_etl.cli.commands.drive_upload_command import DriveUploadCommand
from mowgli_etl.cli.commands.etl_command import EtlCommand
try:
from mowgli_etl.cli.commands.index_concept_net_command import IndexConceptNetCommand
except ImportError:
IndexConceptNetCommand = None
class Cli:
def __init__(self):
self.__commands = {
"augment-cskg-release": AugmentCskgReleaseCommand(),
"etl": EtlCommand(),
"drive-upload": DriveUploadCommand()
}
if IndexConceptNetCommand is not None:
self.__commands["index-concept-net"] = IndexConceptNetCommand()
@staticmethod
def __add_global_args(arg_parser: ArgParser):
arg_parser.add_argument("-c", is_config_file=True, help="config file path")
arg_parser.add_argument(
"--debug", action="store_true", help="turn on debugging"
)
arg_parser.add_argument(
"--logging-level",
help="set logging-level level (see Python logging module)",
)
def __configure_logging(self, args):
if args.debug:
logging_level = logging.DEBUG
elif args.logging_level is not None:
logging_level = getattr(logging, args.logging_level.upper())
else:
logging_level = logging.INFO
logging.basicConfig(
format="%(asctime)s:%(processName)s:%(module)s:%(lineno)s:%(name)s:%(levelname)s: %(message)s",
level=logging_level,
)
def main(self):
args = self.__parse_args()
self.__configure_logging(args)
self.__commands[args.command](args)
def __parse_args(self):
arg_parser = ArgParser()
subparsers = arg_parser.add_subparsers(
title="commands", dest="command"
)
self.__add_global_args(arg_parser)
for command_name, command in self.__commands.items():
subparser = subparsers.add_parser(command_name)
self.__add_global_args(subparser)
command.add_arguments(subparser, self.__add_global_args)
parsed_args = arg_parser.parse_args()
if parsed_args.command is None:
arg_parser.print_usage()
sys.exit(1)
return parsed_args
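# Hedged sketch (assumption, not one of the real commands above): the minimal
# interface Cli expects of a command object, inferred from __parse_args()
# (add_arguments receives the subparser plus the global-args helper) and from
# main() (the command object is called with the parsed args).
class _ExampleCommand:
    def add_arguments(self, arg_parser: ArgParser, add_parent_args):
        arg_parser.add_argument("--dry-run", action="store_true")

    def __call__(self, args):
        logging.info("example command (dry run: %s)", args.dry_run)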
def main():
Cli().main()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
import io
import os
import hashlib
WORK_DIR = os.path.dirname(os.path.realpath(__file__))
WIN_INC_DIR = os.path.join(WORK_DIR, 'include')
API_HASH_FILE = os.path.join(WORK_DIR, 'src', 'ag_dns_h_hash.inc')
API_FILES = [
os.path.join(WIN_INC_DIR, 'ag_dns.h'),
]
file_hash = hashlib.sha256()
for file in sorted(API_FILES):
print('Processing "{0}"...'.format(file))
with io.open(file, 'r', encoding='utf-8', errors='ignore') as file_handle:
for line in file_handle:
file_hash.update(line.encode('utf-8'))
digest = file_hash.hexdigest()
print('Generated hash: {0}'.format(digest))
with open(API_HASH_FILE, 'r+') as file_handle:
api_hash_line = '#define AG_DNSLIBS_H_HASH "{0}"\n'.format(digest)
if api_hash_line not in file_handle.read():
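        # Note (assumption): the digest is a fixed-length hex string, so writing the
        # new #define from position 0 fully replaces an existing line of the same
        # format; this relies on the .inc file containing only that single line.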
file_handle.seek(0)
file_handle.write(api_hash_line)
|
n1 = int(input('Digite um número: '))
n2 = int( input('Digite outro número: '))
if n1>n2:
print('O primeiro número é maior')
elif n2>n1:
print('O segundo número é maior ')
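# Hypothetical extra branch (not in the original snippet): handle equal inputs.
else:
    print('Os dois números são iguais')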
|
import re
import functools
class command(object):
def __init__(self, matcher=None):
self._matcher = matcher
def __call__(self, func):
# Default the command's name to an exact match of the function's name.
# ^func_name$
matcher = self._matcher
if matcher is None:
matcher = r'^%s$' % func.__name__
# convert matcher to regular expression
matcher = re.compile(matcher)
@functools.wraps(func)
def wrapped_command(*args, **kwargs):
return func(*args, **kwargs)
wrapped_command._type = "COMMAND"
wrapped_command._matcher = matcher
return wrapped_command
class privmsg(object):
def __init__(self, matcher=None):
self._matcher = matcher
def __call__(self, func):
        # convert matcher to regular expression
        # (note: unlike @command, no default pattern is derived from the
        # function name, so a matcher must be supplied)
matcher = re.compile(self._matcher)
@functools.wraps(func)
def wrapped_command(*args, **kwargs):
return func(*args, **kwargs)
wrapped_command._type = "PRIVMSG"
wrapped_command._matcher = matcher
return wrapped_command
def interval(milliseconds):
def wrapped(func):
@functools.wraps(func)
def wrapped_command(*args, **kwargs):
return func(*args, **kwargs)
wrapped_command._type = "REPEAT"
wrapped_command._interval = milliseconds
return wrapped_command
return wrapped
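# Hedged sketch (assumption, not part of the original module): how a dispatcher
# could discover the decorated handlers via the _type/_matcher attributes set
# above. The Plugin class and handler names are hypothetical.
class Plugin(object):
    @command()
    def ping(self):
        return "pong"

    @privmsg(r"hello\b")
    def greet(self):
        return "hi there"

def find_matching_handlers(plugin, text):
    """Return all COMMAND/PRIVMSG handlers whose regex matches the given text."""
    handlers = []
    for name in dir(plugin):
        attr = getattr(plugin, name)
        if getattr(attr, "_type", None) in ("COMMAND", "PRIVMSG") and attr._matcher.search(text):
            handlers.append(attr)
    return handlers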
|
# -*- coding: utf-8 -*-
"""
mslib.msui.mpl_map
~~~~~~~~~~~~~~~~~~
Map canvas for the top view.
As Matplotlib's Basemap is primarily designed to produce static plots,
we derived a class MapCanvas to allow for a certain degree of
interactivity. MapCanvas extends Basemap by functionality to, for
instance, automatically draw a graticule. It also keeps references to
plotted map elements to allow the user to toggle their visibility, or
to redraw when map section (zoom/pan) or projection have been changed.
This file is part of mss.
:copyright: Copyright 2008-2014 Deutsches Zentrum fuer Luft- und Raumfahrt e.V.
:copyright: Copyright 2011-2014 Marc Rautenhaus (mr)
:copyright: Copyright 2016-2021 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import copy
import numpy as np
from shapely.geometry import Polygon
import matplotlib
from matplotlib.cm import get_cmap
import matplotlib.path as mpath
import matplotlib.patches as mpatches
from matplotlib.collections import PolyCollection
import mpl_toolkits.basemap as basemap
try:
import mpl_toolkits.basemap.pyproj as pyproj
except ImportError:
logging.debug("Failed to pyproj from mpl_toolkits.basemap")
import pyproj
from mslib.msui import mpl_pathinteractor as mpl_pi
from mslib.utils.airdata import get_airports, get_airspaces
OPENAIP_NOTICE = "Airspace data used comes from openAIP.\n" \
"Visit openAIP.net and contribute to better aviation data, free for everyone to use and share."
OURAIRPORTS_NOTICE = "Airports provided by OurAirports."
class MapCanvas(basemap.Basemap):
"""
Derivative of mpl_toolkits.basemap, providing additional methods to
automatically draw a graticule and to redraw specific map elements.
"""
def __init__(self, identifier=None, CRS=None, BBOX_UNITS=None, OPERATION_NAME=None,
appearance=None, **kwargs):
"""
New constructor automatically adds coastlines, continents, and
a graticule to the map.
Keyword arguments are the same as for mpl_toolkits.basemap.
Additional arguments:
CRS -- string describing the coordinate reference system of the map.
BBOX_UNITS -- string describing the units of the map coordinates.
OPERATION_NAME -- string with operation name
"""
# Coordinate reference system identifier and coordinate system units.
self.crs = CRS if CRS is not None else self.crs if hasattr(self, "crs") else None
if BBOX_UNITS is not None:
self.bbox_units = BBOX_UNITS
else:
self.bbox_units = getattr(self, "bbox_units", None)
self.operation_name = OPERATION_NAME if OPERATION_NAME is not None else self.operation_name \
if hasattr(self, "operation_name") else None
# Dictionary containing map appearance settings.
if appearance is not None:
param_appearance = appearance
else:
param_appearance = getattr(self, "appearance", {})
default_appearance = {"draw_graticule": True,
"draw_coastlines": True,
"fill_waterbodies": True,
"fill_continents": True,
"colour_water": ((153 / 255.), (255 / 255.), (255 / 255.), (255 / 255.)),
"colour_land": ((204 / 255.), (153 / 255.), (102 / 255.), (255 / 255.))}
default_appearance.update(param_appearance)
self.appearance = default_appearance
# Identifier of this map canvas (used to query data structures that
# are observed by different views).
if identifier is not None:
self.identifier = identifier
else:
self.identifier = getattr(self, "identifier", None)
# Call the Basemap constructor. If a cylindrical projection was used
# before, Basemap stores an EPSG code that will not be changed if
# the Basemap constructor is called a second time. Hence, we have to
# delete the attribute (mr, 08Feb2013).
if hasattr(self, "epsg"):
del self.epsg
super().__init__(**kwargs)
self.gc = pyproj.Geod(a=self.rmajor, b=self.rminor)
self.kwargs = kwargs
# Set up the map appearance.
if self.appearance["draw_coastlines"]:
if len(self.coastsegs) > 0 and len(self.coastsegs[0]) > 0:
self.map_coastlines = self.drawcoastlines(zorder=3)
self.map_countries = self.drawcountries(zorder=3)
else:
self.map_coastlines = None
self.map_countries = None
if self.appearance["fill_waterbodies"]:
self.map_boundary = self.drawmapboundary(fill_color=self.appearance["colour_water"])
else:
self.map_boundary = None
# zorder = 0 is necessary to paint over the filled continents with
# scatter() for drawing the flight tracks and trajectories.
# Curiously, plot() works fine without this setting, but scatter()
# doesn't.
if self.appearance["fill_continents"]:
self.map_continents = self.fillcontinents(
color=self.appearance["colour_land"], lake_color=self.appearance["colour_water"], zorder=1)
else:
self.map_continents = None
self.image = None
# Print project name and CRS identifier into figure.
crs_text = ""
if self.operation_name is not None:
crs_text += self.operation_name
if self.crs is not None:
if len(crs_text) > 0:
crs_text += "\n"
crs_text += self.crs
if hasattr(self, "crs_text"): # update existing textbox
self.crs_text.set_text(crs_text)
else:
self.crs_text = self.ax.figure.text(0, 0, crs_text)
if self.appearance["draw_graticule"]:
pass
# self._draw_auto_graticule() ; It's already called in mpl_qtwidget.py in MplTopviewCanvas init_map().
else:
self.map_parallels = None
self.map_meridians = None
# self.warpimage() # disable fillcontinents when loading bluemarble
self.ax.set_autoscale_on(False)
if not hasattr(self, "airports") or not self.airports:
self.airports = None
self.airtext = None
if not hasattr(self, "airspaces") or not self.airspaces:
self.airspaces = None
self.airspacetext = None
def set_identifier(self, identifier):
self.identifier = identifier
def set_axes_limits(self, ax=None):
"""
See Basemap.set_axes_limits() for documentation.
This function is overridden in MapCanvas as a workaround to a problem
in Basemap.set_axes_limits() that occurs in interactive matplotlib
mode. If matplotlib.is_interactive() is True, Basemap.set_axes_limits()
tries to redraw the canvas by accessing the pylab figure manager.
If the matplotlib object is embedded in a Qt application, this manager
is not available and an exception is raised. Hence, the interactive
mode is disabled here before the original Basemap.set_axes_limits()
method is called. It is restored afterwards.
"""
intact = matplotlib.is_interactive()
matplotlib.interactive(False)
super(MapCanvas, self).set_axes_limits(ax=ax)
matplotlib.interactive(intact)
def _draw_auto_graticule(self, font_size=None):
"""
Draw an automatically spaced graticule on the map.
"""
# Compute some map coordinates that are required below for the automatic
# determination of which meridians and parallels to draw.
axis = self.ax.axis()
upperLeftCornerLon, upperLeftCornerLat = self.__call__(
axis[0], axis[3], inverse=True)
lowerRightCornerLon, lowerRightCornerLat = self.__call__(
axis[1], axis[2], inverse=True)
middleUpperBoundaryLon, middleUpperBoundaryLat = self.__call__(
np.mean([axis[0], axis[1]]), axis[3], inverse=True)
middleLowerBoundaryLon, middleLowerBoundaryLat = self.__call__(
np.mean([axis[0], axis[1]]), axis[2], inverse=True)
# Determine which parallels and meridians should be drawn.
# a) determine which are the minimum and maximum visible
# longitudes and latitudes, respectively. These
# values depend on the map projection.
if self.projection in ['npstere', 'spstere', 'stere', 'lcc']:
# For stereographic projections: Draw meridians from the minimum
# longitude contained in the map at one of the four corners to the
# maximum longitude at one of these corner points. If
            # the southern or northern pole is contained in the map, draw all
# meridians around the globe.
# Draw parallels from the min latitude contained in the map at
# one of the four corners OR the middle top or bottom to the
# maximum latitude at one of these six points.
            # If the map centre is contained in the map, replace either
# start or end latitude by the centre latitude (whichever is
# smaller/larger).
# check if centre point of projection is contained in the map,
# use projection coordinates for this test
centre_x = self.projparams["x_0"]
centre_y = self.projparams["y_0"]
centre_lon, centre_lat = self.__call__(centre_x, centre_y, inverse=True)
if centre_lat > 0:
pole_lon, pole_lat = 0, 90
else:
pole_lon, pole_lat = 0, -90
pole_x, pole_y = self.__call__(pole_lon, pole_lat)
if self.urcrnrx > self.llcrnrx:
contains_pole = (self.llcrnrx <= pole_x <= self.urcrnrx)
else:
contains_pole = (self.llcrnrx >= pole_x >= self.urcrnrx)
if self.urcrnry > self.llcrnry:
contains_pole = contains_pole and (self.llcrnry <= pole_y <= self.urcrnry)
else:
contains_pole = contains_pole and (self.llcrnry >= pole_y >= self.urcrnry)
            # meridians
if contains_pole:
mapLonStart = -180.
mapLonStop = 180.
else:
mapLonStart = min(upperLeftCornerLon, self.llcrnrlon,
self.urcrnrlon, lowerRightCornerLon)
mapLonStop = max(upperLeftCornerLon, self.llcrnrlon,
self.urcrnrlon, lowerRightCornerLon)
# parallels
mapLatStart = min(middleLowerBoundaryLat, lowerRightCornerLat,
self.llcrnrlat,
middleUpperBoundaryLat, upperLeftCornerLat,
self.urcrnrlat)
mapLatStop = max(middleLowerBoundaryLat, lowerRightCornerLat,
self.llcrnrlat,
middleUpperBoundaryLat, upperLeftCornerLat,
self.urcrnrlat)
if contains_pole:
centre_lat = self.projparams["lat_0"]
mapLatStart = min(mapLatStart, centre_lat)
mapLatStop = max(mapLatStop, centre_lat)
else:
# for other projections (preliminary): difference between the
# lower left and the upper right corner.
mapLonStart = self.llcrnrlon
mapLonStop = self.urcrnrlon
mapLatStart = self.llcrnrlat
mapLatStop = self.urcrnrlat
# b) parallels and meridians can be drawn with a spacing of
# >spacingValues< degrees. Determine the appropriate
# spacing for the lon/lat differences: about 10 lines
# should be drawn in each direction. (The following lines
# filter the spacingValues list for all spacing values
# that are larger than lat/lon difference / 10, then
        # take the first value, i.e. the first one that's larger).
spacingValues = [0.05, 0.1, 0.25, 0.5, 1, 2, 5, 10, 20, 30, 40]
deltaLon = mapLonStop - mapLonStart
deltaLat = mapLatStop - mapLatStart
spacingLon = ([i for i in spacingValues if i > (deltaLon / 11.)] + [60])[0]
spacingLat = ([i for i in spacingValues if i > (deltaLat / 11.)] + [60])[0]
# c) parallels and meridians start at the first value in the
# spacingLon/Lat grid that's smaller than the lon/lat of the
# lower left corner; they stop at the first values in the
# grid that's larger than the lon/lat of the upper right corner.
lonStart = np.floor((mapLonStart / spacingLon)) * spacingLon
lonStop = np.ceil((mapLonStop / spacingLon)) * spacingLon
latStart = np.floor((mapLatStart / spacingLat)) * spacingLat
latStop = np.ceil((mapLatStop / spacingLat)) * spacingLat
# d) call the basemap methods to draw the lines in the determined
# range.
self.map_parallels = self.drawparallels(np.arange(latStart, latStop,
spacingLat),
labels=[1, 1, 0, 0], fontsize=font_size, zorder=3)
self.map_meridians = self.drawmeridians(np.arange(lonStart, lonStop,
spacingLon),
labels=[0, 0, 0, 1], fontsize=font_size, zorder=3)
def set_graticule_visible(self, visible=True):
"""
        Set the visibility of the graticule.
Removes a currently visible graticule by deleting internally stored
line and text objects representing graticule lines and labels, then
redrawing the map canvas.
See http://www.mail-archive.com/matplotlib-users@lists.sourceforge.net/msg09349.html
"""
self.appearance["draw_graticule"] = visible
if visible and self.map_parallels is None and self.map_meridians is None:
# Draw new graticule if visible is True and no current graticule
# exists.
self._draw_auto_graticule()
# Update the figure canvas.
self.ax.figure.canvas.draw()
elif not visible and self.map_parallels is not None and self.map_meridians is not None:
            # If visible is False, remove the current graticule if one exists.
# Every item in self.map_parallels and self.map_meridians is
# a tuple of a list of lines and a list of text labels.
for item in self.map_parallels.values():
for line in item[0]:
line.remove()
for text in item[1]:
text.remove()
for item in self.map_meridians.values():
for line in item[0]:
line.remove()
for text in item[1]:
text.remove()
self.map_parallels = None
self.map_meridians = None
# Update the figure canvas.
self.ax.figure.canvas.draw()
def set_draw_airports(self, value, port_type=["small_airport"], reload=True):
"""
Sets airports to visible or not visible
"""
if (reload or not value) and self.airports:
if OURAIRPORTS_NOTICE in self.crs_text.get_text():
self.crs_text.set_text(self.crs_text.get_text().replace(f"{OURAIRPORTS_NOTICE}\n", ""))
self.airports.remove()
self.airtext.remove()
self.airports = None
self.airtext = None
self.ax.figure.canvas.mpl_disconnect(self.airports_event)
if value:
self.draw_airports(port_type)
def set_draw_airspaces(self, value, airspaces=[], range_km=None, reload=True):
"""
Sets airspaces to visible or not visible
"""
if (reload or not value) and self.airspaces:
if OPENAIP_NOTICE in self.crs_text.get_text():
self.crs_text.set_text(self.crs_text.get_text().replace(f"{OPENAIP_NOTICE}\n", ""))
self.airspaces.remove()
self.airspacetext.remove()
self.airspaces = None
self.airspacetext = None
self.ax.figure.canvas.mpl_disconnect(self.airspace_event)
if value:
country_codes = [airspace.split(" ")[-1] for airspace in airspaces]
self.draw_airspaces(country_codes, range_km)
def draw_airspaces(self, countries=[], range_km=None):
"""
Load and draw airspace data
"""
if not self.airspaces:
airspaces = copy.deepcopy(get_airspaces(countries))
if not airspaces:
logging.error("Tried to draw airspaces without .aip files.")
return
for i, airspace in enumerate(airspaces):
airspaces[i]["polygon"] = list(zip(*self.projtran(*list(zip(*airspace["polygon"])))))
map_polygon = Polygon([(self.llcrnrx, self.llcrnry), (self.urcrnrx, self.llcrnry),
(self.urcrnrx, self.urcrnry), (self.llcrnrx, self.urcrnry)])
airspaces = [airspace for airspace in airspaces if
(not range_km or range_km[0] <= airspace["bottom"] <= range_km[1]) and
Polygon(airspace["polygon"]).intersects(map_polygon)]
if not airspaces:
return
if OPENAIP_NOTICE not in self.crs_text.get_text():
self.crs_text.set_text(f"{OPENAIP_NOTICE}\n" + self.crs_text.get_text())
airspaces.sort(key=lambda x: (x["bottom"], x["top"] - x["bottom"]))
max_height = max(airspaces[-1]["bottom"], 0.001)
cmap = get_cmap("Blues")
airspace_colors = [cmap(1 - airspaces[i]["bottom"] / max_height) for i in range(len(airspaces))]
collection = PolyCollection([airspace["polygon"] for airspace in airspaces], alpha=0.5, edgecolor="black",
zorder=5, facecolors=airspace_colors)
collection.set_pickradius(0)
self.airspaces = self.ax.add_collection(collection)
self.airspacetext = self.ax.annotate(airspaces[0]["name"], xy=airspaces[0]["polygon"][0], xycoords="data",
bbox={"boxstyle": "round", "facecolor": "w",
"edgecolor": "0.5", "alpha": 0.9}, zorder=7)
self.airspacetext.set_visible(False)
def update_text(index, xydata):
self.airspacetext.xy = xydata
self.airspacetext.set_position(xydata)
self.airspacetext.set_text("\n".join([f"{airspaces[i]['name']}, {airspaces[i]['bottom']} - "
f"{airspaces[i]['top']}km" for i in index["ind"]]))
highlight_cmap = get_cmap("YlGn")
for i in index["ind"]:
airspace_colors[i] = highlight_cmap(1 - airspaces[i]["bottom"] / max_height)
self.airspaces.set_facecolor(airspace_colors)
for i in index["ind"]:
airspace_colors[i] = cmap(1 - airspaces[i]["bottom"] / max_height)
def on_move(event):
if self.airspaces and event.inaxes == self.ax:
cont, ind = self.airspaces.contains(event)
if cont:
update_text(ind, (event.xdata, event.ydata))
self.airspacetext.set_visible(True)
self.ax.figure.canvas.draw_idle()
elif self.airspacetext.get_visible():
self.airspacetext.set_visible(False)
self.airspaces.set_facecolor(airspace_colors)
self.ax.figure.canvas.draw_idle()
self.airspace_event = self.ax.figure.canvas.mpl_connect('motion_notify_event', on_move)
def draw_airports(self, port_type):
"""
Load and draw airports and their respective name on hover
"""
if not self.airports:
airports = get_airports()
if not airports:
logging.error("Tried to draw airports but none were found. Try redownloading.")
return
lons, lats = self.projtran(*zip(*[(float(airport["longitude_deg"]),
float(airport["latitude_deg"])) for airport in airports]))
for i, airport in enumerate(airports):
airports[i]["longitude_deg"] = lons[i]
airports[i]["latitude_deg"] = lats[i]
airports = [airport for airport in airports if airport["type"] in port_type and
self.llcrnrx <= float(airport["longitude_deg"]) <= self.urcrnrx and
self.llcrnry <= float(airport["latitude_deg"]) <= self.urcrnry]
lons = [float(airport["longitude_deg"]) for airport in airports]
lats = [float(airport["latitude_deg"]) for airport in airports]
annotations = [airport["name"] for airport in airports]
if not airports:
return
if OURAIRPORTS_NOTICE not in self.crs_text.get_text():
self.crs_text.set_text(f"{OURAIRPORTS_NOTICE}\n" + self.crs_text.get_text())
self.airports = self.ax.scatter(lons, lats, marker="o", color="r", linewidth=1, s=9, edgecolor="black",
zorder=6)
self.airports.set_pickradius(1)
self.airtext = self.ax.annotate(annotations[0], xy=(lons[0], lats[0]), xycoords="data",
bbox={"boxstyle": "round", "facecolor": "w",
"edgecolor": "0.5", "alpha": 0.9}, zorder=8)
self.airtext.set_visible(False)
def update_text(index):
pos = self.airports.get_offsets()[index["ind"][0]]
self.airtext.xy = pos
self.airtext.set_position(pos)
self.airtext.set_text("\n".join([annotations[i] for i in index["ind"]]))
def on_move(event):
if self.airports and event.inaxes == self.ax:
cont, ind = self.airports.contains(event)
if cont:
update_text(ind)
self.airtext.set_visible(True)
self.ax.figure.canvas.draw_idle()
elif self.airtext.get_visible():
self.airtext.set_visible(False)
self.ax.figure.canvas.draw_idle()
self.airports_event = self.ax.figure.canvas.mpl_connect('motion_notify_event', on_move)
def set_fillcontinents_visible(self, visible=True, land_color=None,
lake_color=None):
"""
Set the visibility of continent fillings.
"""
if land_color is not None:
self.appearance["colour_land"] = land_color
if lake_color is not None:
self.appearance["colour_water"] = lake_color
self.appearance["fill_continents"] = visible
if visible and self.map_continents is None:
# zorder = 0 is necessary to paint over the filled continents with
# scatter() for drawing the flight tracks and trajectories.
# Curiously, plot() works fine without this setting, but scatter()
# doesn't.
self.map_continents = self.fillcontinents(color=self.appearance["colour_land"],
lake_color=self.appearance["colour_water"],
zorder=1)
self.ax.figure.canvas.draw()
elif not visible and self.map_continents is not None:
# Remove current fills. They are stored as a list of polygon patches
# in self.map_continents.
for patch in self.map_continents:
patch.remove()
self.map_continents = None
self.ax.figure.canvas.draw()
elif visible and self.map_continents is not None:
# Colours have changed: Remove the old fill and redraw.
for patch in self.map_continents:
patch.remove()
self.map_continents = self.fillcontinents(color=self.appearance["colour_land"],
lake_color=self.appearance["colour_water"],
zorder=1)
self.ax.figure.canvas.draw()
def set_coastlines_visible(self, visible=True):
"""
Set the visibility of coastlines and country borders.
"""
self.appearance["draw_coastlines"] = visible
if visible and self.map_coastlines is None and self.map_countries is None:
self.map_coastlines = self.drawcoastlines(zorder=3)
self.map_countries = self.drawcountries(zorder=3)
self.ax.figure.canvas.draw()
elif not visible and self.map_coastlines is not None and self.map_countries is not None:
self.map_coastlines.remove()
self.map_countries.remove()
del self.cntrysegs
self.map_coastlines = None
self.map_countries = None
self.ax.figure.canvas.draw()
def set_mapboundary_visible(self, visible=True, bg_color='#99ffff'):
"""
"""
# TODO: This doesn't work. Removing the map background only makes sense
# if there's a second image underneath this map. Maybe we should work
# with alpha values instead.
self.appearance["fill_waterbodies"] = visible
self.appearance["colour_water"] = bg_color
if not visible and self.map_boundary is not None:
try:
self.map_boundary.remove()
except NotImplementedError as ex:
logging.debug("%s", ex)
self.map_boundary = None
self.ax.figure.canvas.draw()
elif visible:
self.map_boundary = self.drawmapboundary(fill_color=bg_color)
self.ax.figure.canvas.draw()
def update_with_coordinate_change(self, kwargs_update=None):
"""
Redraws the entire map. This is necessary after zoom/pan operations.
Determines corner coordinates of the current axes, removes all items
        belonging to the current map and draws a new one by calling
self.__init__().
DRAWBACK of this approach is that the map coordinate system changes, as
basemap always takes the lower left axis corner as (0,0). This means
that all other objects on the matplotlib canvas (flight track, markers,
..) will be incorrectly placed after a redraw. Their coordinates need
to be adjusted by 1) transforming their coordinates to lat/lon BEFORE
the map is redrawn, 2) redrawing the map, 3) transforming the stored
lat/lon coordinates to the new map coordinates.
"""
# Convert the current axis corners to lat/lon coordinates.
axis = self.ax.axis()
self.kwargs['llcrnrlon'], self.kwargs['llcrnrlat'] = \
self.__call__(axis[0], axis[2], inverse=True)
self.kwargs['urcrnrlon'], self.kwargs['urcrnrlat'] = \
self.__call__(axis[1], axis[3], inverse=True)
logging.debug("corner coordinates (lat/lon): ll(%.2f,%.2f), ur(%.2f,%.2f)",
self.kwargs['llcrnrlat'], self.kwargs['llcrnrlon'],
self.kwargs['urcrnrlat'], self.kwargs['urcrnrlon'])
if (self.kwargs.get("projection") in ["cyl"] or
self.kwargs.get("epsg") in ["4326"]):
# Latitudes in cylindrical projection need to be within -90..90.
self.kwargs['llcrnrlat'] = max(self.kwargs['llcrnrlat'], -90)
self.kwargs['urcrnrlat'] = max(self.kwargs['urcrnrlat'], -89)
self.kwargs['llcrnrlat'] = min(self.kwargs['llcrnrlat'], 89)
self.kwargs['urcrnrlat'] = min(self.kwargs['urcrnrlat'], 90)
# Longitudes in cylindrical projection need to be within -360..540.
self.kwargs["llcrnrlon"] = max(self.kwargs["llcrnrlon"], -360)
self.kwargs["urcrnrlon"] = max(self.kwargs["urcrnrlon"], -359)
self.kwargs["llcrnrlon"] = min(self.kwargs["llcrnrlon"], 539)
self.kwargs["urcrnrlon"] = min(self.kwargs["urcrnrlon"], 540)
# Remove the current map artists.
grat_vis = self.appearance["draw_graticule"]
self.set_graticule_visible(False)
self.appearance["draw_graticule"] = grat_vis
if self.map_coastlines is not None and (len(self.coastsegs) > 0 and len(self.coastsegs[0]) > 0):
self.map_coastlines.remove()
if self.image is not None:
self.image.remove()
self.image = None
# Refer to Basemap.drawcountries() on how to remove country borders.
# In addition to the matplotlib lines, the loaded country segment data
        # needs to be deleted. THE SAME NEEDS TO BE DONE WITH RIVERS ETC.
if self.map_countries is not None:
self.map_countries.remove()
del self.cntrysegs
# map_boundary is None for rectangular projections (basemap simply sets
# the background colour).
if self.map_boundary is not None:
try:
self.map_boundary.remove()
except NotImplementedError as ex:
logging.debug("%s", ex)
self.map_boundary = None
cont_vis = self.appearance["fill_continents"]
self.set_fillcontinents_visible(False)
self.appearance["fill_continents"] = cont_vis
# POSSIBILITY A): Call self.__init__ again with stored keywords.
# Update kwargs if new parameters such as the map region have been
# given.
if kwargs_update:
proj_keys = ["epsg", "projection"]
if any(_x in kwargs_update for _x in proj_keys):
for key in (_x for _x in proj_keys if _x in self.kwargs):
del self.kwargs[key]
self.kwargs.update(kwargs_update)
self.__init__(**self.kwargs)
# TODO: HOW TO MAKE THIS MORE EFFICIENT.
# POSSIBILITY B): Only set the Basemap parameters that influence the
# plot (corner lat/lon, x/y, width/height, ..) and re-define the
# polygons that represent the coastlines etc. In Basemap, they are
# defined in __init__(), at the very bottom (the code that comes
# with the comments
# >> read in coastline polygons, only keeping those that
# >> intersect map boundary polygon.
# ). Basemap only loads the coastline data that is actually displayed.
# If we only change llcrnrlon etc. here and replot coastlines etc.,
# the polygons and the map extent will remain the same.
# However, it should be possible to make a map change WITHOUT changing
# coordinates.
#
# self.llcrnrlon = llcrnrlon
# self.llcrnrlat = llcrnrlat
# self.urcrnrlon = urcrnrlon
# self.urcrnrlat = urcrnrlat
# self.llcrnrx = axis[0]
# self.llcrnry = axis[2]
# self.urcrnrx = axis[1]
# self.urcrnry = axis[3]
def imshow(self, X, **kwargs):
"""
Overloads basemap.imshow(). Deletes any existing image and
redraws the figure after the new image has been plotted.
"""
if self.image is not None:
self.image.remove()
self.image = super(MapCanvas, self).imshow(X, zorder=2, **kwargs)
self.ax.figure.canvas.draw()
return self.image
def gcpoints2(self, lon0, lat0, lon1, lat1, del_s=100., map_coords=True):
"""
The same as basemap.gcpoints(), but takes a distance interval del_s
to space the points instead of a number of points.
"""
# use great circle formula for a perfect sphere.
_, _, dist = self.gc.inv(lon0, lat0, lon1, lat1)
npoints = int((dist + 0.5 * 1000. * del_s) / (1000. * del_s))
lonlats = self.gc.npts(lon0, lat0, lon1, lat1, npoints)
lons = [lon0]
lats = [lat0]
for lon, lat in lonlats:
lons.append(lon)
lats.append(lat)
lons.append(lon1)
lats.append(lat1)
if map_coords:
x, y = self(lons, lats)
else:
x, y = (lons, lats)
return x, y
def gcpoints_path(self, lons, lats, del_s=100., map_coords=True):
"""
Same as gcpoints2, but for an entire path, i.e. multiple
line segments. lons and lats are lists of waypoint coordinates.
"""
# use great circle formula for a perfect sphere.
assert len(lons) == len(lats)
assert len(lons) > 1
gclons = [lons[0]]
gclats = [lats[0]]
for i in range(len(lons) - 1):
_, _, dist = self.gc.inv(lons[i], lats[i], lons[i + 1], lats[i + 1])
npoints = int((dist + 0.5 * 1000. * del_s) / (1000. * del_s))
lonlats = []
if npoints > 0:
lonlats = self.gc.npts(lons[i], lats[i], lons[i + 1], lats[i + 1], npoints)
for lon, lat in lonlats:
gclons.append(lon)
gclats.append(lat)
gclons.append(lons[i + 1])
gclats.append(lats[i + 1])
if self.projection == "cyl": # hack for wraparound
lon_min, lon_max = self.llcrnrlon, self.urcrnrlon
gclons = np.array(gclons)
gclons[gclons < lon_min] += 360
gclons[gclons > lon_max] -= 360
idcs = np.where(abs(np.diff(gclons)) > 300)[0]
gclons[idcs] = np.nan
if map_coords:
x, y = self(gclons, gclats)
else:
x, y = (gclons, gclats)
return x, y
def drawgreatcircle_path(self, lons, lats, del_s=100., **kwargs):
"""
"""
x, y = self.gcpoints_path(lons, lats, del_s=del_s)
return self.plot(x, y, **kwargs)
class SatelliteOverpassPatch(object):
"""
Represents a satellite overpass on the top view map (satellite
track and, if available, swath).
"""
# TODO: Derive this class from some Matplotlib actor class? Or create
# a new abstract base class for objects that can be drawn on the
# map -- they all should provide methods remove(), update(),
# etc. update() should automatically remap the object to new map
# coordinates.
def __init__(self, mapcanvas, segment):
"""
"""
self.map = mapcanvas
self.segment = segment
# Filter those time elements that correspond to masked positions -- this
# way the indexes in self.utc correspond to those in self.sat.
        # np.ma.getmaskarray is necessary as .mask only returns a scalar
# "False" if the array contains no masked entries.
self.utc = segment["utc"][~np.ma.getmaskarray(segment["satpos"])[:, 0]]
self.sat = np.ma.compress_rows(segment["satpos"])
self.sw_l = np.ma.compress_rows(segment["swath_left"])
self.sw_r = np.ma.compress_rows(segment["swath_right"])
self.trackline = None
self.patch = None
self.texts = []
self.draw()
def draw(self):
"""
Do the actual plotting of the patch.
"""
# Plot satellite track.
sat = np.copy(self.sat)
sat[:, 0], sat[:, 1] = self.map(sat[:, 0], sat[:, 1])
self.trackline = self.map.plot(sat[:, 0], sat[:, 1], zorder=10,
marker='+', markerfacecolor='g')
# Plot polygon patch that represents the swath of the sensor.
sw_l = self.sw_l
sw_r = self.sw_r
Path = mpath.Path
pathdata = [(Path.MOVETO, self.map(sw_l[0, 0], sw_l[0, 1]))]
for point in sw_l[1:]:
pathdata.append((Path.LINETO, self.map(point[0], point[1])))
for point in sw_r[::-1]:
pathdata.append((Path.LINETO, self.map(point[0], point[1])))
codes, verts = list(zip(*pathdata))
path = mpl_pi.PathH(verts, codes, map=self.map)
patch = mpatches.PathPatch(path, facecolor='yellow',
edgecolor='yellow', alpha=0.4, zorder=10)
self.patch = patch
self.map.ax.add_patch(patch)
# Draw text labels.
self.texts.append(self.map.ax.text(sat[0, 0], sat[0, 1],
self.utc[0].strftime("%H:%M:%S"),
zorder=10,
bbox=dict(facecolor='white',
alpha=0.5,
edgecolor='none')))
self.texts.append(self.map.ax.text(sat[-1, 0], sat[-1, 1],
self.utc[-1].strftime("%H:%M:%S"),
zorder=10,
bbox=dict(facecolor='white',
alpha=0.5,
edgecolor='none')))
self.map.ax.figure.canvas.draw()
def update(self):
"""
Removes the current plot of the patch and redraws the patch.
This is necessary, for instance, when the map projection and/or
extent has been changed.
"""
self.remove()
self.draw()
def remove(self):
"""
Remove this satellite patch from the map canvas.
"""
if self.trackline is not None:
for element in self.trackline:
element.remove()
self.trackline = None
if self.patch is not None:
self.patch.remove()
self.patch = None
for element in self.texts:
# Removal of text elements sometimes fails. I don't know why,
# the plots look fine nevertheless.
try:
element.remove()
except Exception as ex:
logging.error("Wildcard exception caught: %s %s", type(ex), ex)
|
def encode(valeur,base):
""" int*int -->String
hyp valeur >=0
hypothèse : base maxi = 16
"""
chaine=""
if valeur>255 or valeur<0 :
return ""
for n in range (1,9) :
calcul = valeur % base
if (calcul)>9:
if calcul==10:
bit='A'
if calcul==11:
bit='B'
if calcul==12:
bit='C'
if calcul==13:
bit='D'
if calcul==14:
bit='E'
if calcul==15:
bit='F'
else :
bit=calcul
chaine =str(bit)+chaine
valeur = valeur // base
n+=1
return (chaine)
def encode2(valeur,base):
""" float*int -->String avec 16 décimales
hypothèse : base maxi = 16
"""
chaine=""
for n in range (1,17) :
valeur=valeur*base
calcul = int(valeur)
if (calcul)>9:
if calcul==10:
bit='A'
if calcul==11:
bit='B'
if calcul==12:
bit='C'
if calcul==13:
bit='D'
if calcul==14:
bit='E'
if calcul==15:
bit='F'
else :
bit=calcul
chaine =chaine+str(bit)
valeur = valeur - int(valeur)
return (chaine)
def conv_10_Vers_B(nombre,base):
"""float*int-->String
convertit de 10 vers b
hyp b max = 16"""
chaine=""
    # handle the integer part
entier=int(nombre)
chaine=encode(entier,base)
    # handle the fractional part
chaine=chaine+"."
decimal=nombre-entier
chaine=chaine+encode2(decimal,base)
return (chaine)
print(conv_10_Vers_B(16.625,2))
print(conv_10_Vers_B(63.734375,16))
print(conv_10_Vers_B(231.70314025878906,4))
print(conv_10_Vers_B(.1,2))
print(conv_10_Vers_B(.2,2))
print(conv_10_Vers_B(.3,2))
print("------------------------------------------------")
def decode2(chaine,base):
""" String * int --> float
hypothèse : chaine est constitué de 'bits' allant de 0 à base-1
retourne la valeur dans la base 10 de chaine exprimé en base base"""
valeur = 0
i=1
for elt in chaine :
if elt == 'A':
valeur = valeur + 10/base**i
elif elt == 'B':
valeur = valeur + 11/base**i
elif elt == 'C':
valeur = valeur + 12/base**i
elif elt == 'D':
valeur = valeur + 13/base**i
elif elt == 'E':
valeur = valeur + 14/base**i
elif elt == 'F':
valeur = valeur + 15/base**i
else :
valeur = valeur + int(elt)/base**i
i=i+1
return (valeur)
def decode(chaine,base):
""" String * int --> float
hypothèse : chaine est constitué de 8 caractères
retourne la valeur dans la base 10 de chaine exprimé en base base"""
valeur = 0
i=1
for elt in chaine :
if elt=='A':
valeur = valeur + 10*base**(8-i)
elif elt == 'B':
valeur = valeur + 11*base**(8-i)
elif elt== 'C':
valeur = valeur + 12*base**(8-i)
elif elt== 'D':
valeur = valeur + 13*base**(8-i)
elif elt == 'E':
valeur = valeur + 14*base**(8-i)
elif elt == 'F':
valeur = valeur + 15*base**(8-i)
else :
valeur = valeur + int(elt)*base**(8-i)
i=i+1
return (valeur)
def conv_B_Vers_10(chaine,base):
"""String * int--> float
convertit de b vers 10"""
#decoupage en 2 parties de la chaine séparateur .
L = chaine.split(".")
chaine1=L[0]
chaine2=L[1]
    # handle the integer part
valeur = decode(chaine1,base)
    # handle the fractional part
valeur = valeur + decode2(chaine2,base)
return (valeur)
print("00003213.23100001",conv_B_Vers_10("00003213.23100001",4))
print(conv_10_Vers_B(11.11,2))
print( conv_B_Vers_10("0000000B.1C28F5C28F5C0000",2) )
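# Hedged sanity check (assumption, not part of the original exercise): a
# base-b round trip should approximately recover the starting value, up to the
# truncation of the fractional part to 16 digits in encode2().
for x, b in ((16.625, 2), (63.734375, 16), (231.70314025878906, 4)):
    assert abs(conv_B_Vers_10(conv_10_Vers_B(x, b), b) - x) < 1e-6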
|
# -*- coding: utf-8 -*-
__author__ = 'Alex Hlukhov'
class Group(object):
def __init__(self, name, header, footer):
self.name = name
self.header = header
self.footer = footer
|
#!/usr/bin/python3
from jjcli import *
c=clfilter(opt="a")
i=j=w=r=0
ocr = {}
#for line in c.input():
for par in c.paragraph():
#all = findall(r"[a-zA-Z]+", par)
for w in findall(r"\w+", par):
ocr[w] =1 if w not in ocr else ocr[w] + 1
#print(all)
def dict_word(d):
for k,v in sorted(d.items(), key = lambda x: x[0]):
print(f"{k}: {v}")
def dict_num(d):
for k,v in sorted(d.items(), key = lambda x: x[1]):
print(f"{k}: {v}")
if("-a" in c.opt):
dict_word(ocr)
else:
dict_num(ocr)
|
# type: ignore
import unittest
from evilunit import test_target
@test_target("prestring:LazyArguments")
class LazyArgumentsTests(unittest.TestCase):
def test_it(self):
target = self._makeOne([1, 2, 3])
self.assertEqual(str(target), "1, 2, 3")
    def test_modified_after_rendering__no_changed(self):
target = self._makeOne([1, 2, 3])
self.assertEqual(str(target), "1, 2, 3")
target.args.append(4)
self.assertEqual(str(target), "1, 2, 3")
    def test_modified_before_rendering__changed(self):
target = self._makeOne([1, 2, 3])
target.args.append(4)
self.assertEqual(str(target), "1, 2, 3, 4")
def test_with_types(self):
target = self._makeOne(["x", "y"], types={"x": "int"})
self.assertEqual(str(target), "x: int, y")
def test_with_actual_types(self):
target = self._makeOne(["x", "y", "*"], types={"x": int, "y": bool})
self.assertEqual(str(target), "x: int, y: bool, *")
def test_with_actual_types2(self):
import typing as t
target = self._makeOne(
["x", "y", "z"],
types={
"x": int,
"y": t.Optional[int],
"z": t.Sequence[t.Optional[int]],
},
)
# TODO: fix, this is work-around
import sys
if (3, 9) > sys.version_info:
self.assertEqual(
str(target),
"x: int, y: 'typing.Union[int, NoneType]', z: 'typing.Sequence[typing.Union[int, NoneType]]'",
)
else:
self.assertEqual(
str(target),
"x: int, y: 'typing.Optional[int]', z: 'typing.Sequence[typing.Optional[int]]'",
)
@test_target("prestring:LazyKeywords")
class LazyKeywordsTests(unittest.TestCase):
def assert_unordered(self, xs, ys):
self.assertEqual(tuple(sorted(xs.split(", "))), tuple(sorted(ys.split(", "))))
def test_it(self):
target = self._makeOne({"x": 1, "y": 2, "z": 3})
self.assert_unordered(str(target), "x=1, y=2, z=3")
    def test_modified_after_rendering__no_changed(self):
target = self._makeOne({"x": 1, "y": 2, "z": 3})
self.assert_unordered(str(target), "x=1, y=2, z=3")
target.kwargs["a"] = "b"
self.assert_unordered(str(target), "x=1, y=2, z=3")
    def test_modified_before_rendering__changed(self):
target = self._makeOne({"x": 1, "y": 2, "z": 3})
target.kwargs["a"] = "b"
self.assert_unordered(str(target), "x=1, y=2, z=3, a=b")
def test_with_types(self):
target = self._makeOne({"x": 1, "y": 2, "z": 3}, types={"x": int, "z": int})
self.assert_unordered(str(target), "x: int = 1, y=2, z: int = 3")
@test_target("prestring:LazyFormat")
class LazyFormatTests(unittest.TestCase):
def test_it(self):
fmt = "{}:{}"
args = ("foo", "bar")
self.assertEqual(str(self._makeOne(fmt, *args)), fmt.format(*args))
def test_it2(self):
fmt = "{x}:{y}"
x, y = "foo", "bar"
target = self._makeOne(fmt, x=x, y=y)
self.assertEqual(str(target), fmt.format(x=x, y=y))
    def test_modified_after_rendering__no_changed(self):
fmt = "{x}:{y}"
x, y = "foo", "bar"
target = self._makeOne(fmt, x=x, y=y)
self.assertEqual(str(target), fmt.format(x=x, y=y))
target.fmt = "{x}:{z}"
target.kwargs["z"] = "boo"
self.assertEqual(str(target), fmt.format(x=x, y=y))
    def test_modified_before_rendering__changed(self):
fmt = "{x}:{y}"
x, y = "foo", "bar"
target = self._makeOne(fmt, x=x, y=y)
target.fmt = fmt2 = "{x}:{z}"
target.kwargs["z"] = "boo"
self.assertEqual(str(target), fmt2.format(x=x, z="boo"))
class MixedTests(unittest.TestCase):
def test_it(self):
from prestring import LazyFormat, LazyArgumentsAndKeywords
args = LazyArgumentsAndKeywords([1, 2, 3], {"x": 1})
target = LazyFormat("{fnname}({args})", fnname="foo", args=args)
self.assertEqual(str(target), "foo(1, 2, 3, x=1)")
def test_it_empty(self):
from prestring import LazyFormat, LazyArgumentsAndKeywords
args = LazyArgumentsAndKeywords([], {})
target = LazyFormat("{fnname}({args})", fnname="foo", args=args)
self.assertEqual(str(target), "foo()")
def test_it_empty2(self):
from prestring import LazyFormat, LazyArgumentsAndKeywords
args = LazyArgumentsAndKeywords()
target = LazyFormat("{fnname}({args})", fnname="foo", args=args)
self.assertEqual(str(target), "foo()")
def test_it_empty_kwargs(self):
from prestring import LazyFormat, LazyArgumentsAndKeywords
args = LazyArgumentsAndKeywords([1])
target = LazyFormat("{fnname}({args})", fnname="foo", args=args)
self.assertEqual(str(target), "foo(1)")
def test_it_empty_args(self):
from prestring import LazyFormat, LazyArgumentsAndKeywords
args = LazyArgumentsAndKeywords(kwargs={"x": 1})
target = LazyFormat("{fnname}({args})", fnname="foo", args=args)
self.assertEqual(str(target), "foo(x=1)")
|
import os
import json
import numpy as np
import pandas as pd
import random
import unittest
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from .. import ErrorAnalyzer
from .. import compute_accuracy_score, balanced_accuracy_score, compute_primary_model_accuracy, compute_confidence_decision
default_seed = 10
np.random.seed(default_seed)
random.seed(default_seed)
DATASET_URL = 'https://www.openml.org/data/get_csv/54002/adult-census.arff'
df = pd.read_csv(DATASET_URL)
target = 'class'
script_dir = os.path.dirname(__file__)
reference_result_file_path = os.path.join(script_dir, 'reference_data.json')
with open(reference_result_file_path) as f:
reference_data = json.load(f)
numeric_features = df.select_dtypes(include=['int64', 'float64']).columns.tolist()
categorical_features = df.select_dtypes(include=['object']).drop([target], axis=1).columns.tolist()
X_numerical = df.dropna().drop(target, axis=1)[numeric_features]
X_all = df.dropna().drop(target, axis=1)
y = df.dropna()[target]
class TestErrorAnalyzer(unittest.TestCase):
def test_with_only_scikit_model(self):
X_train, X_test, y_train, y_test = train_test_split(X_numerical, y, test_size=0.2)
rf = RandomForestClassifier(n_estimators=10)
rf.fit(X_train, y_train)
error_tree = ErrorAnalyzer(rf, feature_names=numeric_features)
error_tree.fit(X_test, y_test)
y_true, _ = error_tree._compute_primary_model_error(X_test.values, y_test)
y_pred = error_tree._error_tree.estimator_.predict(X_test.values)
error_tree_accuracy_score = compute_accuracy_score(y_true, y_pred)
error_tree_balanced_accuracy = balanced_accuracy_score(y_true, y_pred)
primary_model_predicted_accuracy = compute_primary_model_accuracy(y_pred)
primary_model_true_accuracy = compute_primary_model_accuracy(y_true)
fidelity, confidence_decision = compute_confidence_decision(primary_model_true_accuracy,
primary_model_predicted_accuracy)
metric_to_check = {
'error_tree_accuracy_score': error_tree_accuracy_score,
'error_tree_balanced_accuracy': error_tree_balanced_accuracy,
'primary_model_predicted_accuracy': primary_model_predicted_accuracy,
'primary_model_true_accuracy': primary_model_true_accuracy,
'fidelity': fidelity
}
reference_data_for_single_estimator = reference_data.get('single_estimator')
metric_reference = reference_data_for_single_estimator.get('metric_reference')
for metric_name, metric_value in metric_to_check.items():
self.assertAlmostEqual(metric_value, metric_reference[metric_name], 5)
leaf_summary_str = error_tree.get_error_leaf_summary(leaf_selector=98, output_format='str')
leaf_summary_dct = error_tree.get_error_leaf_summary(leaf_selector=98, output_format='dict')
single_leaf_summary_reference_dict = reference_data_for_single_estimator.get('single_leaf_summary_reference')
self.assertListEqual(leaf_summary_str, single_leaf_summary_reference_dict['str'])
self.assertListEqual(leaf_summary_dct, single_leaf_summary_reference_dict['dct'])
all_leaves_summary_str = error_tree.get_error_leaf_summary(output_format='str')
all_leaves_summary_dct = error_tree.get_error_leaf_summary(output_format='dict')
all_leaves_summary_reference_dict = reference_data_for_single_estimator.get('all_leaves_summary_reference')
self.assertListEqual(all_leaves_summary_str, all_leaves_summary_reference_dict['str'])
self.assertListEqual(all_leaves_summary_dct, all_leaves_summary_reference_dict['dct'])
evaluate_str = error_tree.evaluate(X_test, y_test, output_format='str')
evaluate_dct = error_tree.evaluate(X_test, y_test, output_format='dict')
evaluate_reference_dict = reference_data_for_single_estimator.get('evaluate_reference')
self.assertEqual(evaluate_str, evaluate_reference_dict['str'])
self.assertDictEqual(evaluate_dct, evaluate_reference_dict['dct'])
def test_with_scikit_pipeline(self):
X_train, X_test, y_train, y_test = train_test_split(X_all, y, test_size=0.2)
numeric_transformer = Pipeline(steps=[
('SimpleImputer', SimpleImputer(strategy='median', add_indicator=True)),
('StandardScaler', StandardScaler()),
])
categorical_transformer = Pipeline(steps=[
('OneHotEncoder', OneHotEncoder(handle_unknown='ignore')),
])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)
])
rf = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', RandomForestClassifier(n_estimators=10))])
rf.fit(X_train, y_train)
error_tree = ErrorAnalyzer(rf)
error_tree.fit(X_test, y_test)
X_test_prep, y_test_prep = error_tree.pipeline_preprocessor.transform(X_test), np.array(y_test)
y_true, _ = error_tree._compute_primary_model_error(X_test_prep, y_test_prep)
y_pred = error_tree._error_tree.estimator_.predict(X_test_prep)
error_tree_accuracy_score = compute_accuracy_score(y_true, y_pred)
error_tree_balanced_accuracy = balanced_accuracy_score(y_true, y_pred)
primary_model_predicted_accuracy = compute_primary_model_accuracy(y_pred)
primary_model_true_accuracy = compute_primary_model_accuracy(y_true)
fidelity, confidence_decision = compute_confidence_decision(primary_model_true_accuracy,
primary_model_predicted_accuracy)
metric_to_check = {
'error_tree_accuracy_score': error_tree_accuracy_score,
'error_tree_balanced_accuracy': error_tree_balanced_accuracy,
'primary_model_predicted_accuracy': primary_model_predicted_accuracy,
'primary_model_true_accuracy': primary_model_true_accuracy,
'fidelity': fidelity
}
reference_data_for_pipeline = reference_data.get('pipeline')
metric_reference = reference_data_for_pipeline.get('metric_reference')
for metric_name, metric_value in metric_to_check.items():
self.assertAlmostEqual(metric_value, metric_reference[metric_name], 5)
_ = error_tree.get_error_leaf_summary(leaf_selector=98, add_path_to_leaves=True, output_format='str')
leaf_summary_str = error_tree.get_error_leaf_summary(leaf_selector=98, output_format='str')
leaf_summary_dct = error_tree.get_error_leaf_summary(leaf_selector=98, output_format='dict')
single_leaf_summary_reference_dict = reference_data_for_pipeline.get('single_leaf_summary_reference')
self.assertListEqual(leaf_summary_str, single_leaf_summary_reference_dict['str'])
self.assertListEqual(leaf_summary_dct, single_leaf_summary_reference_dict['dct'])
all_leaves_summary_str = error_tree.get_error_leaf_summary(output_format='str')
all_leaves_summary_dct = error_tree.get_error_leaf_summary(output_format='dict')
all_leaves_summary_reference_dict = reference_data_for_pipeline.get('all_leaves_summary_reference')
self.assertListEqual(all_leaves_summary_str, all_leaves_summary_reference_dict['str'])
self.assertListEqual(all_leaves_summary_dct, all_leaves_summary_reference_dict['dct'])
evaluate_str = error_tree.evaluate(X_test, y_test, output_format='str')
evaluate_dct = error_tree.evaluate(X_test, y_test, output_format='dict')
evaluate_reference_dict = reference_data_for_pipeline.get('evaluate_reference')
self.assertEqual(evaluate_str, evaluate_reference_dict['str'])
self.assertDictEqual(evaluate_dct, evaluate_reference_dict['dct'])
|
## MapGenerator
##
## Shortcut to regenerate the map and start/stop HOF's MapFinder utility.
##
## Shortcuts:
##
## ALT + G doRegenerate()
## ALT + CTRL + G doStart()
## ALT + CTRL + SHIFT + G doStop()
##
## Adapted from HOF Mod 3.13.001.
##
## Notes
## - May be initialized externally by calling init()
##
## Copyright (c) 2009 The BUG Mod.
##
## Author: HOF Team, EmperorFool
from CvPythonExtensions import *
import AutoSave
import BugCore
import BugDll
import BugUtil
import MapFinderStatusScreen
import os.path
MINIMUM_SAVE_DELAY = 2.0
gc = CyGlobalContext()
options = BugCore.game.MapFinder
# Initialization
def init(minimumSaveDelay=0.0):
"""
Allows config XML to set the minimum delay.
"""
global MINIMUM_SAVE_DELAY
MINIMUM_SAVE_DELAY = minimumSaveDelay
# Regenerate Map
def doRegenerate(argsList=None):
try:
regenerate()
except MapFinderError, e:
e.display()
def canRegenerate():
enforceDll()
if gc.getGame().canRegenerateMap():
return True
else:
raise MapFinderError("TXT_KEY_MAPFINDER_CANNOT_REGENERATE")
def regenerate():
if canRegenerate():
if CyInterface().getShowInterface() != InterfaceVisibility.INTERFACE_SHOW:
CyInterface().setShowInterface(InterfaceVisibility.INTERFACE_SHOW)
BugUtil.alert(BugUtil.getPlainText("TXT_KEY_MAPFINDER_REGNERATING"))
# must defer to allow alert to appear
BugUtil.deferCall(regenerateForReal)
def regenerateForReal():
if not gc.getGame().regenerateMap():
raise MapFinderError("TXT_KEY_MAPFINDER_REGENERATE_FAILED")
# must defer to allow screen to update before moving camera
BugUtil.deferCall(centerCameraOnPlayer)
def centerCameraOnPlayer():
cam = CyCamera()
eSpeed = cam.GetCameraMovementSpeed()
cam.SetCameraMovementSpeed(CameraMovementSpeeds.CAMERAMOVEMENTSPEED_FAST)
plot = gc.getActivePlayer().getStartingPlot()
cam.JustLookAtPlot(plot)
cam.SetCameraMovementSpeed(eSpeed)
# Regeneration Loop
(
NO_FEATURE,
FEATURE_ICE,
FEATURE_JUNGLE,
FEATURE_OASIS,
FEATURE_FLOOD_PLAINS,
FEATURE_FOREST,
FEATURE_FALLOUT,
) = range(-1, 6)
FEATURE_LAKE = 99
(
NO_TERRAIN,
TERRAIN_GRASS,
TERRAIN_PLAINS,
TERRAIN_DESERT,
TERRAIN_TUNDRA,
TERRAIN_SNOW,
TERRAIN_COAST,
TERRAIN_OCEAN,
TERRAIN_PEAK,
TERRAIN_HILL,
) = range(-1, 9)
CODES_BY_TYPES = { # BasicPlot_CodeToTypes
('water', TERRAIN_OCEAN, NO_FEATURE) : 401,
('water', TERRAIN_COAST, FEATURE_ICE) : 402,
('land', TERRAIN_DESERT, NO_FEATURE) : 403,
('hills', TERRAIN_DESERT, NO_FEATURE) : 404,
('land', TERRAIN_DESERT, FEATURE_FLOOD_PLAINS) : 405,
('land', TERRAIN_GRASS, NO_FEATURE) : 406,
('land', TERRAIN_GRASS, FEATURE_FOREST) : 407,
('hills', TERRAIN_GRASS, NO_FEATURE) : 408,
('hills', TERRAIN_GRASS, FEATURE_FOREST) : 409,
('hills', TERRAIN_GRASS, FEATURE_JUNGLE) : 410,
('land', TERRAIN_GRASS, FEATURE_JUNGLE) : 411,
('land', TERRAIN_DESERT, FEATURE_OASIS) : 412,
('water', TERRAIN_OCEAN, FEATURE_ICE) : 413,
('peak', TERRAIN_PEAK, NO_FEATURE) : 414,
('land', TERRAIN_PLAINS, NO_FEATURE) : 415,
('land', TERRAIN_PLAINS, FEATURE_FOREST) : 416,
('hills', TERRAIN_PLAINS, NO_FEATURE) : 417,
('hills', TERRAIN_PLAINS, FEATURE_FOREST) : 418,
('water', TERRAIN_COAST, NO_FEATURE) : 419,
('land', TERRAIN_SNOW, NO_FEATURE) : 420,
('land', TERRAIN_SNOW, FEATURE_FOREST) : 421,
('hills', TERRAIN_SNOW, NO_FEATURE) : 422,
('hills', TERRAIN_SNOW, FEATURE_FOREST) : 423,
('land', TERRAIN_TUNDRA, NO_FEATURE) : 424,
('land', TERRAIN_TUNDRA, FEATURE_FOREST) : 425,
('hills', TERRAIN_TUNDRA, NO_FEATURE) : 426,
('hills', TERRAIN_TUNDRA, FEATURE_FOREST) : 427,
('water', TERRAIN_COAST, FEATURE_LAKE) : 428,
}
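# Hedged helper sketch (assumption, not part of the original mod): look up the
# MapFinder plot code for a (plot kind, terrain, feature) triple, returning
# None for combinations not listed above.
def getPlotCodeForTypes(plotKind, eTerrain, eFeature):
    return CODES_BY_TYPES.get((plotKind, eTerrain, eFeature))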
# unused
TYPES_BY_CODE = { # BasicPlot_TypesToCode
401 : ('water', TERRAIN_OCEAN, NO_FEATURE),
402 : ('water', TERRAIN_COAST, FEATURE_ICE),
403 : ('land', TERRAIN_DESERT, NO_FEATURE),
404 : ('hills', TERRAIN_DESERT, NO_FEATURE),
405 : ('land', TERRAIN_DESERT, FEATURE_FLOOD_PLAINS),
406 : ('land', TERRAIN_GRASS, NO_FEATURE),
407 : ('land', TERRAIN_GRASS, FEATURE_FOREST),
408 : ('hills', TERRAIN_GRASS, NO_FEATURE),
409 : ('hills', TERRAIN_GRASS, FEATURE_FOREST),
410 : ('hills', TERRAIN_GRASS, FEATURE_JUNGLE),
411 : ('land', TERRAIN_GRASS, FEATURE_JUNGLE),
412 : ('land', TERRAIN_DESERT, FEATURE_OASIS),
413 : ('water', TERRAIN_OCEAN, FEATURE_ICE),
414 : ('peak', TERRAIN_PEAK, NO_FEATURE),
415 : ('land', TERRAIN_PLAINS, NO_FEATURE),
416 : ('land', TERRAIN_PLAINS, FEATURE_FOREST),
417 : ('hills', TERRAIN_PLAINS, NO_FEATURE),
418 : ('hills', TERRAIN_PLAINS, FEATURE_FOREST),
419 : ('water', TERRAIN_COAST, NO_FEATURE),
420 : ('land', TERRAIN_SNOW, NO_FEATURE),
421 : ('land', TERRAIN_SNOW, FEATURE_FOREST),
422 : ('hills', TERRAIN_SNOW, NO_FEATURE),
423 : ('hills', TERRAIN_SNOW, FEATURE_FOREST),
424 : ('land', TERRAIN_TUNDRA, NO_FEATURE),
425 : ('land', TERRAIN_TUNDRA, FEATURE_FOREST),
426 : ('hills', TERRAIN_TUNDRA, NO_FEATURE),
427 : ('hills', TERRAIN_TUNDRA, FEATURE_FOREST),
428 : ('water', TERRAIN_COAST, FEATURE_LAKE),
}
bActive = False
savedInterfaceMode = None
iRegenCount = 0
iSavedCount = 0
def isActive():
return bActive
def doStart(argsList=None):
try:
if not bActive:
start()
else:
BugUtil.alert(BugUtil.getPlainText("TXT_KEY_MAPFINDER_ALREADY_RUNNING"))
except MapFinderError, e:
MapFinderStatusScreen.hide()
e.display()
def doStop(argsList=None):
try:
if bActive:
stop()
else:
BugUtil.alert(BugUtil.getPlainText("TXT_KEY_MAPFINDER_NOT_RUNNING"))
except MapFinderError, e:
e.display()
def start():
if canRegenerate():
setup()
MapFinderStatusScreen.show()
global bActive, iRegenCount, iSavedCount
bActive = True
iRegenCount = 0
iSavedCount = 0
showInterface()
finderStartLoop()
def stop():
global bActive
bActive = False
MapFinderStatusScreen.hide()
restoreInterface()
BugUtil.alert(BugUtil.getPlainText("TXT_KEY_MAPFINDER_STOPPED") + " - " + getCountsText())
def showInterface():
global savedInterfaceMode
if not savedInterfaceMode:
savedInterfaceMode = CyInterface().getShowInterface()
CyInterface().setShowInterface(InterfaceVisibility.INTERFACE_SHOW)
def restoreInterface():
global savedInterfaceMode
if savedInterfaceMode:
CyInterface().setShowInterface(savedInterfaceMode)
def getCountsText():
return (u"%s %d, %s %d" %
(BugUtil.getPlainText("TXT_KEY_MAPFINDER_TOTAL_MAPS"), iRegenCount,
BugUtil.getPlainText("TXT_KEY_MAPFINDER_TOTAL_SAVES"), iSavedCount))
def finderStartLoop():
BugUtil.deferCall(finderCanRegenerate, options.getRegenerationDelay())
def finderCanRegenerate():
if bActive:
try:
if canRegenerate():
MapFinderStatusScreen.setStatus(BugUtil.getPlainText("TXT_KEY_MAPFINDER_REGNERATING"))
# must defer to allow screen to update
BugUtil.deferCall(finderRegenerate)
except MapFinderError, e:
e.display()
stop()
def finderRegenerate():
if bActive:
try:
if not gc.getGame().regenerateMap():
raise MapFinderError("TXT_KEY_MAPFINDER_REGENERATE_FAILED")
# must defer to allow screen to update before moving camera
BugUtil.deferCall(finderCheck)
except MapFinderError, e:
e.display()
stop()
def finderCheck():
centerCameraOnPlayer()
if bActive:
global iRegenCount
iRegenCount += 1
MapFinderStatusScreen.update()
if matchRules():
finderSave()
else:
finderNext()
def finderSave():
MapFinderStatusScreen.setStatus(BugUtil.getPlainText("TXT_KEY_MAPFINDER_SAVING"))
# must delay long enough to allow unrevealed tiles to disappear before taking the screenshot
delay = options.getSaveDelay()
if delay < MINIMUM_SAVE_DELAY:
delay = MINIMUM_SAVE_DELAY
BugUtil.deferCall(save, delay)
def finderNext():
MapFinderStatusScreen.resetStatus()
BugUtil.deferCall(next, options.getSkipDelay())
def next():
if bActive:
if ((iRegenCount >= options.getRegenerationLimit()) or
(iSavedCount >= options.getSaveLimit())):
stop()
else:
# BugUtil.alert("MapFinder running - Count %d, Saved %d", iRegenCount, iSavedCount)
finderStartLoop()
mr = None
def matchRules():
global mr
mr = {}
for x in CodeText.iterkeys():
mr[x] = 0
iActivePlayer = gc.getGame().getActivePlayer()
activePlayer = gc.getPlayer(iActivePlayer)
iTeam = activePlayer.getTeam()
startplot = activePlayer.getStartingPlot()
iStartX = startplot.getX()
iStartY = startplot.getY()
iMaxX = gc.getMap().getGridWidth()
iMaxY = gc.getMap().getGridHeight()
bWrapX = gc.getMap().isWrapX()
bWrapY = gc.getMap().isWrapY()
lX = {}
lY = {}
if (Rules['Range'] != 999):
lMax = (Rules['Range'] * 2) + 1
iX = iStartX - Rules['Range']
if (iX < 0):
if (bWrapX):
iX = iMaxX + iX
else:
iX = 0
for i in range(1, lMax + 1):
lX[i] = iX
iX = iX + 1
			if iX > iMaxX: iX = 0
			if iX < 0: iX = iMaxX
iY = iStartY - Rules['Range']
if (iY < 0):
if (bWrapY):
iY = iMaxY + iY
else:
iY = 0
for i in range(1, lMax + 1):
lY[i] = iY
## HOF MOD V1.61.005
## iy = iX + 1
iY = iY + 1
## end HOF MOD V1.61.005
			if iY > iMaxY: iY = 0
			if iY < 0: iY = iMaxY
## displayMsg(str(lX.values()) + "\n" + str(lY.values()))
for iY in range(0, iMaxY):
for iX in range(0, iMaxX):
if (Rules['Range'] != 999):
## HOF MOD V1.61.005
# skip if outside range
if iX not in lX.values(): continue
if iY not in lY.values(): continue
# use fat-cross if over 1 range
if (Rules['Range'] > 1):
# fat cross, skip diagonal corners
if (iX == lX[1] and iY == lY[1]): continue
if (iX == lX[1] and iY == lY[lMax]): continue
if (iX == lX[lMax] and iY == lY[1]): continue
if (iX == lX[lMax] and iY == lY[lMax]): continue
## end HOF MOD V1.61.005
## displayMsg(str(iX) + "/" + str(iY))
plot = gc.getMap().plot(iX, iY)
if (plot.isRevealed(iTeam, False)):
if (plot.isFlatlands()): p = 'land'
elif (plot.isWater()): p = 'water'
elif (plot.isHills()): p = 'hills'
elif (plot.isPeak()): p = 'peak'
t = plot.getTerrainType()
if (plot.isLake()):
f = FEATURE_LAKE
else:
f = plot.getFeatureType()
ip = -1
if CODES_BY_TYPES.has_key((p, t, f)):
ip = CODES_BY_TYPES[(p, t, f)]
mr[ip] = mr[ip] + 1
for k, l in Category_Types.iteritems():
if (ip in l): mr[k] = mr[k] + 1
ib = plot.getBonusType(iTeam) + 500
if mr.has_key(ib):
mr[ib] = mr[ib] + 1
for k, l in Category_Types.iteritems():
if (ib in l): mr[k] = mr[k] + 1
# Base Commerce
xc = plot.calculateYield(YieldTypes.YIELD_COMMERCE, True)
mr[301] = mr[301] + xc
# Base Food
xf = plot.calculateYield(YieldTypes.YIELD_FOOD, True)
mr[302] = mr[302] + xf
# Extra Base Food
if (xf > 2): mr[310] = mr[310] + (xf - 2)
# Base Production
xp = plot.calculateYield(YieldTypes.YIELD_PRODUCTION, True)
mr[303] = mr[303] + xp
if (plot.isGoody()): mr[601] = mr[601] + 1
## HOF MOD V1.61.005
if Combo_Types.has_key((ib, ip)):
ic = Combo_Types[(ib, ip)]
if mr.has_key(ic):
mr[ic] = mr[ic] + 1
# Starting Plot?
if iX == iStartX and iY == iStartY:
if Combo_Types.has_key((999, ip)):
ic = Combo_Types[(999, ip)]
if mr.has_key(ic):
mr[ic] = mr[ic] + 1
if (plot.isRiver()):
mr[602] = mr[602] + 1
ipr = ip + 50
if mr.has_key(ipr):
mr[ipr] = mr[ipr] + 1
if Combo_Types.has_key((ib, ipr)):
ic = Combo_Types[(ib, ipr)]
if mr.has_key(ic):
mr[ic] = mr[ic] + 1
# Starting Plot?
if iX == iStartX and iY == iStartY:
if Combo_Types.has_key((999, ipr)):
ic = Combo_Types[(999, ipr)]
if mr.has_key(ic):
mr[ic] = mr[ic] + 1
if (plot.isFreshWater()):
mr[603] = mr[603] + 1
ipf = ip + 150
if mr.has_key(ipf):
mr[ipf] = mr[ipf] + 1
if Combo_Types.has_key((ib, ipf)):
ic = Combo_Types[(ib, ipf)]
if mr.has_key(ic):
mr[ic] = mr[ic] + 1
# Starting Plot?
if iX == iStartX and iY == iStartY:
if Combo_Types.has_key((999, ipf)):
ic = Combo_Types[(999, ipf)]
if mr.has_key(ic):
mr[ic] = mr[ic] + 1
## end HOF MOD V1.61.005
lPF = []
for g, r in Rules.iteritems():
if (g == 'Range'): continue
grp = True
for k, v in r.iteritems():
if (mr.has_key(k)):
if ((v[1] == 0 and mr[k] != 0) or (mr[k] < v[0]) or (mr[k] > v[1])):
grp = False
break
else:
grp = False
break
lPF.append(grp)
for i in range(len(lPF)):
if (lPF[i]):
return True
return False
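# For reference, a reading of matchRules() above (not separate documentation):
# mr[] keys 301/302/303 accumulate base commerce/food/production, 310 counts food
# beyond 2 per plot, 601 goody huts, 602 river plots and 603 fresh-water plots;
# a plot code gains +50 for its riverside variant and +150 for its fresh-water
# variant, and bonus resources are tallied under 500 + bonus id.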
def save():
global iRegenCount, iSavedCount, mr
iSavedCount += 1
sMFSavePath = options.getSavePath()
(fileName, _) = AutoSave.getSaveFileName(sMFSavePath)
fullFileName = fileName + "_" + str(iRegenCount) + "_" + str(iSavedCount)
# screenshot
screenFile = fullFileName + ".jpg"
gc.getGame().takeJPEGScreenShot(screenFile)
# report file
reportFile = fullFileName + ".txt"
file = open(reportFile, "a")
ruleFile = options.getRuleFile()
## HOF MOD V1.61.005
# don't change unless file format changes!
file.write("HOF MOD V1.61.004,HOF MOD V1.61.005,\n")
## end HOF MOD V1.61.005
file.write("Name,Name," + str(fileName) + "_" + str(iRegenCount) + "_" + str(iSavedCount) + "\n")
file.write("Rule File,Rule File," + str(ruleFile) + "\n")
file.write("Range,Range," + str(Rules['Range']) + "\n")
lKeys = mr.keys()
lKeys.sort()
for x in lKeys:
if (x < 900):
file.write(str(x) + "," + str(CodeText[x]) + "," + str(mr[x]) + "\n")
file.close()
# saved game
saveFile = fullFileName + ".CivBeyondSwordSave"
gc.getGame().saveGame(saveFile)
MapFinderStatusScreen.update()
MapFinderStatusScreen.resetStatus()
next()
def setup():
root = options.getPath()
if not os.path.isdir(root):
raise MapFinderError("TXT_KEY_MAPFINDER_INVALID_PATH", root)
saves = options.getSavePath()
if not os.path.isdir(saves):
raise MapFinderError("TXT_KEY_MAPFINDER_INVALID_SAVE_PATH", saves)
loadCodeText(root)
loadCategoryTypes(root)
loadComboTypes(root)
loadRuleSet(root)
def findSystemFile(root, file):
path = os.path.join(root, file)
if not os.path.isfile(path):
raise MapFinderError("TXT_KEY_MAPFINDER_INVALID_SYSTEM_FILE", file)
return path
def loadCodeText(root):
global CodeText
lLang = []
CodeText = {}
iLang = gc.getGame().getCurrentLanguage()
path = findSystemFile(root, 'MF_Text.dat')
file = open(path, "r")
for temp in file:
(sCat, sCode, sLang0, sLang1, sLang2, sLang3, sLang4) = temp.split(",")
iCat = int(sCat.strip())
iCode = int(sCode.strip())
lLang = [sLang0.strip(), sLang1.strip(), sLang2.strip(),
sLang3.strip(), sLang4.strip()]
CodeText[iCode] = lLang[iLang]
file.close()
file = None
def loadCategoryTypes(root):
global Category_Types
Category_Types = {}
path = findSystemFile(root, 'MF_Cat_Rules.dat')
file = open(path, "r")
iCatSave = -1
for temp in file:
(sCat, sRule) = temp.split(",")
iCat = int(sCat.strip())
iRule = int(sRule.strip())
if (iCat != iCatSave):
Category_Types[iCat] = (iRule,)
else:
Category_Types[iCat] = Category_Types[iCat] + (iRule,)
iCatSave = iCat
file.close()
file = None
def loadComboTypes(root):
global Combo_Types
Combo_Types = {}
path = findSystemFile(root, 'MF_Combo_Rules.dat')
file = open(path, "r")
for temp in file:
(sCat, sBonus, sTerrain) = temp.split(",")
iCat = int(sCat.strip())
iBonus = int(sBonus.strip())
iTerrain = int(sTerrain.strip())
Combo_Types[(iBonus, iTerrain)] = iCat
file.close()
file = None
def loadRuleSet(root):
global Rules
Rules = {}
Rules['Range'] = 2
path = os.path.join(root, "Rules", options.getRuleFile())
if not os.path.isfile(path):
raise MapFinderError("Invalid MapFinder rule file: %s", options.getRuleFile())
iGrpSave = 0
Rules = {}
file = open(path, "r")
for temp in file:
(sGrp, sCat, sRule, sMin, sMax) = temp.split(",")
iGrp = int(sGrp.strip())
iCat = int(sCat.strip())
if (iGrp == 0):
Rules['Range'] = iCat
else:
iRule = int(sRule.strip())
iMin = int(sMin.strip())
iMax = int(sMax.strip())
if (iGrp != iGrpSave):
Rules[iGrp] = {iRule : (iMin, iMax)}
else:
Rules[iGrp][iRule] = (iMin, iMax)
iGrpSave = iGrp
file.close()
file = None
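# A sketch of the rule-file format loadRuleSet() expects: five comma-separated
# fields per line, "group,category,rule,min,max", where group 0 sets the search
# Range from the category field. The lines below are hypothetical examples, not
# shipped data:
#
#   0, 2, 0, 0, 0        -> Rules['Range'] = 2
#   1, 0, 406, 4, 99     -> group 1 needs plot code 406 (flat grassland) 4+ times
#   1, 0, 602, 2, 99     -> group 1 also needs at least 2 riverside plots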
# common utility functions
def enforceDll():
if not BugDll.isPresent():
raise MapFinderError("TXT_KEY_MAPFINDER_REQUIRES_BULL")
class MapFinderError:
def __init__(self, key, *args):
self.key = key
self.args = args
def display(self):
BugUtil.error(BugUtil.getText(self.key, self.args))
|
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
def start_requests(self):
return [scrapy.FormRequest('http://www.example.com/login',
formdata={'user': 'json', 'pass': 'secret'}, callback=self.login)]
def login(self, response):
pass
|
#!/usr/bin/env python3
##############################################################################
# This script takes a gap file produced by prune_infreq.py or SPLATT and a map
# file of keys, and merges them into a new map file with gapped keys removed.
##############################################################################
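#
# A hypothetical invocation (the file names are placeholders):
#
#     ./merge_maps.py mode-1-gaps.map mode-1-keys.map mode-1-new.map
#
# Each line of the gap file is a 1-based index into the key file; the matching
# key is written to the new map file.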
import sys
if len(sys.argv) != 4:
print('usage: {} <mode-X-gaps.map> <mode-X-keys.map> <new.map>'.format(sys.argv[0]))
sys.exit(1)
gap_fname = sys.argv[1]
key_fname = sys.argv[2]
new_fname = sys.argv[3]
# read keys
keys = []
with open(key_fname, 'r') as fin:
for line in fin:
keys.append(line.strip())
with open(new_fname, 'w') as fout, open(gap_fname, 'r') as gap_file:
    for line in gap_file:
        # new ID
        key_id = int(line.strip())
        try:
            key = keys[key_id - 1]
        except IndexError:
            # out-of-range ID: report it and skip rather than writing a stale key
            print('keys: {} found key {}'.format(len(keys), key_id))
            continue
        print(key, file=fout)
|
# For each test case, read a word and count its vowels; scanning stops early
# when a vowel past the first position is directly followed by another vowel.
vowels = ['A', 'E', 'I', 'O', 'U']
for i in range(int(input())):
    word = input().upper()
    count = 0
    for j in range(len(word)):
        if word[j] in vowels:
            if j == 0:
                count += 1
            elif j + 1 < len(word) and word[j + 1] in vowels:
                count += 1
                break
            else:
                count += 1
    print(count)
|
"""
@file
@brief Check various settings.
"""
import sys
import os
import site
from io import BytesIO
import urllib.request as urllib_request
def getsitepackages():
"""
Overwrites function :epkg:`getsitepackages`
which does not work for a virtual environment.
@return site-package somewhere
"""
try:
return site.getsitepackages()
except AttributeError:
import sphinx
return [os.path.normpath(os.path.join(os.path.dirname(sphinx.__file__), ".."))]
def locate_image_documentation(image_name):
"""
    Tries to locate an image in the module for help generation in a folder ``_doc``.
@param image_name path
@return local file
When a notebook is taken out from the sources, the image using NbImage
cannot be displayed because the function cannot guess from which project
it was taken. The function was entering an infinite loop.
    The function can deal with subfolders and not only the folder which contains the notebook.
"""
image_name = os.path.abspath(image_name)
if os.path.exists(image_name):
return image_name
folder, filename = os.path.split(image_name)
while (len(folder) > 0 and
(not os.path.exists(folder) or "_doc" not in os.listdir(folder))):
fold = os.path.split(folder)[0]
if fold == folder:
break
folder = fold
doc = os.path.join(folder, "_doc")
if not os.path.exists(doc):
raise FileNotFoundError(
"Unable to find a folder called _doc, "
"the function cannot locate an image %r, doc=%r, folder=%r."
"" % (image_name, doc, folder))
for root, _, files in os.walk(doc):
for name in files:
t = os.path.join(root, name)
fn = os.path.split(t)[-1]
if filename == fn:
return t
raise FileNotFoundError(image_name)
def _NbImage_path(name, repository=None, force_github=False, branch='master'):
if not isinstance(name, str):
return name
if os.path.exists(name):
return os.path.abspath(name).replace("\\", "/")
if not name.startswith('http://') and not name.startswith('https://'):
# local file
local = name
local_split = name.split("/")
if "notebooks" not in local_split:
local = locate_image_documentation(local)
return local
else:
return name
# otherwise --> github
paths = local.replace("\\", "/").split("/")
try:
pos = paths.index("notebooks") - 1
except IndexError as e:
# we are looking for the right path
raise IndexError(
"The image is not retrieved from a notebook from a folder "
"`_docs/notebooks` or you changed the current folder:"
"\n{0}".format(local)) from e
except ValueError as ee:
# we are looking for the right path
raise IndexError(
"The image is not retrieve from a notebook from a folder "
"``_docs/notebooks`` or you changed the current folder:"
"\n{0}".format(local)) from ee
if repository is None:
module = paths[pos - 1]
if module not in sys.modules:
if "ensae_teaching_cs" in local:
# For some specific modules, we add the location.
repository = "https://github.com/sdpython/ensae_teaching_cs/"
else:
raise ImportError(
"The module {0} was not imported, cannot guess "
"the location of the repository".format(module))
else:
modobj = sys.modules[module]
if not hasattr(modobj, "__github__"):
raise AttributeError(
"The module has no attribute '__github__'. "
"The repository cannot be guessed.")
repository = modobj.__github__
repository = repository.rstrip("/")
loc = "/".join([branch, "_doc", "notebooks"] + paths[pos + 2:])
url = repository + "/" + loc
url = url.replace("github.com", "raw.githubusercontent.com")
return url
def _NbImage(url, width=None):
if isinstance(url, str):
if url.startswith('http://') or url.startswith('https://'):
with urllib_request.urlopen(url) as u:
text = u.read()
content = BytesIO(text)
return NbImage(content)
return NbImage(url, width=width)
def NbImage(*name, repository=None, force_github=False, width=None,
branch='master', row_height=200, background=(255, 255, 255)):
"""
Retrieves a name or a url of the image if it is not found in the local folder
or a subfolder.
:param name: image name (name.png) (or multiple names)
:param force_github: force the system to retrieve the image from GitHub
:param repository: repository, see below
:param width: to modify the width
:param branch: branch
:param row_height: row height if there are multiple images
    :param background: background color (only if there are multiple images)
:return: an `Image object
<http://ipython.org/ipython-doc/2/api/generated/IPython.core.display.html
#IPython.core.display.Image>`_
We assume the image is retrieved from a notebook.
This function will display an image even though the notebook is not run
from the sources. IPython must be installed.
if *repository* is None, then the function will use the variable
``module.__github__`` to guess the location of the image.
The function is able to retrieve an image in a subfolder.
Displays a better message if ``__github__`` was not found.
See notebook :ref:`examplenbimagerst`.
"""
from IPython.core.display import Image
if len(name) == 1:
url = _NbImage_path(
name[0], repository=repository,
force_github=force_github, branch=branch)
return Image(url, width=width)
if len(name) == 0:
raise ValueError( # pragma: no cover
"No image to display.")
from ..imghelper.img_helper import concat_images
from PIL import Image as pil_image
images = []
for img in name:
url = _NbImage_path(
img, repository=repository,
force_github=force_github, branch=branch)
if url.startswith('http://') or url.startswith('https://'):
with urllib_request.urlopen(url) as u:
text = u.read()
content = BytesIO(text)
images.append(pil_image.open(content))
else:
images.append(pil_image.open(url))
if width is None:
width = max(img.size[0] for img in images) * 2
width = max(200, width)
new_image = concat_images(images, width=width, height=row_height,
background=background)
b = BytesIO()
new_image.save(b, format='png')
data = b.getvalue()
return Image(data, width=width)
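# A minimal usage sketch (hypothetical file name): called from a notebook stored
# under ``_doc/notebooks``, NbImage("my_image.png", width=400) returns an IPython
# Image, falling back to the repository's raw GitHub URL when the file cannot be
# found locally.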
|
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
'''Class for boxes in complex frequency space'''
# The routines of this class assist in the solving and classification of
# QNM solutions
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
class cwbox:
# ************************************************************* #
    # This is a class to facilitate the solving of Leaver's equations by varying
    # the real and complex frequency components, and optimizing over the separation constants.
# ************************************************************* #
def __init__(this,
                 l,m, # QNM indices
cwr, # Center coordinate of real part
cwc, # Center coordinate of imag part
wid, # box width
hig, # box height
res = 50, # Number of gridpoints in each dimension
parent = None, # Parent of current object
                 sc = None, # optional holder for separation constant
verbose = False, # be verbose
maxn = None, # Overtones with n>maxn will be actively ignored. NOTE that by convention n>=0.
smallboxes = True, # Toggle for using small boxes for new solutions
**kwargs ):
#
from numpy import array,complex128,meshgrid,float128
#
this.verbose,this.res = verbose,res
        # Store QNM indices
this.l,this.m = l,m
# Set box params
this.width,this.height = None,None
this.setboxprops(cwr,cwc,wid,hig,res,sc=sc)
        # Initialize a list of children: if a box contains multiple solutions, then it is split according to each solution's location
this.children = [this]
# Point the object to its parent
this.parent = parent
#
this.__jf__ = []
# temp grid of separation constants
this.__scgrid__ = []
# current value of scalarized work-function
this.__lvrfmin__ = None
# Dictionary for high-level data: the data of all of this object's children is collected here
this.data = {}
this.dataformat = '{ ... (l,m,n,tail_flag) : { "jf":[...],"cw":[...],"sc":[...],"lvrfmin":[...] } ... }'
# Dictionary for low-level data: If this object is fundamental, then its data will be stored here in the same format as above
this.__data__ = {}
# QNM label: (l,m,n,t), NOTE that "t" is 0 if the QNM is not a power-law tail and 1 otherwise
this.__label__ = ()
        # Counter for the number of times map has been called on this object
this.mapcount = 0
# Default value for temporary separation constant
this.__sc__ = 4.0
# Maximum overtone label allowed. NOTE that by convention n>=0.
this.__maxn__ = maxn
#
this.__removeme__ = False
#
this.__smallboxes__ = smallboxes
#################################################################
'''************************************************************ #
Set box params & separation constant center
# ************************************************************'''
#################################################################
def setboxprops(this,cwr,cwc,wid,hig,res,sc=None,data=None,pec=None):
# import maths and other
from numpy import complex128,float128,array,linspace
import matplotlib.patches as patches
# set props for box geometry
this.center = array([cwr,cwc])
        this.__cw__ = cwr + 1j*cwc # Store cw for convenience
        # Boxes may only shrink. NOTE that this is useful as some potential solutions, or unwanted solutions, may be removed, and we want to avoid finding them again. NOTE that this would be nice to implement, but it currently breaks the root finding.
this.width,this.height = float128( abs(wid) ),float128( abs(hig) )
# if (this.width is None) or (this.height is None):
# this.width,this.height = float128( abs(wid) ),float128( abs(hig) )
# else:
# this.width,this.height = min(float128( abs(wid) ),this.width),min(this.height,float128( abs(hig) ))
this.limit = array([this.center[0]-this.width/2.0, # real min
this.center[0]+this.width/2.0, # real max
this.center[1]-this.height/2.0, # imag min
this.center[1]+this.height/2.0]) # imag max
this.wr_range = linspace( this.limit[0], this.limit[1], res )
this.wc_range = linspace( this.limit[2], this.limit[3], res )
# Set patch object for plotting. NOTE the negative sign exists here per convention
if None is pec: pec = 'k'
this.patch = patches.Rectangle( (min(this.limit[0:2]), min(-this.limit[2:4]) ), this.width, this.height, fill=False, edgecolor=pec, alpha=0.4, linestyle='dotted' )
# set holder for separation constant value
if sc is not None:
this.__sc__ = sc
# Initiate the data holder for this box. The data holder will contain lists of spin, official cw and sc values
if data is not None:
this.data=data
#################################################################
'''************************************************************ #
Map the potential solutions in this box
# ************************************************************'''
#################################################################
def map(this,jf):
# Import useful things
from kerr import localmins # finds local minima of a 2D array
from kerr.basics import alert,green,yellow,cyan,bold,magenta,blue
from numpy import array,delete,ones
#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#
# Add the input jf to the list of jf values. NOTE that this is not the primary recommended list for referencing jf. Please use the "data" field instead.
this.__jf__.append(jf)
#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#
#
if this.verbose:
if this.parent is None:
alert('\n\n# '+'--'*40+' #\n'+blue(bold('Attempting to map qnm solutions for: jf = %1.8f'%(jf)))+'\n# '+'--'*40+' #\n','map')
else:
print '\n# '+'..'*40+' #\n'+blue('jf = %1.8f, label = %s'%(jf,this.__label__))+'\n# '+'..'*40+' #'
# Map solutions using discrete grid
if this.isfundamental():
# Brute-force calculate solutions to leaver's equations
            if this.verbose: alert("Solving Leaver's equations over grid",'map')
this.__x__,this.__scgrid__ = this.lvrgridsolve(jf)
# Use a local-min finder to estimate the qnm locations for the grid of work function values, x
if this.verbose: alert('Searching for local minima. Ignoring mins on boundaries.','map')
this.__localmin__ = localmins(this.__x__,edge_ignore=True)
if this.verbose: alert('Number of local minima found: %s.'%magenta('%i'%(len(array(this.__localmin__)[0]))),'map')
# If needed, split the box into sub-boxes: Give the current box children!
            this.splitcenter() # NOTE that if there is only one local min, then no split takes place
            # So far QNM solutions have been estimates that have discretization error. Now, we wish to refine the
            # solutions using optimization.
if this.verbose: alert('Refining QNM solution locations using a hybrid strategy.','map')
this.refine(jf)
else:
# Map solutions for all children
for child in [ k for k in this.children if this is not k ]:
child.map(jf)
# Collect QNM solution data for this BH spin. NOTE that only non-fundamental objects are curated
if this.verbose: alert('Collecting final QNM solution information ...','map')
this.curate(jf)
# Remove duplicate solutions
this.validatechildren()
#
if this.verbose: alert('Mapping of Kerr QNM with (l,m)=(%i,%i) within box now complete for this box.' % (this.l,this.m ) ,'map')
# Some book-keeping on the number of times this object has been mapped
this.mapcount += 1
# For the given bh spin, collect all QNM frequencies and separation constants within the current box
# NOTE that the outputs are coincident lists
def curate(this,jf):
#
from numpy import arange,array,sign
#
children = this.collectchildren()
cwlist,sclist = [ child.__cw__ for child in children ],[ child.__sc__ for child in children ]
if this.isfundamental():
cwlist.append( this.__cw__ )
sclist.append( this.__sc__ )
# sort the output lists by the imaginary part of the cw values
sbn = lambda k: abs( cwlist[k].imag ) # Sort By Overtone(N)
space = arange( len(cwlist) )
map_ = sorted( space, key=sbn )
std_cwlist = array( [ cwlist[k] for k in map_ ] )
std_sclist = array( [ sclist[k] for k in map_ ] )
# ---------------------------------------------------------- #
# Separate positive, zero and negative frequency solutions
# ---------------------------------------------------------- #
# Solutions with frequencies less than this value will be considered to be power-laws
pltol = 0.01
# Frequencies
sorted_cw_pos = list( std_cwlist[ (sign(std_cwlist.real) == sign(this.m)) * (abs(std_cwlist.real)>pltol) ] )
sorted_cw_neg = list( std_cwlist[ (sign(std_cwlist.real) ==-sign(this.m)) * (abs(std_cwlist.real)>pltol) ] )
sorted_cw_zro = list( std_cwlist[ abs(std_cwlist.real)<=pltol ] )
# Create a dictionary between (cw,sc) and child objects
A,B = {},{}
for child in children:
A[child] = ( child.__cw__, child.__sc__ )
B[ A[child] ] = child
#
def inferlabel( cwsc ):
cw,sc = cwsc[0],cwsc[1]
ll = this.l
if abs(cw.real)<pltol :
# power-law decay
tt = 1
nn = sorted_cw_zro.index( cw )
mm = this.m
else:
tt = 0
if sign(this.m)==sign(cw.real):
# prograde
mm = this.m
nn = sorted_cw_pos.index( cw )
else:
# retrograde
mm = -1 * this.m
nn = sorted_cw_neg.index( cw )
#
return (ll,mm,nn,tt)
# ---------------------------------------------------------- #
# Create a dictionary to keep track of potential solutions
# ---------------------------------------------------------- #
label = {}
for child in children:
cwsc = ( child.__cw__, child.__sc__ )
label[child] = inferlabel( cwsc )
child.__label__ = label[child]
#
this.label = label
'''
IMPORTANT: Here it is assumed that the solutions will change in a continuous manner, and that after the first mapping, no new solutions are of interest, unless a box-split occurs.
'''
# Store the high-level data product
for child in children:
L = this.label[child]
if not L in this.data:
this.data[ L ] = {}
this.data[ L ][ 'jf' ] = [jf]
this.data[ L ][ 'cw' ] = [ child.__cw__ ]
this.data[ L ][ 'sc' ] = [ child.__sc__ ]
this.data[ L ][ 'lvrfmin' ] = [ child.__lvrfmin__ ]
else:
this.data[ L ][ 'jf' ].append(jf)
this.data[ L ][ 'cw' ].append(child.__cw__)
this.data[ L ][ 'sc' ].append(child.__sc__)
this.data[ L ][ 'lvrfmin' ].append(child.__lvrfmin__)
# Store the information to this child also
child.__data__['jf'] = this.data[ L ][ 'jf' ]
child.__data__['cw'] = this.data[ L ][ 'cw' ]
child.__data__['sc'] = this.data[ L ][ 'sc' ]
child.__data__['lvrfmin'] = this.data[ L ][ 'lvrfmin' ]
# Refine the box center using fminsearch
def refine(this,jf):
# Import useful things
from numpy import complex128,array,linalg,log,exp,abs
from scipy.optimize import fmin,root,fmin_tnc,fmin_slsqp
from kerr.pttools import leaver_workfunction,scberti
from kerr.basics import alert,say,magenta,bold,green,cyan,yellow
from kerr import localmins # finds local minima of a 2D array
#
if this.isfundamental():
# use the box center for refined minimization
CW = complex128( this.center[0] + 1j*this.center[1] )
# SC = this.__sc__
SC = scberti( CW*jf, this.l, this.m )
state = [ CW.real,CW.imag, SC.real,SC.imag ]
#
retrycount,maxretrycount,done = -1,1,False
while done is False:
#
retrycount += 1
#
if retrycount==0:
alert(cyan('* Constructing guess using scberti-grid or extrap.'),'refine')
state = this.guess(jf,gridguess=state)
else:
alert(cyan('* Constructing guess using 4D-grid or extrap.'),'refine')
state = this.guess(jf)
# Solve leaver's equations using a hybrid strategy
cw,sc,this.__lvrfmin__,retry = this.lvrsolve(jf,state)
# If the root finder had some trouble, then mark this box with a warning (for plotting)
done = (not retry) or (retrycount>=maxretrycount)
#
if retry:
newres = 2*this.res
if this.verbose:
msg = yellow( 'The current function value is %s. Retrying root finding for %ind time with higher resolution pre-grid, and brute-force 4D.'%(this.__lvrfmin__, retrycount+2) )
alert(msg,'refine')
# say('Retrying.','refine')
# Increase the resolution of the box
this.setboxprops(this.__cw__.real,this.__cw__.imag,this.width,this.height,newres,sc=this.__sc__)
# NOTE that the commented out code below is depreciated by the use of guess() above.
# # Brute force solve again
# this.__x__,this.__scgrid__ = this.lvrgridsolve(jf,fullopt=True)
# # Use the first local min as a guess
# this.__localmin__ = localmins(this.__x__,edge_ignore=True)
# state = this.grids2states()[0]
# if this.verbose: print X.message+' The final function value is %s'%(this.__lvrfmin__)
if this.verbose: print 'The final function value is '+green(bold('%s'%(this.__lvrfmin__)))
if this.verbose:
                print '\n\t Guess cw: %s' % CW
                print '\t Optimal cw: %s' % cw
                print '\t Approx sc: %s' % scberti( CW*jf, this.l, this.m )
                print '\t Guess sc: %s' % (state[2]+1j*state[3])
                print '\t Optimal sc: %s\n' % sc
# Set the core properties of the new box
this.setboxprops( cw.real, cw.imag, this.width,this.height,this.res,sc=sc )
# Rescale this object's boxes based on new centers
this.parent.sensescale()
else:
#
for child in [ k for k in this.children if this is not k ]:
child.refine(jf)
# Determine if the current object has more than itself as a child
def isfundamental(this):
return len(this.children) is 1
# ************************************************************* #
    # Determine whether to split this box into sub-boxes (i.e. children)
# and if needed, split
# ************************************************************* #
def splitcenter(this):
from numpy import array,zeros,linalg,inf,mean,amax,amin,sqrt
from kerr.basics import magenta,bold,alert,error,red,warning,yellow
mins = this.__localmin__
num_solutions = len(array(mins)[0])
if num_solutions > 1: # Split the box
# for each min
for k in range(len(mins[0])):
# construct the center location
kr = mins[1][k]; wr = this.wr_range[ kr ]
kc = mins[0][k]; wc = this.wc_range[ kc ]
sc = this.__scgrid__[kr,kc]
# Determine the resolution of the new box
res = int( max( 20, 1.5*float(this.res)/num_solutions ) )
# Create the new child. NOTE that the child's dimensions will be set below using a standard method.
child = cwbox( this.l,this.m,wr,wc,0,0, res, parent=this, sc=sc, verbose=this.verbose )
# Add the new box to the current box's child list
this.children.append( child )
# NOTE that here we set the box dimensions of all children using the relative distances between them
this.sensescale()
# Now redefine the box size to contain all children
# NOTE that this step exists only to ensure that the box always contains all of its children's centers
children = this.collectchildren()
wr = array( [ child.center[0] for child in children ] )
wc = array( [ child.center[1] for child in children ] )
width = amax(wr)-amin(wr)
height = amax(wc)-amin(wc)
cwr = mean(wr)
cwc = mean(wc)
this.setboxprops( cwr,cwc,width,height,this.res,sc=sc )
elif num_solutions == 1:
            # construct the center location
k = 0 # there should be only one local min
kr = mins[1][k]
kc = mins[0][k]
wr = this.wr_range[ kr ]
wc = this.wc_range[ kc ]
# retrieve associated separation constant
sc = this.__scgrid__[kr,kc]
# Recenter the box on the current min
this.setboxprops(wr,wc,this.width,this.height,this.res,sc=sc)
else:
#
if len(this.__jf__)>3:
alert('Invalid number of local minima found: %s.'% (magenta(bold('%s'%num_solutions))), 'splitcenter' )
# Use the extrapolated values as a guess?
                alert(yellow('Now trying to use extrapolation, rather than grid guess, to center the current box.'),'splitcenter')
#
guess = this.guess(this.__jf__[-1],gridguess=[1.0,1.0,4.0,1.0])
wr,wc,cr,cc = guess[0],guess[1],guess[2],guess[3]
sc = cr+1j*cc
# Recenter the box on the current min
this.setboxprops(wr,wc,this.width,this.height,this.res,sc=sc)
else:
warning('Invalid number of local minima found: %s. This box will be removed. NOTE that this may not be what you want, and further inspection may be warranted.'% (magenta(bold('%s'%num_solutions))), 'splitcenter' )
this.__removeme__ = True
# Validate children: Remove duplicates
def validatechildren(this):
#
from numpy import linalg,array
from kerr import alert,yellow,cyan,blue,magenta
tol = 1e-5
#
if not this.isfundamental():
#
children = this.collectchildren()
initial_count = len(children)
# Remove identical twins
for a,tom in enumerate( children ):
for b,tim in enumerate( children ):
if b>a:
if linalg.norm(array(tom.center)-array(tim.center)) < tol:
tim.parent.children.remove(tim)
del tim
break
# Remove overtones over the max label
if this.__maxn__ is not None:
for k,child in enumerate(this.collectchildren()):
if child.__label__[2] > this.__maxn__:
if this.verbose:
msg = 'Removing overtone '+yellow('%s'%list(child.__label__))+' becuase its label is higher than the allowed value specified.'
alert(msg,'validatechildren')
this.label.pop( child.__label__ , None)
child.parent.children.remove(child)
del child
# Remove all boxes marked for deletion
for child in this.collectchildren():
if child.__removeme__:
this.label.pop( child.__label__, None )
child.parent.children.remove( child )
del child
#
final_count = len( this.collectchildren() )
#
if this.verbose:
if final_count != initial_count:
alert( yellow('%i children have been removed, and %i remain.') % (-final_count+initial_count,final_count) ,'validatechildren')
else:
alert( 'All children have been deemed valid.', 'validatechildren' )
# Method for collecting all fundamental children
def collectchildren(this,children=None):
#
if children is None:
children = []
#
if this.isfundamental():
children.append(this)
else:
for child in [ k for k in this.children if k is not this ]:
children += child.collectchildren()
#
return children
# Method to plot solutions
def plot(this,fig=None,show=False,showlabel=False):
#
from numpy import array,amin,amax,sign
from matplotlib.pyplot import plot,xlim,ylim,xlabel,ylabel,title,figure,gca,text
from matplotlib.pyplot import show as show_
#
children = this.collectchildren()
wr = array( [ child.center[0] for child in children ] )
wc =-array( [ child.center[1] for child in children ] )
wr_min,wr_max = amin(wr),amax(wr)
wc_min,wc_max = amin(wc),amax(wc)
padscale = 0.15
padr,padc = 1.5*padscale*(wr_max-wr_min), padscale*(wc_max-wc_min)
wr_min -= padr; wr_max += padr
wc_min -= padc; wc_max += padc
#
if fig is None:
# fig = figure( figsize=12*array((wr_max-wr_min, wc_max-wc_min))/(wr_max-wr_min), dpi=200, facecolor='w', edgecolor='k' )
fig = figure( figsize=12.0*array((4.5, 3))/4.0, dpi=200, facecolor='w', edgecolor='k' )
#
xlim( [wr_min,wr_max] )
ylim( [wc_min,wc_max] )
ax = gca()
#
for child in children:
plot( child.center[0],-child.center[1], '+k', ms=10 )
ax.add_patch( child.patch )
if showlabel:
text( child.center[0]+sign(child.center[0])*child.width/2,-(child.center[1]+child.height/2),
'$(%i,%i,%i,%i)$'%(this.label[child]),
ha=('right' if sign(child.center[0])<0 else 'left' ),
fontsize=10,
alpha=0.9 )
#
xlabel(r'$\mathrm{re}\;\tilde\omega_{%i%i}$'%(this.l,this.m))
ylabel(r'-$\mathrm{im}\;\tilde\omega_{%i%i}$'%(this.l,this.m))
title(r'$j_f = %1.6f$'%this.__jf__[-1],fontsize=18)
#
if show: show_()
# ************************************************************* #
# Solve leaver's equations in a given box=[wr_range,wc_range]
# NOTE that the box is a list, not an array
# ************************************************************* #
def lvrgridsolve(this,jf=0,fullopt=False):
# Import maths
from numpy import linalg,complex128,ones,array
from kerr.pttools import scberti
from kerr.pttools import leaver_workfunction
from scipy.optimize import fmin,root
import sys
# Pre-allocate an array that will hold work function values
x = ones( ( this.wc_range.size,this.wr_range.size ) )
# Pre-allocate an array that will hold sep const vals
        scgrid = ones( ( this.wc_range.size,this.wr_range.size ), dtype=complex128 )
# Solve over the grid
for i,wr in enumerate( this.wr_range ):
for j,wc in enumerate( this.wc_range ):
                # Construct the complex frequency for this i and j
cw = complex128( wr+1j*wc )
# # Define the intermediate work function to be used for this iteration
# fun = lambda SC: linalg.norm( array(leaver_workfunction( jf,this.l,this.m, [cw.real,cw.imag,SC[0],SC[1]] )) )
# # For this complex frequency, optimize over separation constant using initial guess
# SC0_= scberti( cw*jf, this.l, this.m ) # Use Berti's analytic prediction as a guess
# SC0 = [SC0_.real,SC0_.imag]
# X = fmin( fun, SC0, disp=False, full_output=True, maxiter=1 )
# # Store work function value
# x[j][i] = X[1]
# # Store sep const vals
# scgrid[j][i] = X[0][0] + 1j*X[0][1]
if fullopt is False:
# Define the intermediate work function to be used for this iteration
fun = lambda SC: linalg.norm( array(leaver_workfunction( jf,this.l,this.m, [cw.real,cw.imag,SC[0],SC[1]] )) )
# For this complex frequency, optimize over separation constant using initial guess
SC0_= scberti( cw*jf, this.l, this.m ) # Use Berti's analytic prediction as a guess
SC0 = [SC0_.real,SC0_.imag]
# Store work function value
x[j][i] = fun(SC0)
# Store sep const vals
scgrid[j][i] = SC0_
else:
SC0_= scberti( cw*jf, this.l, this.m ) # Use Berti's analytic prediction as a guess
SC0 = [SC0_.real,SC0_.imag,0,0]
#cfun = lambda Y: [ Y[0]+abs(Y[3]), Y[1]+abs(Y[2]) ]
fun = lambda SC:leaver_workfunction( jf,this.l,this.m, [cw.real,cw.imag,SC[0],SC[1]] )
X = root( fun, SC0 )
scgrid[j][i] = X.x[0]+1j*X.x[1]
x[j][i] = linalg.norm( array(X.fun) )
if this.verbose:
sys.stdout.flush()
print '.',
if this.verbose: print 'Done.'
# return work function values AND the optimal separation constants
return x,scgrid
# Convert output of localmin to a state vector for minimization
def grids2states(this):
#
from numpy import complex128
state = []
#
for k in range( len(this.__localmin__[0]) ):
#
kr,kc = this.__localmin__[1][k], this.__localmin__[0][k]
cw = complex128( this.wr_range[kr] + 1j*this.wc_range[kc] )
sc = complex128( this.__scgrid__[kr,kc] )
#
state.append( [cw.real,cw.imag,sc.real,sc.imag] )
#
return state
# Get guess either from local min, or from extrapolation of past data
def guess(this,jf,gridguess=None):
#
from kerr.pttools import leaver_workfunction
from kerr.basics import alert,magenta,apolyfit
from kerr import localmins
from numpy import array,linalg,arange,complex128,allclose,nan
from scipy.interpolate import InterpolatedUnivariateSpline as spline
# Get a guess from the localmin
if gridguess is None:
this.__x__,this.__scgrid__ = this.lvrgridsolve(jf,fullopt=True)
this.__localmin__ = localmins(this.__x__,edge_ignore=True)
guess1 = this.grids2states()[0]
else:
guess1 = gridguess
# Get a guess from extrapolation ( performed in curate() )
guess2 = [ v for v in guess1 ]
if this.mapcount > 3:
# if there are three map points, try to use polynomial fitting to determine the state at the current jf value
nn = len(this.__data__['jf'])
order = min(2,nn)
#
xx = array(this.__data__['jf'])[-4:]
#
yy = array(this.__data__['cw'])[-4:]
yr = apolyfit( xx, yy.real, order )(jf)
yc = apolyfit( yy.real, yy.imag, order )(yr)
cw = complex128( yr + 1j*yc )
#
zz = array(this.__data__['sc'])[-4:]
zr = apolyfit( xx, zz.real, order )(jf)
zc = apolyfit( zz.real, zz.imag, order )(zr)
sc = complex128( zr + 1j*zc )
#
guess2 = [ cw.real, cw.imag, sc.real, sc.imag ]
# Determine the best guess
if not ( allclose(guess1,guess2) ):
x1 = linalg.norm( leaver_workfunction( jf,this.l,this.m, guess1 ) )
x2 = linalg.norm( leaver_workfunction( jf,this.l,this.m, guess2 ) )
alert(magenta('The function value at guess from grid is: %s'%x1),'guess')
alert(magenta('The function value at guess from extrap is: %s'%x2),'guess')
if x2 is nan:
x2 = 100.0*x1
if x1<x2:
guess = guess1
alert(magenta('Using the guess from the grid.'),'guess')
else:
guess = guess2
alert(magenta('Using the guess from extrapolation.'),'guess')
else:
x1 = linalg.norm( leaver_workfunction( jf,this.l,this.m, guess1 ) )
guess = guess1
alert(magenta('The function value at guess from grid is %s'%x1),'guess')
# Return the guess solution
return guess
# Determine whether the current box contains a complex frequency given an iterable whose first two entries are the real and imag part of the complex frequency
def contains(this,guess):
#
cwrmin = min( this.limit[:2] )
cwrmax = max( this.limit[:2] )
cwcmin = min( this.limit[2:] )
cwcmax = max( this.limit[2:] )
#
isin = True
isin = isin and ( guess[0]<cwrmax )
isin = isin and ( guess[0]>cwrmin )
isin = isin and ( guess[1]<cwcmax )
isin = isin and ( guess[1]>cwcmin )
#
return isin
# Try solving the 4D equation near a single guess value [ cw.real cw.imag sc.real sc.imag ]
def lvrsolve(this,jf,guess,tol=1e-8):
# Import Maths
from numpy import log,exp,linalg,array
from scipy.optimize import root,fmin,minimize
from kerr.pttools import leaver_workfunction
from kerr import alert,red
# Try using root
# Define the intermediate work function to be used for this iteration
fun = lambda STATE: log( 1.0 + abs(array(leaver_workfunction( jf,this.l,this.m, STATE ))) )
X = root( fun, guess, tol=tol )
cw1,sc1 = X.x[0]+1j*X.x[1], X.x[2]+1j*X.x[3]
__lvrfmin1__ = linalg.norm(array( exp(X.fun)-1.0 ))
retry1 = ( 'not making good progress' in X.message.lower() ) or ( 'error' in X.message.lower() )
# Try using fmin
# Define the intermediate work function to be used for this iteration
fun = lambda STATE: log(linalg.norm( leaver_workfunction( jf,this.l,this.m, STATE ) ))
X = fmin( fun, guess, disp=False, full_output=True, ftol=tol )
cw2,sc2 = X[0][0]+1j*X[0][1], X[0][2]+1j*X[0][3]
__lvrfmin2__ = exp(X[1])
retry2 = this.__lvrfmin__ > 1e-3
# Use the solution that converged the fastest to avoid solutions that have wandered significantly from the initial guess OR use the solution with the smallest fmin
if __lvrfmin1__ < __lvrfmin2__ : # use the fmin value for convenience
cw,sc,retry = cw1,sc1,retry1
__lvrfmin__ = __lvrfmin1__
else:
cw,sc,retry = cw2,sc2,retry2
__lvrfmin__ = __lvrfmin2__
if not this.contains( [cw.real,cw.imag] ):
alert(red('Trial solution found to be outside of box. I will now try to use a bounded solver, but the performance may be suboptimal.'),'lvrsolve')
s = 2.0
cwrmin = min( this.center[0]-this.width/s, this.center[0]+this.width/s )
cwrmax = max( this.center[0]-this.width/s, this.center[0]+this.width/s )
cwcmin = min( this.center[1]-this.height/s, this.center[1]+this.height/s )
cwcmax = max( this.center[1]-this.height/s, this.center[1]+this.height/s )
scrmin = min( this.__sc__.real-this.width/s, this.__sc__.real+this.width/s )
scrmax = max( this.__sc__.real-this.width/s, this.__sc__.real+this.width/s )
sccmin = min( this.__sc__.imag-this.height/s, this.__sc__.imag+this.height/s )
sccmax = max( this.__sc__.imag-this.height/s, this.__sc__.imag+this.height/s )
bounds = [ (cwrmin,cwrmax), (cwcmin,cwcmax), (scrmin,scrmax), (sccmin,sccmax) ]
# Try using minimize
# Define the intermediate work function to be used for this iteration
fun = lambda STATE: log(linalg.norm( leaver_workfunction( jf,this.l,this.m, STATE ) ))
X = minimize( fun, guess, options={'disp':False}, tol=tol, bounds=bounds )
cw,sc = X.x[0]+1j*X.x[1], X.x[2]+1j*X.x[3]
__lvrfmin__ = exp(X.fun)
# Always retry if the solution is outside of the box
if not this.contains( [cw.real,cw.imag] ):
retry = True
alert(red('Retrying because the trial solution is outside of the box.'),'lvrsolve')
# Don't retry if fval is small
if __lvrfmin__ > 1e-3:
retry = True
alert(red('Retrying because the trial fmin value is greater than 1e-3.'),'lvrsolve')
# Don't retry if fval is small
if retry and (__lvrfmin__ < 1e-4):
retry = False
            alert(red('Not retrying because the fmin value is low.'),'lvrsolve')
# Return the solution
return cw,sc,__lvrfmin__,retry
# Given a box's children, resize the boxes relative to child locations: no boxes overlap
def sensescale(this):
#
from numpy import array,inf,linalg,sqrt
from kerr import alert
#
children = this.collectchildren()
# Let my people know.
if this.verbose:
alert('Sensing the scale of the current object\'s sub-boxes.','sensescale')
# Determine the distance between this min, and its closest neighbor
scalar = sqrt(2) if (not this.__smallboxes__) else 2.0*sqrt(2.0)
for tom in children:
d = inf
for jerry in [ kid for kid in children if kid is not tom ]:
r = array(tom.center)
r_= array(jerry.center)
d_= linalg.norm(r_-r)
if d_ < d:
d = d_
# Use the smallest distance found to determine a box size
s = d/scalar
width = s; height = s; res = int( max( 20, 1.5*float(this.res)/len(children) ) ) if (len(children)>1) else this.res
# Define the new box size for this child
tom.setboxprops( tom.center[0], tom.center[1], width, height, res )
|
import asyncio
import logging
import sys
import click
from solarium.player import Player
from . import led
from .utils import update_color
LOG_FORMAT = "%(asctime)s %(levelname)-8s %(message)s"
logger = logging.getLogger(__package__)
@click.command()
@click.argument("latitude", type=float)
@click.argument("longitude", type=float)
@click.option(
"--clouds",
"-c",
default=0.1,
help="Cache of clouds between. Between 0 and 1, default 0.1.",
)
@click.option(
"--host", "-h", default="localhost", help="PiGPIO host, default: localhost"
)
@click.option("--warm", default=12, help="Warm LED GPIO pin, default: 12")
@click.option("--cold", default=13, help="Warm LED GPIO pin, default: 13")
@click.option("--sound", help="Path to sound that will be looped during the day.")
@click.option(
"--verbosity", "-v", default=0, count=True, help="Increase output verbosity."
)
def main(latitude, longitude, clouds, host, warm, cold, sound, verbosity):
setup_logging(verbosity)
warm, cold, power_state = led.init(host, warm, cold)
player = Player(sound, power_state)
asyncio.run(
update_color(latitude, longitude, (warm, cold), power_state, clouds, player)
)
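# A hypothetical invocation, assuming the package is exposed as a console script
# (coordinates and option values below are placeholders):
#
#     solarium 47.3769 8.5417 --clouds 0.2 --host localhost --warm 12 --cold 13 -vv
#
# The positional arguments are latitude and longitude; each -v raises the logging
# verbosity by one level.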
def get_logging_level(verbosity):
level = logging.WARNING
level -= verbosity * 10
if level < logging.DEBUG:
level = logging.DEBUG
return level
def setup_logging(verbosity):
hdlr = logging.StreamHandler(sys.stdout)
hdlr.setLevel(get_logging_level(verbosity))
hdlr.setFormatter(logging.Formatter(LOG_FORMAT))
logger.addHandler(hdlr)
logger.setLevel(get_logging_level(verbosity))
if __name__ == "__main__":
sys.exit(main())
|
import requests
# Basic Variables
MIN_RANGE = 22000
MAX_RANGE = 40000
BASE_URL = 'http://B%d.cdn.telefonica.com/%d/ch%s/%s.m3u8'
CHANNELS_IDS = ['NICK_SUB', 'DSNJR_SUB', '40TV_SUB', 'DSNYXD_SUB', 'COCINA_SUB', '24HORAS_SUB', 'INVITADO_SUB', 'FOX_SUB',
'AXN_SUB', 'CLL13_SUB', 'TNT_SUB', 'FOXCRIME_SUB', 'CSMO_SUB', 'AXNWHITE_SUB', 'PCMDY_SUB', 'SYFY_SUB', 'TCM_SUB',
'CPLUSLG_SUB', 'MOVFUTBOL_SUB', 'CPLUSCHP_SUB', 'NTLG_SUB', 'NATGEOWILD_SUB', 'CPLUS1_SUB','1','2','3','4','5','6','7','8','9']
CHANNELS_T = ['01', '02', '03','04','05', '06', '07', '08']
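# For reference, a sketch of what the template expands to (host number, slot and
# channel id below are illustrative only):
#
#   BASE_URL % (22000, 22000, '01', 'NICK_SUB')
#   -> 'http://B22000.cdn.telefonica.com/22000/ch01/NICK_SUB.m3u8'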
# Execution
print 'Test'
for channel in CHANNELS_IDS:
    print 'testing channel %s' % channel
    for host_number in range(MIN_RANGE, MAX_RANGE):
        for t_number in CHANNELS_T:
            url = BASE_URL % (host_number, host_number, t_number, channel)
            try:
                req = requests.get(url, timeout=30)
                if req.status_code == 200 and 'chunklist' not in req.text:
                    print '%s: %s' % (channel, url)
                    break
            except Exception as e:
                print e
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `json2xml` package."""
import unittest
from collections import OrderedDict
import pytest
import xmltodict
from json2xml import json2xml
from json2xml.utils import readfromjson, readfromstring, readfromurl, JSONReadError, StringReadError, URLReadError
class TestJson2xml(unittest.TestCase):
"""Tests for `json2xml` package."""
def setUp(self):
"""Set up test fixtures, if any."""
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_read_from_json(self):
"""Test something."""
data = readfromjson("examples/licht.json")
assert type(data) is dict
def test_read_from_invalid_json(self):
"""Test something."""
with pytest.raises(JSONReadError) as pytest_wrapped_e:
data = readfromjson("examples/licht_wrong.json")
assert pytest_wrapped_e.type == JSONReadError
def test_read_from_url(self):
data = readfromurl("https://coderwall.com/vinitcool76.json")
assert type(data) is dict
def test_read_from_wrong_url(self):
with pytest.raises(URLReadError) as pytest_wrapped_e:
data = readfromurl("https://coderwall.com/vinitcool76.jsoni")
assert pytest_wrapped_e.type == URLReadError
def test_read_from_jsonstring(self):
data = readfromstring(
'{"login":"mojombo","id":1,"avatar_url":"https://avatars0.githubusercontent.com/u/1?v=4"}'
)
assert type(data) is dict
def test_read_from_invalid_jsonstring(self):
with pytest.raises(StringReadError) as pytest_wrapped_e:
data = readfromstring(
'{"login":"mojombo","id":1,"avatar_url":"https://avatars0.githubusercontent.com/u/1?v=4"'
)
assert pytest_wrapped_e.type == StringReadError
def test_json_to_xml_conversion(self):
data = readfromstring(
'{"login":"mojombo","id":1,"avatar_url":"https://avatars0.githubusercontent.com/u/1?v=4"}'
)
xmldata = json2xml.Json2xml(data).to_xml()
dict_from_xml = xmltodict.parse(xmldata)
assert type(dict_from_xml["all"]) == OrderedDict
def test_custom_wrapper_and_indent(self):
data = readfromstring(
'{"login":"mojombo","id":1,"avatar_url":"https://avatars0.githubusercontent.com/u/1?v=4"}'
)
xmldata = json2xml.Json2xml(data, wrapper="test", pretty=True).to_xml()
old_dict = xmltodict.parse(xmldata)
        # test must be present, since it is the wrapper
assert "test" in old_dict.keys()
        # reverse test, say a wrapper called random won't be present
assert "random" not in old_dict.keys()
|
secondNamePath=r'C:\Users\Billy\PycharmProjects\TestHackZurich2018\Second_Names'
maleNamePath=r'C:\Users\Billy\PycharmProjects\TestHackZurich2018\MaleNames'
femaleNamePath=r'C:\Users\Billy\PycharmProjects\TestHackZurich2018\FemaleNames'
jsonPath=r'C:\Users\Billy\PycharmProjects\TestHackZurich2018\users_json'
countOfUsers = 1000
countOfFriends = 10
|
# -*- coding: utf-8 -*- #
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from models import Project
from util import *
import webapp2
class CreateProjectHandler(webapp2.RequestHandler):
def get(self):
self.response.out.write(
template.render('createproject.html', {}))
class CreateProjectActionHandler(webapp2.RequestHandler):
def post(self):
title = self.request.POST.get('title')
code = self.request.POST.get('code')
admins = [Util.getUsernameFromEmail(users.get_current_user().email())]
project = Project(title=title, code=code, admins=admins)
project.put()
self.redirect('/' + project.code + '/organize')
app = webapp2.WSGIApplication(
[
('/createproject', CreateProjectHandler),
('/createprojectaction', CreateProjectActionHandler)
], debug=True)
|
# encoding: utf-8
from bs4 import BeautifulSoup
from nose.tools import (
assert_equal,
assert_not_equal,
assert_raises,
assert_true,
assert_in
)
from mock import patch, MagicMock
from routes import url_for
import ckan.model as model
import ckan.plugins as p
from ckan.lib import search
import ckan.tests.helpers as helpers
import ckan.tests.factories as factories
webtest_submit = helpers.webtest_submit
submit_and_follow = helpers.submit_and_follow
def _get_package_new_page(app):
user = factories.User()
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.get(
url=url_for(controller='package', action='new'),
extra_environ=env,
)
return env, response
class TestPackageNew(helpers.FunctionalTestBase):
def test_form_renders(self):
app = self._get_test_app()
env, response = _get_package_new_page(app)
assert_true('dataset-edit' in response.forms)
@helpers.change_config('ckan.auth.create_unowned_dataset', 'false')
def test_needs_organization_but_no_organizations_has_button(self):
''' Scenario: The settings say every dataset needs an organization
but there are no organizations. If the user is allowed to create an
organization they should be prompted to do so when they try to create
a new dataset'''
app = self._get_test_app()
sysadmin = factories.Sysadmin()
env = {'REMOTE_USER': sysadmin['name'].encode('ascii')}
response = app.get(
url=url_for(controller='package', action='new'),
extra_environ=env
)
assert 'dataset-edit' not in response.forms
assert url_for(controller='organization', action='new') in response
@helpers.mock_auth('ckan.logic.auth.create.package_create')
@helpers.change_config('ckan.auth.create_unowned_dataset', 'false')
@helpers.change_config('ckan.auth.user_create_organizations', 'false')
def test_needs_organization_but_no_organizations_no_button(self,
mock_p_create):
''' Scenario: The settings say every dataset needs an organization
but there are no organizations. If the user is not allowed to create an
organization they should be told to ask the admin but no link should be
presented. Note: This cannot happen with the default ckan and requires
a plugin to overwrite the package_create behavior'''
mock_p_create.return_value = {'success': True}
app = self._get_test_app()
user = factories.User()
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.get(
url=url_for(controller='package', action='new'),
extra_environ=env
)
assert 'dataset-edit' not in response.forms
assert url_for(controller='organization', action='new') not in response
assert 'Ask a system administrator' in response
def test_name_required(self):
app = self._get_test_app()
env, response = _get_package_new_page(app)
form = response.forms['dataset-edit']
response = webtest_submit(form, 'save', status=200, extra_environ=env)
assert_true('dataset-edit' in response.forms)
assert_true('Name: Missing value' in response)
def test_resource_form_renders(self):
app = self._get_test_app()
env, response = _get_package_new_page(app)
form = response.forms['dataset-edit']
form['name'] = u'resource-form-renders'
response = submit_and_follow(app, form, env, 'save')
assert_true('resource-edit' in response.forms)
def test_first_page_creates_draft_package(self):
app = self._get_test_app()
env, response = _get_package_new_page(app)
form = response.forms['dataset-edit']
form['name'] = u'first-page-creates-draft'
webtest_submit(form, 'save', status=302, extra_environ=env)
pkg = model.Package.by_name(u'first-page-creates-draft')
assert_equal(pkg.state, 'draft')
def test_resource_required(self):
app = self._get_test_app()
env, response = _get_package_new_page(app)
form = response.forms['dataset-edit']
form['name'] = u'one-resource-required'
response = submit_and_follow(app, form, env, 'save')
form = response.forms['resource-edit']
response = webtest_submit(form, 'save', value='go-metadata',
status=200, extra_environ=env)
assert_true('resource-edit' in response.forms)
assert_true('You must add at least one data resource' in response)
def test_complete_package_with_one_resource(self):
app = self._get_test_app()
env, response = _get_package_new_page(app)
form = response.forms['dataset-edit']
form['name'] = u'complete-package-with-one-resource'
response = submit_and_follow(app, form, env, 'save')
form = response.forms['resource-edit']
form['url'] = u'http://example.com/resource'
submit_and_follow(app, form, env, 'save', 'go-metadata')
pkg = model.Package.by_name(u'complete-package-with-one-resource')
assert_equal(pkg.resources[0].url, u'http://example.com/resource')
assert_equal(pkg.state, 'active')
def test_complete_package_with_two_resources(self):
app = self._get_test_app()
env, response = _get_package_new_page(app)
form = response.forms['dataset-edit']
form['name'] = u'complete-package-with-two-resources'
response = submit_and_follow(app, form, env, 'save')
form = response.forms['resource-edit']
form['url'] = u'http://example.com/resource0'
response = submit_and_follow(app, form, env, 'save', 'again')
form = response.forms['resource-edit']
form['url'] = u'http://example.com/resource1'
submit_and_follow(app, form, env, 'save', 'go-metadata')
pkg = model.Package.by_name(u'complete-package-with-two-resources')
assert_equal(pkg.resources[0].url, u'http://example.com/resource0')
assert_equal(pkg.resources[1].url, u'http://example.com/resource1')
assert_equal(pkg.state, 'active')
# def test_resource_uploads(self):
# app = self._get_test_app()
# env, response = _get_package_new_page(app)
# form = response.forms['dataset-edit']
# form['name'] = u'complete-package-with-two-resources'
# response = submit_and_follow(app, form, env, 'save')
# form = response.forms['resource-edit']
# form['upload'] = ('README.rst', b'data')
# response = submit_and_follow(app, form, env, 'save', 'go-metadata')
# pkg = model.Package.by_name(u'complete-package-with-two-resources')
# assert_equal(pkg.resources[0].url_type, u'upload')
# assert_equal(pkg.state, 'active')
# response = app.get(
# url_for(
# controller='package',
# action='resource_download',
# id=pkg.id,
# resource_id=pkg.resources[0].id
# ),
# )
# assert_equal('data', response.body)
def test_previous_button_works(self):
app = self._get_test_app()
env, response = _get_package_new_page(app)
form = response.forms['dataset-edit']
form['name'] = u'previous-button-works'
response = submit_and_follow(app, form, env, 'save')
form = response.forms['resource-edit']
response = submit_and_follow(app, form, env, 'save', 'go-dataset')
assert_true('dataset-edit' in response.forms)
def test_previous_button_populates_form(self):
app = self._get_test_app()
env, response = _get_package_new_page(app)
form = response.forms['dataset-edit']
form['name'] = u'previous-button-populates-form'
response = submit_and_follow(app, form, env, 'save')
form = response.forms['resource-edit']
response = submit_and_follow(app, form, env, 'save', 'go-dataset')
form = response.forms['dataset-edit']
assert_true('title' in form.fields)
assert_equal(form['name'].value, u'previous-button-populates-form')
def test_previous_next_maintains_draft_state(self):
app = self._get_test_app()
env, response = _get_package_new_page(app)
form = response.forms['dataset-edit']
form['name'] = u'previous-next-maintains-draft'
response = submit_and_follow(app, form, env, 'save')
form = response.forms['resource-edit']
response = submit_and_follow(app, form, env, 'save', 'go-dataset')
form = response.forms['dataset-edit']
webtest_submit(form, 'save', status=302, extra_environ=env)
pkg = model.Package.by_name(u'previous-next-maintains-draft')
assert_equal(pkg.state, 'draft')
def test_dataset_edit_org_dropdown_visible_to_normal_user_with_orgs_available(self):
'''
The 'Organization' dropdown is available on the dataset create/edit
page to normal (non-sysadmin) users who have organizations available
to them.
'''
user = factories.User()
# user is admin of org.
org = factories.Organization(
name="my-org",
users=[{'name': user['id'], 'capacity': 'admin'}]
)
app = self._get_test_app()
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.get(
url=url_for(controller='package', action='new'),
extra_environ=env,
)
# organization dropdown available in create page.
form = response.forms['dataset-edit']
assert 'owner_org' in form.fields
# create dataset
form['name'] = u'my-dataset'
form['owner_org'] = org['id']
response = submit_and_follow(app, form, env, 'save')
# add a resource to make the pkg active
resource_form = response.forms['resource-edit']
resource_form['url'] = u'http://example.com/resource'
submit_and_follow(app, resource_form, env, 'save', 'go-metadata')
pkg = model.Package.by_name(u'my-dataset')
assert_equal(pkg.state, 'active')
# edit package page response
url = url_for(controller='package',
action='edit',
id=pkg.id)
pkg_edit_response = app.get(url=url, extra_environ=env)
# A field with the correct id is in the response
form = pkg_edit_response.forms['dataset-edit']
assert 'owner_org' in form.fields
# The organization id is in the response in a value attribute
owner_org_options = [value for (value, _) in form['owner_org'].options]
assert org['id'] in owner_org_options
def test_dataset_edit_org_dropdown_normal_user_can_remove_org(self):
'''
        A normal user (non-sysadmin) can remove an organization from a dataset
        they have permissions on.
'''
user = factories.User()
# user is admin of org.
org = factories.Organization(name="my-org",
users=[{'name': user['id'], 'capacity': 'admin'}])
app = self._get_test_app()
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.get(
url=url_for(controller='package', action='new'),
extra_environ=env,
)
# create dataset with owner_org
form = response.forms['dataset-edit']
form['name'] = u'my-dataset'
form['owner_org'] = org['id']
response = submit_and_follow(app, form, env, 'save')
# add a resource to make the pkg active
resource_form = response.forms['resource-edit']
resource_form['url'] = u'http://example.com/resource'
submit_and_follow(app, resource_form, env, 'save', 'go-metadata')
pkg = model.Package.by_name(u'my-dataset')
assert_equal(pkg.state, 'active')
assert_equal(pkg.owner_org, org['id'])
assert_not_equal(pkg.owner_org, None)
# edit package page response
url = url_for(controller='package',
action='edit',
id=pkg.id)
pkg_edit_response = app.get(url=url, extra_environ=env)
# edit dataset
edit_form = pkg_edit_response.forms['dataset-edit']
edit_form['owner_org'] = ''
submit_and_follow(app, edit_form, env, 'save')
post_edit_pkg = model.Package.by_name(u'my-dataset')
assert_equal(post_edit_pkg.owner_org, None)
assert_not_equal(post_edit_pkg.owner_org, org['id'])
def test_dataset_edit_org_dropdown_not_visible_to_normal_user_with_no_orgs_available(self):
'''
The 'Organization' dropdown is not available on the dataset
create/edit page to normal (non-sysadmin) users who have no
organizations available to them.
'''
user = factories.User()
# user isn't admin of org.
org = factories.Organization(name="my-org")
app = self._get_test_app()
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.get(
url=url_for(controller='package', action='new'),
extra_environ=env,
)
# organization dropdown not in create page.
form = response.forms['dataset-edit']
assert 'owner_org' not in form.fields
# create dataset
form['name'] = u'my-dataset'
response = submit_and_follow(app, form, env, 'save')
# add a resource to make the pkg active
resource_form = response.forms['resource-edit']
resource_form['url'] = u'http://example.com/resource'
submit_and_follow(app, resource_form, env, 'save', 'go-metadata')
pkg = model.Package.by_name(u'my-dataset')
assert_equal(pkg.state, 'active')
# edit package response
url = url_for(controller='package',
action='edit',
id=model.Package.by_name(u'my-dataset').id)
pkg_edit_response = app.get(url=url, extra_environ=env)
        # The owner_org field is not present in the form
form = pkg_edit_response.forms['dataset-edit']
assert 'owner_org' not in form.fields
        # The organization id is not in the response in a value attribute
assert 'value="{0}"'.format(org['id']) not in pkg_edit_response
def test_dataset_edit_org_dropdown_visible_to_sysadmin_with_no_orgs_available(self):
'''
The 'Organization' dropdown is available to sysadmin users regardless
of whether they personally have an organization they administrate.
'''
user = factories.User()
sysadmin = factories.Sysadmin()
# user is admin of org.
org = factories.Organization(name="my-org",
users=[{'name': user['id'], 'capacity': 'admin'}])
app = self._get_test_app()
# user in env is sysadmin
env = {'REMOTE_USER': sysadmin['name'].encode('ascii')}
response = app.get(
url=url_for(controller='package', action='new'),
extra_environ=env,
)
# organization dropdown available in create page.
assert 'id="field-organizations"' in response
# create dataset
form = response.forms['dataset-edit']
form['name'] = u'my-dataset'
form['owner_org'] = org['id']
response = submit_and_follow(app, form, env, 'save')
# add a resource to make the pkg active
resource_form = response.forms['resource-edit']
resource_form['url'] = u'http://example.com/resource'
submit_and_follow(app, resource_form, env, 'save', 'go-metadata')
pkg = model.Package.by_name(u'my-dataset')
assert_equal(pkg.state, 'active')
# edit package page response
url = url_for(controller='package',
action='edit',
id=pkg.id)
pkg_edit_response = app.get(url=url, extra_environ=env)
# A field with the correct id is in the response
assert 'id="field-organizations"' in pkg_edit_response
# The organization id is in the response in a value attribute
assert 'value="{0}"'.format(org['id']) in pkg_edit_response
def test_unauthed_user_creating_dataset(self):
app = self._get_test_app()
        # provide REMOTE_ADDR to identify as a remote user, see
# ckan.views.identify_user() for details
response = app.post(url=url_for(controller='package', action='new'),
extra_environ={'REMOTE_ADDR': '127.0.0.1'},
status=403)
class TestPackageEdit(helpers.FunctionalTestBase):
def test_organization_admin_can_edit(self):
user = factories.User()
organization = factories.Organization(
users=[{'name': user['id'], 'capacity': 'admin'}]
)
dataset = factories.Dataset(owner_org=organization['id'])
app = helpers._get_test_app()
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.get(
url_for(controller='package',
action='edit',
id=dataset['name']),
extra_environ=env,
)
form = response.forms['dataset-edit']
form['notes'] = u'edited description'
submit_and_follow(app, form, env, 'save')
result = helpers.call_action('package_show', id=dataset['id'])
assert_equal(u'edited description', result['notes'])
def test_organization_editor_can_edit(self):
user = factories.User()
organization = factories.Organization(
users=[{'name': user['id'], 'capacity': 'editor'}]
)
dataset = factories.Dataset(owner_org=organization['id'])
app = helpers._get_test_app()
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.get(
url_for(controller='package',
action='edit',
id=dataset['name']),
extra_environ=env,
)
form = response.forms['dataset-edit']
form['notes'] = u'edited description'
submit_and_follow(app, form, env, 'save')
result = helpers.call_action('package_show', id=dataset['id'])
assert_equal(u'edited description', result['notes'])
def test_organization_member_cannot_edit(self):
user = factories.User()
organization = factories.Organization(
users=[{'name': user['id'], 'capacity': 'member'}]
)
dataset = factories.Dataset(owner_org=organization['id'])
app = helpers._get_test_app()
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.get(
url_for(controller='package',
action='edit',
id=dataset['name']),
extra_environ=env,
status=403,
)
def test_user_not_in_organization_cannot_edit(self):
user = factories.User()
organization = factories.Organization()
dataset = factories.Dataset(owner_org=organization['id'])
app = helpers._get_test_app()
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.get(
url_for(controller='package',
action='edit',
id=dataset['name']),
extra_environ=env,
status=403,
)
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.post(
url_for(controller='package',
action='edit',
id=dataset['name']),
{'notes': 'edited description'},
extra_environ=env,
status=403,
)
def test_anonymous_user_cannot_edit(self):
organization = factories.Organization()
dataset = factories.Dataset(owner_org=organization['id'])
app = helpers._get_test_app()
response = app.get(
url_for(controller='package',
action='edit',
id=dataset['name']),
status=403,
)
response = app.post(
url_for(controller='package',
action='edit',
id=dataset['name']),
{'notes': 'edited description'},
status=403,
)
def test_validation_errors_for_dataset_name_appear(self):
        '''Fill out a bad dataset name and make sure errors appear'''
user = factories.User()
organization = factories.Organization(
users=[{'name': user['id'], 'capacity': 'admin'}]
)
dataset = factories.Dataset(owner_org=organization['id'])
app = helpers._get_test_app()
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.get(
url_for(controller='package',
action='edit',
id=dataset['name']),
extra_environ=env,
)
form = response.forms['dataset-edit']
form['name'] = u'this is not a valid name'
response = webtest_submit(form, 'save', status=200, extra_environ=env)
assert_in('The form contains invalid entries', response.body)
assert_in('Name: Must be purely lowercase alphanumeric (ascii) '
'characters and these symbols: -_', response.body)
def test_edit_a_dataset_that_does_not_exist_404s(self):
user = factories.User()
app = helpers._get_test_app()
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.get(
url_for(controller='package',
action='edit',
id='does-not-exist'),
extra_environ=env,
expect_errors=True
)
assert_equal(404, response.status_int)
class TestPackageRead(helpers.FunctionalTestBase):
def test_read(self):
dataset = factories.Dataset()
app = helpers._get_test_app()
response = app.get(url_for(controller='package', action='read',
id=dataset['name']))
response.mustcontain('Test Dataset')
response.mustcontain('Just another test dataset')
def test_organization_members_can_read_private_datasets(self):
members = {
'member': factories.User(),
'editor': factories.User(),
'admin': factories.User(),
'sysadmin': factories.Sysadmin()
}
organization = factories.Organization(
users=[
{'name': members['member']['id'], 'capacity': 'member'},
{'name': members['editor']['id'], 'capacity': 'editor'},
{'name': members['admin']['id'], 'capacity': 'admin'},
]
)
dataset = factories.Dataset(
owner_org=organization['id'],
private=True,
)
app = helpers._get_test_app()
for user, user_dict in members.items():
response = app.get(
url_for(
controller='package',
action='read',
id=dataset['name']
),
extra_environ={
'REMOTE_USER': user_dict['name'].encode('ascii'),
},
)
assert_in('Test Dataset', response.body)
assert_in('Just another test dataset', response.body)
def test_anonymous_users_cannot_read_private_datasets(self):
organization = factories.Organization()
dataset = factories.Dataset(
owner_org=organization['id'],
private=True,
)
app = helpers._get_test_app()
response = app.get(
url_for(controller='package', action='read', id=dataset['name']),
status=404
)
assert_equal(404, response.status_int)
def test_user_not_in_organization_cannot_read_private_datasets(self):
user = factories.User()
organization = factories.Organization()
dataset = factories.Dataset(
owner_org=organization['id'],
private=True,
)
app = helpers._get_test_app()
response = app.get(
url_for(controller='package', action='read', id=dataset['name']),
extra_environ={'REMOTE_USER': user['name'].encode('ascii')},
status=404
)
assert_equal(404, response.status_int)
def test_read_rdf(self):
''' The RDF outputs now live in ckanext-dcat'''
dataset1 = factories.Dataset()
offset = url_for(controller='package', action='read',
id=dataset1['name']) + ".rdf"
app = self._get_test_app()
app.get(offset, status=404)
def test_read_n3(self):
''' The RDF outputs now live in ckanext-dcat'''
dataset1 = factories.Dataset()
offset = url_for(controller='package', action='read',
id=dataset1['name']) + ".n3"
app = self._get_test_app()
app.get(offset, status=404)
class TestPackageDelete(helpers.FunctionalTestBase):
def test_owner_delete(self):
user = factories.User()
owner_org = factories.Organization(
users=[{'name': user['id'], 'capacity': 'admin'}]
)
dataset = factories.Dataset(owner_org=owner_org['id'])
app = helpers._get_test_app()
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.post(
url_for(controller='package', action='delete', id=dataset['name']),
extra_environ=env,
)
response = response.follow()
assert_equal(200, response.status_int)
deleted = helpers.call_action('package_show', id=dataset['id'])
assert_equal('deleted', deleted['state'])
def test_delete_on_non_existing_dataset(self):
app = helpers._get_test_app()
response = app.post(
url_for(controller='package', action='delete',
id='schrodingersdatset'),
expect_errors=True,
)
assert_equal(404, response.status_int)
def test_sysadmin_can_delete_any_dataset(self):
owner_org = factories.Organization()
dataset = factories.Dataset(owner_org=owner_org['id'])
app = helpers._get_test_app()
user = factories.Sysadmin()
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.post(
url_for(controller='package', action='delete', id=dataset['name']),
extra_environ=env,
)
response = response.follow()
assert_equal(200, response.status_int)
deleted = helpers.call_action('package_show', id=dataset['id'])
assert_equal('deleted', deleted['state'])
def test_anon_user_cannot_delete_owned_dataset(self):
user = factories.User()
owner_org = factories.Organization(
users=[{'name': user['id'], 'capacity': 'admin'}]
)
dataset = factories.Dataset(owner_org=owner_org['id'])
app = helpers._get_test_app()
response = app.post(
url_for(controller='package', action='delete', id=dataset['name']),
status=403,
)
response.mustcontain('Unauthorized to delete package')
deleted = helpers.call_action('package_show', id=dataset['id'])
assert_equal('active', deleted['state'])
def test_logged_in_user_cannot_delete_owned_dataset(self):
owner = factories.User()
owner_org = factories.Organization(
users=[{'name': owner['id'], 'capacity': 'admin'}]
)
dataset = factories.Dataset(owner_org=owner_org['id'])
app = helpers._get_test_app()
user = factories.User()
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.post(
url_for(controller='package', action='delete', id=dataset['name']),
extra_environ=env,
expect_errors=True
)
assert_equal(403, response.status_int)
response.mustcontain('Unauthorized to delete package')
def test_confirm_cancel_delete(self):
        '''Test confirmation of deleting datasets.
        When package_delete is made as a GET request, it should return a
        "do you want to delete this dataset?" confirmation page.'''
user = factories.User()
owner_org = factories.Organization(
users=[{'name': user['id'], 'capacity': 'admin'}]
)
dataset = factories.Dataset(owner_org=owner_org['id'])
app = helpers._get_test_app()
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.get(
url_for(controller='package', action='delete', id=dataset['name']),
extra_environ=env,
)
assert_equal(200, response.status_int)
message = 'Are you sure you want to delete dataset - {name}?'
response.mustcontain(message.format(name=dataset['title']))
form = response.forms['confirm-dataset-delete-form']
response = form.submit('cancel')
response = helpers.webtest_maybe_follow(
response,
extra_environ=env,
)
assert_equal(200, response.status_int)
class TestResourceNew(helpers.FunctionalTestBase):
def test_manage_dataset_resource_listing_page(self):
user = factories.User()
organization = factories.Organization(user=user)
dataset = factories.Dataset(owner_org=organization['id'])
resource = factories.Resource(package_id=dataset['id'])
app = helpers._get_test_app()
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.get(
url_for(
controller='package',
action='resources',
id=dataset['name'],
),
extra_environ=env
)
assert_in(resource['name'], response)
assert_in(resource['description'], response)
assert_in(resource['format'], response)
    def test_unauth_user_cannot_view_manage_dataset_resource_listing_page(self):
        owner = factories.User()
        organization = factories.Organization(user=owner)
        dataset = factories.Dataset(owner_org=organization['id'])
        factories.Resource(package_id=dataset['id'])
        # access the manage page as a user who is not a member of the organization
        user = factories.User()
        app = helpers._get_test_app()
        env = {'REMOTE_USER': user['name'].encode('ascii')}
        app.get(
            url_for(
                controller='package',
                action='resources',
                id=dataset['name'],
            ),
            extra_environ=env,
            status=403,
        )
def test_404_on_manage_dataset_resource_listing_page_that_does_not_exist(self):
user = factories.User()
app = helpers._get_test_app()
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.get(
url_for(
controller='package',
action='resources',
id='does-not-exist'
),
extra_environ=env,
expect_errors=True
)
assert_equal(404, response.status_int)
def test_add_new_resource_with_link_and_download(self):
user = factories.User()
dataset = factories.Dataset()
env = {'REMOTE_USER': user['name'].encode('ascii')}
app = helpers._get_test_app()
response = app.get(
url_for(
controller='package',
action='new_resource',
id=dataset['id'],
),
extra_environ=env
)
form = response.forms['resource-edit']
form['url'] = u'http://test.com/'
response = submit_and_follow(app, form, env, 'save',
'go-dataset-complete')
result = helpers.call_action('package_show', id=dataset['id'])
response = app.get(
url_for(
controller='package',
action='resource_download',
id=dataset['id'],
resource_id=result['resources'][0]['id']
),
extra_environ=env,
)
assert_equal(302, response.status_int)
def test_editor_can_add_new_resource(self):
user = factories.User()
organization = factories.Organization(
users=[{'name': user['id'], 'capacity': 'editor'}]
)
dataset = factories.Dataset(
owner_org=organization['id'],
)
env = {'REMOTE_USER': user['name'].encode('ascii')}
app = helpers._get_test_app()
response = app.get(
url_for(
controller='package',
action='new_resource',
id=dataset['id'],
),
extra_environ=env
)
form = response.forms['resource-edit']
form['name'] = u'test resource'
form['url'] = u'http://test.com/'
response = submit_and_follow(app, form, env, 'save',
'go-dataset-complete')
result = helpers.call_action('package_show', id=dataset['id'])
assert_equal(1, len(result['resources']))
assert_equal(u'test resource', result['resources'][0]['name'])
def test_admin_can_add_new_resource(self):
user = factories.User()
organization = factories.Organization(
users=[{'name': user['id'], 'capacity': 'admin'}]
)
dataset = factories.Dataset(
owner_org=organization['id'],
)
env = {'REMOTE_USER': user['name'].encode('ascii')}
app = helpers._get_test_app()
response = app.get(
url_for(
controller='package',
action='new_resource',
id=dataset['id'],
),
extra_environ=env
)
form = response.forms['resource-edit']
form['name'] = u'test resource'
form['url'] = u'http://test.com/'
response = submit_and_follow(app, form, env, 'save',
'go-dataset-complete')
result = helpers.call_action('package_show', id=dataset['id'])
assert_equal(1, len(result['resources']))
assert_equal(u'test resource', result['resources'][0]['name'])
def test_member_cannot_add_new_resource(self):
user = factories.User()
organization = factories.Organization(
users=[{'name': user['id'], 'capacity': 'member'}]
)
dataset = factories.Dataset(
owner_org=organization['id'],
)
env = {'REMOTE_USER': user['name'].encode('ascii')}
app = helpers._get_test_app()
response = app.get(
url_for(
controller='package',
action='new_resource',
id=dataset['id'],
),
extra_environ=env,
status=403,
)
response = app.post(
url_for(
controller='package',
action='new_resource',
id=dataset['id'],
),
{'name': 'test', 'url': 'test', 'save': 'save', 'id': ''},
extra_environ=env,
status=403,
)
def test_non_organization_users_cannot_add_new_resource(self):
'''on an owned dataset'''
user = factories.User()
organization = factories.Organization()
dataset = factories.Dataset(
owner_org=organization['id'],
)
env = {'REMOTE_USER': user['name'].encode('ascii')}
app = helpers._get_test_app()
response = app.get(
url_for(
controller='package',
action='new_resource',
id=dataset['id'],
),
extra_environ=env,
status=403,
)
response = app.post(
url_for(
controller='package',
action='new_resource',
id=dataset['id'],
),
{'name': 'test', 'url': 'test', 'save': 'save', 'id': ''},
extra_environ=env,
status=403,
)
def test_anonymous_users_cannot_add_new_resource(self):
organization = factories.Organization()
dataset = factories.Dataset(
owner_org=organization['id'],
)
app = helpers._get_test_app()
response = app.get(
url_for(
controller='package',
action='new_resource',
id=dataset['id'],
),
status=403,
)
response = app.post(
url_for(
controller='package',
action='new_resource',
id=dataset['id'],
),
{'name': 'test', 'url': 'test', 'save': 'save', 'id': ''},
status=403,
)
class TestResourceView(helpers.FunctionalTestBase):
@classmethod
def setup_class(cls):
super(cls, cls).setup_class()
if not p.plugin_loaded('image_view'):
p.load('image_view')
helpers.reset_db()
@classmethod
def teardown_class(cls):
p.unload('image_view')
def test_existent_resource_view_page_returns_ok_code(self):
resource_view = factories.ResourceView()
url = url_for(controller='package',
action='resource_read',
id=resource_view['package_id'],
resource_id=resource_view['resource_id'],
view_id=resource_view['id'])
app = self._get_test_app()
app.get(url, status=200)
def test_inexistent_resource_view_page_returns_not_found_code(self):
resource_view = factories.ResourceView()
url = url_for(controller='package',
action='resource_read',
id=resource_view['package_id'],
resource_id=resource_view['resource_id'],
view_id='inexistent-view-id')
app = self._get_test_app()
app.get(url, status=404)
def test_resource_view_description_is_rendered_as_markdown(self):
resource_view = factories.ResourceView(description="Some **Markdown**")
url = url_for(controller='package',
action='resource_read',
id=resource_view['package_id'],
resource_id=resource_view['resource_id'],
view_id=resource_view['id'])
app = self._get_test_app()
response = app.get(url)
response.mustcontain('Some <strong>Markdown</strong>')
class TestResourceRead(helpers.FunctionalTestBase):
def test_existing_resource_with_not_associated_dataset(self):
dataset = factories.Dataset()
resource = factories.Resource()
url = url_for(controller='package',
action='resource_read',
id=dataset['id'],
resource_id=resource['id'])
app = self._get_test_app()
app.get(url, status=404)
def test_resource_read_logged_in_user(self):
'''
A logged-in user can view resource page.
'''
user = factories.User()
env = {'REMOTE_USER': user['name'].encode('ascii')}
dataset = factories.Dataset()
resource = factories.Resource(package_id=dataset['id'])
url = url_for(controller='package',
action='resource_read',
id=dataset['id'],
resource_id=resource['id'])
app = self._get_test_app()
app.get(url, status=200, extra_environ=env)
def test_resource_read_anon_user(self):
'''
An anon user can view resource page.
'''
dataset = factories.Dataset()
resource = factories.Resource(package_id=dataset['id'])
url = url_for(controller='package',
action='resource_read',
id=dataset['id'],
resource_id=resource['id'])
app = self._get_test_app()
app.get(url, status=200)
def test_resource_read_sysadmin(self):
'''
A sysadmin can view resource page.
'''
sysadmin = factories.Sysadmin()
env = {'REMOTE_USER': sysadmin['name'].encode('ascii')}
dataset = factories.Dataset()
resource = factories.Resource(package_id=dataset['id'])
url = url_for(controller='package',
action='resource_read',
id=dataset['id'],
resource_id=resource['id'])
app = self._get_test_app()
app.get(url, status=200, extra_environ=env)
def test_user_not_in_organization_cannot_read_private_dataset(self):
user = factories.User()
env = {'REMOTE_USER': user['name'].encode('ascii')}
organization = factories.Organization()
dataset = factories.Dataset(
owner_org=organization['id'],
private=True,
)
resource = factories.Resource(package_id=dataset['id'])
url = url_for(controller='package',
action='resource_read',
id=dataset['id'],
resource_id=resource['id'])
app = self._get_test_app()
response = app.get(url,
status=404,
extra_environ=env)
def test_organization_members_can_read_resources_in_private_datasets(self):
members = {
'member': factories.User(),
'editor': factories.User(),
'admin': factories.User(),
'sysadmin': factories.Sysadmin()
}
organization = factories.Organization(
users=[
{'name': members['member']['id'], 'capacity': 'member'},
{'name': members['editor']['id'], 'capacity': 'editor'},
{'name': members['admin']['id'], 'capacity': 'admin'},
]
)
dataset = factories.Dataset(
owner_org=organization['id'],
private=True,
)
resource = factories.Resource(package_id=dataset['id'])
app = helpers._get_test_app()
for user, user_dict in members.items():
response = app.get(
url_for(
controller='package',
action='resource_read',
id=dataset['name'],
resource_id=resource['id'],
),
extra_environ={
'REMOTE_USER': user_dict['name'].encode('ascii'),
},
)
assert_in('Just another test resource', response.body)
def test_anonymous_users_cannot_read_private_datasets(self):
organization = factories.Organization()
dataset = factories.Dataset(
owner_org=organization['id'],
private=True,
)
app = helpers._get_test_app()
response = app.get(
url_for(controller='package', action='read', id=dataset['name']),
status=404
)
assert_equal(404, response.status_int)
class TestResourceDelete(helpers.FunctionalTestBase):
def test_dataset_owners_can_delete_resources(self):
user = factories.User()
owner_org = factories.Organization(
users=[{'name': user['id'], 'capacity': 'admin'}]
)
dataset = factories.Dataset(owner_org=owner_org['id'])
resource = factories.Resource(package_id=dataset['id'])
app = helpers._get_test_app()
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.post(
url_for(controller='package', action='resource_delete',
id=dataset['name'], resource_id=resource['id']),
extra_environ=env,
)
response = response.follow()
assert_equal(200, response.status_int)
response.mustcontain('This dataset has no data')
assert_raises(p.toolkit.ObjectNotFound, helpers.call_action,
'resource_show', id=resource['id'])
def test_deleting_non_existing_resource_404s(self):
user = factories.User()
owner_org = factories.Organization(
users=[{'name': user['id'], 'capacity': 'admin'}]
)
dataset = factories.Dataset(owner_org=owner_org['id'])
env = {'REMOTE_USER': user['name'].encode('ascii')}
app = helpers._get_test_app()
response = app.post(
url_for(controller='package', action='resource_delete',
id=dataset['name'], resource_id='doesnotexist'),
extra_environ=env,
expect_errors=True
)
assert_equal(404, response.status_int)
def test_anon_users_cannot_delete_owned_resources(self):
user = factories.User()
owner_org = factories.Organization(
users=[{'name': user['id'], 'capacity': 'admin'}]
)
dataset = factories.Dataset(owner_org=owner_org['id'])
resource = factories.Resource(package_id=dataset['id'])
app = helpers._get_test_app()
response = app.post(
url_for(controller='package', action='resource_delete',
id=dataset['name'], resource_id=resource['id']),
status=403,
)
response.mustcontain('Unauthorized to delete package')
def test_logged_in_users_cannot_delete_resources_they_do_not_own(self):
# setup our dataset
owner = factories.User()
owner_org = factories.Organization(
users=[{'name': owner['id'], 'capacity': 'admin'}]
)
dataset = factories.Dataset(owner_org=owner_org['id'])
resource = factories.Resource(package_id=dataset['id'])
# access as another user
user = factories.User()
env = {'REMOTE_USER': user['name'].encode('ascii')}
app = helpers._get_test_app()
response = app.post(
url_for(controller='package', action='resource_delete',
id=dataset['name'], resource_id=resource['id']),
extra_environ=env,
expect_errors=True
)
assert_equal(403, response.status_int)
response.mustcontain('Unauthorized to delete package')
def test_sysadmins_can_delete_any_resource(self):
owner_org = factories.Organization()
dataset = factories.Dataset(owner_org=owner_org['id'])
resource = factories.Resource(package_id=dataset['id'])
sysadmin = factories.Sysadmin()
app = helpers._get_test_app()
env = {'REMOTE_USER': sysadmin['name'].encode('ascii')}
response = app.post(
url_for(controller='package', action='resource_delete',
id=dataset['name'], resource_id=resource['id']),
extra_environ=env,
)
response = response.follow()
assert_equal(200, response.status_int)
response.mustcontain('This dataset has no data')
assert_raises(p.toolkit.ObjectNotFound, helpers.call_action,
'resource_show', id=resource['id'])
def test_confirm_and_cancel_deleting_a_resource(self):
        '''Test confirmation of deleting resources.
        When resource_delete is made as a GET request, it should return a
        "do you want to delete this resource?" confirmation page.'''
user = factories.User()
owner_org = factories.Organization(
users=[{'name': user['id'], 'capacity': 'admin'}]
)
dataset = factories.Dataset(owner_org=owner_org['id'])
resource = factories.Resource(package_id=dataset['id'])
app = helpers._get_test_app()
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.get(
url_for(controller='package', action='resource_delete',
id=dataset['name'], resource_id=resource['id']),
extra_environ=env,
)
assert_equal(200, response.status_int)
message = 'Are you sure you want to delete resource - {name}?'
response.mustcontain(message.format(name=resource['name']))
# cancelling sends us back to the resource edit page
form = response.forms['confirm-resource-delete-form']
response = form.submit('cancel')
response = response.follow()
assert_equal(200, response.status_int)
class TestSearch(helpers.FunctionalTestBase):
def test_search_basic(self):
dataset1 = factories.Dataset()
offset = url_for(controller='package', action='search')
app = self._get_test_app()
page = app.get(offset)
assert dataset1['name'] in page.body.decode('utf8')
def test_search_sort_by_blank(self):
factories.Dataset()
# ?sort has caused an exception in the past
offset = url_for(controller='package', action='search') + '?sort'
app = self._get_test_app()
app.get(offset)
def test_search_sort_by_bad(self):
factories.Dataset()
# bad spiders try all sorts of invalid values for sort. They should get
# a 400 error with specific error message. No need to alert the
# administrator.
offset = url_for(controller='package', action='search') + \
'?sort=gvgyr_fgevat+nfp'
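        # ('gvgyr_fgevat+nfp' is ROT13 for 'title_string asc', an invalid sort field)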
app = self._get_test_app()
response = app.get(offset, status=[200, 400])
if response.status == 200:
import sys
sys.stdout.write(response.body)
raise Exception("Solr returned an unknown error message. "
"Please check the error handling "
"in ckan/lib/search/query.py:run")
def test_search_solr_syntax_error(self):
factories.Dataset()
# SOLR raises SyntaxError when it can't parse q (or other fields?).
# Whilst this could be due to a bad user input, it could also be
# because CKAN mangled things somehow and therefore we flag it up to
# the administrator and give a meaningless error, just in case
offset = url_for(controller='package', action='search') + \
'?q=--included'
app = self._get_test_app()
search_response = app.get(offset)
search_response_html = BeautifulSoup(search_response.body)
err_msg = search_response_html.select('#search-error')
err_msg = ''.join([n.text for n in err_msg])
assert_in('error while searching', err_msg)
def test_search_plugin_hooks(self):
with p.use_plugin('test_package_controller_plugin') as plugin:
offset = url_for(controller='package', action='search')
app = self._get_test_app()
app.get(offset)
# get redirected ...
assert plugin.calls['before_search'] == 1, plugin.calls
assert plugin.calls['after_search'] == 1, plugin.calls
def test_search_page_request(self):
'''Requesting package search page returns list of datasets.'''
app = self._get_test_app()
factories.Dataset(name="dataset-one", title='Dataset One')
factories.Dataset(name="dataset-two", title='Dataset Two')
factories.Dataset(name="dataset-three", title='Dataset Three')
search_url = url_for(controller='package', action='search')
search_response = app.get(search_url)
assert_true('3 datasets found' in search_response)
search_response_html = BeautifulSoup(search_response.body)
ds_titles = search_response_html.select('.dataset-list '
'.dataset-item '
'.dataset-heading a')
ds_titles = [n.string for n in ds_titles]
assert_equal(len(ds_titles), 3)
assert_true('Dataset One' in ds_titles)
assert_true('Dataset Two' in ds_titles)
assert_true('Dataset Three' in ds_titles)
def test_search_page_results(self):
'''Searching for datasets returns expected results.'''
app = self._get_test_app()
factories.Dataset(name="dataset-one", title='Dataset One')
factories.Dataset(name="dataset-two", title='Dataset Two')
factories.Dataset(name="dataset-three", title='Dataset Three')
search_url = url_for(controller='package', action='search')
search_response = app.get(search_url)
search_form = search_response.forms['dataset-search-form']
search_form['q'] = 'One'
search_results = webtest_submit(search_form)
assert_true('1 dataset found' in search_results)
search_response_html = BeautifulSoup(search_results.body)
ds_titles = search_response_html.select('.dataset-list '
'.dataset-item '
'.dataset-heading a')
ds_titles = [n.string for n in ds_titles]
assert_equal(len(ds_titles), 1)
assert_true('Dataset One' in ds_titles)
def test_search_page_no_results(self):
'''Search with non-returning phrase returns no results.'''
app = self._get_test_app()
factories.Dataset(name="dataset-one", title='Dataset One')
factories.Dataset(name="dataset-two", title='Dataset Two')
factories.Dataset(name="dataset-three", title='Dataset Three')
search_url = url_for(controller='package', action='search')
search_response = app.get(search_url)
search_form = search_response.forms['dataset-search-form']
search_form['q'] = 'Nout'
search_results = webtest_submit(search_form)
assert_true('No datasets found for "Nout"' in search_results)
search_response_html = BeautifulSoup(search_results.body)
ds_titles = search_response_html.select('.dataset-list '
'.dataset-item '
'.dataset-heading a')
ds_titles = [n.string for n in ds_titles]
assert_equal(len(ds_titles), 0)
def test_search_page_results_tag(self):
'''Searching with a tag returns expected results.'''
app = self._get_test_app()
factories.Dataset(name="dataset-one", title='Dataset One',
tags=[{'name': 'my-tag'}])
factories.Dataset(name="dataset-two", title='Dataset Two')
factories.Dataset(name="dataset-three", title='Dataset Three')
search_url = url_for(controller='package', action='search')
search_response = app.get(search_url)
assert_true('/dataset?tags=my-tag' in search_response)
tag_search_response = app.get('/dataset?tags=my-tag')
assert_true('1 dataset found' in tag_search_response)
search_response_html = BeautifulSoup(tag_search_response.body)
ds_titles = search_response_html.select('.dataset-list '
'.dataset-item '
'.dataset-heading a')
ds_titles = [n.string for n in ds_titles]
assert_equal(len(ds_titles), 1)
assert_true('Dataset One' in ds_titles)
def test_search_page_results_private(self):
'''Private datasets don't show up in dataset search results.'''
app = self._get_test_app()
org = factories.Organization()
factories.Dataset(name="dataset-one", title='Dataset One',
owner_org=org['id'], private=True)
factories.Dataset(name="dataset-two", title='Dataset Two')
factories.Dataset(name="dataset-three", title='Dataset Three')
search_url = url_for(controller='package', action='search')
search_response = app.get(search_url)
search_response_html = BeautifulSoup(search_response.body)
ds_titles = search_response_html.select('.dataset-list '
'.dataset-item '
'.dataset-heading a')
ds_titles = [n.string for n in ds_titles]
assert_equal(len(ds_titles), 2)
assert_true('Dataset One' not in ds_titles)
assert_true('Dataset Two' in ds_titles)
assert_true('Dataset Three' in ds_titles)
def test_user_not_in_organization_cannot_search_private_datasets(self):
app = helpers._get_test_app()
user = factories.User()
organization = factories.Organization()
dataset = factories.Dataset(
owner_org=organization['id'],
private=True,
)
env = {'REMOTE_USER': user['name'].encode('ascii')}
search_url = url_for(controller='package', action='search')
search_response = app.get(search_url, extra_environ=env)
search_response_html = BeautifulSoup(search_response.body)
ds_titles = search_response_html.select('.dataset-list '
'.dataset-item '
'.dataset-heading a')
assert_equal([n.string for n in ds_titles], [])
def test_user_in_organization_can_search_private_datasets(self):
app = helpers._get_test_app()
user = factories.User()
organization = factories.Organization(
users=[{'name': user['id'], 'capacity': 'member'}])
dataset = factories.Dataset(
title='A private dataset',
owner_org=organization['id'],
private=True,
)
env = {'REMOTE_USER': user['name'].encode('ascii')}
search_url = url_for(controller='package', action='search')
search_response = app.get(search_url, extra_environ=env)
search_response_html = BeautifulSoup(search_response.body)
ds_titles = search_response_html.select('.dataset-list '
'.dataset-item '
'.dataset-heading a')
assert_equal([n.string for n in ds_titles], ['A private dataset'])
def test_user_in_different_organization_cannot_search_private_datasets(self):
app = helpers._get_test_app()
user = factories.User()
org1 = factories.Organization(
users=[{'name': user['id'], 'capacity': 'member'}])
org2 = factories.Organization()
dataset = factories.Dataset(
title='A private dataset',
owner_org=org2['id'],
private=True,
)
env = {'REMOTE_USER': user['name'].encode('ascii')}
search_url = url_for(controller='package', action='search')
search_response = app.get(search_url, extra_environ=env)
search_response_html = BeautifulSoup(search_response.body)
ds_titles = search_response_html.select('.dataset-list '
'.dataset-item '
'.dataset-heading a')
assert_equal([n.string for n in ds_titles], [])
@helpers.change_config('ckan.search.default_include_private', 'false')
def test_search_default_include_private_false(self):
app = helpers._get_test_app()
user = factories.User()
organization = factories.Organization(
users=[{'name': user['id'], 'capacity': 'member'}])
dataset = factories.Dataset(
owner_org=organization['id'],
private=True,
)
env = {'REMOTE_USER': user['name'].encode('ascii')}
search_url = url_for(controller='package', action='search')
search_response = app.get(search_url, extra_environ=env)
search_response_html = BeautifulSoup(search_response.body)
ds_titles = search_response_html.select('.dataset-list '
'.dataset-item '
'.dataset-heading a')
assert_equal([n.string for n in ds_titles], [])
def test_sysadmin_can_search_private_datasets(self):
app = helpers._get_test_app()
user = factories.Sysadmin()
organization = factories.Organization()
dataset = factories.Dataset(
title='A private dataset',
owner_org=organization['id'],
private=True,
)
env = {'REMOTE_USER': user['name'].encode('ascii')}
search_url = url_for(controller='package', action='search')
search_response = app.get(search_url, extra_environ=env)
search_response_html = BeautifulSoup(search_response.body)
ds_titles = search_response_html.select('.dataset-list '
'.dataset-item '
'.dataset-heading a')
assert_equal([n.string for n in ds_titles], ['A private dataset'])
class TestPackageFollow(helpers.FunctionalTestBase):
def test_package_follow(self):
app = self._get_test_app()
user = factories.User()
package = factories.Dataset()
env = {'REMOTE_USER': user['name'].encode('ascii')}
follow_url = url_for(controller='package',
action='follow',
id=package['id'])
response = app.post(follow_url, extra_environ=env, status=302)
response = response.follow()
assert_true('You are now following {0}'
.format(package['title'])
in response)
def test_package_follow_not_exist(self):
'''Pass an id for a package that doesn't exist'''
app = self._get_test_app()
user_one = factories.User()
env = {'REMOTE_USER': user_one['name'].encode('ascii')}
follow_url = url_for(controller='package',
action='follow',
id='not-here')
response = app.post(follow_url, extra_environ=env, status=302)
response = response.follow(status=404)
assert_true('Dataset not found' in response)
def test_package_unfollow(self):
app = self._get_test_app()
user_one = factories.User()
package = factories.Dataset()
env = {'REMOTE_USER': user_one['name'].encode('ascii')}
follow_url = url_for(controller='package',
action='follow',
id=package['id'])
app.post(follow_url, extra_environ=env, status=302)
unfollow_url = url_for(controller='package', action='unfollow',
id=package['id'])
unfollow_response = app.post(unfollow_url, extra_environ=env,
status=302)
unfollow_response = unfollow_response.follow()
assert_true('You are no longer following {0}'
.format(package['title'])
in unfollow_response)
def test_package_unfollow_not_following(self):
'''Unfollow a package not currently following'''
app = self._get_test_app()
user_one = factories.User()
package = factories.Dataset()
env = {'REMOTE_USER': user_one['name'].encode('ascii')}
unfollow_url = url_for(controller='package', action='unfollow',
id=package['id'])
unfollow_response = app.post(unfollow_url, extra_environ=env,
status=302)
unfollow_response = unfollow_response.follow()
assert_true('You are not following {0}'.format(package['id'])
in unfollow_response)
def test_package_unfollow_not_exist(self):
'''Unfollow a package that doesn't exist.'''
app = self._get_test_app()
user_one = factories.User()
env = {'REMOTE_USER': user_one['name'].encode('ascii')}
unfollow_url = url_for(controller='package', action='unfollow',
id='not-here')
unfollow_response = app.post(unfollow_url, extra_environ=env,
status=302)
unfollow_response = unfollow_response.follow(status=404)
assert_true('Dataset not found' in unfollow_response)
def test_package_follower_list(self):
'''Following users appear on followers list page.'''
app = self._get_test_app()
user_one = factories.Sysadmin()
package = factories.Dataset()
env = {'REMOTE_USER': user_one['name'].encode('ascii')}
follow_url = url_for(controller='package',
action='follow',
id=package['id'])
app.post(follow_url, extra_environ=env, status=302)
followers_url = url_for(controller='package', action='followers',
id=package['id'])
# Only sysadmins can view the followers list pages
followers_response = app.get(followers_url, extra_environ=env,
status=200)
assert_true(user_one['display_name'] in followers_response)
class TestDatasetRead(helpers.FunctionalTestBase):
def test_dataset_read(self):
app = self._get_test_app()
dataset = factories.Dataset()
url = url_for(controller='package',
action='read',
id=dataset['id'])
response = app.get(url)
assert_in(dataset['title'], response)
|
# %% __init__(), no argument
import parent_import
from tune_easy import LGBMRegressorTuning
import pandas as pd
# Load dataset
df_reg = pd.read_csv('../sample_data/osaka_metropolis_english.csv')
TARGET_VARIABLE = 'approval_rate' # Target variable
USE_EXPLANATORY = ['2_between_30to60', '3_male_ratio', '5_household_member', 'latitude'] # Explanatory variables
y = df_reg[TARGET_VARIABLE].values
X = df_reg[USE_EXPLANATORY].values
###### __init__() ######
tuning = LGBMRegressorTuning(X, y, USE_EXPLANATORY)
# %% __init__(), for LeaveOneGroupOut
import parent_import
from tune_easy import XGBRegressorTuning
from sklearn.model_selection import LeaveOneGroupOut
import pandas as pd
# Load dataset
df_reg = pd.read_csv('../sample_data/osaka_metropolis_english.csv')
TARGET_VARIABLE = 'approval_rate' # Target variable
USE_EXPLANATORY = ['2_between_30to60', '3_male_ratio', '5_household_member', 'latitude'] # Explanatory variables
X = df_reg[USE_EXPLANATORY].values
y = df_reg[TARGET_VARIABLE].values
###### __init__() ######
tuning = XGBRegressorTuning(X, y, USE_EXPLANATORY, # Required argument
cv_group=df_reg['ward_after'].values) # Grouping data for LeaveOneGroupOut
# %% __init__(), use validation data as eval_data in fit_params
import parent_import
from tune_easy import LGBMRegressorTuning
import pandas as pd
# Load dataset
df_reg = pd.read_csv('../sample_data/osaka_metropolis_english.csv')
TARGET_VARIABLE = 'approval_rate' # Target variable
USE_EXPLANATORY = ['2_between_30to60', '3_male_ratio', '5_household_member', 'latitude'] # Explanatory variables
X = df_reg[USE_EXPLANATORY].values
y = df_reg[TARGET_VARIABLE].values
###### __init__() ######
tuning = LGBMRegressorTuning(X, y, USE_EXPLANATORY, # Required argument
eval_data_source='valid') # Use valid data as eval_set
# %%
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = 'Ron Scott-Adams'
SITENAME = 'MD Options Test'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'America/Chicago'
DEFAULT_LANG = 'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('Python.org', 'http://python.org/'),
('Jinja2', 'http://jinja.pocoo.org/'),
('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (('You can add links in your config file', '#'),
('Another social link', '#'),)
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
MARKDOWN = {
'extension_configs': {
'markdown.extensions.toc': {
'marker': '[TableOfContents]',
'title': 'Table of Contents',
'anchorlink': True,
'permalink': True,
'baselevel': 2,
}
}
}
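# With the toc extension configured above, writing the literal marker
# [TableOfContents] in a Markdown source file marks where the generated
# "Table of Contents" is inserted; baselevel 2 renders top-level headings as <h2>.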
|
# Rewritten by RayzoR
import sys
from com.l2jfrozen.gameserver.model.quest import State
from com.l2jfrozen.gameserver.model.quest import QuestState
from com.l2jfrozen.gameserver.model.quest.jython import QuestJython as JQuest
qn = "269_InventionAmbition"
class Quest (JQuest) :
def __init__(self,id,name,descr):
JQuest.__init__(self,id,name,descr)
self.questItemIds = [10866]
def onEvent (self,event,st) :
htmltext = event
if event == "32486-03.htm" :
st.set("cond","1")
st.setState(STARTED)
st.playSound("ItemSound.quest_accept")
elif event == "32486-05.htm" :
st.exitQuest(1)
st.playSound("ItemSound.quest_finish")
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
EnergyOres = st.getQuestItemsCount(10866)
if id == CREATED :
if player.getLevel() < 18 :
htmltext = "32486-00.htm"
st.exitQuest(1)
else :
htmltext = "32486-01.htm"
elif EnergyOres > 0:
htmltext = "32486-07.htm"
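        # reward 50 adena (item id 57) per Energy Ore, plus a 2044 adena bonus for 20 or more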
bonus = 0
if EnergyOres >= 20:
bonus = 2044
st.giveItems(57,EnergyOres*50+bonus)
st.takeItems(10866,-1)
else :
htmltext = "32486-04.htm"
return htmltext
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
if st.getState() != STARTED : return
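    # 60% chance per kill (random 0-9 < 6) to drop one Energy Ore (item 10866)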
if st.getRandom(10)<6 :
st.giveItems(10866,1)
st.playSound("ItemSound.quest_itemget")
return
QUEST = Quest(269,qn,"Invention Ambition")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(32486)
QUEST.addTalkId(32486)
for mob in range(21124,21132) :
QUEST.addKillId(mob)
|
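# Kernel launcher: starts an IPython kernel process with BakeryKernel as the
# kernel class; typically this lives in the package's __main__ module.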
from ipykernel.kernelapp import IPKernelApp
from . import BakeryKernel
IPKernelApp.launch_instance(kernel_class=BakeryKernel)
|
"""
Entry point
"""
from repl import Repl
def run():
Repl().loop()
|
import click
from ..alias import AliasedGroup
from .. import wrapboto, display
@click.group(cls=AliasedGroup, short_help='Delete resources.')
def delete():
pass
@delete.command(name='service')
@click.argument('service', required=True)
@click.option('--force', is_flag=True, default=False, help='Scale down count to 0 before deleting.')
@click.option('-o', '--output', type=click.Choice(['raw']),
help="Output format.")
@click.option('-c', '--cluster',
help="Specify cluster to execute command. Default usage cluster from context.")
@click.pass_context
def delete_service(ctx, service, cluster, force, output):
"""
\b
    # Delete the running service my-app if it has no running tasks
cmd::ecsctl delete service my-app
\b
    # Force-stop all tasks and then delete the running service
cmd::ecsctl delete service my-app --force
"""
if not cluster:
cluster = ctx.obj['cluster']
bw = ctx.obj['bw']
resp, err = bw.delete_service(service, cluster=cluster, force=force)
if err:
click.echo(click.style(resp, fg='red'))
else:
if output == 'raw':
click.echo(display.de_unicode(resp['service']))
else:
click.echo(resp['service']['serviceArn'])
@delete.command(name='task-definition')
@click.argument('task-definition', required=True)
@click.option('-c', '--cluster',
help="Specify cluster to execute command. Default usage cluster from context.")
@click.pass_context
def delete_task_definition(ctx, task_definition, cluster):
"""
\b
# Delete task definition
cmd::ecsctl delete task-definition my-app:1
"""
bw = ctx.obj['bw']
resp, err = bw.deregister_task_definition(task_definition)
if err:
click.echo(click.style(resp, fg='red'))
else:
click.echo(resp['taskDefinitionArn'])
@delete.command(name='task-definition-family')
@click.argument('task-definition-family', required=True)
@click.option('-c', '--cluster',
help="Specify cluster to execute command. Default usage cluster from context.")
@click.pass_context
def delete_task_definition_family(ctx, task_definition_family, cluster):
"""
\b
    # Delete all task definitions and deregister the task definition family
cmd::ecsctl delete task-definition-family my-app
"""
bw = ctx.obj['bw']
resp, err = bw.deregister_task_definition_family(task_definition_family)
if err:
click.echo(click.style(resp, fg='red'))
else:
click.echo('\n'.join(td['taskDefinitionArn'] for td in resp))
@delete.command(name='secret')
@click.argument('task-definition-family', required=True)
@click.option('-e', '--variable-name', multiple=True,
help="Set one element to delete from parameter store.")
@click.option('-c', '--cluster',
help="Specify cluster to execute command. Default usage cluster from context.")
@click.pass_context
def delete_secret(ctx, task_definition_family, variable_name, cluster):
"""
\b
# Delete all variables from selected task definition family
cmd::ecsctl delete secret my-app
\b
    # Delete specific variables from the selected task definition family
cmd::ecsctl delete secret my-app -e SECRET_NAME -e SECRET_NAME_2
"""
if not cluster:
cluster = ctx.obj['cluster']
bw = ctx.obj['bw']
resp = bw.delete_secret(cluster, task_definition_family, variable_name)
if resp.get('DeletedParameters'):
click.echo('Deleted environment variables:')
click.echo('\n'.join(resp.get('DeletedParameters')))
if resp.get('InvalidParameters'):
click.echo(click.style('Invalid environment variables:', fg="red"))
click.echo(click.style('\n'.join(resp.get('InvalidParameters')), fg='red'))
|
from collections.abc import Sequence
from pathlib import Path
import os
import copy
import pytest
from regolith.schemas import SCHEMAS, validate, EXEMPLARS
from pprint import pprint
@pytest.mark.parametrize("key", SCHEMAS.keys())
def test_validation(key):
if isinstance(EXEMPLARS[key], Sequence):
for e in EXEMPLARS[key]:
validate(key, e, SCHEMAS)
else:
validate(key, EXEMPLARS[key], SCHEMAS)
@pytest.mark.parametrize("key", SCHEMAS.keys())
def test_exemplars(key):
if isinstance(EXEMPLARS[key], Sequence):
for e in EXEMPLARS[key]:
v = validate(key, e, SCHEMAS)
assert v[0]
else:
v = validate(key, EXEMPLARS[key], SCHEMAS)
if not v[0]:
for vv, reason in v[1].items():
print(vv, reason)
print(type(EXEMPLARS[key][vv]))
pprint(EXEMPLARS[key][vv])
assert v[0]
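# A deliberately invalid projectum: it omits the required top-level 'lead' and
# 'status' fields, so inserting it into the database should fail validation.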
BAD_PROJECTUM = {
"_id": "sb_firstprojectum",
"begin_date": "2020-04-28",
"collaborators": ["aeinstein", "pdirac"],
"deliverable": {
"audience": ["beginning grad in chemistry"],
"due_date": "2021-05-05",
"success_def": "audience is happy",
"scope": ["UCs that are supported or some other scope description "
"if it is software", "sketch of science story if it is paper"
],
"platform": "description of how and where the audience will access "
"the deliverable. Journal if it is a paper",
"roll_out": [
"steps that the audience will take to access and interact with "
"the deliverable", "not needed for paper submissions"],
"notes": ["deliverable note"],
"status": "proposed"
}
}
def test_mongo_invalid_insertion(make_mongodb):
# proof that valid insertion is allowed is provided by helper tests on mongo
if make_mongodb is False:
pytest.skip("Mongoclient failed to start")
else:
repo = Path(make_mongodb)
from regolith.database import connect
from regolith.runcontrol import DEFAULT_RC, load_rcfile
os.chdir(repo)
rc = copy.copy(DEFAULT_RC)
rc.schemas = SCHEMAS
rc._update(load_rcfile("regolithrc.json"))
with connect(rc) as rc.client:
only_database_in_test = rc.databases[0]['name']
try:
rc.client.insert_one(only_database_in_test, 'projecta', BAD_PROJECTUM)
except ValueError as e:
result = e.args[0]
expected = 'ERROR in sb_firstprojectum:\n{\'lead\': [\'required field\'], \'status\': [\'required field\']}\nNone\nNone\n---------------\n'
assert result == expected
|
from .entity import EntityType, Entity
from .validator import PropertyValidator
from calm.dsl.store import Cache
from calm.dsl.constants import CACHE
# Ref
class RefType(EntityType):
__schema_name__ = "Ref"
__openapi_type__ = "app_ref"
@classmethod
def pre_decompile(mcls, cdict, context, prefix=""):
cdict = super().pre_decompile(cdict, context, prefix=prefix)
        # Class name for ref objects should always be generated, never taken from the decompiled data
cdict["__name__"] = None
return cdict
def get_user_attrs(cls):
"""returns user attrs for ref class"""
attrs = super().get_user_attrs()
attrs.pop("__self__", None) # Not a user attr for reference object
return attrs
class RefValidator(PropertyValidator, openapi_type="app_ref"):
__default__ = None
__kind__ = RefType
def _ref(**kwargs):
name = kwargs.get("name", None)
bases = (Entity,)
return RefType(name, bases, kwargs)
def ref(cls):
if isinstance(cls, RefType):
return cls
return cls.get_ref()
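# A minimal usage sketch (hypothetical Service entity; any Entity subclass
# exposing get_ref() would behave the same way):
#
#   service_ref = ref(Service)      # returns Service.get_ref()
#   same_ref = ref(service_ref)     # RefType instances pass through unchanged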
|
import unittest
from kdap import analysis
import os
import shutil
import json
class TestAnalysis(unittest.TestCase):
def setUp(self):
self.test_dir = os.path.join(os.path.dirname(__file__), 'testOutput/')
self.ropar_filename = 'Indian_Institute_of_Technology_Ropar.knolml'
self.zinc_filename = 'Zinc.knolml'
self.k = analysis.knol()
self.wiki_classes = ['fa', 'fl', 'fm', 'a', 'ga', 'b', 'c', 'start', 'stub', 'list']
with open(os.path.dirname(__file__)+'/test_data.txt', 'r') as infile:
self.frames_data = json.loads(infile.read())
self.class_data = {}
for wclass in self.wiki_classes:
with open(os.path.dirname(__file__)+'/wiki_class_data/'+wclass+'.txt', 'r') as infile:
self.class_data[wclass] = {(int(x.split('!@!')[0]), x.split('!@!')[1][:-1]) for x in infile.readlines()}
with open(os.path.dirname(__file__)+'/pageviews_data.json', 'r') as infile:
self.views_data = json.loads(infile.read())[0]
with open(os.path.dirname(__file__)+'/test_instance_dates.txt', 'r') as infile:
self.instance_dates = infile.read().split(',')
with open(os.path.dirname(__file__)+'/author_edits.txt', 'r') as infile:
self.author_edits = json.load(infile)
def get_wiki_article(self):
article_name = 'IIT Ropar'
self.k.get_wiki_article(article_name=article_name, output_dir=self.test_dir)
self.assertTrue(os.path.exists(self.test_dir + self.ropar_filename))
def test_download_dataset_article_list(self):
self.k.download_dataset(sitename='wikipedia', article_list=['IIT Ropar', 'Zinc'], destdir=self.test_dir)
self.assertTrue(os.path.exists(self.test_dir + self.ropar_filename))
self.assertTrue(os.path.exists(self.test_dir + self.zinc_filename))
self.frame_test()
self.get_instance_date_test()
def frame_test(self):
test_data = {
'id': [],
'title': [],
'bytes': [],
'editor': [],
'time': [],
'text': [],
'stats': []
}
for inst in self.k.frame(file_name=self.test_dir+self.ropar_filename):
test_data['id'].append(inst.instanceId)
test_data['title'].append(inst.get_title())
test_data['bytes'].append(inst.get_bytes())
test_data['editor'].append(inst.get_editor())
test_data['time'].append(inst.get_timestamp())
test_data['text'].append(inst.get_text())
test_data['stats'].append(inst.get_text_stats())
for key in self.frames_data.keys():
self.assertTrue(all(x in self.frames_data[key] for x in test_data[key]))
def test_get_pageviews(self):
views = self.k.get_pageviews(site_name='wikipedia', article_name='Zinc', granularity='daily', start='20200828',
end='20200917')
for date in views:
str_date = date.strftime('%Y-%m-%d')
self.assertIn(str_date, self.views_data.keys())
self.assertEqual(views[date]['Zinc'], self.views_data[str_date])
def test_wiki_article_by_class(self):
for wclass in self.wiki_classes:
articles = self.k.get_wiki_article_by_class(wiki_class=wclass)
self.assertTrue(all(article in self.class_data[wclass] for article in articles))
def get_instance_date_test(self):
        dates = list(self.k.get_instance_date(file_list=self.test_dir+self.ropar_filename).values())[0]
self.assertLessEqual(len(dates), len(self.instance_dates))
self.assertTrue(all(date in self.instance_dates for date in dates))
def author_edits_test(self):
edits = self.k.get_author_edits(article_list=self.test_dir+self.ropar_filename, editor_list='Awadh2020')
for key in edits.keys():
edits[key] = edits[key][self.test_dir+self.ropar_filename]
self.assertIn(key, self.author_edits.keys())
self.assertEqual(edits[key], self.author_edits[key])
def tearDown(self):
if os.path.exists(self.test_dir):
shutil.rmtree(self.test_dir)
if os.path.exists('outputD/'):
shutil.rmtree('outputD/')
|
#!/usr/bin/env python
'''
Generate a file of random strings and save its contents to a MySQL database.
'''
import os
import random
import MySQLdb
#import string
f = open(os.getcwd()+'\\2','w')
for x in range(200):
    words = [chr(a) for a in range(65, 91)] + [chr(a) for a in range(97, 123)] + [str(a) for a in range(0, 10)]
    # equivalent to string.ascii_letters + string.digits
slices = random.sample(words,10)
#temp = str(slices)
temp = "".join(slices)+'\n'
#print temp
f.write(temp)
f.close()
f = open(os.getcwd()+'\\2','r')
words = f.readlines()
try:
    # connect to your local MySQL server
    conn = MySQLdb.connect(host='localhost', user='root', passwd='******', port=3306)
    cur = conn.cursor()
    conn.select_db('python')
    # executemany takes the SQL statement and a sequence of parameter values
    count = cur.executemany('insert into test(name) values(%s)', words)
    conn.commit()  # commit the inserts
    cur.close()    # close the cursor
    conn.close()   # close the connection
except MySQLdb.Error as e:
    print("Mysql Error %d: %s" % (e.args[0], e.args[1]))
|
# -*- coding: utf-8 -*-
# List-page related views
# @Author : joker
# @Date : 2019-01-11
from flask import render_template, jsonify, request, current_app
from flask_login import current_user
from app.common.response_code import RET
from ...common import constants
from app.models import House
from ..v1 import api
@api.route('/search_view',methods = ['GET'])
def search_view():
return render_template('search/search.html')
@api.route('/houses_list',methods = ['GET'])
def houses_list():
try:
        # page number
        page = request.args.get('page', 1, type=int)
        # area id
        aid = request.args.get('aid')
        # start date
        sd = request.args.get('sd')
        # end date
        ed = request.args.get('ed')
        # sort key
        sk = request.args.get('sk')
        filters = []
        if aid:
            filters.append(House.area_id == aid)
        if sd:
            filters.append(House.create_time >= sd)
        if ed:
            filters.append(House.create_time <= ed)
        if sk == "booking":
            order = House.order_count.desc()
        elif sk == "price-inc":
            order = House.price.asc()
        elif sk == "price-des":
            order = House.price.desc()
else:
order = House.create_time.desc()
pagination = House.query.filter(*filters).order_by(order).paginate(
page,per_page = constants.HOME_POSTS_PER_PAGE,
error_out= False
)
houses = pagination.items
        return jsonify(
            status=RET.OK,
            errmsg="Request successful",
            data={
                'houses': [house.to_basic_dict() for house in houses]
            },
            total_page=pagination.pages
        )
except Exception as e:
current_app.logger.error(e)
        return jsonify(status=RET.DBERR, errmsg="Internal error, please contact the administrator")
@api.route('/show_detail/<int:ids>',methods = ['GET'])
def show_detail(ids):
houses = House.query.get_or_404(ids)
data = {'house':houses.to_full_dict()}
return render_template('search/detail.html',data = data)
|
import xml.etree.ElementTree as etree
from html import unescape
from textwrap import dedent
from markdown.extensions import Extension
from markdown.treeprocessors import Treeprocessor
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name
from pygments.styles import get_style_by_name
from core.utils import check_and_remove_prefix
from core.runner.utils import is_valid_syntax
from core import translation as t
lexer = get_lexer_by_name("python3")
monokai = get_style_by_name("monokai")
html_formatter = HtmlFormatter(nowrap=True)
class HighlightPythonTreeProcessor(Treeprocessor):
codes = None
def run(self, root):
for node in root.findall(".//pre/code"):
text = unescape(node.text)
# TODO: this assumes that __copyable__ never comes after __no_auto_translate__
text, copyable = check_and_remove_prefix(text, "__copyable__\n")
text, no_auto_translate = check_and_remove_prefix(text, "__no_auto_translate__\n")
for code in [text, text + "\n 0", dedent(text)]:
if is_valid_syntax(code):
self.highlight_node(node, text)
self.codes.append(dict(
code=code,
text=text,
no_auto_translate=no_auto_translate,
))
break
else:
node.text = text
if copyable:
node.append(
etree.fromstring(
f'<button class="btn btn-primary copy-button">{t.Terms.copy_button}</button>'
)
)
node.set("class", node.get("class", "") + " copyable")
@staticmethod
def highlight_node(node, text):
        import pygments
highlighted = pygments.highlight(text, lexer, html_formatter)
tail = node.tail
node.clear()
node.set("class", "codehilite")
node.append(etree.fromstring(f"<span>{highlighted}</span>"))
node.tail = tail
class HighlightPythonExtension(Extension):
codes = None
def extendMarkdown(self, md):
processor = HighlightPythonTreeProcessor()
processor.codes = self.codes
md.treeprocessors.register(processor, "highlight_python", 0)
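# A minimal usage sketch (assuming the core.* helpers imported above are available
# in the environment): the extension collects highlighted snippets into `codes`.
if __name__ == "__main__":
    import markdown

    extension = HighlightPythonExtension()
    extension.codes = []
    html = markdown.markdown("    x = 1\n", extensions=[extension])
    print(html)             # highlighted <pre><code class="codehilite">...</code></pre>
    print(extension.codes)  # e.g. [{'code': ..., 'text': 'x = 1\n', 'no_auto_translate': False}]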
|
from typing import List
"""
# Definition for a Node.
class Node:
def __init__(self, val, left=None, right=None):
self.val = val
self.left = left
self.right = right
"""
class Solution:
def treeToDoublyList(self, root: 'Node') -> 'Node':
        if root is None:
return root
else:
headNode, tailNode = self.treeToList(root)
headNode.left = tailNode
tailNode.right = headNode
return headNode
def treeToList(self, root):
        if root is None:
return None, None
else:
headNode = root
tailNode = root
            if root.left is not None:
leftHeadNode, leftTailNode = self.treeToList(root.left)
root.left = leftTailNode
leftTailNode.right = root
headNode = leftHeadNode
            if root.right is not None:
rightHeadNode, rightTailNode = self.treeToList(root.right)
root.right = rightHeadNode
rightHeadNode.left = root
tailNode = rightTailNode
return headNode, tailNode
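# A quick sanity check for the conversion above, using a hypothetical Node class
# that mirrors the commented-out definition.
if __name__ == "__main__":
    class Node:
        def __init__(self, val, left=None, right=None):
            self.val = val
            self.left = left
            self.right = right

    root = Node(4, Node(2, Node(1), Node(3)), Node(5))
    head = Solution().treeToDoublyList(root)
    vals, node = [], head
    for _ in range(5):
        vals.append(node.val)
        node = node.right
    assert vals == [1, 2, 3, 4, 5]   # sorted (in-order) traversal
    assert node is head              # the list is circular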
|
# Generated by Django 2.0.7 on 2018-11-15 05:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('eclass', '0008_auto_20181109_2243'),
]
operations = [
migrations.AddField(
model_name='list',
name='Subject_list',
field=models.ManyToManyField(to='eclass.Subject'),
),
]
|
import torch
import torch.linalg
class HalfSquared:
def solve_dual(self, P, c):
m = P.shape[1] # number of columns = batch size
# construct lhs matrix P* P + m I
lhs_mat = torch.mm(P.t(), P)
lhs_mat.diagonal().add_(m)
# solve positive-definite linear system using Cholesky factorization
lhs_factor = torch.linalg.cholesky(lhs_mat)
rhs_col = c.unsqueeze(1) # make rhs a column vector, so that cholesky_solve works
return torch.cholesky_solve(rhs_col, lhs_factor)
def eval(self, lin):
return 0.5 * (lin ** 2)
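# A minimal sanity check for the dual solve above: P is d x m, c has length m,
# and the returned x should satisfy (P^T P + m I) x = c.
if __name__ == "__main__":
    torch.manual_seed(0)
    P = torch.randn(8, 4)
    c = torch.randn(4)
    x = HalfSquared().solve_dual(P, c).squeeze(1)
    residual = torch.mm(P.t(), P) @ x + P.shape[1] * x - c
    print(residual.abs().max())  # should be close to zero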
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
import neurotransmitter_pb2 as neurotransmitter__pb2
class NeuronAPIStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Ping = channel.unary_unary(
'/neuron.neuron.NeuronAPI/Ping',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.Echo = channel.stream_stream(
'/neuron.neuron.NeuronAPI/Echo',
request_serializer=neurotransmitter__pb2.SerializedStuff.SerializeToString,
response_deserializer=neurotransmitter__pb2.SerializedStuff.FromString,
)
self.Session = channel.stream_stream(
'/neuron.neuron.NeuronAPI/Session',
request_serializer=neurotransmitter__pb2.SerializedStuff.SerializeToString,
response_deserializer=neurotransmitter__pb2.SerializedStuff.FromString,
)
class NeuronAPIServicer(object):
"""Missing associated documentation comment in .proto file."""
def Ping(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Echo(self, request_iterator, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Session(self, request_iterator, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_NeuronAPIServicer_to_server(servicer, server):
rpc_method_handlers = {
'Ping': grpc.unary_unary_rpc_method_handler(
servicer.Ping,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'Echo': grpc.stream_stream_rpc_method_handler(
servicer.Echo,
request_deserializer=neurotransmitter__pb2.SerializedStuff.FromString,
response_serializer=neurotransmitter__pb2.SerializedStuff.SerializeToString,
),
'Session': grpc.stream_stream_rpc_method_handler(
servicer.Session,
request_deserializer=neurotransmitter__pb2.SerializedStuff.FromString,
response_serializer=neurotransmitter__pb2.SerializedStuff.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'neuron.neuron.NeuronAPI', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class NeuronAPI(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def Ping(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/neuron.neuron.NeuronAPI/Ping',
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Echo(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_stream(request_iterator, target, '/neuron.neuron.NeuronAPI/Echo',
neurotransmitter__pb2.SerializedStuff.SerializeToString,
neurotransmitter__pb2.SerializedStuff.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Session(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_stream(request_iterator, target, '/neuron.neuron.NeuronAPI/Session',
neurotransmitter__pb2.SerializedStuff.SerializeToString,
neurotransmitter__pb2.SerializedStuff.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
import matplotlib.pyplot as plt
_COLORS = """
#a6cee3
#1f78b4
#b2df8a
#33a02c
#fb9a99
#e31a1c
#fdbf6f
#ff7f00
#cab2d6
#6a3d9a
#ffff99
#b15928""".split()
def plot_concellation(data_dict):
corners = data_dict.corners
pattern_id = data_dict.pattern_id.squeeze()
c = [_COLORS[i] for i in pattern_id]
plt.scatter(corners[:, 0], corners[:, 1], c=c)
plt.show()
def plot_concellation_compare(x, y, y_pre, name=None):
"""
    Batch plot comparing the original data with the predicted data (labels come from unsupervised classification).
Args:
x: (B,M,2)
y: (B,M)
y_pre: (B,M)
    Returns:
        None. The figure is shown, or saved to ``name`` if a file name is given.
    """
B, M = y.shape
assert x.shape[-1] == 2
plt.clf()
fig, axs = plt.subplots(2, B, figsize=(15, 15))
for i in range(B):
axs[0, i].scatter(x[i, :, 0], x[i, :, 1], c=[_COLORS[j] for j in y[i]])
axs[1, i].scatter(x[i, :, 0], x[i, :, 1], c=[_COLORS[j] for j in y_pre[i]])
if name:
plt.savefig(name)
else:
plt.show()
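# A minimal sketch of calling the comparison plot above with random data
# (shapes follow the docstring: x is (B, M, 2), y and y_pre are (B, M)).
if __name__ == "__main__":
    import numpy as np
    B, M = 3, 40
    x = np.random.rand(B, M, 2)
    y = np.random.randint(0, len(_COLORS), size=(B, M))
    y_pre = np.random.randint(0, len(_COLORS), size=(B, M))
    plot_concellation_compare(x, y, y_pre)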
|
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/robproj/code/convoy/airforce_ws/src/crazyflie_ros/crazyflie_driver/msg/LogBlock.msg;/home/robproj/code/convoy/airforce_ws/src/crazyflie_ros/crazyflie_driver/msg/GenericLogData.msg;/home/robproj/code/convoy/airforce_ws/src/crazyflie_ros/crazyflie_driver/msg/FullState.msg;/home/robproj/code/convoy/airforce_ws/src/crazyflie_ros/crazyflie_driver/msg/TrajectoryPolynomialPiece.msg;/home/robproj/code/convoy/airforce_ws/src/crazyflie_ros/crazyflie_driver/msg/crtpPacket.msg;/home/robproj/code/convoy/airforce_ws/src/crazyflie_ros/crazyflie_driver/msg/Hover.msg;/home/robproj/code/convoy/airforce_ws/src/crazyflie_ros/crazyflie_driver/msg/Position.msg"
services_str = "/home/robproj/code/convoy/airforce_ws/src/crazyflie_ros/crazyflie_driver/srv/AddCrazyflie.srv;/home/robproj/code/convoy/airforce_ws/src/crazyflie_ros/crazyflie_driver/srv/GoTo.srv;/home/robproj/code/convoy/airforce_ws/src/crazyflie_ros/crazyflie_driver/srv/Land.srv;/home/robproj/code/convoy/airforce_ws/src/crazyflie_ros/crazyflie_driver/srv/RemoveCrazyflie.srv;/home/robproj/code/convoy/airforce_ws/src/crazyflie_ros/crazyflie_driver/srv/SetGroupMask.srv;/home/robproj/code/convoy/airforce_ws/src/crazyflie_ros/crazyflie_driver/srv/StartTrajectory.srv;/home/robproj/code/convoy/airforce_ws/src/crazyflie_ros/crazyflie_driver/srv/Stop.srv;/home/robproj/code/convoy/airforce_ws/src/crazyflie_ros/crazyflie_driver/srv/Takeoff.srv;/home/robproj/code/convoy/airforce_ws/src/crazyflie_ros/crazyflie_driver/srv/UpdateParams.srv;/home/robproj/code/convoy/airforce_ws/src/crazyflie_ros/crazyflie_driver/srv/UploadTrajectory.srv;/home/robproj/code/convoy/airforce_ws/src/crazyflie_ros/crazyflie_driver/srv/sendPacket.srv"
pkg_name = "crazyflie_driver"
dependencies_str = "std_msgs;geometry_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "crazyflie_driver;/home/robproj/code/convoy/airforce_ws/src/crazyflie_ros/crazyflie_driver/msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/melodic/share/geometry_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
#!/usr/bin/env python3
import sys, yaml
from jinja2 import Environment, FileSystemLoader
if __name__ == "__main__":
root_dir = sys.argv[1]
template_filename = sys.argv[2]
yaml_filename = sys.argv[3]
with open('{}/{}'.format(root_dir, yaml_filename)) as y:
config_data = yaml.safe_load(y)
# print(config_data)
env = Environment(loader = FileSystemLoader(root_dir), trim_blocks=True, lstrip_blocks=True)
template = env.get_template(template_filename)
print(template.render(config_data))
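# Example invocation (hypothetical file names; argv order is: root dir, template file, YAML file):
#   python3 render.py ./config service.conf.j2 service.yaml > service.conf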
|
import unittest
from test_env import TestEnv
class TestSet(TestEnv):
def test_cpy_constructor(self):
code="""
def are_equal(s1):
s2 = set(s1)
return s2 == s1
"""
self.run_test(code, {'jack', 'sjoerd'}, are_equal=[{str}])
def test_in(self):
self.run_test("def _in(a,b):\n return b in a", {'aze', 'qsd'},'qsd', _in=[{str},str])
def test_empty_in(self):
self.run_test("def empty_in(b):\n return b in set()",'qsd', empty_in=[str])
def test_len(self):
self.run_test("def _len(a):\n return len(a)", {'aze', 'qsd', 'azeqsd'}, _len=[{str}])
def test_disjoint(self):
self.run_test("def _isdisjoint(a,b):\n return a.isdisjoint(b)", {1,3,2}, {7,2,5}, _isdisjoint=[{int},{float}])
def test_operator_le(self):
self.run_test("def _le(a,b):\n return a <= b", {1.,5.}, {1,2,5}, _le=[{float},{int}])
def test_issubset(self):
self.run_test("def _issubset(a,b):\n return a.issubset(b)", {1.,5.}, {1,2,5}, _issubset=[{float},{int}])
def test_operator_lt(self):
self.run_test("def _lt(a,b):\n return a < b", {1.,5.}, {1,2,5}, _lt=[{float},{int}])
def test_operator_ge(self):
self.run_test("def _ge(a,b):\n return a >= b", {1.,5.}, {1,2,5}, _ge=[{float},{int}])
def test_issuperset(self):
self.run_test("def _issuperset(a,b):\n return a.issuperset(b)", {1.,5.}, {1,2,5}, _issuperset=[{float},{int}])
def test_operator_gt(self):
self.run_test("def _gt(a,b):\n return a > b", {1.,5.}, {1,2,5}, _gt=[{float},{int}])
def test_clear(self):
self.run_test("def _clear(a):\n a.clear()\n return a", {1.,5.}, _clear=[{float}])
def test_pop(self):
self.run_test("def _pop(a):\n a.pop()\n return a", {1.,5.}, _pop=[{float}])
def test_remove(self):
self.run_test("def _remove(a,b):\n a.remove(b)\n return a", {1,3}, 1., _remove=[{int}, float])
def test_remove_strict(self):
self.run_test("def _remove_strict(a,b):\n a.remove(b)\n return a <= {3} and a >= {3}", {1,3}, 1., _remove_strict=[{int}, float])
def test_discard(self):
self.run_test("def _discard(a ,b):\n a.discard(b)\n return a", {1,3}, 1., _discard=[{int},float])
def test_copy(self):
self.run_test("def _copy(a):\n b=a.copy()\n return a <= {3} and a >= {3} and not a is b", {1,3}, _copy=[{int}])
def test_fct_union(self):
self.run_test("def _fct_union(b, c):\n a={1.}\n return a.union(b, c)", {1,3}, {1.,3.,4.,5.,6.} , _fct_union=[{int},{float}])
def test_fct_union_empty_set(self):
self.run_test("def _fct_union_empty_set(b, c):\n a=set()\n return a.union(b, c)", {1,3}, {1.,3.,4.,5.,6.} , _fct_union_empty_set=[{int},{float}])
def test_fct_union_empty_set_list(self):
self.run_test("def _fct_union_empty_set_list(b, c):\n a=set()\n return a.union(b, c)", {1,3}, [1.,3.,4.,5.,6.] , _fct_union_empty_set_list=[{int},[float]])
def test_fct_union_list(self):
self.run_test("def _fct_union_list(b, c):\n a={1.}\n return a.union(b, c)", [1,3], {1.,3.,4.,5.,6.} , _fct_union_list=[[int],{float}])
def test_fct_union_1arg(self):
self.run_test("def _fct_union_1arg(b):\n a={1.}\n return a.union(b)", {1,3,4,5,6}, _fct_union_1arg=[{int}])
def test_operator_union(self):
self.run_test("def _operator_union(b, c):\n a={1.}\n return (a | b | c)", {1,3,4,5,6}, {1.,2.,4.}, _operator_union=[{int},{float}])
def test_update(self):
self.run_test("def _update(b, c):\n a={1.}\n a.update(b, c)\n return a", {1,3}, {1.,3.,4.,5.,6.} , _update=[{int},{float}])
def test_update_list(self):
self.run_test("def _update_list(b, c):\n a={1.}; a.update(b, c); return a", {1,3}, [1.,3.,4.,5.,6.] , _update_list=[{int},[float]])
def test_update_empty_set_list(self):
self.run_test("def _update_empty_set_list(b, c):\n a=set()\n a.update(b, c)\n return a", {1,3}, [1.,3.,4.,5.,6.] , _update_empty_set_list=[{int},[float]])
def test_operator_update(self):
self.run_test("def _operator_update(b, c):\n a={1.,10.}\n a |= b | c\n return a", {1,3,4,5,6}, {1.,2.,4.}, _operator_update=[{int},{float}])
def test_fct_intersection(self):
self.run_test("def _fct_intersection(b, c):\n a={1.}\n return a.intersection(b,c)", {1,3,4,5,6}, {1.,2.,4.}, _fct_intersection=[{int},{float}])
def test_fct_intersection_empty_set(self):
self.run_test("def _fct_intersection_empty_set(b, c):\n a=set()\n return a.intersection(b,c)", {1,3,4,5,6}, {1.,2.,4.}, _fct_intersection_empty_set=[{int},{float}])
def test_fct_intersection_list(self):
self.run_test("def _fct_intersection_list(b, c):\n a={1.}\n return a.intersection(b,c)", {1,3,4,5,6}, [1.,2.,4.], _fct_intersection_list=[{int},[float]])
def test_operator_intersection(self):
self.run_test("def _operator_intersection(b, c):\n a={1.}\n return (a & b & c)", {1,3,4,5,6}, {1.,2.,4.}, _operator_intersection=[{int},{float}])
def test_fct_intersection_update(self):
self.run_test("def _fct_intersection_update(b, c):\n a={1.,10.}\n return a.intersection_update(b,c)", {1,3,4,5,6}, {1.,2.,4.}, _fct_intersection_update=[{int},{float}])
def test_fct_intersection_update_empty_set(self):
self.run_test("def _fct_intersection_update_empty_set(b, c):\n a=set()\n return a.intersection_update(b,c)", {1,3,4,5,6}, {1.,2.,4.}, _fct_intersection_update_empty_set=[{int},{float}])
def test_fct_intersection_empty_set_update(self):
self.run_test("def _fct_intersection_empty_set_update(c):\n a={1}\n b=set()\n return a.intersection_update(b,c)", {1.,2.,4.}, _fct_intersection_empty_set_update=[{float}])
def test_fct_intersection_update_list(self):
self.run_test("def _fct_intersection_update_list(b, c):\n a={1.,10.}\n return a.intersection_update(b,c)", [1,3,4,5,6], {1.,2.,4.}, _fct_intersection_update_list=[[int],{float}])
def test_operator_intersection_update(self):
self.run_test("def _operator_intersection_update(b, c):\n a={1.}\n a &= b & c\n return a", {1,3,4,5,6}, {1.,2.,4.}, _operator_intersection_update=[{int},{float}])
@unittest.skip("pythran -E + pythran success")
def test_operator_intersection_update_empty_set(self):
self.run_test("def _operator_intersection_update_empty_set(b, c):\n a=set()\n a &= b & c\n return a", {1,3,4,5,6}, {1.,2.,4.}, _operator_intersection_update_empty_set=[{int},{float}])
def test_fct_difference(self):
self.run_test("def _fct_difference(b, c):\n a={1.,5.,10.}\n return a.difference(b,c)", {1,3,4,5,6}, {1.,2.,4.}, _fct_difference=[{int},{float}])
def test_fct_difference_empty_set(self):
self.run_test("def _fct_difference_empty_set(b, c):\n a=set()\n return a.difference(b,c)", {1,3,4,5,6}, {1.,2.,4.}, _fct_difference_empty_set=[{int},{float}])
def test_fct_difference_list(self):
self.run_test("def _fct_difference_list(b, c):\n a={1.,5.,10.}\n return a.difference(b,c)", [1,3,4,5,6], {1.,2.,4.}, _fct_difference_list=[[int],{float}])
def test_operator_difference(self):
self.run_test("def _operator_difference(b, c):\n a={1.}\n return (a - b - c)", {1,3,4,5,6}, {1.,2.,4.}, _operator_difference=[{int},{float}])
def test_operator_difference_1arg(self):
self.run_test("def _operator_difference_1arg(b):\n a={1.,2.,5.}\n return (b - a)", {1,3,4,5,6}, _operator_difference_1arg=[{int}])
def test_fct_difference_update(self):
self.run_test("def _fct_difference_update(b, c):\n a={1.,5.,10.}\n return a.difference_update(b,c)", {1,3,4,5,6}, {1.,2.,4.}, _fct_difference_update=[{int},{float}])
def test_fct_difference_update_empty_set(self):
self.run_test("def _fct_difference_update_empty_set(b, c):\n a=set()\n return a.difference_update(b,c)", {1,3,4,5,6}, {1.,2.,4.}, _fct_difference_update_empty_set=[{int},{float}])
def test_fct_difference_update_list(self):
self.run_test("def _fct_difference_update_list(b, c):\n a={1.,5.,10.}\n return a.difference_update(b,c)", {1,3,4,5,6}, [1.,2.,4.], _fct_difference_update_list=[{int},[float]])
def test_operator_difference_update(self):
self.run_test("def _operator_difference_update(b, c):\n a={1.}\n a -= b - c\n return a", {1,3,4,5,6}, {1.,2.,4.}, _operator_difference_update=[{int},{float}])
def test_fct_symmetric_difference(self):
self.run_test("def _fct_symmetric_difference(b, c):\n return (b.symmetric_difference(c))", {1,3,6}, {1.,2.,5.}, _fct_symmetric_difference=[{int},{float}])
def test_fct_symmetric_difference_empty_set(self):
self.run_test("def _fct_symmetric_difference_empty_set(c):\n b=set()\n return (b.symmetric_difference(c))", {1.,2.,5.}, _fct_symmetric_difference_empty_set=[{float}])
def test_fct_symmetric_difference_list(self):
self.run_test("def _fct_symmetric_difference_list(b, c):\n return (b.symmetric_difference(c))", {1,3,6}, [1.,2.,5.], _fct_symmetric_difference_list=[{int},[float]])
def test_operator_symmetric_difference(self):
self.run_test("def _operator_symmetric_difference(b, c):\n return (b ^ c)", {1,3,6}, {1.,2.,5.}, _operator_symmetric_difference=[{int},{float}])
def test_fct_symmetric_difference_update(self):
self.run_test("def _fct_symmetric_difference_update(b, c):\n return (c.symmetric_difference_update(b))", {1,3,6}, {1.,2.,5.}, _fct_symmetric_difference_update=[{int},{float}])
def test_fct_symmetric_difference_update_empty_set(self):
self.run_test("def _fct_symmetric_difference_update_empty_set(b):\n c=set()\n return (c.symmetric_difference_update(b))", {1.,2.,5.}, _fct_symmetric_difference_update_empty_set=[{float}])
def test_fct_symmetric_difference_update2(self):
self.run_test("def _fct_symmetric_difference_update2(b, c):\n return (b.symmetric_difference_update(c))", {1,3,6}, {1.,2.,5.}, _fct_symmetric_difference_update2=[{int},{float}])
def test_fct_symmetric_difference_update_list(self):
self.run_test("def _fct_symmetric_difference_update_list(b, c):\n return (b.symmetric_difference_update(c))", {1,3,6}, [1.,2.,5.], _fct_symmetric_difference_update_list=[{int},[float]])
def test_operator_symmetric_difference_update(self):
self.run_test("def _operator_symmetric_difference_update(b, c):\n b ^= c\n return b", {1,3,6}, {1.,2.,5.}, _operator_symmetric_difference_update=[{int},{float}])
def test_operator_symmetric_difference_update2(self):
self.run_test("def _operator_symmetric_difference_update2(b, c):\n c ^= b\n return c", {1,3,6}, {1.,2.,5.}, _operator_symmetric_difference_update2=[{int},{float}])
    # Check that set.pop() does not conflict with list.pop()
def test_conflict_pop(self):
self.run_test("def _conflict_pop(a,b):\n a.pop()\n b.pop()\n return len(a)+len(b)", {1.,5.}, [1,2], _conflict_pop=[{float},[int]])
def test_set_to_bool_conversion(self):
self.run_test("def set_to_bool_conversion(s, t): return (1 if s else 0), (t if t else set())",
set(), {1, 2},set_to_bool_conversion=[{int}, {int}])
def test_print_set(self):
self.run_test("def print_set(s): return str(s)", {1, 2}, print_set=[{int}])
def test_print_empty_set(self):
self.run_test("def print_empty_set(s): return str(s)", set(), print_empty_set=[{int}])
|
import os, csv, requests, json, time, sys
import scraper, config
from multiprocessing.dummy import Pool as ThreadPool
def get_recent_run(pv_path):
    # find the most recent timestamp among the shop lists added to the archive
scraper.pprint('--get recent run')
files = os.listdir(pv_path)
dates = []
for f in files:
sub_f = f[:-15]
if sub_f == "shops":
lcl_t = f[6:]
lcl_t = lcl_t[:-4]
dates.append(lcl_t)
dates = sorted(dates, reverse=True)
try:
date = dates[0]
return date
except IndexError:
scraper.pprint('Data Folder is missing shop lists')
return 0
def get_shops(pv_timestamp, key, pv_path):
#get the shop list from ./data/shops_'timestamp'
scraper.pprint("--get shops, pv_timestamp: {}".format(pv_timestamp))
lcl_path = "{}/shops_{}.csv".format(pv_path,pv_timestamp)
shops = []
try:
with open(lcl_path, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='|')
count = 0
for row in reader:
shop = {}
if count > 0:
shop['count'] = row[0]
shop['id'] = row[1]
shop['name'] = row[2]
shop['listings'] = row[3]
shop['key'] = key
shops.append(shop)
count+=1
except FileNotFoundError:
scraper.pprint("FILE NOT FOUND ERROR!! Did you provide an incorrect timestamp?")
scraper.pprint(" ")
shop = {}
shop['count'] = 0
shop['id'] = 0
shop['name'] = "Error, FILE NOT FOUND"
shop['listings'] = "0"
shop['key'] = "KEY NOT SAVED"
shops.append(shop)
return shops
def strip_punctuation(pv_list):
scraper.pprint("--strip punctuation")
clean_words = []
for item in pv_list:
item = item.strip(',')
item = item.strip('.')
item = item.strip('?')
item = item.strip('!')
item = item.strip('@')
item = item.strip('#')
item = item.replace('\n','')
item = item.replace('\r','')
item = item.rstrip()
clean_words.append(item)
return clean_words
def get_unique_words(pv_list):
word_set = set()
for word in pv_list:
word_set.add(word)
return word_set
def count_words(word_set, word_list):
word_gram = []
    # for each unique word, count how often it appears in the full word list
for w_set in word_set:
tmp_word = w_set
count = 0
        # count the occurrences of the current word
for w_word in word_list:
if w_set == w_word:
count+=1
word_frame = {}
word_frame['word'] = tmp_word
word_frame['count'] = count
word_gram.append(word_frame)
return word_gram
def word_counter(shop):
print("shop id: {}, name: {}, listings: {}".format(shop['id'],shop['name'], shop['listings']))
shop_id = shop['id']
key = shop['key']
#returns a distribution chart of the 5 most common terms related to one shop
scraper.pprint("--word counter, shop_id: {}".format(shop_id))
#gather data
url = "https://openapi.etsy.com/v2/shops/{}/listings/active?limit=25&offset=0&api_key={}".format(shop_id, key)
headers = {'user-agent': 'my-app/0.0.1'}
r = requests.get(url, headers=headers)
r_status=r.status_code
scraper.pprint("API RESPONSE: {}".format(r_status))
return_list = []
if r_status == 200:
content = r.content
d_content = content.decode("utf-8")
content_json = json.loads(d_content)
result_string = ''
#create string
for result in content_json['results']:
result_string+=str(result['title'])
result_string+=str(result['description'])
result_list = result_string.lower().split(' ')
clean_words = strip_punctuation(result_list)
word_set = get_unique_words(clean_words)
return_frame = {}
return_frame["shop_id"] = shop_id
return_frame["shop_name"] = shop["name"]
return_list.append(return_frame)
for w in filter_gram(count_words(word_set, clean_words)):
return_list.append(w)
scraper.pprint(" Top Terms: {}".format(return_list))
else:
return_dict = {}
return_dict["word"] = 'Error Code Status {}'.format(r_status)
return_dict["count"] = 1
return_list.append(return_dict)
return return_list
def filter_gram(pv_word_gram):
scraper.pprint("--filter gram")
return_list = []
sorted_gram = sorted(pv_word_gram, key = lambda i:i["count"], reverse=True)
r_count = 0
for i in sorted_gram:
if r_count < 5:
if not i["word"] in config.stop_words:
return_list.append(i)
r_count+=1
if r_count ==config.term_count:
break
return return_list
def read_distros(pv_list):
scraper.pprint("--read distros")
scraper.pprint("== DISTRIBUTIONS == [TOP 5 WORDS]")
for i in pv_list:
tmp_string = ' SHOP {}, ID {} = ({}:{}, {}:{}, {}:{}, {}:{}, {}:{})'.format(i[0]['shop_name'],i[0]['shop_id'], i[1]['word'], i[1]['count'],i[2]['word'], i[2]['count'], i[3]['word'], i[3]['count'], i[4]['word'], i[4]['count'], i[5]['word'], i[5]['count'])
scraper.pprint(tmp_string)
scraper.pprint(' ')
return pv_list
def save(distributions, save_path, pv_timestamp):
scraper.pprint("--save")
s_t = '{}'.format(time.time())
time_split = s_t.split('.')
e = time_split[0]
lcl_path = save_path+'_{}${}.csv'.format(e,pv_timestamp)
scraper.pprint('make {}'.format(lcl_path))
with open(lcl_path, "w", newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(
[
"id"
,"distribution"
,"e_time"
]
)
lcl_id = 1
for d in distributions:
lcl_list = []
lcl_list.append(lcl_id)
lcl_list.append(d)
lcl_list.append(str(time.time()))
writer.writerow(lcl_list)
lcl_id+=1
def threaded_counter(shops):
scraper.pprint('--threader')
pool = ThreadPool(4)
results = pool.map(word_counter, shops)
return results
def main():
scraper.pprint("--'main, analyzer.py'")
key = scraper.get_key()
distributions = []
data_path = config.data_path
save_path = config.save_path
arg_length = len(sys.argv)
if not arg_length == 2:
timestamp = get_recent_run(data_path)
if arg_length == 2:
timestamp = sys.argv[1]
scraper.pprint('timestamp: {}'.format(timestamp))
if not timestamp == 0:
shops = get_shops(timestamp,key, data_path)
lcl_distributions = threaded_counter(shops)
for lcl in lcl_distributions:
distributions.append(lcl)
save(read_distros(distributions), save_path, timestamp)
else:
scraper.pprint("Please run 'scraper.py' first.")
if __name__ == "__main__":
main()
|
# Third step
import cv2
import glob
fps = 30
image_folder = '/home/duy/Documents/mmdetection/result_images/result_4'
video_name = './' + str(fps) + '_fps_video.avi'
img_array = []
frameSize = ()
for filename in sorted(glob.glob(image_folder + '/*.png')):
img = cv2.imread(filename)
height, width, layers = img.shape
image_size = (width, height)
frameSize = image_size
img_array.append(img)
video = cv2.VideoWriter(video_name, cv2.VideoWriter_fourcc(*'DIVX'), fps, frameSize)
for i in range(len(img_array)):
video.write(img_array[i])
video.release()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 11 14:52:44 2018
@author: Alexander Hadjiivanov
@license: MIT (https://opensource.org/licenses/MIT)
"""
import cortex.cortex as ctx
import cortex.network as cn
import cortex.layer as cl
import time
import math
import torch
import torch.nn as tn
import torch.nn.functional as tnf
from torch.distributions import Categorical
import gym
import cv2
import numpy as np
import matplotlib.pyplot as mpl
import matplotlib.animation as anim
from collections import defaultdict
ROM = 'Pong-v0'
Actions = {
'Pong-v0' : {0:0, 1:2, 2:5},
'Breakout-v0' : {0:0, 1:3, 2:4}
}
def preprocess(_state1, _state2):
'''
Crop, resize and return a grayscale version of the frame.
'''
# state = cv2.cvtColor(cv2.resize(_state, (84, 110)), cv2.COLOR_BGR2GRAY)[16:100,:]
    state = np.maximum(cv2.cvtColor(cv2.resize(_state1, (84, 110)), cv2.COLOR_BGR2GRAY)[16:100, :],
                       cv2.cvtColor(cv2.resize(_state2, (84, 110)), cv2.COLOR_BGR2GRAY)[16:100, :])
return torch.from_numpy(np.reshape(state, (1,84,84))).float() / 255
def update_buffer(_env, _buffer, _action):
'''
Take a step and append it to the state buffer.
'''
###################################################
# Skip 2, take the max of the next two, visual mode
###################################################
# step = 0
# state1 = None
# state2 = None
# total_reward = 0.0
# for it in range(4):
# state, reward, done, _ = _env.step(_action)
# total_reward += reward
# if done:
# break
# step += 1
# if step == 3:
# state1 = state
# if step == 4:
# state2 = state
# if (state1 is not None and
# state2 is not None):
# _buffer.append(preprocess(state1, state2))
# return total_reward, done
##################################
# Skip 2, take the third, RAM mode
##################################
# total_reward = 0.0
# for it in range(2):
# state, reward, done, _ = _env.step(_action)
# total_reward += reward
# if done:
# break
# _buffer.append(torch.from_numpy(_env.unwrapped._get_ram()).float() / 255)
# return total_reward, done
###############################
# One state at a time, RAM mode
###############################
state, reward, done, _ = _env.step(_action)
_buffer.append(torch.from_numpy(_env.unwrapped._get_ram()).float() / 255)
return reward, done
def init_buffer(_env, _buffer_size):
buffer = ctx.Cont.Ring(_buffer_size)
while len(buffer) < _buffer_size:
# Update state buffer
reward, done = update_buffer(_env, buffer, 0)
if done:
break
# for n in range(len(buffer)):
# mpl.matshow(buffer.data[n].data[0].numpy(), cmap='gray')
# mpl.show()
# ctx.pause()
return buffer, done
def select_action(_net, _input):
output = tnf.log_softmax(_net(_input), dim = 1)
# print(f'Output: {output}')
###############################################
# Choose an action from a weighted distribution
###############################################
action_dist = Categorical(torch.exp(output))
# action_dist = Categorical(-1 / output)
action = action_dist.sample()
###############################
    # Always choose a greedy action
###############################
# action = torch.argmax(output)
return action.item(), output
def optimise(_net, _conf, _history, _optimiser, _lr_scheduler):
# Zero out the optimiser gradients
def closure():
_optimiser.zero_grad()
discounted_reward = 0
raw_rewards = np.array(_history['reward'])
# print(f'Raw rewards: {raw_rewards}')
scaled_rewards = torch.zeros(len(raw_rewards))
factor = 1.0 - _conf.discount_factor
for idx, reward in reversed(list(enumerate(raw_rewards))):
discounted_reward = reward + factor * discounted_reward
scaled_rewards[idx] = discounted_reward
mean = scaled_rewards.mean()
sd = scaled_rewards.std()
scaled_rewards = (scaled_rewards - mean) / (sd + _conf.epsilon)
# print(f'Scaled rewards: {scaled_rewards}')
# baseline = mean / sd
baseline = 0
# print(f'Normalised rewards: {scaled_rewards}')
mask = torch.zeros_like(_history['output'])
for idx, val in enumerate(_history['action']):
mask[idx][val] = scaled_rewards[idx].item() - baseline
# print(f'Mask: {mask}')
losses = -torch.mul(mask, _history['output'])
# print(f'Losses: {losses}')
loss = (torch.sum(losses, 1)).mean()
# print(f'Loss: {loss}')
loss.backward()
return loss
if _lr_scheduler is not None:
_lr_scheduler.step()
_net.optimise(closure, _optimiser)
# for param in _net.parameters():
# print(param.grad)
def run_episode(_net,
_conf,
_env,
_optimiser = None,
_lr_scheduler = None,
_train = False,
_render = False,
_animate = False):
state = _env.reset()
done = False
buffer, done = init_buffer(_env, _conf.buffer_size)
steps = 0
total_reward = 0.0
if _animate:
frames = []
if _train:
history = {
'output': torch.zeros(0, *ctx.cn.Net.Output.Shape),
'action': [],
'reward': []
}
while not done:
action, output = select_action(_net, torch.cat(buffer.dump()).unsqueeze(0))
# reward, done = update_buffer(_env, buffer, Actions[ROM][action])
reward, done = update_buffer(_env, buffer, action)
total_reward += reward
if _train:
history['action'].append(action)
history['output'] = torch.cat((history['output'], output))
history['reward'].append(reward)
if _render:
_env.render()
time.sleep(0.02)
if _animate:
frame = mpl.imshow(buffer.data[buffer.head].data[0].numpy(), cmap='gray', animated=True)
frames.append([frame])
steps += 1
if _train:
optimise(_net, _conf, history, _optimiser, _lr_scheduler)
_net.reset_recurrent_layers()
history['output'] = torch.zeros(0, *ctx.cn.Net.Output.Shape)
history['action'] = []
history['reward'] = []
if _animate:
fig = mpl.figure()
ani = anim.ArtistAnimation(fig, frames, interval=50, blit=True, repeat_delay=1000)
mpl.show()
return steps, total_reward
def train(_net, _env, _conf):
net = _net.to(_conf.device)
# Train
score = ctx.Stat.SMAStat()
optimiser = _conf.optimiser(net.parameters(), **_conf.optimiser_args)
# lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimiser, 0.99)
lr_scheduler = None
for episode in range(1, _conf.episodes + 1):
steps, total_reward = run_episode(net, _conf, _env, optimiser, lr_scheduler, _train = True)
# Update the running score
score.update(steps)
print(f'[Episode {episode}] Steps: {steps:5d}\tMean loss: {net.fitness.loss_stat.mean:.3f}\tTotal reward: {total_reward:.0f}\tMean score: {score.mean:.2f}')
# Render an episode
run_episode(net, _conf, _env, _render = True)
return net
def main():
if ctx.get_rank() == 0:
# This is the master process.
# Parse command line arguments and set the default parameters.
ctx.init_conf()
# Temporary environment to get the input dimensions and other parameters
env = gym.make(ROM)
state1 = env.reset()
# state2, _, _, _ = env.step(0)
buffer_size = 4
# Set the initial parameters
# cn.Net.Input.Shape = [buffer_size, *list(preprocess(state1, state2).size())[1:]]
cn.Net.Input.Shape = [buffer_size * 128]
# cn.Net.Output.Shape = [len(Actions[ROM])]
cn.Net.Output.Shape = [env.action_space.n]
# cn.Net.Init.Layers = [ctx.cl.Layer.Def([10,3,3], [2,2])]
cn.Net.Init.Layers = [ctx.cl.Layer.Def([64])]
ctx.Conf.OptimiserArgs['lr'] = 0.1
ctx.Conf.DiscountFactor = 0.01
ctx.Conf.Epsilon = np.finfo(np.float32).eps.item()
ctx.Conf.Episodes = 200
# Allow recurrence for FC layers
# cl.Layer.RecurrentFC = True
ctx.print_conf()
conf = ctx.Conf(0, 0)
conf.buffer_size = buffer_size
net = cn.Net()
train(net, env, conf)
# ctx.init()
# # Run Cortex
# ctx.execute()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# Import standard modules ...
import glob
import os
# Import special modules ...
try:
import PIL
import PIL.Image
except:
raise Exception("\"PIL\" is not installed; run \"pip install --user Pillow\"") from None
# Import my modules ...
try:
import pyguymer3
import pyguymer3.image
except:
raise Exception("\"pyguymer3\" is not installed; you need to have the Python module from https://github.com/Guymer/PyGuymer3 located somewhere in your $PYTHONPATH") from None
# Configure PIL to open images up to 1 GiP ...
PIL.Image.MAX_IMAGE_PIXELS = 1024 * 1024 * 1024 # [px]
# Loop over PGMs ...
for pgm in sorted(glob.glob("data/scale=??km/elev=????m.pgm")):
print(f"Converting \"{pgm}\" ...")
# Deduce PNG name ...
png = f"{pgm[:-4]}.png"
# Open PGM and save it as a PNG ...
PIL.Image.open(pgm).convert("RGB").save(png, optimize = True)
# Remove PGM ...
os.remove(pgm)
# Optimize PNG ...
pyguymer3.image.optimize_image(png, strip = True)
|
"""
FactSet ESG API
FactSet ESG (powered by FactSet Truvalue Labs) applies machine learning to uncover risks and opportunities from companies' Environmental, Social and Governance (ESG) behavior, which are aggregated and categorized into continuously updated, material ESG scores. The service focuses on company ESG behavior from external sources and includes both positive and negative events that go beyond traditional sources of ESG risk data.<p> FactSet ESG extracts, analyzes, and generates scores from millions of documents each month collected from more than 100,000 data sources in over 13 languages. Sources include news, trade journals, NGOs, watchdog groups, trade blogs, industry reports and social media. Products deliver investable insights by revealing value and risk factors from unstructured data at the speed of current events.</p> # noqa: E501
The version of the OpenAPI document: 1.3.0
Contact: api@factset.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.FactSetESG.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.FactSetESG.exceptions import ApiAttributeError
class SasbScoresAll(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'date': (date,), # noqa: E501
'fsym_id': (str,), # noqa: E501
'request_id': (str,), # noqa: E501
'score_type': (str,), # noqa: E501
'access_and_affordability': (float,), # noqa: E501
'air_quality': (float,), # noqa: E501
'all_categories': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},), # noqa: E501
'business_ethics': (float,), # noqa: E501
'business_model_resilience': (float,), # noqa: E501
'competitive_behavior': (float,), # noqa: E501
'critical_incident_risk_management': (float,), # noqa: E501
'customer_privacy': (float,), # noqa: E501
'customer_welfare': (float,), # noqa: E501
'data_security': (float,), # noqa: E501
'ecological_impacts': (float,), # noqa: E501
'employee_engagement_diversityand_inclusion': (float,), # noqa: E501
'employee_health_and_safety': (float,), # noqa: E501
'energy_management': (float,), # noqa: E501
'g_hg_emissions': (float,), # noqa: E501
'human_rightsand_community_relations': (float,), # noqa: E501
'labor_practices': (float,), # noqa: E501
'management_of_the_legal_and_regulatory_environment': (float,), # noqa: E501
'materials_sourcing_and_efficiency': (float,), # noqa: E501
'materiality': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},), # noqa: E501
'physical_impacts_of_climate_change': (float,), # noqa: E501
'product_quality_and_safety': (float,), # noqa: E501
'product_design_and_lifecycle_management': (float,), # noqa: E501
'selling_practices_and_product_labeling': (float,), # noqa: E501
'supply_chain_management': (float,), # noqa: E501
'systemic_risk_management': (float,), # noqa: E501
'waste_and_hazardous_materials_management': (float,), # noqa: E501
'water_and_wastewater_management': (float,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'date': 'date', # noqa: E501
'fsym_id': 'fsymId', # noqa: E501
'request_id': 'requestId', # noqa: E501
'score_type': 'scoreType', # noqa: E501
'access_and_affordability': 'accessAndAffordability', # noqa: E501
'air_quality': 'airQuality', # noqa: E501
'all_categories': 'allCategories', # noqa: E501
'business_ethics': 'businessEthics', # noqa: E501
'business_model_resilience': 'businessModelResilience', # noqa: E501
'competitive_behavior': 'competitiveBehavior', # noqa: E501
'critical_incident_risk_management': 'criticalIncidentRiskManagement', # noqa: E501
'customer_privacy': 'customerPrivacy', # noqa: E501
'customer_welfare': 'customerWelfare', # noqa: E501
'data_security': 'dataSecurity', # noqa: E501
'ecological_impacts': 'ecologicalImpacts', # noqa: E501
'employee_engagement_diversityand_inclusion': 'employeeEngagementDiversityandInclusion', # noqa: E501
'employee_health_and_safety': 'employeeHealthAndSafety', # noqa: E501
'energy_management': 'energyManagement', # noqa: E501
'g_hg_emissions': 'gHGEmissions', # noqa: E501
'human_rightsand_community_relations': 'humanRightsandCommunityRelations', # noqa: E501
'labor_practices': 'laborPractices', # noqa: E501
'management_of_the_legal_and_regulatory_environment': 'managementOfTheLegalAndRegulatoryEnvironment', # noqa: E501
'materials_sourcing_and_efficiency': 'materialsSourcingAndEfficiency', # noqa: E501
'materiality': 'materiality', # noqa: E501
'physical_impacts_of_climate_change': 'physicalImpactsOfClimateChange', # noqa: E501
'product_quality_and_safety': 'productQualityAndSafety', # noqa: E501
'product_design_and_lifecycle_management': 'productDesignAndLifecycleManagement', # noqa: E501
'selling_practices_and_product_labeling': 'sellingPracticesAndProductLabeling', # noqa: E501
'supply_chain_management': 'supplyChainManagement', # noqa: E501
'systemic_risk_management': 'systemicRiskManagement', # noqa: E501
'waste_and_hazardous_materials_management': 'wasteAndHazardousMaterialsManagement', # noqa: E501
'water_and_wastewater_management': 'waterAndWastewaterManagement', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""SasbScoresAll - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
date (date): Date for the period requested expressed in YYYY-MM-DD format.. [optional] # noqa: E501
fsym_id (str): FactSet Entity Identifier. Six alpha-numeric characters, excluding vowels, with a -E suffix (XXXXXX-E).. [optional] # noqa: E501
request_id (str): Identifier that was used for the request.. [optional] # noqa: E501
score_type (str): The name of the specific SASB Score type being shown in the response. This will be represented by the scoreTypes input: PULSE, INSIGHT, MOMENTUM, ART_VOL_TTM, CAT_VOL_TTM, or DYNAMIC_MAT.. [optional] # noqa: E501
access_and_affordability (float): The Access and Affordability SASB Category.. [optional] # noqa: E501
air_quality (float): The Air Quality SASB Category.. [optional] # noqa: E501
all_categories ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): The All Categories Category. This represents the overall SASB Score for the given 'scoreType'. When scoreType = ESG_RANK, the data type will be string, otherwise data type will be a number with double format.. [optional] # noqa: E501
business_ethics (float): The Business Ethics Category.. [optional] # noqa: E501
business_model_resilience (float): The Business Model Resilience Category.. [optional] # noqa: E501
competitive_behavior (float): The Competitive Behavior Category.. [optional] # noqa: E501
critical_incident_risk_management (float): The Critical Incident Risk Management Category.. [optional] # noqa: E501
customer_privacy (float): The Customer Privacy Category.. [optional] # noqa: E501
customer_welfare (float): The Customer Welfare Category.. [optional] # noqa: E501
data_security (float): The Data Security Category.. [optional] # noqa: E501
ecological_impacts (float): The Ecological Impacts Category.. [optional] # noqa: E501
employee_engagement_diversityand_inclusion (float): The Employee Engagement Diversity and Inclusion Category.. [optional] # noqa: E501
employee_health_and_safety (float): The Employee Health And Safety Category.. [optional] # noqa: E501
energy_management (float): The Energy Management Category.. [optional] # noqa: E501
g_hg_emissions (float): The Greenhouse Gases Emissions Category.. [optional] # noqa: E501
human_rightsand_community_relations (float): The Human Rights and Community Relations Category.. [optional] # noqa: E501
labor_practices (float): The Labor Practices Category.. [optional] # noqa: E501
management_of_the_legal_and_regulatory_environment (float): The Management of the Legal and Regulatory Environment Category.. [optional] # noqa: E501
materials_sourcing_and_efficiency (float): The Materials Sourcing and Efficiency Category.. [optional] # noqa: E501
materiality ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): The Materiality Category. When scoreType = ESG_RANK, the data type will be string, otherwise data type will be a number with double format.. [optional] # noqa: E501
physical_impacts_of_climate_change (float): The Physical Impacts of Climate Change Category.. [optional] # noqa: E501
product_quality_and_safety (float): The Product Quality and Safety Category.. [optional] # noqa: E501
product_design_and_lifecycle_management (float): The Product Design And Lifecycle Management Category.. [optional] # noqa: E501
selling_practices_and_product_labeling (float): The Selling Practices And Product Labeling Category.. [optional] # noqa: E501
supply_chain_management (float): The Supply Chain Management Category.. [optional] # noqa: E501
systemic_risk_management (float): The Systemic Risk Management Category.. [optional] # noqa: E501
waste_and_hazardous_materials_management (float): The Waste and Hazardous Materials Management Category.. [optional] # noqa: E501
water_and_wastewater_management (float): The Water and Wastewater Management Category.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""SasbScoresAll - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            date (date): Date for the period requested expressed in YYYY-MM-DD format. [optional] # noqa: E501
            fsym_id (str): FactSet Entity Identifier. Six alpha-numeric characters, excluding vowels, with a -E suffix (XXXXXX-E). [optional] # noqa: E501
            request_id (str): Identifier that was used for the request. [optional] # noqa: E501
            score_type (str): The name of the specific SASB Score type being shown in the response. This will be represented by the scoreTypes input: PULSE, INSIGHT, MOMENTUM, ART_VOL_TTM, CAT_VOL_TTM, or DYNAMIC_MAT. [optional] # noqa: E501
            access_and_affordability (float): The Access and Affordability SASB Category. [optional] # noqa: E501
            air_quality (float): The Air Quality SASB Category. [optional] # noqa: E501
            all_categories ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): The All Categories Category. This represents the overall SASB Score for the given 'scoreType'. When scoreType = ESG_RANK, the data type will be string, otherwise data type will be a number with double format. [optional] # noqa: E501
            business_ethics (float): The Business Ethics Category. [optional] # noqa: E501
            business_model_resilience (float): The Business Model Resilience Category. [optional] # noqa: E501
            competitive_behavior (float): The Competitive Behavior Category. [optional] # noqa: E501
            critical_incident_risk_management (float): The Critical Incident Risk Management Category. [optional] # noqa: E501
            customer_privacy (float): The Customer Privacy Category. [optional] # noqa: E501
            customer_welfare (float): The Customer Welfare Category. [optional] # noqa: E501
            data_security (float): The Data Security Category. [optional] # noqa: E501
            ecological_impacts (float): The Ecological Impacts Category. [optional] # noqa: E501
            employee_engagement_diversityand_inclusion (float): The Employee Engagement Diversity and Inclusion Category. [optional] # noqa: E501
            employee_health_and_safety (float): The Employee Health And Safety Category. [optional] # noqa: E501
            energy_management (float): The Energy Management Category. [optional] # noqa: E501
            g_hg_emissions (float): The Greenhouse Gases Emissions Category. [optional] # noqa: E501
            human_rightsand_community_relations (float): The Human Rights and Community Relations Category. [optional] # noqa: E501
            labor_practices (float): The Labor Practices Category. [optional] # noqa: E501
            management_of_the_legal_and_regulatory_environment (float): The Management of the Legal and Regulatory Environment Category. [optional] # noqa: E501
            materials_sourcing_and_efficiency (float): The Materials Sourcing and Efficiency Category. [optional] # noqa: E501
            materiality ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): The Materiality Category. When scoreType = ESG_RANK, the data type will be string, otherwise data type will be a number with double format. [optional] # noqa: E501
            physical_impacts_of_climate_change (float): The Physical Impacts of Climate Change Category. [optional] # noqa: E501
            product_quality_and_safety (float): The Product Quality and Safety Category. [optional] # noqa: E501
            product_design_and_lifecycle_management (float): The Product Design And Lifecycle Management Category. [optional] # noqa: E501
            selling_practices_and_product_labeling (float): The Selling Practices And Product Labeling Category. [optional] # noqa: E501
            supply_chain_management (float): The Supply Chain Management Category. [optional] # noqa: E501
            systemic_risk_management (float): The Systemic Risk Management Category. [optional] # noqa: E501
            waste_and_hazardous_materials_management (float): The Waste and Hazardous Materials Management Category. [optional] # noqa: E501
            water_and_wastewater_management (float): The Water and Wastewater Management Category. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
#!/usr/bin/env python
import os
import sys
import unittest
from collections import namedtuple
sys.path = [ os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')) ] + sys.path
from pluckit import pluck
class CornerCasesTest(unittest.TestCase):
def test_null_handle(self):
data = [1, 2, 3]
self.assertEqual([ None, None, None ], pluck(data, None))
def test_empty(self):
self.assertEqual([], pluck([], 'k'))
self.assertEqual({}, pluck({}, 'k'))
self.assertEqual(set(), pluck(set(), 'k'))
def test_null(self):
self.assertEqual(None, pluck(None, None))
self.assertEqual(None, pluck(None, 123))
def test_null_values(self):
data = {
None : [1, 2],
'b' : [3, 4],
'c' : [None, 5]
}
self.assertEqual(
{
None : 1,
'b' : 3,
'c' : None,
},
pluck(data, 0)
)
if __name__ == '__main__':
unittest.main()
|
import json
import requests
import xml.etree.ElementTree as ET
import webbrowser
from const import *
import urllib.request
class Wrapper():
payload="""<scan:ScanSettings xmlns:scan="http://schemas.hp.com/imaging/escl/2011/05/03" xmlns:dd="http://www.hp.com/schemas/imaging/con/dictionaries/1.0/" xmlns:dd3="http://www.hp.com/schemas/imaging/con/dictionaries/2009/04/06" xmlns:fw="http://www.hp.com/schemas/imaging/con/firewall/2011/01/05" xmlns:scc="http://schemas.hp.com/imaging/escl/2011/05/03" xmlns:pwg="http://www.pwg.org/schemas/2010/12/sm"><pwg:Version>2.1</pwg:Version><scan:Intent>Photo</scan:Intent><pwg:ScanRegions><pwg:ScanRegion><pwg:Height>3507</pwg:Height><pwg:Width>2481</pwg:Width><pwg:XOffset>0</pwg:XOffset><pwg:YOffset>0</pwg:YOffset></pwg:ScanRegion></pwg:ScanRegions><pwg:InputSource>Platen</pwg:InputSource><scan:DocumentFormatExt>image/jpeg</scan:DocumentFormatExt><scan:XResolution>300</scan:XResolution><scan:YResolution>300</scan:YResolution><scan:ColorMode>RGB24</scan:ColorMode><scan:CompressionFactor>25</scan:CompressionFactor><scan:Brightness>1000</scan:Brightness><scan:Contrast>1000</scan:Contrast></scan:ScanSettings>"""
rootIp = ""
def __init__(self, _rip):
self.rootIp = _rip
def ScanDocument(self, outputfile):
requests.post(self.rootIp + '/eSCL/ScanJobs', data=self.payload, headers=headers, verify=False)
for job in self.GetJobs():
if not job.State in {"Aborted", "Completed"}:
urllib.request.urlretrieve(job.GetLink(), outputfile)
def GetJobs(self):
raw = requests.get(self.rootIp + "/eSCL/ScannerStatus", headers=headers, verify=False)
status = ET.fromstring(raw.content)
jobs_raw = list(status.find("scan:Jobs", ns))
parsed_job_list = []
for job in jobs_raw:
cj = ScanJob(self.rootIp)
cj.ParseJob(job)
parsed_job_list.append(cj)
return parsed_job_list
class ScanJob:
RootDomain = ""
URI = ""
UUID = ""
Age = ""
State = ""
StateReasons = []
ImagesCompleted = 0
ImagesToTransfer = 0
def __init__(self, rd):
self.StateReasons = []
self.RootDomain = rd
def ParseJob(self, val):
self.State = val.find("pwg:JobState", ns).text
self.URI = val.find("pwg:JobUri", ns).text
self.UUID = val.find("pwg:JobUuid", ns).text
self.Age = val.find("scan:Age", ns).text
self.ImagesCompleted = val.find("pwg:ImagesCompleted", ns).text
self.ImagesToTransfer = val.find("pwg:ImagesToTransfer", ns).text
for x in list(val.find("pwg:JobStateReasons",ns)):
self.StateReasons.append(x.text)
def GetLink(self):
return self.RootDomain + self.URI + "/NextDocument"
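# A minimal usage sketch (illustrative; the scanner URL is a placeholder, and the
# `headers`/`ns` dictionaries are assumed to come from const.py as imported above):
#
#   scanner = Wrapper("https://192.168.1.50")
#   scanner.ScanDocument("scan.jpg")   # POSTs the ScanSettings payload, then fetches the result
#   for job in scanner.GetJobs():
#       print(job.UUID, job.State, job.ImagesCompleted)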
|
"""Exception classes used throughout the FindTrajectory project.
AUTHOR: Max Birkett <m.birkett@liverpool.ac.uk>, Department of Chemistry, University of Liverpool, UK.
RELEASE: 2022-02-04, version 0.1. DEPENDENCIES: Python 3.6. LICENCE: Please see LICENCE.txt for more details.
"""
class AppError(Exception):
"""General friendly application error"""
def __init__(self,myErrMsg,*arglist): # Python semantics include optional arglist
super(AppError,self).__init__(myErrMsg,*arglist)
# additional details set at higher levels in the call stack:
self.sDatasetId=None
self.sHostCrystalId=None # CIF filename of crystal related to error.
self.sHostCrystalCifFileName=None # CIF filename of crystal related to error.
def __str__(self):
sErrMsg= super(AppError,self).args[0]
return sErrMsg if self.sHostCrystalCifFileName is None else "%s [dataset %s; crystal %s; CIF %s]" % (sErrMsg,self.sDatasetId,self.sHostCrystalId,self.sHostCrystalCifFileName)
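    # Illustrative use of the enriched message (all values below are placeholders):
    #
    #   err = AppError("no convergent fit found")
    #   err.sDatasetId = "DS-0001"
    #   err.sHostCrystalId = "host1"
    #   err.sHostCrystalCifFileName = "host1.cif"
    #   str(err)  # -> "no convergent fit found [dataset DS-0001; crystal host1; CIF host1.cif]"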
class FailedFit(Exception):
def __init__(self,myErrMsg,*arglist):
super(FailedFit,self).__init__(myErrMsg,*arglist)
|
from selenium_ui.confluence import modules
from extension.confluence import extension_ui # noqa F401
# this action should be the first one
def test_0_selenium_a_login(confluence_webdriver, confluence_datasets, confluence_screen_shots):
modules.login(confluence_webdriver, confluence_datasets)
def test_1_selenium_view_page(confluence_webdriver, confluence_datasets, confluence_screen_shots):
modules.view_page(confluence_webdriver, confluence_datasets)
def test_1_selenium_view_meetical_page(confluence_webdriver, confluence_datasets, confluence_screen_shots):
extension_ui.view_meetical_page(confluence_webdriver, confluence_datasets)
def test_1_selenium_create_page(confluence_webdriver, confluence_datasets, confluence_screen_shots):
modules.create_confluence_page(confluence_webdriver, confluence_datasets)
def test_1_selenium_edit_page(confluence_webdriver, confluence_datasets, confluence_screen_shots):
modules.edit_confluence_page(confluence_webdriver, confluence_datasets)
def test_1_selenium_create_comment(confluence_webdriver, confluence_datasets, confluence_screen_shots):
modules.create_comment(confluence_webdriver, confluence_datasets)
def test_1_selenium_view_blog(confluence_webdriver, confluence_datasets, confluence_screen_shots):
modules.view_blog(confluence_webdriver, confluence_datasets)
def test_1_selenium_view_dashboard(confluence_webdriver, confluence_datasets, confluence_screen_shots):
modules.view_dashboard(confluence_webdriver, confluence_datasets)
"""
Add custom actions anywhere between login and log out action. Move this to a different line as needed.
Write your custom selenium scripts in `app/extension/confluence/extension_ui.py`.
Refer to `app/selenium_ui/confluence/modules.py` for examples.
"""
# def test_1_selenium_custom_action(confluence_webdriver, confluence_datasets, confluence_screen_shots):
# extension_ui.app_specific_action(confluence_webdriver, confluence_datasets)
# this action should be the last one
def test_2_selenium_z_log_out(confluence_webdriver, confluence_datasets, confluence_screen_shots):
modules.log_out(confluence_webdriver, confluence_datasets)
|
from django.urls import path, include
from .views import (
    HomeView,
    ItemDetailView,
)
from . import views
app_name = 'mysalon'
urlpatterns = [
path('home/',HomeView.as_view(),name='home'),
path('product/<slug>/',ItemDetailView.as_view(),name="detail"),
path('',views.posts,name="posts"),
path('what_we_do/' ,views.what_we_do,name='what-we-do'),
path('comment/<int:id>',views.add_comments,name='comment'),
path('add-to-cart/<slug>/',views.add_to_cart,name='add-to-cart'),
path('remove-from-cart/<slug>',views.remove_from_cart,name='remove_from_cart'),
path('order_summary/',views.order_summary,name='order-summary'),
path('remove_item/<slug>',views.remove_single_item_from_cart,name='remove_one'),
path('create_appointment/',views.create_appointment,name="appointment"),
path('paypal_return/',views.paypal_return,name="return"),
#Dashboard
path('activate/user/<int:user_id>', views.user_activate, name='activate_user'),
path('deactivate/user/<int:user_id>', views.user_deactivate, name='deactivate_user'),
path('dashboard/', views.user_dashboard, name='user_dashboard'),
path('users/', views.registered_users, name='system_users'),
#paypal
    path('go/', include('paypal.standard.ipn.urls')),
path('pay_total/',views.total_order,name='make_payment'),
]
|
# -*- coding: utf-8 -*-
"""
Time Series Reader for the SMAP Time Series
"""
from pygeogrids.netcdf import load_grid
from pynetcf.time_series import GriddedNcOrthoMultiTs
import os
from netCDF4 import num2date
import pandas as pd
from io_utils.read.geo_ts_readers.mixins import OrthoMultiTsCellReaderMixin
class SMAPTs(GriddedNcOrthoMultiTs, OrthoMultiTsCellReaderMixin):
_t0_var = 'tb_time_seconds'
_t0_unit = 'seconds since 2000-01-01T12:00' # from tb_time_seconds long_name
def __init__(self, ts_path=None, grid_path=None, exact_index=False, **kwargs):
if grid_path is None:
grid_path = os.path.join(ts_path, "grid.nc")
grid = load_grid(grid_path)
super(SMAPTs, self).__init__(ts_path, grid, **kwargs)
self.exact_index = exact_index
if self.exact_index and \
(self.parameters is not None and self._t0_var not in self.parameters):
self.parameters.append(self._t0_var)
def _to_datetime(self, df):
df['_date'] = df.index.values
num = df[self._t0_var].dropna()
if len(num) == 0:
df.loc[num.index, '_datetime'] = []
else:
df.loc[num.index, '_datetime'] = \
pd.DatetimeIndex(num2date(num.values, units=self._t0_unit,
calendar='standard', only_use_cftime_datetimes=False))
df = df.set_index('_datetime')
df = df[df.index.notnull()]
return df
def read(self, *args, **kwargs):
df = super(SMAPTs, self).read(*args, **kwargs)
if self.exact_index:
df = self._to_datetime(df)
return df
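# Usage sketch (paths are placeholders; read() accepts a grid point or a lon/lat pair,
# as provided by GriddedNcOrthoMultiTs):
#
#   reader = SMAPTs("/data/SMAP_L3/ts", exact_index=True)
#   ts = reader.read(-104.0, 40.0)
#   print(ts.head())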
|
#!/usr/bin/python3
import os
import sys
import re
def execute(command):
cmd = command.split()
#print('PID: ', os.getpid(), 'about to execute: ', cmd)
# Check if command[0] is itself a path
if os.path.exists(cmd[0]):
os.execve(cmd[0], cmd, os.environ.copy())
else:
for directory in path:
executable = directory + '/' + cmd[0]
if os.path.exists(executable):
os.execve(executable, cmd, os.environ.copy())
def redirect(input):
cmd, output = input.split('>')
output = output.strip()
os.close(1)
os.open(output, os.O_CREAT | os.O_WRONLY)
os.set_inheritable(1, True)
#print('')
execute(cmd)
#TODO: Fix pipe output. Ex case: ls -a | grep shell.py
def pipe(command_set, type='write', file_descriptor=None):
if type == 'write':
#print('Child 1: Creating read and write ends')
cmd, cmd2 = command_set.split('|')
read_pipe, write_pipe = os.pipe()
for fd in (read_pipe, write_pipe):
os.set_inheritable(fd, True)
#print('Child 1: Pipes created. Forking new process to run process connected to read end.')
fork_new_process(cmd2, option='pipe_complete', file_descriptors=read_pipe)
#print('Child 1: Setting stdout to write_pipe')
os.close(1)
os.dup(write_pipe)
os.set_inheritable(1, True)
#print('Child 1: Executing command', file=sys.stderr)
execute(cmd)
if type == 'read' and file_descriptor:
#print('Child 2: Set stdin to read_pipe (file_descriptor)', file=sys.stderr)
os.set_inheritable(file_descriptor, True)
os.close(0)
os.dup(file_descriptor)
os.set_inheritable(0, True)
#print('Child 2: Executing command', file=sys.stderr)
execute(command_set)
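    # Worked example for `ls -a | grep shell.py` (a sketch of the control flow above):
    #   1. The shell forks child 1 with option='pipe_begin', which calls pipe().
    #   2. Child 1 creates the pipe, then forks child 2 with option='pipe_complete',
    #      handing it the read end before pointing its own stdout at the write end.
    #   3. Child 2 dups the read end onto stdin and execs `grep shell.py`, while
    #      child 1 execs `ls -a`, so grep consumes ls's output through the pipe.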
def fork_new_process(input, option=None, file_descriptors=None):
pid = os.fork()
if pid == 0:
if option is None or option == 'run_in_background':
#print('PID: ', os.getpid(), ' option=none')
execute(input.strip('&').strip())
if option == 'redirect':
#print('PID: ', os.getpid(), ' option=redirect')
redirect(input)
if option == 'pipe_begin':
#print('PID: ', os.getpid(), ' option=pipe_begin')
pipe(input)
if option == 'pipe_complete' and file_descriptors:
#print('PID: ', os.getpid(), ' option=pipe_complete')
pipe(input, type='read', file_descriptor=file_descriptors)
if pid != 0 and option != 'pipe_complete' and option != 'run_in_background':
#print('PID: ', os.getpid(), ' Parent waiting.')
os.wait()
def initialize():
try:
sys.ps1
except AttributeError:
sys.ps1 = '$ '
global path
path = os.environ['PATH'].split(':')
def shell():
initialize()
count = 0
while True:
count += 1
#print('PID: ', os.getpid(), ' shell cycle ', count)
user_input = input(sys.ps1)
if user_input == '':
continue
elif user_input == 'exit':
sys.exit(1)
        elif user_input.startswith('cd '):
os.chdir(user_input.split()[1])
elif '>' in user_input:
#print('Redirection')
fork_new_process(user_input, option='redirect')
elif '|' in user_input:
fork_new_process(user_input, option='pipe_begin')
elif '&' in user_input:
fork_new_process(user_input, option='run_in_background')
else:
fork_new_process(user_input)
shell()
|
#!/usr/bin/env python
# MIT License
#
# Copyright (c) 2017 Dan Persons <dpersonsdev@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import string
import logdissect.parsers
import logdissect.filters
import logdissect.output
import logdissect.utils
from logdissect import __version__
from argparse import ArgumentParser
import gettext
gettext.install('logdissect')
class LogDissectCore:
def __init__(self):
"""Initialize logdissect job"""
self.input_files = []
self.parse_modules = {}
self.filter_modules = {}
self.output_modules = {}
self.data_set = {}
self.args = None
self.arg_parser = ArgumentParser()
self.parse_args = \
self.arg_parser.add_argument_group('parse options')
self.filter_args = \
self.arg_parser.add_argument_group('filter options')
self.output_args = \
self.arg_parser.add_argument_group('output options')
# run_job does the actual job using the other functions.
def run_job(self):
"""Execute a logdissect job"""
try:
self.load_parsers()
self.load_filters()
self.load_outputs()
self.config_args()
if self.args.list_parsers:
self.list_parsers()
if self.args.verbosemode: print('Loading input files')
self.load_inputs()
if self.args.verbosemode: print('Running parsers')
self.run_parse()
if self.args.verbosemode: print('Merging data')
self.data_set['finalized_data'] = \
logdissect.utils.merge_logs(
self.data_set['data_set'], sort=True)
if self.args.verbosemode: print('Running filters')
self.run_filters()
if self.args.verbosemode: print('Running output')
self.run_output()
except KeyboardInterrupt:
sys.exit(1)
def run_parse(self):
"""Parse one or more log files"""
# Data set already has source file names from load_inputs
parsedset = {}
parsedset['data_set'] = []
for log in self.input_files:
parsemodule = self.parse_modules[self.args.parser]
try:
if self.args.tzone:
parsemodule.tzone = self.args.tzone
except NameError: pass
parsedset['data_set'].append(parsemodule.parse_file(log))
self.data_set = parsedset
del(parsedset)
def run_filters(self):
for m in self.filter_modules:
ourfilter = self.filter_modules[m]
ourlog = ourfilter.filter_data(
self.data_set['finalized_data'],
args=self.args)
self.data_set['finalized_data'] = ourlog
del(ourlog)
del(ourfilter)
def run_output(self):
"""Output finalized data"""
for f in logdissect.output.__formats__:
ouroutput = self.output_modules[f]
ouroutput.write_output(self.data_set['finalized_data'],
args=self.args)
del(ouroutput)
# Output to terminal if silent mode is not set:
if not self.args.silentmode:
if self.args.verbosemode:
print('\n==== ++++ ==== Output: ==== ++++ ====\n')
for line in self.data_set['finalized_data']['entries']:
print(line['raw_text'])
def config_args(self):
"""Set config options"""
# Module list options:
self.arg_parser.add_argument('--version', action='version',
version='%(prog)s ' + str(__version__))
self.arg_parser.add_argument('--verbose',
action='store_true', dest = 'verbosemode',
help=_('set verbose terminal output'))
self.arg_parser.add_argument('-s',
action='store_true', dest = 'silentmode',
help=_('silence terminal output'))
self.arg_parser.add_argument('--list-parsers',
action='store_true', dest='list_parsers',
help=_('return a list of available parsers'))
self.arg_parser.add_argument('-p',
action='store', dest='parser', default='syslog',
help=_('select a parser (default: syslog)'))
self.arg_parser.add_argument('-z', '--unzip',
action='store_true', dest='unzip',
help=_('include files compressed with gzip'))
self.arg_parser.add_argument('-t',
action='store', dest='tzone',
help=_('specify timezone offset to UTC (e.g. \'+0500\')'))
self.arg_parser.add_argument('files',
# nargs needs to be * not + so --list-filters/etc
# will work without file arg
metavar='file', nargs='*',
help=_('specify input files'))
# self.arg_parser.add_argument_group(self.parse_args)
self.arg_parser.add_argument_group(self.filter_args)
self.arg_parser.add_argument_group(self.output_args)
self.args = self.arg_parser.parse_args()
# Load input files:
def load_inputs(self):
"""Load the specified inputs"""
for f in self.args.files:
if os.path.isfile(f):
fparts = str(f).split('.')
if fparts[-1] == 'gz':
if self.args.unzip:
fullpath = os.path.abspath(str(f))
self.input_files.append(fullpath)
else:
return 0
elif fparts[-1] == 'bz2' or fparts[-1] == 'zip':
return 0
else:
fullpath = os.path.abspath(str(f))
self.input_files.append(fullpath)
else:
print('File '+ f + ' not found')
return 1
# Parsing modules:
def list_parsers(self, *args):
"""Return a list of available parsing modules"""
print('==== Available parsing modules: ====\n')
for parser in sorted(self.parse_modules):
print(self.parse_modules[parser].name.ljust(16) + \
': ' + self.parse_modules[parser].desc)
sys.exit(0)
def load_parsers(self):
"""Load parsing module(s)"""
for parser in sorted(logdissect.parsers.__all__):
self.parse_modules[parser] = \
__import__('logdissect.parsers.' + parser, globals(), \
locals(), [logdissect]).ParseModule()
def load_filters(self):
"""Load filter module(s)"""
for f in sorted(logdissect.filters.__filters__):
self.filter_modules[f] = \
__import__('logdissect.filters.' + f, globals(), \
locals(), [logdissect]).FilterModule(args=self.filter_args)
def load_outputs(self):
"""Load output module(s)"""
for output in sorted(logdissect.output.__formats__):
self.output_modules[output] = \
__import__('logdissect.output.' + output, globals(), \
locals(), [logdissect]).OutputModule(args=self.output_args)
def main():
dissect = LogDissectCore()
dissect.run_job()
if __name__ == "__main__":
    main()
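# Example invocation (illustrative; the flag names come from config_args() above and the
# available parsers depend on the installed logdissect parser modules):
#
#   logdissect -p syslog --verbose -t '+0500' /var/log/messages /var/log/auth.log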
|
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import argparse
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
from dataset import *
from model import *
from tqdm import tqdm
from envsetter import EnvSetter
from helper_functions import *
from fid import get_fid
from logger import Logger
opt = EnvSetter("vae").get_parser()
logger = Logger(opt.log_path, opt)
save_path = opt.save_path
torch.manual_seed(opt.seed)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
train_loader, val_loader, test_loader = get_data_loader(opt)
model = VAE(opt=opt)
model = model.to(device)
model = torch.nn.DataParallel(model)
model.apply(weights_init)
optimizer = optim.Adam(model.parameters(), lr=opt.lr)
# Reconstruction + KL divergence losses summed over all elements and batch
def loss_function(recon_x, x, mu, logvar):
MSE = F.mse_loss(recon_x, x, reduction='sum')
# see Appendix B from VAE paper:
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
return MSE + KLD
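# For reference: with q = N(mu, diag(sigma^2)) and p = N(0, I), the KL term has the
# closed form KL(q || p) = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2), which is the
# expression above with logvar = log(sigma^2); the MSE term plays the role of the
# negative reconstruction log-likelihood up to a constant scale.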
def train(epoch):
model.train()
train_loss = 0
for batch_idx, (data, _) in tqdm(enumerate(train_loader)):
data = data.to(device)
optimizer.zero_grad()
recon_batch, mu, logvar = model(data)
loss = loss_function(recon_batch.to(device), data, mu.to(device), logvar.to(device))
loss.backward()
train_loss += loss.item()
optimizer.step()
if batch_idx % opt.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader),
loss.item() / len(data)))
# Generate Samples
avg_loss = train_loss / len(train_loader.dataset)
return avg_loss
def load_model(path):
checkpoint = torch.load(path)
model.module.load_state_dict(checkpoint['VAE_model'])
optimizer.load_state_dict(checkpoint['optimizer'])
return checkpoint['epoch']
if __name__ == "__main__":
start_epoch = 0
if opt.load_path and len(opt.load_path) < 2:
start_epoch = load_model(opt.load_path[0])
if opt.to_train:
for epoch in tqdm(range(start_epoch, opt.epochs)):
avg_loss = train(epoch)
with torch.no_grad():
# First thing save
torch.save({
'epoch': epoch + 1,
'VAE_model': model.module.state_dict(),
'optimizer': optimizer.state_dict()}, opt.model_path + f"model_{str(epoch+1)}.tar")
# Calculate FID
fid = "N/A"
if opt.calc_fid:
fn = lambda x: model.module.decode(x).cpu()
generate_fid_samples(fn, epoch, opt.n_samples, opt.n_hidden, opt.fid_path_samples, device=device)
fid = get_fid(opt.fid_path_samples, opt.fid_path_pretrained)
print('====> Epoch: {} Average loss: {:.4f} FID: {}'.format(
epoch, avg_loss, fid))
# Log results
logger.log({
"Epoch": epoch,
"Avg Loss": avg_loss,
"FID": fid
})
tmp_epoch = 0
for m in opt.load_path:
epoch = load_model(m)
# Quick fix to load multiple models and not have overwriting happening
        epoch = epoch if epoch != tmp_epoch and tmp_epoch < epoch else tmp_epoch + 1
tmp_epoch = epoch
if opt.calc_fid:
fn = lambda x: model.module.decode(x).cpu()
generate_fid_samples(fn, epoch, opt.n_samples, opt.n_hidden, opt.fid_path_samples, device=device)
fid = get_fid(opt.fid_path_samples, opt.fid_path_pretrained)
if opt.test_recons:
fn = lambda x: model(x.to(device))[0]
gen_reconstructions(fn, test_loader, epoch, opt.test_results_path_recons, nrow=1, path_for_originals=opt.test_results_path_originals)
print("Generated reconstructions")
if opt.test_samples:
fn = lambda x: model.module.decode(x).cpu()
generate_samples(fn, epoch, 5, opt.n_hidden, opt.test_results_path_samples, nrow=1, device=device)
print("Generated samples")
|
"""setup file for pycsdl2"""
import os
import re
import shlex
import subprocess
from os.path import join
from glob import glob
import distutils.util
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
def pkg_config(packages, cflags=False, libs=False):
"""Runs pkg-config and returns its output
:param packages: list of package names
:type packages: list of str
:param cflags bool: Include compile flags
:param libs bool: Include link flags
:return: output of pkg-config
"""
args = ['pkg-config']
if cflags:
args.append('--cflags')
if libs:
args.append('--libs')
args.extend(packages)
return subprocess.check_output(args, universal_newlines=True)
def sdl2_config(cflags=False, libs=False):
"""Runs sdl2-config and returns its output
:param cflags bool: Include compile flags
:param libs bool: Include link flags
:return: output of sdl2-config
"""
args = ['sdl2-config']
if cflags:
args.append('--cflags')
if libs:
args.append('--libs')
return subprocess.check_output(args, universal_newlines=True)
def parse_flags(flags, flag_map, extra_flags_key):
"""Parses flags from str `flags`.
:param flags str: str of flags and their arguments
:param flag_map dict: Map flags (as str) to a 2-tuple, the first is the
output dict key and the second is a callable
that takes a single argument, which is the entire
argument excluding the flag, and return the value to
append to the list.
:param extra_flags_key str: Output dict key to use if flag is not present
in `flag_map`.
:return: dict
"""
out = {}
for arg in shlex.split(flags):
for flag, (k, f) in flag_map.items():
if arg.startswith(flag):
out.setdefault(k, []).append(f(arg[len(flag):]))
break
else:
out.setdefault(extra_flags_key, []).append(arg)
return out
def parse_cflags(flags):
"""Parse compile flags
:param flags str: str of flags and their arguments.
:return: dict
"""
flag_map = {
'-I': ('include_dirs', lambda x: x),
'-D': ('define_macros',
lambda x: (x.partition('=')[0], x.partition('=')[2] or None)),
'-U': ('undef_macros', lambda x: x)
}
return parse_flags(flags, flag_map, 'extra_compile_args')
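# For example (illustrative values only), parse_cflags('-I/usr/include/SDL2 -D_REENTRANT')
# returns {'include_dirs': ['/usr/include/SDL2'], 'define_macros': [('_REENTRANT', None)]},
# and any unrecognised arguments are collected under 'extra_compile_args'.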
def parse_libs(flags):
"""Parse link flags
:param flags str: str of flags and their arguments.
:return: dict
"""
flag_map = {
'-L': ('library_dirs', lambda x: x),
'-l': ('libraries', lambda x: x),
'-Wl,-rpath,': ('runtime_library_dirs', lambda x: x),
'-R': ('runtime_library_dirs', lambda x: x)
}
return parse_flags(flags, flag_map, 'extra_link_args')
def update_ext(ext, sources=None, include_dirs=None, define_macros=None,
undef_macros=None, library_dirs=None, libraries=None,
runtime_library_dirs=None, extra_objects=None,
extra_compile_args=None, extra_link_args=None,
export_symbols=None, swig_opts=None, depends=None,
language=None, optional=None):
"""Updates Extension `ext`"""
if sources:
ext.sources.extend(sources)
if include_dirs:
ext.include_dirs.extend(include_dirs)
if define_macros:
ext.define_macros.extend(define_macros)
if undef_macros:
ext.undef_macros.extend(undef_macros)
if library_dirs:
ext.library_dirs.extend(library_dirs)
if libraries:
ext.libraries.extend(libraries)
if runtime_library_dirs:
ext.runtime_library_dirs.extend(runtime_library_dirs)
if extra_objects:
ext.extra_objects.extend(extra_objects)
if extra_compile_args:
ext.extra_compile_args.extend(extra_compile_args)
if extra_link_args:
ext.extra_link_args.extend(extra_link_args)
if export_symbols:
ext.export_symbols.extend(export_symbols)
if swig_opts:
ext.swig_opts.extend(swig_opts)
if depends:
ext.depends.extend(depends)
if language is not None:
ext.language = language
if optional is not None:
ext.optional = optional
def cstringify(x):
"""Converts str `x` into a C string literal
:param x str: input string
:return: C string literal
"""
return '"{0}"'.format(re.sub(r'[\\"]', r'\\\0', x))
def get_csdl2_base_ext(platform):
"""Returns csdl2 Extension that is not linked to SDL2
:param platform str: Platform string
:return: 2-tuple ``(Extension, headers)``
"""
ext = Extension(name='csdl2',
sources=[join('src', 'csdl2.c')])
headers = glob(join('include', '*.h'))
headers += glob(join('include', '*.pxd'))
return ext, headers
def get_csdl2_system_ext(platform):
"""Returns csdl2 Extension dynamically-linked to system SDL2.
Requires either pkg-config or sdl2-config to be present in $PATH.
:param platform str: Platform string
:return: 2-tuple ``(Extension, headers)``
"""
PYCSDL2_LIB = os.getenv('PYCSDL2_LIB', 'auto')
ext, headers = get_csdl2_base_ext(platform)
if PYCSDL2_LIB in ('auto', 'system'):
try:
cflags = sdl2_config(cflags=True)
ldflags = sdl2_config(libs=True)
except (FileNotFoundError, subprocess.CalledProcessError):
cflags = pkg_config(['sdl2'], cflags=True)
ldflags = pkg_config(['sdl2'], libs=True)
elif PYCSDL2_LIB == 'sdl2-config':
cflags = sdl2_config(cflags=True)
ldflags = sdl2_config(libs=True)
elif PYCSDL2_LIB == 'pkg-config':
cflags = pkg_config(['sdl2'], cflags=True)
ldflags = pkg_config(['sdl2'], libs=True)
else:
raise ValueError('Unknown PYCSDL2_LIB value {0!r}'.format(PYCSDL2_LIB))
cflags = parse_cflags(cflags)
ldflags = parse_libs(ldflags)
update_ext(ext, **cflags)
update_ext(ext, **ldflags)
# Define PYCSDL2_INCLUDE_DIRS
include_dirs = [cstringify(x) for x in cflags.get('include_dirs', [])]
include_dirs = ','.join(include_dirs)
if include_dirs:
ext.define_macros.append(('PYCSDL2_INCLUDE_DIRS',
include_dirs + ','))
# Define PYCSDL2_DEFINE_MACROS
define_macros = []
for k, v in cflags.get('define_macros', []):
define_macros.append(cstringify(k))
if v is None:
define_macros.append('NULL')
else:
define_macros.append(cstringify(v))
define_macros = ','.join(define_macros)
if define_macros:
ext.define_macros.append(('PYCSDL2_DEFINE_MACROS',
define_macros + ','))
# Define PYCSDL2_UNDEF_MACROS
undef_macros = [cstringify(x) for x in cflags.get('undef_macros', [])]
undef_macros = ','.join(undef_macros)
if undef_macros:
ext.define_macros.append(('PYCSDL2_UNDEF_MACROS',
undef_macros + ','))
# Define PYCSDL2_EXTRA_COMPILE_ARGS
extra_compile_args = [cstringify(x) for x in
cflags.get('extra_compile_args', [])]
extra_compile_args = ','.join(extra_compile_args)
if extra_compile_args:
ext.define_macros.append(('PYCSDL2_EXTRA_COMPILE_ARGS',
extra_compile_args + ','))
# Define PYCSDL2_LIBRARY_DIRS
library_dirs = [cstringify(x) for x in ldflags.get('library_dirs', [])]
library_dirs = ','.join(library_dirs)
if library_dirs:
ext.define_macros.append(('PYCSDL2_LIBRARY_DIRS',
library_dirs + ','))
# Define PYCSDL2_LIBRARIES
libraries = [cstringify(x) for x in ldflags.get('libraries', [])]
libraries = ','.join(libraries)
if libraries:
ext.define_macros.append(('PYCSDL2_LIBRARIES',
libraries + ','))
# Define PYCSDL2_RUNTIME_LIBRARY_DIRS
    runtime_library_dirs = [cstringify(x) for x in
                            ldflags.get('runtime_library_dirs', [])]
runtime_library_dirs = ','.join(runtime_library_dirs)
if runtime_library_dirs:
ext.define_macros.append(('PYCSDL2_RUNTIME_LIBRARY_DIRS',
runtime_library_dirs + ','))
# Define PYCSDL2_EXTRA_LINK_ARGS
    extra_link_args = [cstringify(x) for x in
                       ldflags.get('extra_link_args', [])]
extra_link_args = ','.join(extra_link_args)
if extra_link_args:
ext.define_macros.append(('PYCSDL2_EXTRA_LINK_ARGS',
extra_link_args + ','))
return ext, headers
def get_csdl2_bundled_ext(platform):
"""Returns csdl2 Extension static-linked to bundled SDL2 source code.
:param platform str: Platform string
:return: 2-tuple ``(Extension, headers)``
"""
ext, headers = get_csdl2_base_ext(platform)
ext.sources += glob(join('deps', 'SDL', 'src', '*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'core', '*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'atomic', '*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'audio', '*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'cpuinfo', '*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'dynapi', '*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'events', '*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'file', '*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'libm', '*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'stdlib', '*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'thread', '*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'timer', '*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'video', '*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'video', 'dummy', '*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'audio', 'dummy', '*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'audio', 'disk', '*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'joystick', '*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'haptic', '*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'power', '*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'render', '*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'render', '*', '*.c'))
ext.include_dirs.append(join('deps', 'SDL', 'include'))
headers.extend(glob(join('deps', 'SDL', 'include', '*.h')))
if platform in ('win-amd64', 'win32'):
if 'DXSDK_DIR' not in os.environ:
raise RuntimeError('DXSDK_DIR environment variable not defined. '
'Install the standalone DirectX SDK')
ext.include_dirs.append(join(os.environ['DXSDK_DIR'], 'Include'))
if platform == 'win32':
ext.library_dirs.append(join(os.environ['DXSDK_DIR'], 'Lib',
'x86'))
elif platform == 'win-amd64':
ext.library_dirs.append(join(os.environ['DXSDK_DIR'], 'Lib',
'x64'))
else:
raise NotImplementedError('Unsupported platform '
'{0}'.format(platform))
ext.sources += glob(join('deps', 'SDL', 'src', 'core', 'windows',
'*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'video', 'windows',
'*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'audio', 'winmm',
'*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'audio', 'directsound',
'*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'audio', 'xaudio2',
'*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'joystick', 'windows',
'*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'haptic', 'windows',
'*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'power', 'windows',
'*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'filesystem', 'windows',
'*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'timer', 'windows',
'*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'loadso', 'windows',
'*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'thread', 'windows',
'*.c'))
ext.sources.append(join('deps', 'SDL', 'src', 'thread', 'generic',
'SDL_syscond.c'))
ext.libraries += ['user32', 'gdi32', 'winmm', 'imm32', 'ole32',
'oleaut32', 'shell32', 'version', 'uuid', 'd3d9',
'd3dx9', 'kernel32']
elif platform.startswith('macosx-'):
ext.define_macros += [('_THREAD_SAFE', None)]
ext.sources += glob(join('deps', 'SDL', 'src', 'file', 'cocoa', '*.m'))
ext.sources += glob(join('deps', 'SDL', 'src', 'audio', 'coreaudio',
'*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'joystick', 'darwin',
'*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'haptic', 'darwin',
'*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'power', 'macosx',
'*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'timer', 'unix',
'*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'filesystem', 'cocoa',
'*.m'))
ext.sources += glob(join('deps', 'SDL', 'src', 'video', 'cocoa',
'*.m'))
ext.sources += glob(join('deps', 'SDL', 'src', 'thread', 'pthread',
'*.c'))
ext.sources += glob(join('deps', 'SDL', 'src', 'loadso', 'dlopen',
'*.c'))
ext.extra_link_args += ['-framework', 'Cocoa',
'-framework', 'ForceFeedback',
'-framework', 'Carbon',
'-framework', 'CoreAudio',
'-framework', 'AudioUnit',
'-framework', 'OpenGL']
else:
raise NotImplementedError('Unsupported platform {0}'.format(platform))
return ext, headers
def get_csdl2_ext(platform):
"""Returns csdl2 Extension appropriate for `platform`.
:param platform str: Platform string
:return: 2-tuple ``(Extension, headers)``
"""
PYCSDL2_LIB = os.getenv('PYCSDL2_LIB', 'auto')
if PYCSDL2_LIB == 'auto':
try:
return get_csdl2_bundled_ext(platform)
except NotImplementedError:
return get_csdl2_system_ext(platform)
elif PYCSDL2_LIB == 'bundled':
        return get_csdl2_bundled_ext(platform)
elif PYCSDL2_LIB in ('system', 'pkg-config', 'sdl2-config'):
return get_csdl2_system_ext(platform)
else:
raise ValueError('Unknown PYCSDL2_LIB value {0!r}'.format(PYCSDL2_LIB))
def get_csdl2test_ext(csdl2_ext):
"""Returns an appropriate _csdl2test Extension for csdl2_ext
:param csdl2_ext: The csdl2 Extension returned from get_csdl2_ext().
:type csdl2_ext: distutils.extension.Extension
"""
ext = Extension(name='_csdl2test',
sources=[join('ctest', '_csdl2test.c')])
# Copy C flags from csdl2_ext to ext
update_ext(ext, include_dirs=csdl2_ext.include_dirs,
define_macros=csdl2_ext.define_macros,
undef_macros=csdl2_ext.undef_macros,
extra_compile_args=csdl2_ext.extra_compile_args)
return ext
extension, headers = get_csdl2_ext(distutils.util.get_platform())
setup(name='pycsdl2',
version='2.0.0.0.dev5',
description='Simple DirectMedia Layer',
long_description=open('README.rst').read(),
url='https://github.com/pyokagan/pycsdl2',
author='Paul Tan',
author_email='pyokagan@pyokagan.name',
license='zlib',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: MacOS X :: Cocoa',
'Environment :: Win32 (MS Windows)',
'Environment :: X11 Applications',
'Intended Audience :: Developers',
'License :: OSI Approved :: zlib/libpng License',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: BSD :: NetBSD',
'Operating System :: POSIX :: BSD :: OpenBSD',
'Operating System :: POSIX :: Linux',
'Programming Language :: C',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Topic :: Games/Entertainment',
'Topic :: Multimedia :: Graphics',
'Topic :: Multimedia :: Graphics :: 3D Rendering',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Multimedia :: Sound/Audio :: Players',
'Topic :: Multimedia :: Sound/Audio :: Capture/Recording',
'Topic :: Software Development :: Libraries :: Python Modules'
],
keywords='sdl sdl2 opengl opengles opengles2',
ext_modules=[extension, get_csdl2test_ext(extension)],
headers=headers)
|
import os.path
def main(request, response):
type = request.GET.first("type", None)
if type != None and "svg" in type:
filename = "green-96x96.svg"
else:
filename = "blue96x96.png"
path = os.path.join(os.path.dirname(__file__), "../../../images", filename)
body = open(path, "rb").read()
response.add_required_headers = False
response.writer.write_status(200)
response.writer.write_header("x-content-type-options", "nosniff")
response.writer.write_header("content-length", len(body))
if(type != None):
response.writer.write_header("content-type", type)
response.writer.end_headers()
response.writer.write(body)
|
from django.contrib.admin import AdminSite
from django.test import TestCase
from django.test.client import RequestFactory
from ltilaunch.admin import LTIToolConsumerAdmin, LTIToolProviderAdmin
from ltilaunch.models import LTIToolConsumer, LTIToolProvider
class LTIToolConsumerAdminTestCase(TestCase):
def setUp(self):
self.request_factory = RequestFactory()
def test_form_widgets(self):
admin = LTIToolConsumerAdmin(LTIToolConsumer, AdminSite())
req = self.request_factory.get('/')
form = admin.get_form(req)
self.assertIsNotNone(form)
for field in ('tool_consumer_instance_guid',
'oauth_consumer_secret',
'oauth_consumer_key'):
self.assertIn(field, form.base_fields)
widget = form.base_fields[field].widget
self.assertEquals(1, widget.attrs['rows'])
class LTIToolProviderAdminTestCase(TestCase):
def setUp(self):
self.request_factory = RequestFactory()
def test_form_widgets(self):
admin = LTIToolProviderAdmin(LTIToolProvider, AdminSite())
req = self.request_factory.get('/')
form = admin.get_form(req)
self.assertIsNotNone(form)
self.assertIn('launch_path', form.base_fields)
widget = form.base_fields['launch_path'].widget
self.assertEquals(1, widget.attrs['rows'])
|
from os import path
this_dir = path.dirname(path.realpath(__file__))
input_file = path.join(this_dir, "input.txt")
diff_count = {1: 0, 2: 0, 3: 0}
distinct_arrangements = 0
def get_num_combinations_ending_with(num, input_dict):
return input_dict.get(num - 1, 0) \
+ input_dict.get(num - 2, 0) \
+ input_dict.get(num - 3, 0)
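# DP recurrence: the number of valid adapter chains ending at joltage n is the sum of the
# counts ending at n-1, n-2 and n-3, since adjacent adapters may differ by at most 3 jolts;
# the base case input_dict[0] = 1 below represents the charging outlet.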
with open(input_file) as f:
input_list = sorted([int(entry.strip()) for entry in f])
input_dict = {0: 1}
input_dict.update({item: 0 for item in input_list})
input_dict[input_list[-1] + 3] = 0
prev_item = 0
for key in input_dict:
if prev_item:
diff_count[key - prev_item] += 1
else:
diff_count[key] = 1
if key:
            input_dict[key] = get_num_combinations_ending_with(key, input_dict)
distinct_arrangements = input_dict[key]
prev_item = key
print (f"Part one: {diff_count[1] * diff_count[3]}")
print (f"Part two: {distinct_arrangements}")
|
import requests
import json
import time
def getETHValue():
# GraphQL Query
query = f'''{{
token(id: "0x6b175474e89094c44da98b954eedeac495271d0f"){{
name
symbol
decimals
derivedETH
tradeVolumeUSD
totalLiquidity
}}
}}'''
url = 'https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v2'
dai = requests.post(url, json={'query': query})
dai= dai.json()
dai_value = float(dai['data']['token']['derivedETH'])
eth_value = 1/dai_value
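    # derivedETH is the price of one DAI expressed in ETH; assuming 1 DAI ~ 1 USD,
    # the ETH/USD price is approximately the reciprocal of that value.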
return(eth_value)
def getTokenValue(address: str) -> float:
# GraphQL Query
query = f'''{{
token(id: "{address}"){{
name
symbol
decimals
derivedETH
tradeVolumeUSD
totalLiquidity
}}
}}'''
url = 'https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v2'
try:
token = requests.post(url, json={'query': query})
token = token.json()
token_eth = float(token['data']['token']['derivedETH'])
except TypeError:
token_eth = 0
eth_usd = getETHValue()
token_usd = token_eth * eth_usd
return(token_usd)
if __name__ == "__main__":
token = "0xdacd69347de42babfaecd09dc88958378780fb62"
eth_value = getETHValue()
print(f"ETH/USD: {eth_value}")
token_value = getTokenValue(token)
print(f"Token/USD: {token_value}")
|
"""
Wallet Exceptions
"""
class PassParameterException(Exception):
"""
Parameter based Exception
"""
|
dj-database-url==0.5.0
Django==2.2.7
django-bootstrap3==11.1.0
django-heroku==0.3.1
gunicorn==20.0.0
Pillow==6.2.1
psycopg2==2.8.4
pytz==2019.3
sqlparse==0.3.0
whitenoise==4.1.4
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-01-09 00:06
import hanlp
recognizer = hanlp.load(hanlp.pretrained.ner.MSRA_NER_BERT_BASE_ZH)
print(recognizer([list('孽债 (上海话)')]))
print(recognizer(['超', '长'] * 256))
|
#!/usr/bin/python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from urlparse import urlparse
from resource_management.core.resources.system import Execute
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.functions import format
from resource_management.libraries.resources.properties_file import PropertiesFile
from resource_management.core.logger import Logger
def create_atlas_configs():
import params
if params.sac_enabled:
atlas_application_properties = params.application_properties
atlas_application_properties_override = params.application_properties_override
atlas_application_properties_yarn = params.application_properties_yarn
for property_name in params.atlas_application_properties_to_include:
if property_name in atlas_application_properties and not property_name in atlas_application_properties_override:
atlas_application_properties_override[property_name] = atlas_application_properties[property_name]
if params.security_enabled:
for property_name in params.secure_atlas_application_properties_to_include.keys():
if not property_name in atlas_application_properties_override:
atlas_application_properties_override[property_name] = params.secure_atlas_application_properties_to_include[property_name]
PropertiesFile(params.atlas_properties_path,
properties = atlas_application_properties_override,
mode=0644,
owner=params.spark_user,
group=params.user_group
)
atlas_application_properties_override_copy = atlas_application_properties_override.copy()
if params.security_enabled:
atlas_application_properties_override_copy.pop("atlas.jaas.KafkaClient.option.keyTab")
atlas_application_properties_override_copy.update(atlas_application_properties_yarn)
atlas_application_properties_yarn = atlas_application_properties_override_copy
PropertiesFile(params.atlas_properties_for_yarn_path,
properties = atlas_application_properties_yarn,
mode=0644,
owner=params.spark_user,
group=params.user_group
)
def check_sac_jar():
import params
if params.sac_enabled:
sac_jar_exists = False
if os.path.isdir(params.spark_atlas_jar_dir):
for file in os.listdir(params.spark_atlas_jar_dir):
if str(file).startswith("spark-atlas-connector-assembly"):
sac_jar_exists = True
if not sac_jar_exists:
raise Exception("Please check that SAC jar is available in " + params.spark_atlas_jar_dir)
else:
Logger.info("SAC jar is available.")
|
import datetime as dt
def datas():
data = dt.datetime.now()
data_br = data.strftime('%d/%m/%Y')
return data_br
def horas():
hora = dt.datetime.now()
hora_br = hora.strftime('%H:%M:%S')
return hora_br
|
"""
* Copyright 2019 EPAM Systems
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import json
import logging
import requests
import urllib3
import traceback
import elasticsearch
import elasticsearch.helpers
import commons.launch_objects
from elasticsearch import RequestsHttpConnection
import utils.utils as utils
from time import time
from commons.log_merger import LogMerger
from queue import Queue
from commons.log_preparation import LogPreparation
from amqp.amqp import AmqpClient
from typing import List
logger = logging.getLogger("analyzerApp.esclient")
class EsClient:
"""Elasticsearch client implementation"""
def __init__(self, app_config={}, search_cfg={}):
self.app_config = app_config
self.host = app_config["esHost"]
self.search_cfg = search_cfg
self.es_client = self.create_es_client(app_config)
self.log_preparation = LogPreparation()
self.log_merger = LogMerger()
self.tables_to_recreate = ["rp_aa_stats", "rp_model_train_stats",
"rp_suggestions_info_metrics"]
def create_es_client(self, app_config):
if not app_config["esVerifyCerts"]:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
if app_config["turnOffSslVerification"]:
return elasticsearch.Elasticsearch(
[self.host], timeout=30,
max_retries=5, retry_on_timeout=True,
http_auth=(app_config["esUser"], app_config["esPassword"]),
use_ssl=app_config["esUseSsl"],
verify_certs=app_config["esVerifyCerts"],
ssl_show_warn=app_config["esSslShowWarn"],
ca_certs=app_config["esCAcert"],
client_cert=app_config["esClientCert"],
client_key=app_config["esClientKey"],
connection_class=RequestsHttpConnection)
return elasticsearch.Elasticsearch(
[self.host], timeout=30,
max_retries=5, retry_on_timeout=True,
http_auth=(app_config["esUser"], app_config["esPassword"]),
use_ssl=app_config["esUseSsl"],
verify_certs=app_config["esVerifyCerts"],
ssl_show_warn=app_config["esSslShowWarn"],
ca_certs=app_config["esCAcert"],
client_cert=app_config["esClientCert"],
client_key=app_config["esClientKey"])
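        # Note: the two Elasticsearch() calls above are identical except that the first
        # forces RequestsHttpConnection when SSL verification is turned off.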
def get_test_item_query(self, test_item_ids, is_merged, full_log):
"""Build test item query"""
if full_log:
return {
"size": self.app_config["esChunkNumber"],
"query": {
"bool": {
"filter": [
{"terms": {"test_item": [str(_id) for _id in test_item_ids]}},
{"term": {"is_merged": is_merged}}
]
}
}}
else:
return {
"_source": ["test_item"],
"size": self.app_config["esChunkNumber"],
"query": {
"bool": {
"filter": [
{"terms": {"test_item": [str(_id) for _id in test_item_ids]}},
{"term": {"is_merged": is_merged}}
]
}
}}
def build_search_test_item_ids_query(self, log_ids):
"""Build search test item ids query"""
return {
"_source": ["test_item"],
"size": self.app_config["esChunkNumber"],
"query": {
"bool": {
"filter": [
{"range": {"log_level": {"gte": utils.ERROR_LOGGING_LEVEL}}},
{"exists": {"field": "issue_type"}},
{"term": {"is_merged": False}},
{"terms": {"_id": [str(log_id) for log_id in log_ids]}},
]
}
}}
def is_healthy(self, es_host_name):
"""Check whether elasticsearch is healthy"""
try:
url = utils.build_url(self.host, ["_cluster/health"])
res = utils.send_request(url, "GET", self.app_config["esUser"], self.app_config["esPassword"])
return res["status"] in ["green", "yellow"]
except Exception as err:
logger.error("Elasticsearch is not healthy")
logger.error(err)
return False
def update_settings_after_read_only(self, es_host):
try:
requests.put(
"{}/_all/_settings".format(
es_host
),
headers={"Content-Type": "application/json"},
data="{\"index.blocks.read_only_allow_delete\": null}"
).raise_for_status()
except Exception as err:
logger.error(err)
logger.error("Can't reset read only mode for elastic indices")
def create_index(self, index_name):
"""Create index in elasticsearch"""
logger.debug("Creating '%s' Elasticsearch index", str(index_name))
logger.info("ES Url %s", utils.remove_credentials_from_url(self.host))
try:
response = self.es_client.indices.create(index=str(index_name), body={
'settings': utils.read_json_file("", "index_settings.json", to_json=True),
'mappings': utils.read_json_file("", "index_mapping_settings.json", to_json=True)
})
logger.debug("Created '%s' Elasticsearch index", str(index_name))
return commons.launch_objects.Response(**response)
except Exception as err:
logger.error("Couldn't create index")
logger.error("ES Url %s", utils.remove_credentials_from_url(self.host))
logger.error(err)
return commons.launch_objects.Response()
def list_indices(self):
"""Get all indices from elasticsearch"""
url = utils.build_url(self.host, ["_cat", "indices?format=json"])
res = utils.send_request(url, "GET", self.app_config["esUser"], self.app_config["esPassword"])
return res
def index_exists(self, index_name, print_error=True):
"""Checks whether index exists"""
try:
index = self.es_client.indices.get(index=str(index_name))
return index is not None
except Exception as err:
if print_error:
logger.error("Index %s was not found", str(index_name))
logger.error("ES Url %s", self.host)
logger.error(err)
return False
def delete_index(self, index_name):
"""Delete the whole index"""
try:
self.es_client.indices.delete(index=str(index_name))
logger.info("ES Url %s", utils.remove_credentials_from_url(self.host))
logger.debug("Deleted index %s", str(index_name))
return True
except Exception as err:
logger.error("Not found %s for deleting", str(index_name))
logger.error("ES Url %s", utils.remove_credentials_from_url(self.host))
logger.error(err)
return False
def create_index_if_not_exists(self, index_name):
"""Creates index if it doesn't not exist"""
if not self.index_exists(index_name, print_error=False):
return self.create_index(index_name)
return True
def index_logs(self, launches):
"""Index launches to the index with project name"""
launch_ids = set()
logger.info("Indexing logs for %d launches", len(launches))
logger.info("ES Url %s", utils.remove_credentials_from_url(self.host))
t_start = time()
bodies = []
test_item_ids = []
project = None
test_item_queue = Queue()
for launch in launches:
project = str(launch.project)
test_items = launch.testItems
launch.testItems = []
for test_item in test_items:
for log in test_item.logs:
if str(log.clusterId) in launch.clusters:
log.clusterMessage = launch.clusters[str(log.clusterId)]
test_item_queue.put((launch, test_item))
launch_ids.add(launch.launchId)
del launches
if project is None:
return commons.launch_objects.BulkResponse(took=0, errors=False)
project_with_prefix = utils.unite_project_name(
project, self.app_config["esProjectIndexPrefix"])
self.create_index_if_not_exists(project_with_prefix)
while not test_item_queue.empty():
launch, test_item = test_item_queue.get()
logs_added = False
for log in test_item.logs:
if log.logLevel < utils.ERROR_LOGGING_LEVEL or not log.message.strip():
continue
bodies.append(self.log_preparation._prepare_log(
launch, test_item, log, project_with_prefix))
logs_added = True
if logs_added:
test_item_ids.append(str(test_item.testItemId))
logs_with_exceptions = utils.extract_all_exceptions(bodies)
result = self._bulk_index(bodies)
result.logResults = logs_with_exceptions
_, num_logs_with_defect_types = self._merge_logs(test_item_ids, project_with_prefix)
try:
if "amqpUrl" in self.app_config and self.app_config["amqpUrl"].strip():
AmqpClient(self.app_config["amqpUrl"]).send_to_inner_queue(
self.app_config["exchangeName"], "train_models", json.dumps({
"model_type": "defect_type",
"project_id": project,
"gathered_metric_total": num_logs_with_defect_types
}))
except Exception as err:
logger.error(err)
logger.info("Finished indexing logs for %d launches %s. It took %.2f sec.",
len(launch_ids), launch_ids, time() - t_start)
return result
def _merge_logs(self, test_item_ids, project):
bodies = []
batch_size = 1000
self._delete_merged_logs(test_item_ids, project)
num_logs_with_defect_types = 0
for i in range(int(len(test_item_ids) / batch_size) + 1):
test_items = test_item_ids[i * batch_size: (i + 1) * batch_size]
if not test_items:
continue
test_items_dict = {}
for r in elasticsearch.helpers.scan(self.es_client,
query=self.get_test_item_query(
test_items, False, True),
index=project):
test_item_id = r["_source"]["test_item"]
if test_item_id not in test_items_dict:
test_items_dict[test_item_id] = []
test_items_dict[test_item_id].append(r)
for test_item_id in test_items_dict:
merged_logs, _ = self.log_merger.decompose_logs_merged_and_without_duplicates(
test_items_dict[test_item_id])
for log in merged_logs:
if log["_source"]["is_merged"]:
bodies.append(log)
else:
bodies.append({
"_op_type": "update",
"_id": log["_id"],
"_index": log["_index"],
"doc": {"merged_small_logs": log["_source"]["merged_small_logs"]}
})
log_issue_type = log["_source"]["issue_type"]
if log_issue_type.strip() and not log_issue_type.lower().startswith("ti"):
num_logs_with_defect_types += 1
return self._bulk_index(bodies), num_logs_with_defect_types
def _delete_merged_logs(self, test_items_to_delete, project):
logger.debug("Delete merged logs for %d test items", len(test_items_to_delete))
bodies = []
batch_size = 1000
for i in range(int(len(test_items_to_delete) / batch_size) + 1):
test_item_ids = test_items_to_delete[i * batch_size: (i + 1) * batch_size]
if not test_item_ids:
continue
for log in elasticsearch.helpers.scan(self.es_client,
query=self.get_test_item_query(
test_item_ids, True, False),
index=project):
bodies.append({
"_op_type": "delete",
"_id": log["_id"],
"_index": project
})
if bodies:
self._bulk_index(bodies)
def _recreate_index_if_needed(self, bodies, formatted_exception):
index_name = ""
if bodies:
index_name = bodies[0]["_index"]
if not index_name.strip():
return
if "'type': 'mapper_parsing_exception'" in formatted_exception or\
"RequestError(400, 'illegal_argument_exception'" in formatted_exception:
if index_name in self.tables_to_recreate:
self.delete_index(index_name)
self.create_index_for_stats_info(index_name)
def _bulk_index(self, bodies, host=None, es_client=None, refresh=True, chunk_size=None):
if host is None:
host = self.host
if es_client is None:
es_client = self.es_client
if not bodies:
return commons.launch_objects.BulkResponse(took=0, errors=False)
start_time = time()
logger.debug("Indexing %d logs...", len(bodies))
es_chunk_number = self.app_config["esChunkNumber"]
if chunk_size is not None:
es_chunk_number = chunk_size
try:
try:
success_count, errors = elasticsearch.helpers.bulk(es_client,
bodies,
chunk_size=es_chunk_number,
request_timeout=30,
refresh=refresh)
except: # noqa
formatted_exception = traceback.format_exc()
self._recreate_index_if_needed(bodies, formatted_exception)
self.update_settings_after_read_only(host)
success_count, errors = elasticsearch.helpers.bulk(es_client,
bodies,
chunk_size=es_chunk_number,
request_timeout=30,
refresh=refresh)
logger.debug("Processed %d logs", success_count)
if errors:
logger.debug("Occured errors %s", errors)
logger.debug("Finished indexing for %.2f s", time() - start_time)
return commons.launch_objects.BulkResponse(took=success_count, errors=len(errors) > 0)
except Exception as err:
logger.error("Error in bulk")
logger.error("ES Url %s", utils.remove_credentials_from_url(host))
logger.error(err)
return commons.launch_objects.BulkResponse(took=0, errors=True)
def delete_logs(self, clean_index):
"""Delete logs from elasticsearch"""
index_name = utils.unite_project_name(
str(clean_index.project), self.app_config["esProjectIndexPrefix"])
logger.info("Delete logs %s for the project %s",
clean_index.ids, index_name)
logger.info("ES Url %s", utils.remove_credentials_from_url(self.host))
t_start = time()
if not self.index_exists(index_name):
return 0
test_item_ids = set()
try:
search_query = self.build_search_test_item_ids_query(
clean_index.ids)
for res in elasticsearch.helpers.scan(self.es_client,
query=search_query,
index=index_name,
scroll="5m"):
test_item_ids.add(res["_source"]["test_item"])
except Exception as err:
logger.error("Couldn't find test items for logs")
logger.error(err)
bodies = []
for _id in clean_index.ids:
bodies.append({
"_op_type": "delete",
"_id": _id,
"_index": index_name,
})
result = self._bulk_index(bodies)
self._merge_logs(list(test_item_ids), index_name)
logger.info("Finished deleting logs %s for the project %s. It took %.2f sec",
clean_index.ids, index_name, time() - t_start)
return result.took
def create_index_for_stats_info(self, rp_aa_stats_index, override_index_name=None):
index_name = rp_aa_stats_index
if override_index_name is not None:
index_name = override_index_name
index = None
try:
index = self.es_client.indices.get(index=index_name)
except Exception:
pass
if index is None:
self.es_client.indices.create(index=index_name, body={
'settings': utils.read_json_file("", "index_settings.json", to_json=True),
'mappings': utils.read_json_file(
"", "%s_mappings.json" % rp_aa_stats_index, to_json=True)
})
else:
try:
self.es_client.indices.put_mapping(
index=index_name,
body=utils.read_json_file("", "%s_mappings.json" % rp_aa_stats_index, to_json=True))
except: # noqa
formatted_exception = traceback.format_exc()
self._recreate_index_if_needed([{"_index": index_name}], formatted_exception)
@utils.ignore_warnings
def send_stats_info(self, stats_info):
logger.info("Started sending stats about analysis")
stat_info_array = []
for launch_id in stats_info:
obj_info = stats_info[launch_id]
rp_aa_stats_index = "rp_aa_stats"
if "method" in obj_info and obj_info["method"] == "training":
rp_aa_stats_index = "rp_model_train_stats"
self.create_index_for_stats_info(rp_aa_stats_index)
stat_info_array.append({
"_index": rp_aa_stats_index,
"_source": obj_info
})
self._bulk_index(stat_info_array)
logger.info("Finished sending stats about analysis")
def get_test_items_by_ids_query(self, test_item_ids):
return {"_source": ["test_item"],
"size": self.app_config["esChunkNumber"],
"query": {
"bool": {
"filter": [
{"terms": {"test_item": test_item_ids}}
]
}}}
@utils.ignore_warnings
def defect_update(self, defect_update_info):
logger.info("Started updating defect types")
t_start = time()
test_item_ids = [int(key_) for key_ in defect_update_info["itemsToUpdate"].keys()]
defect_update_info["itemsToUpdate"] = {
int(key_): val for key_, val in defect_update_info["itemsToUpdate"].items()}
index_name = utils.unite_project_name(
str(defect_update_info["project"]), self.app_config["esProjectIndexPrefix"])
if not self.index_exists(index_name):
return test_item_ids
batch_size = 1000
log_update_queries = []
found_test_items = set()
for i in range(int(len(test_item_ids) / batch_size) + 1):
sub_test_item_ids = test_item_ids[i * batch_size: (i + 1) * batch_size]
if not sub_test_item_ids:
continue
for log in elasticsearch.helpers.scan(self.es_client,
query=self.get_test_items_by_ids_query(sub_test_item_ids),
index=index_name):
issue_type = ""
try:
test_item_id = int(log["_source"]["test_item"])
found_test_items.add(test_item_id)
issue_type = defect_update_info["itemsToUpdate"][test_item_id]
except: # noqa
pass
if issue_type.strip():
log_update_queries.append({
"_op_type": "update",
"_id": log["_id"],
"_index": index_name,
"doc": {
"issue_type": issue_type,
"is_auto_analyzed": False
}
})
self._bulk_index(log_update_queries)
items_not_updated = list(set(test_item_ids) - found_test_items)
logger.debug("Not updated test items: %s", items_not_updated)
if "amqpUrl" in self.app_config and self.app_config["amqpUrl"].strip():
AmqpClient(self.app_config["amqpUrl"]).send_to_inner_queue(
self.app_config["exchangeName"], "update_suggest_info", json.dumps(defect_update_info))
logger.info("Finished updating defect types. It took %.2f sec", time() - t_start)
return items_not_updated
def build_delete_query_by_test_items(self, sub_test_item_ids):
return {"query": {
"bool": {
"filter": [
{"terms": {"test_item": sub_test_item_ids}}
]
}}}
def build_delete_query_by_launch_ids(self, launch_ids):
return {"query": {"bool": {"filter": [{"terms": {"launch_id": launch_ids}}]}}}
@utils.ignore_warnings
def remove_test_items(self, remove_items_info):
logger.info("Started removing test items")
t_start = time()
index_name = utils.unite_project_name(
str(remove_items_info["project"]), self.app_config["esProjectIndexPrefix"])
deleted_logs = self.delete_by_query(
index_name, remove_items_info["itemsToDelete"], self.build_delete_query_by_test_items)
logger.debug("Removed %s logs by test item ids", deleted_logs)
logger.info("Finished removing test items. It took %.2f sec", time() - t_start)
return deleted_logs
@utils.ignore_warnings
def remove_launches(self, remove_launches_info):
project = remove_launches_info["project"]
launch_ids = remove_launches_info["launch_ids"]
logger.info("Started removing launches")
t_start = time()
index_name = utils.unite_project_name(
str(project), self.app_config["esProjectIndexPrefix"]
)
deleted_logs = self.delete_by_query(
index_name,
launch_ids,
self.build_delete_query_by_launch_ids,
)
logger.debug("Removed %s logs by launch ids", deleted_logs)
logger.info("Finished removing launches. It took %.2f sec", time() - t_start)
return deleted_logs
@utils.ignore_warnings
def delete_by_query(self, index_name, ids_for_removal, delete_query_deriver):
if not self.index_exists(index_name):
return 0
batch_size = 1000
deleted_logs = 0
for i in range(int(len(ids_for_removal) / batch_size) + 1):
sub_ids_for_removal = ids_for_removal[i * batch_size: (i + 1) * batch_size]
if not sub_ids_for_removal:
continue
result = self.es_client.delete_by_query(
index_name, body=delete_query_deriver(sub_ids_for_removal))
if "deleted" in result:
deleted_logs += result["deleted"]
return deleted_logs
def __time_range_query(
self,
time_field: str,
gte_time: str,
lte_time: str,
for_scan: bool = False,
) -> dict:
query = {"query": {"range": {time_field: {"gte": gte_time, "lte": lte_time}}}}
if for_scan:
query["size"] = self.app_config["esChunkNumber"]
return query
@utils.ignore_warnings
def get_launch_ids_by_start_time_range(
self, project: int, start_date: str, end_date: str
) -> List[str]:
index_name = utils.unite_project_name(
str(project), self.app_config["esProjectIndexPrefix"]
)
query = self.__time_range_query(
"launch_start_time", start_date, end_date, for_scan=True
)
launch_ids = set()
for log in elasticsearch.helpers.scan(
self.es_client, query=query, index=index_name
):
launch_ids.add(log["_source"]["launch_id"])
return list(launch_ids)
@utils.ignore_warnings
def remove_by_launch_start_time_range(
self, project: int, start_date: str, end_date: str
) -> int:
index_name = utils.unite_project_name(
str(project), self.app_config["esProjectIndexPrefix"]
)
query = self.__time_range_query("launch_start_time", start_date, end_date)
delete_response = self.es_client.delete_by_query(index_name, body=query)
return delete_response["deleted"]
@utils.ignore_warnings
def get_log_ids_by_log_time_range(
self, project: int, start_date: str, end_date: str
) -> List[str]:
index_name = utils.unite_project_name(
str(project), self.app_config["esProjectIndexPrefix"]
)
query = self.__time_range_query("log_time", start_date, end_date, for_scan=True)
log_ids = set()
for log in elasticsearch.helpers.scan(
self.es_client, query=query, index=index_name
):
log_ids.add(log["_id"])
return list(log_ids)
@utils.ignore_warnings
def remove_by_log_time_range(
self, project: int, start_date: str, end_date: str
) -> int:
index_name = utils.unite_project_name(
str(project), self.app_config["esProjectIndexPrefix"]
)
query = self.__time_range_query("log_time", start_date, end_date)
delete_response = self.es_client.delete_by_query(index_name, body=query)
return delete_response["deleted"]
|
'''
Write a program that takes a list of numbers
(for example, a = [5, 10, 15, 20, 25]) and makes a new list of only the first and last elements of the given list.
For practice, write this code inside a function.
'''
def giveFirstAndLast(listX):
    return [listX[0], listX[-1]]
a = [5, 10, 15, 20, 25]
myList = giveFirstAndLast(a)
print(myList)
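# A small variant (illustrative only, beyond what the exercise asks for): guard against
# lists with fewer than two elements so the indexing never raises IndexError.
def give_first_and_last_safe(list_x):
    # Empty input yields an empty list; a single element is returned as both first and last.
    if not list_x:
        return []
    return [list_x[0], list_x[-1]]
print(give_first_and_last_safe([5, 10, 15, 20, 25]))  # [5, 25]
print(give_first_and_last_safe([]))                   # []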
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.utils.testing import AutoTeacherTest # noqa: F401
class TestBotAdversarialDialogueTurn4Teacher(AutoTeacherTest):
task = 'bot_adversarial_dialogue:bad_speaker_to_eval=all:bad_safety_mix=all:bad_num_turns=4'
class TestBotAdversarialDialogueSafeTeacher(AutoTeacherTest):
task = 'bot_adversarial_dialogue:bad_speaker_to_eval=all:bad_safety_mix=safe:bad_num_turns=4'
class TestBotAdversarialDialogueHumanTeacher(AutoTeacherTest):
task = 'bot_adversarial_dialogue:bad_speaker_to_eval=human:bad_safety_mix=all:bad_num_turns=4'
class TestHumanSafetyEvaluation(AutoTeacherTest):
task = 'bot_adversarial_dialogue:HumanSafetyEvaluation'
|
import argparse
import glob
import hashlib
import json
import os
import random
import re
import shutil
import sys
import zipfile
import networkx as nx
from BlackDuckUtils import BlackDuckOutput as bo
from BlackDuckUtils import Utils as bu
from BlackDuckUtils import bdio as bdio
from BlackDuckUtils import globals as bdglobals
from BlackDuckUtils import MavenUtils
from BlackDuckUtils import NpmUtils
from blackduck import Client
from github import Github
import globals
def github_create_pull_request_comment(g, github_repo, pr, pr_commit, comments_markdown, comments_markdown_footer):
if (globals.debug): print(f"DEBUG: Look up GitHub repo '{github_repo}'")
repo = g.get_repo(github_repo)
if (globals.debug): print(repo)
body = f'''
Synopsys Black Duck found the following vulnerabilities in Pull Request #{pr.number}:
'''
body = body + "\n".join(comments_markdown) + "\n\n" + comments_markdown_footer
if (globals.debug): print(f"DEBUG: Get issue for pull request #{pr.number}")
issue = repo.get_issue(number = pr.number)
if (globals.debug): print(issue)
if (globals.debug): print(f"DEBUG: Create pull request review comment for pull request #{pr.number} with the following body:\n{body}")
issue.create_comment(body)
def github_commit_file_and_create_fixpr(g, github_token, github_api_url, github_repo, github_branch, files_to_patch, fix_pr_node):
if (globals.debug): print(f"DEBUG: Look up GitHub repo '{github_repo}'")
repo = g.get_repo(github_repo)
if (globals.debug): print(repo)
if (globals.debug): print(f"DEBUG: Get HEAD commit from '{github_repo}'")
commit = repo.get_commit('HEAD')
if (globals.debug): print(commit)
new_branch_seed = '%030x' % random.randrange(16**30)
#new_branch_seed = secrets.token_hex(15)
new_branch_name = github_branch + "-snps-fix-pr-" + new_branch_seed
if (globals.debug): print(f"DEBUG: Create branch '{new_branch_name}'")
ref = repo.create_git_ref("refs/heads/" + new_branch_name, commit.sha)
if (globals.debug): print(ref)
commit_message = f"Update {fix_pr_node['componentName']} to fix known security vulnerabilities"
for file_to_patch in files_to_patch:
if (globals.debug): print(f"DEBUG: Get SHA for file '{file_to_patch}'")
file = repo.get_contents(file_to_patch)
if (globals.debug): print(f"DEBUG: Upload file '{file_to_patch}'")
try:
with open(files_to_patch[file_to_patch], 'r') as fp:
file_contents = fp.read()
except:
print(f"ERROR: Unable to open package file '{files_to_patch[file_to_patch]}'")
sys.exit(1)
if (globals.debug): print(f"DEBUG: Update file '{file_to_patch}' with commit message '{commit_message}'")
file = repo.update_file(file_to_patch, commit_message, file_contents, file.sha, branch=new_branch_name)
pr_body = f'''
Pull request submitted by Synopsys Black Duck to upgrade {fix_pr_node['componentName']} from version {fix_pr_node['versionFrom']} to {fix_pr_node['versionTo']} in order to fix the known security vulnerabilities:
'''
pr_body = pr_body + "\n".join(fix_pr_node['comments_markdown']) + "\n\n" + fix_pr_node['comments_markdown_footer']
if (globals.debug):
print(f"DEBUG: Submitting pull request:")
print(pr_body)
    pr = repo.create_pull(title=f"Black Duck: Upgrade {fix_pr_node['componentName']} to version {fix_pr_node['versionTo']} to fix known security vulnerabilities", body=pr_body, head=new_branch_name, base="master")
def get_pull_requests(g, github_repo):
if (globals.debug): print(f"DEBUG: Index pull requests, Look up GitHub repo '{github_repo}'")
repo = g.get_repo(github_repo)
if (globals.debug): print(repo)
pull_requests = []
# TODO Should this handle other bases than master?
pulls = repo.get_pulls(state='open', sort='created', base='master', direction="desc")
for pull in pulls:
if (globals.debug): print(f"DEBUG: Pull request number: {pull.number}: {pull.title}")
pull_requests.append(pull.title)
return pull_requests
def get_comps(bd, pv):
comps = bd.get_json(pv + '/components?limit=5000')
newcomps = []
complist = []
for comp in comps['items']:
if 'componentVersionName' not in comp:
continue
cname = comp['componentName'] + '/' + comp['componentVersionName']
if comp['ignored'] is False and cname not in complist:
newcomps.append(comp)
complist.append(cname)
return newcomps
def get_projver(bd, projname, vername):
params = {
'q': "name:" + projname,
'sort': 'name',
}
projects = bd.get_resource('projects', params=params, items=False)
if projects['totalCount'] == 0:
return ''
# projects = bd.get_resource('projects', params=params)
for proj in projects['items']:
versions = bd.get_resource('versions', parent=proj, params=params)
for ver in versions:
if ver['versionName'] == vername:
return ver['_meta']['href']
print("ERROR: Version '{}' does not exist in project '{}'".format(projname, vername))
return ''
# Parse command line arguments
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description='Generate GitHub SARIF file from Black Duck Rapid Scan')
parser.add_argument('--debug', default=0, help='set debug level [0-9]')
parser.add_argument('--url', required=True, help='Black Duck Base URL')
parser.add_argument('--token', required=True, help='Black Duck API Token')
parser.add_argument('--output_directory', required=True, help='Rapid Scan output directory')
parser.add_argument('--output', required=True, help='File to output SARIF to')
parser.add_argument('--upgrade_major', default=False, action='store_true', help='Upgrade beyond current major version')
parser.add_argument('--fix_pr', default=False, action='store_true', help='Create Fix PR for upgrade guidance')
parser.add_argument('--comment_on_pr', default=False, action='store_true', help='Comment on the pull request being scanned')
parser.add_argument('--all_comps', default=False, action='store_true', help='Report on ALL components, not just newly introduced')
parser.add_argument('--upgrade_indirect', default=False, action='store_true', help='Attempt to upgrade indirect dependencies')
args = parser.parse_args()
globals.debug = int(args.debug)
bdglobals.debug = globals.debug
# TODO Better to read BD API Token from environment variable
#bd_apitoken = os.getenv("BLACKDUCK_TOKEN")
#if (bd_apitoken == None or bd_apitoken == ""):
# print("ERROR: Please set BLACKDUCK_TOKEN in environment before running")
# sys.exit(1)
bd_apitoken = args.token
bd_url = args.url
bd_output_dir = args.output_directory
upgrade_major = args.upgrade_major
sarif_output_file = args.output
fix_pr = args.fix_pr
comment_pr = args.comment_on_pr
allcomps = args.all_comps
upgrade_indirect = args.upgrade_indirect
fix_pr_annotation = ""
bd = Client(token=bd_apitoken,
base_url=bd_url,
timeout=300)
project_baseline_name, project_baseline_version, detected_package_files = bo.get_blackduck_status(bd_output_dir)
print(f"INFO: Running for project '{project_baseline_name}' version '{project_baseline_version}'")
# Look up baseline data
pvurl = get_projver(bd, project_baseline_name, project_baseline_version)
baseline_comp_cache = dict()
if (not allcomps):
if (pvurl == ''):
print(f"WARN: Unable to find project '{project_baseline_name}' version '{project_baseline_version}' - will not present incremental results")
else:
if (globals.debug): print(f"DEBUG: Project Version URL: {pvurl}")
baseline_comps = get_comps(bd, pvurl)
#if (globals.debug): print(f"DEBUG: Baseline components=" + json.dumps(baseline_comps, indent=4))
#sys.exit(1)
# Can't cache the component Id / external id very easily here as it's not top-level,
# and may have multiple origins
for comp in baseline_comps:
if (not comp['componentName'] in baseline_comp_cache): baseline_comp_cache[comp['componentName']] = dict()
#if (baseline_comp_cache[comp['componentName']] == None): baseline_comp_cache[comp['componentName']] = dict()
baseline_comp_cache[comp['componentName']][comp['componentVersionName']] = 1
#baseline_comp_cache[comp['componentName']] = comp['componentVersionName']
if (globals.debug): print(f"DEBUG: Baseline component cache=" + json.dumps(baseline_comp_cache, indent=4))
if (globals.debug): print(f"DEBUG: Generated baseline component cache")
bdio_graph, bdio_projects = bdio.get_bdio_dependency_graph(bd_output_dir)
if (len(bdio_projects) == 0):
print("ERROR: Unable to find base project in BDIO file")
sys.exit(1)
rapid_scan_data = bo.get_rapid_scan_results(bd_output_dir, bd)
# Prepare SARIF output structures
runs = []
run = dict()
component_match_types = dict()
components = dict()
tool_rules = []
results = []
fix_pr_data = dict()
comment_on_pr_comments = []
for item in rapid_scan_data['items']:
if (globals.debug):
print(f"DEBUG: Component: {item['componentIdentifier']}")
#print(item)
#sys.exit(1)
comp_ns, comp_name, comp_version = bu.parse_component_id(item['componentIdentifier'])
# If comparing to baseline, look up in cache and continue if already exists
if (not allcomps and item['componentName'] in baseline_comp_cache):
if (item['versionName'] in baseline_comp_cache[item['componentName']] and baseline_comp_cache[item['componentName']][item['versionName']] == 1):
if (globals.debug): print(f"DEBUG: Skipping component {item['componentName']} version {item['versionName']} because it was already seen in baseline")
continue
else:
if (globals.debug): print(f"DEBUG: Including component {item['componentName']} version {item['versionName']} because it was not seen in baseline")
# Is this a direct dependency?
dependency_type = "Direct"
# Track the root dependencies
dependency_paths = []
direct_ancestors = dict()
if (globals.debug): print(f"DEBUG: Looking for {item['componentIdentifier']}")
if (globals.debug):
print(f"DEBUG: comp_ns={comp_ns} comp_name={comp_name} comp_version={comp_version}")
# Matching in the BDIO requires an http: prefix
if (comp_ns == "npmjs"):
node_http_name = NpmUtils.convert_to_bdio(item['componentIdentifier'])
elif (comp_ns == "maven"):
node_http_name = MavenUtils.convert_to_bdio(item['componentIdentifier'])
else:
print(f"ERROR: Domain '{comp_ns}' not supported yet")
sys.exit(1)
if (globals.debug): print(f"DEBUG: Looking for {node_http_name}")
ans = nx.ancestors(bdio_graph, node_http_name)
ans_list = list(ans)
if (globals.debug): print(f"DEBUG: Ancestors are: {ans_list}")
pred = nx.DiGraph.predecessors(bdio_graph, node_http_name)
pred_list = list(pred)
if (globals.debug): print(f"DEBUG: Predecessors are: {ans_list}")
if (len(ans_list) != 1):
dependency_type = "Transitive"
# If this is a transitive dependency, what are the flows?
for proj in bdio_projects:
dep_paths = nx.all_simple_paths(bdio_graph, source=proj, target=node_http_name)
if (globals.debug): print(f"DEBUG: Paths to '{node_http_name}'")
paths = []
for path in dep_paths:
# First generate a string for easy output and reading
path_modified = path
path_modified.pop(0)
# Subtract http:<domain>/
path_modified_trimmed = [re.sub(r'http\:.*?\/', '', path_name) for path_name in path_modified]
# Change / to @
path_modified_trimmed = [re.sub(r'\/', '@', path_name) for path_name in path_modified_trimmed]
pathstr = " -> ".join(path_modified_trimmed)
if (globals.debug): print(f"DEBUG: path={pathstr}")
dependency_paths.append(pathstr)
if upgrade_indirect:
# Then log the direct dependencies directly
direct_dep = path_modified_trimmed[0]
direct_name = direct_dep.split('@')[0]
direct_version = direct_dep.split('@')[1]
direct_ancestors[direct_dep] = 1
if (globals.debug): print(f"DEBUG: Direct ancestor: {direct_dep} is of type {node_domain}")
if (comp_ns == "npmjs"):
NpmUtils.attempt_indirect_upgrade(comp_ns, comp_version, direct_name, direct_version)
else:
if (globals.debug): print(f"DEBUG: Domain '{comp_ns}' cannot be auto upgraded")
# Get component upgrade advice
shortTerm, longTerm = bu.get_upgrade_guidance(bd, item['componentIdentifier'])
upgrade_version = None
if (upgrade_major):
if (longTerm != None):
upgrade_version = longTerm
else:
if (shortTerm != None):
upgrade_version = shortTerm
if (globals.debug): print(f"DEUBG: Detected package files={detected_package_files} item={item}")
package_file, package_line = bu.detect_package_file(detected_package_files, item['componentIdentifier'], item['componentName'])
if (globals.debug): print(f"DEBUG: package file for {item['componentIdentifier']} is {package_file} on line {package_line} type is {dependency_type}")
if (dependency_type == "Direct" and upgrade_version != None):
fix_pr_node = dict()
fix_pr_node['componentName'] = comp_name
fix_pr_node['versionFrom'] = comp_version
fix_pr_node['versionTo'] = upgrade_version
fix_pr_node['ns'] = comp_ns
fix_pr_node['filename'] = bu.remove_cwd_from_filename(package_file)
fix_pr_node['comments'] = []
fix_pr_node['comments_markdown'] = ["| ID | Severity | Description | Vulnerable version | Upgrade to |", "| --- | --- | --- | --- | --- |"]
fix_pr_node['comments_markdown_footer'] = ""
# Loop through policy violations and append to SARIF output data
if (globals.debug):
print(f"DEBUG: Loop through policy violations")
print(item['policyViolationVulnerabilities'])
for vuln in item['policyViolationVulnerabilities']:
if (upgrade_version != None):
message = f"* {vuln['name']} - {vuln['vulnSeverity']} severity vulnerability violates policy '{vuln['violatingPolicies'][0]['policyName']}': *{vuln['description']}* Recommended to upgrade to version {upgrade_version}. {dependency_type} dependency."
message_markdown = f"| {vuln['name']} | {vuln['vulnSeverity']} | {vuln['description']} | {comp_version} | {upgrade_version} | "
comment_on_pr = f"| {vuln['name']} | {dependency_type} | {vuln['name']} | {vuln['vulnSeverity']} | {vuln['violatingPolicies'][0]['policyName']} | {vuln['description']} | {comp_version} | {upgrade_version} |"
else:
message = f"* {vuln['name']} - {vuln['vulnSeverity']} severity vulnerability violates policy '{vuln['violatingPolicies'][0]['policyName']}': *{vuln['description']}* No upgrade available at this time. {dependency_type} dependency."
message_markdown = f"| {vuln['name']} | {vuln['vulnSeverity']} | {vuln['description']} | {comp_version} | {upgrade_version} | "
comment_on_pr = f"| {vuln['name']} | {dependency_type} | {vuln['name']} | {vuln['vulnSeverity']} | {vuln['violatingPolicies'][0]['policyName']} | {vuln['description']} | {comp_version} | N/A |"
if (dependency_type == "Direct"):
message = message + f"Fix in package file '{bu.remove_cwd_from_filename(package_file)}'"
message_markdown_footer = f"**Fix in package file '{bu.remove_cwd_from_filename(package_file)}'**"
else:
if (len(dependency_paths) > 0):
message = message + f"Find dependency in {dependency_paths[0]}"
message_markdown_footer = f"**Find dependency in {dependency_paths[0]}**"
print("INFO: " + message)
comment_on_pr_comments.append(comment_on_pr)
# Save message to include in Fix PR
if (dependency_type == "Direct" and upgrade_version != None):
fix_pr_node['comments'].append(message)
fix_pr_node['comments_markdown'].append(message_markdown)
fix_pr_node['comments_markdown_footer'] = message_markdown_footer
result = dict()
result['ruleId'] = vuln['name']
message = dict()
message['text'] = f"This file introduces a {vuln['vulnSeverity']} severity vulnerability in {comp_name}."
result['message'] = message
locations = []
loc = dict()
loc['file'] = bu.remove_cwd_from_filename(package_file)
# TODO: Can we reference the line number in the future, using project inspector?
loc['line'] = package_line
tool_rule = dict()
tool_rule['id'] = vuln['name']
shortDescription = dict()
shortDescription['text'] = f"{vuln['name']} - {vuln['vulnSeverity']} severity vulnerability in {comp_name}"
tool_rule['shortDescription'] = shortDescription
fullDescription = dict()
fullDescription['text'] = f"This file introduces a {vuln['vulnSeverity']} severity vulnerability in {comp_name}"
tool_rule['fullDescription'] = fullDescription
rule_help = dict()
rule_help['text'] = ""
if (upgrade_version != None):
rule_help['markdown'] = f"**{vuln['name']}:** *{vuln['description']}*\n\nRecommended to upgrade to version {upgrade_version}.\n\n"
else:
rule_help['markdown'] = f"**{vuln['name']}:** *{vuln['description']}*\n\nNo upgrade available at this time.\n\n"
if (dependency_type == "Direct"):
rule_help['markdown'] = rule_help['markdown'] + f"Fix in package file '{bu.remove_cwd_from_filename(package_file)}'"
else:
if (len(dependency_paths) > 0):
rule_help['markdown'] = rule_help['markdown'] + f" Find dependency in **{dependency_paths[0]}**."
tool_rule['help'] = rule_help
defaultConfiguration = dict()
if (vuln['vulnSeverity'] == "CRITICAL" or vuln['vulnSeverity'] == "HIGH"):
defaultConfiguration['level'] = "error"
elif (vuln['vulnSeverity'] == "MEDIUM"):
defaultConfiguration['level'] = "warning"
else:
defaultConfiguration['level'] = "note"
tool_rule['defaultConfiguration'] = defaultConfiguration
properties = dict()
properties['tags'] = ["security"]
properties['security-severity'] = str(vuln['overallScore'])
tool_rule['properties'] = properties
tool_rules.append(tool_rule)
location = dict()
physicalLocation = dict()
artifactLocation = dict()
artifactLocation['uri'] = loc['file']
physicalLocation['artifactLocation'] = artifactLocation
region = dict()
region['startLine'] = loc['line']
physicalLocation['region'] = region
location['physicalLocation'] = physicalLocation
locations.append(location)
result['locations'] = locations
        # Calculate fingerprint simply from the CVE/BDSA name - the scope is the project in GitHub, so this should be fairly accurate for identifying a unique issue.
# Guidance from https://docs.github.com/en/code-security/code-scanning/integrating-with-code-scanning/sarif-support-for-code-scanning#preventing-duplicate-alerts-using-fingerprints
# and https://docs.oasis-open.org/sarif/sarif/v2.1.0/cs01/sarif-v2.1.0-cs01.html#_Toc16012611
# TODO Should this just leave it alone and let GitHub calculate it?
partialFingerprints = dict()
primaryLocationLineHash = hashlib.sha224(b"{vuln['name']}").hexdigest()
partialFingerprints['primaryLocationLineHash'] = primaryLocationLineHash
result['partialFingerprints'] = partialFingerprints
results.append(result)
if (dependency_type == "Direct" and upgrade_version != None):
fix_pr_data[comp_name + "@" + comp_name] = fix_pr_node
#fix_pr_data.append(fix_pr_node)
run['results'] = results
runs.append(run)
tool = dict()
driver = dict()
driver['name'] = "Synopsys Black Duck"
driver['organization'] = "Synopsys"
driver['rules'] = tool_rules
tool['driver'] = driver
run['tool'] = tool
code_security_scan_report = dict()
code_security_scan_report['runs'] = runs
code_security_scan_report['$schema'] = "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json"
code_security_scan_report['version'] = "2.1.0"
code_security_scan_report['runs'] = runs
if (globals.debug):
print("DEBUG: SARIF Data structure=" + json.dumps(code_security_scan_report, indent=4))
try:
with open(sarif_output_file, "w") as fp:
json.dump(code_security_scan_report, fp, indent=4)
except:
print(f"ERROR: Unable to write to SARIF output file '{sarif_output_file}'")
sys.exit(1)
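# For reference, the SARIF document written above has this abridged shape:
#   {
#     "$schema": "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json",
#     "version": "2.1.0",
#     "runs": [{
#       "tool": {"driver": {"name": "Synopsys Black Duck", "organization": "Synopsys", "rules": [...]}},
#       "results": [{"ruleId": "<vuln name>", "message": {...}, "locations": [...], "partialFingerprints": {...}}]
#     }]
#   }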
# Optionally generate Fix PR
fix_pr_components = dict()
if (fix_pr and len(fix_pr_data.values()) > 0):
github_token = os.getenv("GITHUB_TOKEN")
github_repo = os.getenv("GITHUB_REPOSITORY")
github_branch = os.getenv("GITHUB_REF")
github_api_url = os.getenv("GITHUB_API_URL")
if (github_token == None or github_repo == None or github_branch == None or github_api_url == None):
print("ERROR: Cannot find GITHUB_TOKEN, GITHUB_REPOSITORY, GITHUB_REF and/or GITHUB_API_URL in the environment - are you running from a GitHub action?")
sys.exit(1)
if (globals.debug): print(f"DEBUG: Connect to GitHub at {github_api_url}")
g = Github(github_token, base_url=github_api_url)
print("DEBUG: Generating Fix Pull Requests")
pulls = get_pull_requests(g, github_repo)
for fix_pr_node in fix_pr_data.values():
if (globals.debug): print(f"DEBUG: Fix '{fix_pr_node['componentName']}' version '{fix_pr_node['versionFrom']}' in file '{fix_pr_node['filename']}' using ns '{fix_pr_node['ns']}' to version '{fix_pr_node['versionTo']}'")
pull_request_title = f"Black Duck: Upgrade {fix_pr_node['componentName']} to version {fix_pr_node['versionTo']} fix known security vulerabilities"
if pull_request_title in pulls:
if (globals.debug): print(f"DEBUG: Skipping pull request for {fix_pr_node['componentName']}' version '{fix_pr_node['versionFrom']} as it is already present")
continue
if (fix_pr_node['ns'] == "npmjs"):
files_to_patch = NpmUtils.upgrade_npm_dependency(fix_pr_node['filename'], fix_pr_node['componentName'], fix_pr_node['versionFrom'], fix_pr_node['versionTo'])
if (globals.debug): print(f"DEBUG: Files to patch are: {files_to_patch}")
github_commit_file_and_create_fixpr(g, github_token, github_api_url, github_repo, github_branch, files_to_patch, fix_pr_node)
elif (fix_pr_node['ns'] == "maven"):
files_to_patch = MavenUtils.upgrade_maven_dependency(fix_pr_node['filename'], fix_pr_node['componentName'], fix_pr_node['versionFrom'], fix_pr_node['versionTo'])
if (globals.debug): print(f"DEBUG: Files to patch are: {files_to_patch}")
github_commit_file_and_create_fixpr(g, github_token, github_api_url, github_repo, github_branch,
files_to_patch, fix_pr_node)
else:
print(f"INFO: Generating a Fix PR for packages of type '{fix_pr_node['ns']}' is not supported yet")
# Optionally comment on the pull request this is for
if (comment_pr and len(comment_on_pr_comments) > 0):
github_token = os.getenv("GITHUB_TOKEN")
github_repo = os.getenv("GITHUB_REPOSITORY")
github_ref = os.getenv("GITHUB_REF")
github_api_url = os.getenv("GITHUB_API_URL")
github_sha = os.getenv("GITHUB_SHA")
if (github_token == None or github_repo == None or github_ref == None or github_api_url == None or github_sha == None):
print("ERROR: Cannot find GITHUB_TOKEN, GITHUB_REPOSITORY, GITHUB_REF, GTIHUB_SHA and/or GITHUB_API_URL in the environment - are you running from a GitHub action?")
sys.exit(1)
if (globals.debug): print(f"DEBUG: Connect to GitHub at {github_api_url}")
g = Github(github_token, base_url=github_api_url)
if (globals.debug): print(f"DEBUG: Look up GitHub repo '{github_repo}'")
repo = g.get_repo(github_repo)
if (globals.debug): print(repo)
if (globals.debug): print(f"DEBUG: Look up GitHub ref '{github_ref}'")
    # Remove leading refs/ as the API will prepend it on its own.
    # Actually look up the head (not merge) ref to get the latest commit so
    # we can find the pull request.
ref = repo.get_git_ref(github_ref[5:].replace("/merge", "/head"))
if (globals.debug):
print(ref)
# Look for this pull request by finding the first commit, and then looking for a
# PR that matches
    # TODO Safe to assume that there is at least one commit?
github_sha = ref.object.sha
#for commit in ref:
# if (commit['object']['type'] == "commit"):
# github_sha = commit['object']['sha']
# break
#if (github_sha == None):
# print(f"ERROR: Unable to find any commits for ref '{github_ref}'")
# sys.exit(1)
print(f"DEBUG: Found Git sha {github_sha} for ref '{github_ref}'")
# TODO Should this handle other bases than master?
pulls = repo.get_pulls(state='open', sort='created', base='master', direction="desc")
pr = None
pr_commit = None
if (globals.debug): print(f"DEBUG: Pull requests:")
pull_number_for_sha = 0
for pull in pulls:
if (globals.debug): print(f"DEBUG: Pull request number: {pull.number}")
# Can we find the current commit sha?
commits = pull.get_commits()
for commit in commits.reversed:
if (globals.debug): print(f"DEBUG: Commit sha: " + str(commit.sha))
if (commit.sha == github_sha):
if (globals.debug): print(f"DEBUG: Found")
pull_number_for_sha = pull.number
pr = pull
pr_commit = commit
break
if (pull_number_for_sha != 0): break
if (pull_number_for_sha == 0):
print(f"ERROR: Unable to find pull request for commit '{github_sha}'")
sys.exit(1)
    # Tricky here: we want everything in a single comment, so prepare a header,
    # then append each of the per-vulnerability rows and create one comment.
comments_markdown = ["| Component | Type | Vulnerability | Severity | Description | Vulnerable version | Upgrade to |",
"| --- | --- | --- | --- | --- | --- | --- |"]
for comment in comment_on_pr_comments:
comments_markdown.append(comment)
if (globals.debug): print(f"DEUBG: Comment on Pull Request #{pr.number} for commit {github_sha}")
github_create_pull_request_comment(g, github_repo, pr, pr_commit, comments_markdown, "")
if (len(comment_on_pr_comments) > 0):
github_token = os.getenv("GITHUB_TOKEN")
github_repo = os.getenv("GITHUB_REPOSITORY")
github_ref = os.getenv("GITHUB_REF")
github_api_url = os.getenv("GITHUB_API_URL")
github_sha = os.getenv("GITHUB_SHA")
if (github_token == None or github_repo == None or github_ref == None or github_api_url == None or github_sha == None):
print("ERROR: Cannot find GITHUB_TOKEN, GITHUB_REPOSITORY, GITHUB_REF, GTIHUB_SHA and/or GITHUB_API_URL in the environment - are you running from a GitHub action?")
sys.exit(1)
if (globals.debug): print(f"DEBUG: Set check status for commit '{github_sha}', connect to GitHub at {github_api_url}")
g = Github(github_token, base_url=github_api_url)
if (globals.debug): print(f"DEBUG: Look up GitHub repo '{github_repo}'")
repo = g.get_repo(github_repo)
if (globals.debug): print(repo)
status = repo.get_commit(sha=github_sha).create_status(
state="error",
target_url="https://FooCI.com",
description="Black Duck security scan found vulnerabilities",
context="Synopsys Black Duck"
)
if (globals.debug):
print(f"DEBUG: Status:")
print(status)
print(f"INFO: Vulnerable components found, returning exit code 1")
sys.exit(1)
else:
print(f"INFO: No new components found, nothing to report")
sys.exit(0)
|
"""Helper functions to detect settings after app initialization. AKA 'dynamic settings'"""
from distutils.util import strtobool
from functools import lru_cache
#
# X_auth_enabled checks to see if a backend has been specified, thus assuming it is enabled.
# Leverages `lru_cache` since these are called per user session. The wrappers are a
# workaround to pass `lru_cache` a hashable data structure.
#
def remote_auth_enabled(auth_backends):
return _remote_auth_enabled(tuple(auth_backends))
@lru_cache(maxsize=5)
def _remote_auth_enabled(auth_backends):
return "nautobot.core.authentication.RemoteUserBackend" in auth_backends
def sso_auth_enabled(auth_backends):
return _sso_auth_enabled(tuple(auth_backends))
@lru_cache(maxsize=5)
def _sso_auth_enabled(auth_backends):
for backend in auth_backends:
if backend.startswith("social_core.backends"):
return True
return False
def ldap_auth_enabled(auth_backends):
return _ldap_auth_enabled(tuple(auth_backends))
@lru_cache(maxsize=5)
def _ldap_auth_enabled(auth_backends):
return "django_auth_ldap.backend.LDAPBackend" in auth_backends
def is_truthy(arg):
"""Convert "truthy" strings into Booleans.
Examples:
>>> is_truthy('yes')
True
Args:
arg (str): Truthy string (True values are y, yes, t, true, on and 1; false values are n, no,
        f, false, off and 0). Raises ValueError if arg is anything else.
"""
if isinstance(arg, bool):
return arg
return bool(strtobool(str(arg)))
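if __name__ == "__main__":
    # Minimal usage sketch with a hypothetical backend list; in a running app these
    # helpers would normally be passed settings.AUTHENTICATION_BACKENDS.
    backends = [
        "nautobot.core.authentication.RemoteUserBackend",
        "django.contrib.auth.backends.ModelBackend",
    ]
    print(remote_auth_enabled(backends))  # True: the remote backend is configured
    print(sso_auth_enabled(backends))     # False: no social_core backend present
    print(ldap_auth_enabled(backends))    # False: no django_auth_ldap backend
    print(is_truthy("yes"))               # True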
|
"""
Tests for the DJORNL Parser
At the present time, this just ensures that the files are parsed correctly;
it does not check data loading into the db.
These tests run within the re_api docker image.
"""
import json
import unittest
import os
from importers.djornl.parser import DJORNL_Parser
from spec.test.helpers import modified_environ
_TEST_DIR = '/app/spec/test'
class Test_DJORNL_Parser(unittest.TestCase):
@classmethod
def setUpClass(cls):
# import the results file
results_file = os.path.join(_TEST_DIR, 'djornl', 'results.json')
with open(results_file) as fh:
cls.json_data = json.load(fh)
cls.maxDiff = None
def init_parser_with_path(self, root_path):
with modified_environ(RES_ROOT_DATA_PATH=root_path):
parser = DJORNL_Parser()
# ensure that the configuration has been set
parser._configure()
return parser
def test_missing_required_env_var(self):
        '''test that the parser raises an error if the RES_ROOT_DATA_PATH env var is not set'''
with self.assertRaisesRegex(RuntimeError, 'Missing required env var: RES_ROOT_DATA_PATH'):
parser = DJORNL_Parser()
parser.load_edges()
def test_config(self):
'''test that the parser raises an error if a config value cannot be found'''
RES_ROOT_DATA_PATH = os.path.join(_TEST_DIR, 'djornl', 'test_data')
parser = self.init_parser_with_path(RES_ROOT_DATA_PATH)
with self.assertRaisesRegex(KeyError, 'No such config value: bananas'):
parser.config('bananas')
def test_load_no_manifest(self):
""" test loading when the manifest does not exist """
RES_ROOT_DATA_PATH = os.path.join(_TEST_DIR, 'djornl', 'no_manifest')
err_str = 'No manifest file found at ' + os.path.join(RES_ROOT_DATA_PATH, 'manifest.yaml')
with self.assertRaisesRegex(RuntimeError, err_str):
self.init_parser_with_path(RES_ROOT_DATA_PATH)
def test_load_invalid_manifest(self):
""" test an invalid manifest file """
RES_ROOT_DATA_PATH = os.path.join(_TEST_DIR, 'djornl', 'invalid_manifest')
err_str = "The manifest file failed validation"
with self.assertRaisesRegex(RuntimeError, err_str):
self.init_parser_with_path(RES_ROOT_DATA_PATH)
def test_load_invalid_file(self):
""" test loading when what is supposed to be a file is actually a directory """
RES_ROOT_DATA_PATH = os.path.join(_TEST_DIR, 'djornl', 'invalid_file')
# edges: directory, not a file
err_str = os.path.join(RES_ROOT_DATA_PATH, "edges.tsv") + ": not a file"
with self.assertRaisesRegex(RuntimeError, err_str):
self.init_parser_with_path(RES_ROOT_DATA_PATH)
def test_load_empty_files(self):
""" test loading files containing no data """
# path: test/djornl/empty_files
RES_ROOT_DATA_PATH = os.path.join(_TEST_DIR, 'djornl', 'empty_files')
parser = self.init_parser_with_path(RES_ROOT_DATA_PATH)
# header only, no content
err_str = 'aranet2-aragwas-MERGED-AMW-v2_091319_nodeTable.csv: no valid data found'
with self.assertRaisesRegex(RuntimeError, err_str):
parser.load_nodes()
# comments only
err_str = 'merged_edges-AMW-060820_AF.tsv: no header line found'
with self.assertRaisesRegex(RuntimeError, err_str):
parser.load_edges()
# mix of problems
err_str = "\n".join([
'cluster_data/headers_only.tsv: no valid data found',
'cluster_data/no_content.tsv: no header line found',
'cluster_data/comment_only.tsv: no header line found',
])
with self.assertRaisesRegex(RuntimeError, err_str):
parser.load_clusters()
def test_load_missing_files(self):
""" test loading when files cannot be found """
RES_ROOT_DATA_PATH = os.path.join(_TEST_DIR, 'djornl', 'missing_files')
# not found
err_str = os.path.join(RES_ROOT_DATA_PATH, "edges.tsv") + ': file does not exist'
with self.assertRaisesRegex(RuntimeError, err_str):
self.init_parser_with_path(RES_ROOT_DATA_PATH)
def test_load_invalid_edges(self):
""" test file format errors """
# path: test/djornl/invalid_types
RES_ROOT_DATA_PATH = os.path.join(_TEST_DIR, 'djornl', 'invalid_types')
parser = self.init_parser_with_path(RES_ROOT_DATA_PATH)
# invalid edge type, invalid scores
edge_err_msg = "\n".join([
r"edges.tsv line 3: 'Same-Old-Stuff' is not valid under any of the given schemas",
r"edges.tsv line 7: '2.' does not match .*?",
r"edges.tsv line 8: 'raNetv2-DC_' is not valid under any of the given schemas",
r"edges.tsv line 10: 'score!' does not match .*?"
])
with self.assertRaisesRegex(RuntimeError, edge_err_msg):
parser.load_edges()
def test_load_invalid_nodes(self):
""" test file format errors """
# path: test/djornl/invalid_types
RES_ROOT_DATA_PATH = os.path.join(_TEST_DIR, 'djornl', 'invalid_types')
parser = self.init_parser_with_path(RES_ROOT_DATA_PATH)
# invalid node type
node_err_msg = "nodes.csv line 5: 'Monkey' is not valid under any of the given schemas"
with self.assertRaisesRegex(RuntimeError, node_err_msg):
parser.load_nodes()
def test_load_invalid_clusters(self):
""" test file format errors """
# path: test/djornl/invalid_types
RES_ROOT_DATA_PATH = os.path.join(_TEST_DIR, 'djornl', 'invalid_types')
parser = self.init_parser_with_path(RES_ROOT_DATA_PATH)
# invalid node type
cluster_err_msg = "markov2_named.tsv line 7: 'HoneyNutCluster3' does not match"
with self.assertRaisesRegex(RuntimeError, cluster_err_msg):
parser.load_clusters()
def test_load_col_count_errors(self):
""" test files with invalid numbers of columns """
# path: test/djornl/col_count_errors
RES_ROOT_DATA_PATH = os.path.join(_TEST_DIR, 'djornl', 'col_count_errors')
parser = self.init_parser_with_path(RES_ROOT_DATA_PATH)
# not enough cols
edge_err_msg = 'merged_edges-AMW-060820_AF.tsv line 6: expected 5 cols, found 3'
with self.assertRaisesRegex(RuntimeError, edge_err_msg):
parser.load_edges()
# too many cols
node_err_msg = 'aranet2-aragwas-MERGED-AMW-v2_091319_nodeTable.csv line 3: expected 20 cols, found 22'
with self.assertRaisesRegex(RuntimeError, node_err_msg):
parser.load_nodes()
def test_load_valid_edge_data(self):
RES_ROOT_DATA_PATH = os.path.join(_TEST_DIR, 'djornl', 'test_data')
parser = self.init_parser_with_path(RES_ROOT_DATA_PATH)
edge_data = parser.load_edges()
expected = self.json_data["load_edges"]
for data_structure in [edge_data, expected]:
for k in data_structure.keys():
data_structure[k] = sorted(data_structure[k], key=lambda n: n['_key'])
self.assertEqual(edge_data, expected)
def test_load_valid_node_metadata(self):
RES_ROOT_DATA_PATH = os.path.join(_TEST_DIR, 'djornl', 'test_data')
parser = self.init_parser_with_path(RES_ROOT_DATA_PATH)
node_metadata = parser.load_nodes()
expected = self.json_data["load_nodes"]
for data_structure in [node_metadata, expected]:
for k in data_structure.keys():
data_structure[k] = sorted(data_structure[k], key=lambda n: n['_key'])
data_structure[k] = [n['_key'] for n in data_structure[k]]
self.assertEqual(node_metadata, expected)
def test_load_valid_cluster_data(self):
RES_ROOT_DATA_PATH = os.path.join(_TEST_DIR, 'djornl', 'test_data')
parser = self.init_parser_with_path(RES_ROOT_DATA_PATH)
cluster_data = parser.load_clusters()
self.assertEqual(
cluster_data,
self.json_data["load_clusters"]
)
def test_duplicate_edge_data(self):
""" test files with duplicate edge data, which should throw an error """
RES_ROOT_DATA_PATH = os.path.join(_TEST_DIR, 'djornl', 'duplicate_data')
parser = self.init_parser_with_path(RES_ROOT_DATA_PATH)
err_msg = "\n".join([
"hithruput-edges.csv line 5: duplicate data for edge AT1G01010__AT1G01030__AraNetv2-HT_.*?",
"hithruput-edges.csv line 9: duplicate data for edge AT1G01030__AT1G01050__AraNetv2-CX_.*?"
])
with self.assertRaisesRegex(RuntimeError, err_msg):
parser.load_edges()
def test_duplicate_node_data(self):
""" test files with duplicate node data, which should throw an error """
RES_ROOT_DATA_PATH = os.path.join(_TEST_DIR, 'djornl', 'duplicate_data')
parser = self.init_parser_with_path(RES_ROOT_DATA_PATH)
err_msg = "extra_node.tsv line 5: duplicate data for node AT1G01080"
with self.assertRaisesRegex(RuntimeError, err_msg):
parser.load_nodes()
def test_duplicate_cluster_data(self):
""" test files with duplicate cluster data, which should be seamlessly merged """
        # path: test/djornl/duplicate_data
RES_ROOT_DATA_PATH = os.path.join(_TEST_DIR, 'djornl', 'duplicate_data')
parser = self.init_parser_with_path(RES_ROOT_DATA_PATH)
cluster_data = parser.load_clusters()
self.assertEqual(
cluster_data,
self.json_data["load_clusters"]
)
|
from django.shortcuts import redirect, render
from django.views import View
from .forms import TestForm
from .models import Post
class PostUpdate(View):
def get(self, request, pk):
post = Post.objects.get(id=pk)
bound_form = TestForm(instance=post)
return render(request, 'blog/post_update.html', {'form': bound_form, 'post': post})
def post(self, request, pk):
post = Post.objects.get(id=pk)
bound_form = TestForm(request.POST, instance=post)
if bound_form.is_valid():
new_post = bound_form.save()
return redirect(new_post)
return render(request, 'blog/post_update.html', {'form': bound_form, 'post': post})
class PostView(View):
def get(self, request, pk):
post = Post.objects.get(id=pk)
return render(request, 'blog/post_view.html', {'post': post})
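# Note on a possible hardening (not used above): Post.objects.get raises Post.DoesNotExist
# for an unknown pk; django.shortcuts.get_object_or_404(Post, id=pk) is a common alternative
# that turns that case into an HTTP 404 response instead of a server error.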
|
"""Provides a Star class for using stars from the Trident API.
"""
from pykep import epoch, AU
from pykep.planet import keplerian
import requests
from tridentweb.constant import Constant
class Star:
"""Represents a star.
Arguments:
system_id - ID number denoting the solar system.
star_id - ID number denoting the star.
server_url - Trident API server URL. Defaults to http://trident.senorpez.com/
"""
solar_mass = None
grav = None
pykep_planet = None
def __init__(self, system_id, star_id, primary=None, server_url="http://trident.senorpez.com/"):
req = requests.get(server_url)
req.raise_for_status()
systems_url = req.json()['_links']['trident-api:systems']['href']
req = requests.get(systems_url)
req.raise_for_status()
system_url = None
for entry in req.json()['_embedded']['trident-api:system']:
if entry['id'] == system_id:
system_url = entry['_links']['self']['href']
req = requests.get(system_url)
req.raise_for_status()
stars_url = req.json()['_links']['trident-api:stars']['href']
req = requests.get(stars_url)
req.raise_for_status()
star_url = None
for entry in req.json()['_embedded']['trident-api:star']:
if entry['id'] == star_id:
star_url = entry['_links']['self']['href']
req = requests.get(star_url)
req.raise_for_status()
self._primary = primary
self.id = req.json()['id']
self.name = req.json()['name']
self.mass = req.json()['mass']
self.semimajor_axis = None
self.eccentricity = None
self.inclination = None
self.longitude_of_ascending_node = None
self.argument_of_periapsis = None
self.true_anomaly_at_epoch = None
if self._primary is not None:
self.semimajor_axis = req.json()['semimajorAxis']
self.eccentricity = req.json()['eccentricity']
self.inclination = req.json()['inclination']
self.longitude_of_ascending_node = req.json()['longitudeOfAscendingNode']
self.argument_of_periapsis = req.json()['argumentOfPeriapsis']
self.true_anomaly_at_epoch = req.json()['trueAnomalyAtEpoch']
@property
def gm(self):
"""Standard gravitational parameter of the Planet."""
if self.solar_mass is None:
solar_mass_constant = Constant("Msol")
self.solar_mass = solar_mass_constant.value
if self.grav is None:
grav_constant = Constant("G")
self.grav = grav_constant.value
return self.mass * self.solar_mass * self.grav
@property
def planet(self):
"""PyKep object (pykep.planet.keplerian) representation of Planet."""
if self._primary is None:
raise ValueError
if self.pykep_planet is None:
self.pykep_planet = keplerian(
epoch(0),
(
self.semimajor_axis * AU,
self.eccentricity,
self.inclination,
self.longitude_of_ascending_node,
self.argument_of_periapsis,
self.true_anomaly_at_epoch),
self._primary.gm,
self.gm,
1000,
1000,
self.name)
return self.pykep_planet
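# Usage sketch (hypothetical IDs; requires network access to the Trident API, so it is
# shown here as comments only):
#   primary = Star(system_id=1, star_id=1)
#   companion = Star(system_id=1, star_id=2, primary=primary)
#   print(companion.gm)      # mass * Msol * G, in SI units
#   print(companion.planet)  # pykep.planet.keplerian object orbiting the primary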
|
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
from ..models import Product, Category
import requests
class CategoryAPITestCase(TestCase):
def setUp(self):
self.client = APIClient()
self.category_api = {
"name": "API Category"
}
self.response = self.client.post(
reverse('category_create'),
self.category_api,
format="json"
)
self.categories = Category.objects.all()
def test_api_can_create_a_category(self):
"""
Test that api can create a category.
"""
self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)
def test_api_can_list_categories(self):
"""
Test that api can list categories.
"""
response = self.client.get(
reverse('category_list'),
format="json"
)
list_count = response.json()
self.assertEqual(list_count['count'], self.categories.count())
self.assertEqual(response.status_code, status.HTTP_200_OK)
class ProductAPITest(TestCase):
@classmethod
def setUpTestData(cls):
r = requests.get("https://fakestoreapi.com/products")
cls.data = r.json()
def setUp(self):
self.client = APIClient()
self.product_json = {
"name": self.data[0]['title'],
"price": float(self.data[0]['price']),
"description": self.data[0]['description'],
"image_url": self.data[0]['image'],
"category": self.data[0]['category']
}
self.response = self.client.post(
path=reverse('product_create'),
data=self.product_json,
format="json"
)
self.products = Product.objects.all()
def test_api_can_create_a_product(self):
"""
Test that api can create a Product.
"""
self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)
def test_api_can_list_products(self):
"""
Test that api can list products.
"""
response = self.client.get(
reverse('product_list'),
format="json"
)
list_count = response.json()
self.assertEqual(list_count['count'], self.products.count())
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
"""
Read .nyzoblock file from ../data directory and convert to a list of native blocks objects.
No sanity check / validation yet, but checked to match nyzo website for first files.
"""
import sys
sys.path.append('../')
from pynyzo.block import Block
blocks = Block.from_nyzoblock('../data/000000.nyzoblock', verbose=False)
for block in blocks:
print(block.to_string())
# print(block.to_json())
|
##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from __future__ import print_function
from __future__ import absolute_import
import logging
import ping
from yardstick.benchmark.scenarios import base
LOG = logging.getLogger(__name__)
TIMEOUT = 0.05
PACKAGE_SIZE = 64
REPEAT_TIMES = 3000
class PingServer(base.Scenario):
"""Get a server by name"""
__scenario_type__ = "PingServer"
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
self.options = self.scenario_cfg.get('options', {})
def run(self, result):
server_ip = self.options.get('server_ip', '')
connected = 1
for i in range(REPEAT_TIMES):
res = ping.do_one(server_ip, TIMEOUT, PACKAGE_SIZE)
if res:
connected = 0
break
keys = self.scenario_cfg.get('output', '').split()
values = [connected]
return self._push_to_outputs(keys, values)
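# Illustrative configuration sketch (keys inferred from the code above; the exact
# yardstick task-file schema is an assumption, not reproduced from the project docs):
#   scenario_cfg = {
#       "options": {"server_ip": "10.0.0.5"},
#       "output": "connected",
#   }
#   PingServer(scenario_cfg, context_cfg={}).run({})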
|
#!/usr/bin/env python3
"""Some shared testing infrastructure."""
from contextlib import contextmanager
import io
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
import os
import shutil
import sys
import tempfile
class NormalizedStringIO(StringIO):
"""StringIO, with write operations normalized to unicode."""
def __init__(self, buffer=None):
super(NormalizedStringIO, self).__init__(buffer)
@staticmethod
def normalized(s):
# s is Python2 str or unicode, or Python3 str or bytes
# goal is to convert to Python2 unicode, or Python3 str
try:
if type(s) is not unicode:
return s.decode('utf-8')
else:
return s
except NameError:
if type(s) is not str:
return s.decode('utf-8')
else:
return s
def write(self, s):
super(NormalizedStringIO, self).write(self.normalized(s))
    def writelines(self, lines):
lines = [self.normalized(line) for line in lines]
super(NormalizedStringIO, self).writelines(lines)
class Tee(io.TextIOBase):
"""Duplicate all output to a text stream on a StringIO stream.
This class implements a write-only (unreadable, unseekable)
io.TextIOBase based on a given io.TextIOBase stream, and duplicates
    all write operations on an internal StringIO stream. The class can
be used to tee output, e.g., sys.stderr, to a string. To use this
class, make normal calls as if it is the base stream, with the
exception that one can call getvalue() as if it is also a StringIO.
"""
def __init__(self, textio):
try:
if not textio.writable():
raise io.UnsupportedOperation("not writable")
except AttributeError:
# somehow Python2 sys.stderr, a file object, does not have
# the writable method of io.IOBase
pass
self._textio = textio
self._stringio = NormalizedStringIO()
def close(self):
        # Python 2 and all known versions of PyPy enter close
        # incorrectly upon destruction, even though an implementation
# of __del__ is provided below that doesn't touch _textio
# itself. Therefore, we can't call close on self._textio to
# prevent closing stdout or stderr when a Tee goes out of scope.
self._stringio.close()
@property
def closed(self):
return self._textio.closed
def detach(self):
return self._textio.detach()
@property
def encoding(self):
return self._textio.encoding
@property
def errors(self):
return self._textio.errors
def fileno(self):
return self._textio.fileno()
def flush(self):
self._textio.flush()
self._stringio.flush()
def getvalue(self):
return self._stringio.getvalue()
def isatty(self):
return self._textio.isatty()
@staticmethod
def read(size):
raise io.UnsupportedOperation("not readable")
@staticmethod
def readable():
return False
@staticmethod
def readline(size=-1):
raise io.UnsupportedOperation("not readable")
@staticmethod
def readlines(hint=-1):
raise io.UnsupportedOperation("not readable")
@staticmethod
def seek(offset, whence=os.SEEK_SET):
raise io.UnsupportedOperation("not seekable")
@staticmethod
def seekable():
return False
@staticmethod
def tell():
raise io.UnsupportedOperation("not seekable")
@staticmethod
def truncate(size=None):
raise io.UnsupportedOperation("not seekable")
@staticmethod
def writable():
return True
def write(self, s):
bytes_written = self._textio.write(s)
self._stringio.write(s)
return bytes_written
def writelines(self, lines):
self._textio.writelines(lines)
self._stringio.writelines(lines)
def __del__(self):
del self._stringio
@contextmanager
def capture_stdout():
"""Single use context manager for capturing stdout in a StringIO.
The negative effect is that some properties of the stream are
changed, e.g., isatty().
"""
    saved_stdout = sys.stdout
    sys.stdout = NormalizedStringIO()
    try:
        yield
    finally:
        sys.stdout = saved_stdout
@contextmanager
def capture_stderr():
"""Single use context manager for capturing stderr in a StringIO.
The negative effect is that some properties of the stream are
changed, e.g., isatty().
"""
    saved_stderr = sys.stderr
    sys.stderr = NormalizedStringIO()
    try:
        yield
    finally:
        sys.stderr = saved_stderr
@contextmanager
def tee_stderr():
"""Single use context manager for teeing stderr to a StringIO.
"""
    saved_stderr = sys.stderr
    sys.stderr = Tee(sys.stderr)
    try:
        yield
    finally:
        sys.stderr = saved_stderr
@contextmanager
def change_home():
"""Single use context manager for changing HOME to temp directory.
"""
if 'HOME' in os.environ:
saved_home = os.environ['HOME']
else:
saved_home = None
    tmp_home = tempfile.mkdtemp()
    os.environ['HOME'] = tmp_home
    try:
        yield tmp_home
    finally:
        shutil.rmtree(tmp_home)
        if saved_home is not None:
            os.environ['HOME'] = saved_home
        else:
            del os.environ['HOME']
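# Usage sketch (illustrative, not part of the original test suites): the context
# managers above are wrapped around code whose output should be captured or teed;
# getvalue() is available inside tee_stderr() because sys.stderr is temporarily a Tee.
if __name__ == '__main__':
    with tee_stderr():
        sys.stderr.write("hello\n")
        teed = sys.stderr.getvalue()  # copy of everything written while teeing
    with capture_stdout():
        print("swallowed")            # goes to the NormalizedStringIO, not the terminal
    print(repr(teed))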
|
#!/usr/bin/python
# this source is part of my Hackster.io project: https://www.hackster.io/mariocannistra/radio-astronomy-with-rtl-sdr-raspberrypi-and-amazon-aws-iot-45b617
# this program will output the Jupiter-IO radio storm predictions in text format.
# It's a port to python of an older QBasic program whose original source can be found
# at http://www.spaceacademy.net.au/spacelab/projects/jovrad/jovrad.htm together with
# good explanations about the theory and instructions to build a folded dipole antenna.
# please see my project text about the source of the first python porting
# and about the additions I made for geographical specific predictions using SkyField by Brandon Rhodes
from skyfield.api import load, utc
from datetime import datetime, timedelta
from pytz import timezone
from skyfield.api import now, Topos
from math import sin, cos, fmod, degrees, radians, floor, trunc, sqrt
import ephem
import radioConfig
def method2():
global ts, iterDateUTCtime, th, L3, U1
pi = 3.141593
kr = pi / 180
tconv = ts.utc(iterDateUTCtime)
    fjd = tconv.ut1 # we will use ut1 as input to the algorithm
d0 = fjd - 2435108
d = d0 + th / 24.0
v = (157.0456 + .0011159 * d) % 360.0
m = (357.2148 + .9856003 * d) % 360.0
n = (94.3455 + .0830853 * d + .33 * sin(kr * v)) % 360.0
j = (351.4266 + .9025179 * d - .33 * sin(kr * v)) % 360.0
a = 1.916 * sin(kr * m) + .02 * sin(kr * 2.0 * m)
b = 5.552 * sin(kr * n) + .167 * sin(kr * 2.0 * n)
k = j + a - b
r = 1.00014 - .01672 * cos(kr * m) - .00014 * cos(kr * 2.0 * m)
re = 5.20867 - .25192 * cos(kr * n) - .0061 * cos(kr * 2.0 * n)
dt = sqrt(re * re + r * r - 2 * re * r * cos(kr * k))
sp = r * sin(kr * k) / dt
ps = sp / .017452
dl = d - dt / 173.0
pb = ps - b
xi = 150.4529 * int(dl) + 870.4529 * (dl - int(dl))
L3 = (274.319 + pb + xi + .01016 * 51.0) % 360.0
U1 = 101.5265 + 203.405863 * dl + pb
U2 = 67.81114 + 101.291632 * dl + pb
z = (2.0 * (U1 - U2)) % 360.0
U1 = U1 + .472 * sin(kr * z)
U1 = (U1 + 180.0) % 360.0
#L3 = int(L3)
#U1 = int(U1)
def calcforjd():
global th, L3, U1, modeset, JupiterRise, JupiterSet, SunRise, SunSet, includeonlyiorelated, predList
method2()
s=""
# ranges from http://www.spaceacademy.net.au/spacelab/projects/jovrad/jovrad.htm
#~ Source CML (degrees) Io Phase (degrees) Characteristics of emission
#~ Io Related sources
#~ Io-A 200-290 195-265 RH polarized, mostly L bursts
#~ Io-B 90-200 75-105 RH polarized, mostly S bursts
#~ Io-C 290-10 225-250 LH polarized L and S bursts
#~ Non-Io Related Sources
#~ A 200-290
#~ B 90-200
#~ C 290-10
#~ The emission is usually either right (RH) or left hand (LH) circularly
#~ or elliptically polarized, depending on the source.
# ORIGINAL QBASIC ranges from http://www.spaceacademy.net.au/spacelab/projects/jovrad/jovrad.htm
#~if L3 < 255 and L3 > 200 and U1 < 250 and U1 > 220:
#~s = "io-a"
#~if L3 < 180 and L3 > 105 and U1 < 100 and U1 > 80:
#~s = "io-b"
#~if L3 < 350 and L3 > 300 and U1 < 250 and U1 > 230:
#~s = "io-c"
# from which other source?
#~ if L3>200.0 and L3<290.0:
#~ s="A"
#~ if U1>195.0 and U1<265.0:
#~ s="Io-A"
#~ if L3>90.0 and L3<200.0:
#~ s="B"
#~ if U1>75.0 and U1<105.0:
#~ s="Io-B"
#~ if L3>290.0 or L3<10.0:
#~ s="C"
#~ if U1>225.0 and U1<250.0:
#~ s="Io-C"
# first set of range found in papers:
if modeset == 1:
if L3>230.0 and L3<280.0:
s="A"
if L3>200.0 and L3<270.0 and U1>205.0 and U1<260.0:
s="Io-A"
if L3>105.0 and L3<185.0 and U1>80.0 and U1<110.0:
s="Io-B"
if (L3>300.0 or L3<20.0) and U1>225.0 and U1<260.0:
s="Io-C"
if (L3>1.0 and L3<200.0) and U1>95.0 and U1<130.0:
s="Io-D"
# second set of range found in papers:
if modeset == 2:
if L3>200.0 and L3<290.0:
s="A"
if L3>200.0 and L3<290.0 and U1>195.0 and U1<265.0:
s="Io-A"
if L3>90.0 and L3<200.0:
s="B"
if L3>90.0 and L3<200.0 and U1>75.0 and U1<105.0:
s="Io-B"
if (L3>290.0 or L3<20.0):
s="C"
if (L3>290.0 or L3<20.0) and U1>225.0 and U1<250.0:
s="Io-C"
# third set of range found in papers:
if modeset == 3:
if L3>200.0 and L3<290.0:
s="A"
if L3>200.0 and L3<290.0 and U1>195.0 and U1<265.0:
s="Io-A"
if L3>90.0 and L3<200.0:
s="B"
if L3>90.0 and L3<200.0 and U1>75.0 and U1<105.0:
s="Io-B"
if (L3>290.0 or L3<20.0) and U1>225.0 and U1<250.0:
s="Io-C"
if L3>0.0 and L3<200.0 and U1>95.0 and U1<130.0:
s="Io-D"
printout=False
if s != "":
if includeonlyiorelated == True and s[0:2] == "Io":
printout=True
if includeonlyiorelated == False:
printout=True
if printout == True:
predList.append(Prediction(iterDateUTCtime, L3, U1, s, JupiterRise, JupiterSet, SunRise, SunSet))
def ephem2datetime(inputcdt):
resdt = datetime.strptime(inputcdt,'%Y/%m/%d %H:%M:%S')
resdt = resdt.replace(tzinfo=utc)
return resdt
class Prediction(object):
def __init__(self, predts=None, CML3=None, ioPhase=None, source=None, jupRise=None, jupSet=None, sunRise=None, sunSet=None):
self.predts = predts
self.CML3 = CML3
self.ioPhase = ioPhase
self.source = source
self.jupRise = jupRise
self.jupSet = jupSet
self.sunRise = sunRise
self.sunSet = sunSet
def predictStorms(startdate, predictdays, includeonlyiorelatedFlag, calcinterval, modeflag):
global ts, iterDateUTCtime, th, L3, U1, modeset, JupiterRise, JupiterSet, SunRise, SunSet, includeonlyiorelated, predList
modeset = modeflag
includeonlyiorelated = includeonlyiorelatedFlag
planets = load('de421.bsp')
earthPlanet = planets['earth']
inidt = datetime.strptime(startdate, '%Y%m%d')
ts = load.timescale()
initDateUTC = ts.utc(inidt.year, inidt.month, inidt.day, 0.0, 0.0, 0.0 ).utc_datetime()
#print("initDateUTC: ", initDateUTC, type(initDateUTC))
ephemObsPos = ephem.Observer()
    if radioConfig.stationLat[-1:] == "S":
        latSign = "-"
    else:
        latSign = ""
    ephemObsPos.lat = latSign + radioConfig.stationLat[0:-2]
    ephemObsPos.lon = radioConfig.stationLon[0:-2]
ephemObsPos.elev = radioConfig.stationElev
#To get U.S. Naval Astronomical Almanac values, use these settings
ephemObsPos.pressure= 0
# set the horizon that you want to use to one that is exactly 34 arcminutes lower
# than the normal horizon, to match the value by which the Navy reckons that
# an object at the horizon is refracted:
ephemObsPos.horizon = '-0:34'
myposition = earthPlanet + Topos(radioConfig.stationLat, radioConfig.stationLon)
print("pyephem observer position: ", ephemObsPos)
print("myposition: ", myposition)
# 1) the central meridian longitude of Jupiter that faces us
# 2) the position of the inner-most moon Io in its orbit around Jupiter
L3 = 0
U1 = 0
th = 0.0
predList = []
iterDateUTC = initDateUTC
finalDateUTC = initDateUTC + timedelta(days=(predictdays-1))
#external loop on requested days:
while iterDateUTC < finalDateUTC:
        ephemObsPos.date = (iterDateUTC + timedelta(hours=12)).strftime('%Y-%m-%d %H:%M:%S')
JupiterRise = ephem2datetime(str(ephemObsPos.previous_rising(ephem.Jupiter())))
JupiterSet = ephem2datetime(str(ephemObsPos.next_setting (ephem.Jupiter())))
JupiterNextRise = ephem2datetime(str(ephemObsPos.next_rising(ephem.Jupiter())))
SunRise = ephem2datetime(str(ephemObsPos.next_rising(ephem.Sun())))
SunSet = ephem2datetime(str(ephemObsPos.next_setting(ephem.Sun())))
#internal loop within a single day
iterDateUTCtime = iterDateUTC
endOfDayUTCtime = iterDateUTC + timedelta(days=1)
while iterDateUTCtime < endOfDayUTCtime:
#this way i will do the other calcs only when Jupiter is above the local horizon and the Sun is not visible:
if ((iterDateUTCtime > JupiterRise and iterDateUTCtime < JupiterSet) or iterDateUTCtime > JupiterNextRise ) and (iterDateUTCtime < SunRise or iterDateUTCtime > SunSet):
calcforjd()
#calculate again every N minutes:
iterDateUTCtime = iterDateUTCtime + timedelta(minutes=calcinterval)
# since we have covered this day, let's go on with the next one:
iterDateUTC = iterDateUTC + timedelta(days=1)
return predList
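# Illustrative call sketch (the argument values below are assumptions for demonstration,
# not taken from the original project's configuration):
#   preds = predictStorms(startdate="20240101", predictdays=3,
#                         includeonlyiorelatedFlag=True, calcinterval=15, modeflag=2)
#   for p in preds:
#       print(p.predts, p.source, round(p.CML3, 1), round(p.ioPhase, 1))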
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
help_info = """
This script takes a TRUST image and extracts the estimated blood T2 from
the superior sagittal sinus. It then applies a number of models to quantify
venous oxygenation.
The script will create:
1) a folder in the TRUST images directory called 'trust'
2) an excel spreadsheet in this folder called 't2_vals.xlsx' that contains the T2 for each acquisition along with uncertainty info
3) an excel spreadsheet in this folder called 'yv_vals.xlsx' that contains the venous oxygenation using mean T2 applied to various quantification models
input:
-t / --trust : the path to the trust, which is a .nii.gz file
this file should have an associated vlabels file in the same folder
(i.e., if your image is trust.nii.gz, there should be a trust_vlabels.xlsx in the same folder)
-c / --hct : the hematocrit as a float between 0 and 1
optional, but if not supplied then only T2 will be quantified, not Yv
-s / --hbs : the hemoglobin s fraction as a float between 0 and 1
optional, but if not supplied then some Yv models can not be used
-h / --help : brings up this helpful information. does not take an argument
"""
import os
import sys
import getopt
import glob
import shutil
import pathlib
two_up = str((pathlib.Path(__file__) / ".." / "..").resolve())
three_up = str((pathlib.Path(__file__) / ".." / ".." / "..").resolve())
four_up = str((pathlib.Path(__file__) / ".." / ".." / ".." / "..").resolve())
sys.path.append(three_up)
import pandas as pd
import numpy as np
import nibabel as nib
from fsl.wrappers import fslreorient2std, flirt, bet, epi_reg, fast, LOAD
import helpers.registration as regi
from helpers.conversion import parrec_to_nifti, unpack_dims
from helpers.general import read_xfm
####
SUBJECTS_DIR = os.environ['SUBJECTS_DIR']
subjid = 'default_subjid'
run_fs = 1
inp = sys.argv
bash_input = inp[1:]
options, remainder = getopt.getopt(bash_input, "t:c:s:h", ["trust=", 'hct=', 'hbs=', 'help'])
hct = None
hbs = None
for opt, arg in options:
if opt in ('-t', '--trust'):
trust_file = arg
if opt in ('-c', '--hct'):
hct = float(arg)
if opt in ('-s', '--hbs'):
hbs = float(arg)
elif opt in ('-h', '--help'):
print(help_info)
sys.exit()
assert os.path.isfile(trust_file)
trust_base = os.path.basename(os.path.normpath(trust_file))
trust_dir = os.path.dirname(os.path.normpath(trust_file))
trust_core = trust_base.split('.')[0]
trust_vlabels_base = f'{trust_core}_vlabels.xlsx'
trust_vlabels_loc = os.path.join(trust_dir, trust_vlabels_base)
trust_loaded = nib.load(trust_file)
vlabels_loaded = pd.read_excel(trust_vlabels_loc)
trust_unpacked = XXX
|
from json import JsonObject
from draftHost import models
import fantasy, college
class JsonNflPosition(JsonObject):
pass # No extra fields needed
class JsonNflPlayer(JsonObject):
fields = ['first_name', 'last_name']
functions = ['team', 'college',
'nfl_position', 'fantasy_position',
'fantasy_team', 'draft_year']
show_fantasy_team = False
draft = None
def get_team(self):
if self.db_object.team.name != "Unknown":
return JsonNflTeam(self.db_object.team).json_dict()
def get_nfl_position(self):
return JsonNflPosition(self.db_object.position).json_dict()
def get_fantasy_position(self):
return self.db_object.fantasy_position.position.abbreviation
def get_college(self):
return college.JsonCollege(self.db_object.school).json_dict()
def get_fantasy_team(self):
if not self.draft:
return False
selections = models.FantasySelection.objects.filter(
player=self.db_object,
player__fantasyselection__draft_pick__fantasy_team__draft=self.draft
)
if not selections:
return False
fantasy_team = selections[0].draft_pick.fantasy_team
json_team = fantasy.JsonFantasyTeam(fantasy_team)
# Only want the stub team info, shut off the other fields
json_team.show_picks = False
json_team.show_selections = False
return json_team.json_dict()
def get_draft_year(self):
"""Return the draft year only if it's valid"""
if self.db_object.draft_year > 1:
return self.db_object.draft_year
return None
class JsonNflTeam(JsonObject):
fields = ['city',]
functions = ['division', 'players',]
show_players = False
def get_division(self):
return JsonNflDivision(self.db_object.division).json_dict()
def get_players(self):
players = models.NflPlayer.objects.filter(team=self.db_object)
players_json = []
for p in players:
json = JsonNflPlayer(p)
json.show_team = False
players_json.append(json.json_dict())
return players_json
class JsonNflDivision(JsonObject):
functions = ['conference',]
def get_conference(self):
return JsonNflConference(self.db_object.conference).json_dict()
class JsonNflConference(JsonObject):
pass # No extra fields needed
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 10 20:14:07 2021
@author: jorge
"""
from PIL import Image
import matplotlib.pyplot as plt
im1 = Image.open('D:/jorge/Documents/ISPRS/Potsdam/1_DSM/dsm_potsdam_02_10.tif')
width = 2560
height = 2560
im2 = im1.resize((width, height), Image.NEAREST)
im3 = im1.resize((width, height), Image.BILINEAR)
im4 = im1.resize((width, height), Image.BICUBIC)
im5 = im1.resize((width, height), Image.ANTIALIAS)
plt.imshow(im1)
|
from util.module import make_class_instance
from .road import Road
from .curve_builder import CurveBuilder
from .curve_resolver import CurveResolver
class RoadBuilder:
def __init__(self):
self.links = set()
self.lanes = set()
self.curve_builders = {}
self.auto_id_counter = 0
def gen_id(self):
id = '#' + str(self.auto_id_counter)
self.auto_id_counter = self.auto_id_counter + 1
return id
def ensure_id(self, id):
return self.gen_id() if id is None else id
def link(self, from_id, to_id):
self.links.add((from_id, to_id))
return self
def lane(self, from_id, to_id):
self.lanes.add((from_id, to_id))
return self
def curve(self, id=None):
id = self.ensure_id(id)
builder = CurveBuilder()
builder.set_id(id)
self.curve_builders[id] = builder
return builder
def load_and_build(self, road_module_or_file, params = {}):
road_module = road_module_or_file
module_actual_name, road_inst = make_class_instance('py_roads', road_module, 'Road')
road_inst._build(self, params)
road = self.build()
road.description = road_inst.get_description()
road.asciiart = road_inst.get_asciiart()
road.param_descriptions = road_inst.get_param_descriptions()
road.params = params
road.source = module_actual_name
return road
def build(self):
# Load curves, unresolved
road = Road()
self.curves = []
self.curve_table = {}
unresolved_curves = []
# _print_road_ast(road_string)
curve_table = {}
for k, curve_builder in self.curve_builders.items():
new_curve = CurveResolver(curve_builder)
unresolved_curves.append(new_curve)
curve_table[new_curve.id] = new_curve
# Try to iteratively resolve curves, while some resolutions happen
resolved_curves = []
did_resolve_some_curves = True
while len(unresolved_curves) != 0 and did_resolve_some_curves:
resolved_curve_indices = []
for i, unresolved_curve in enumerate(unresolved_curves):
if unresolved_curve.resolve(curve_table) is not None:
resolved_curve_indices.append(i)
did_resolve_some_curves = len(resolved_curve_indices) > 0
for i in reversed(resolved_curve_indices):
resolved_curves.append(unresolved_curves[i].get_resolved())
unresolved_curves.pop(i)
# Raise exception if not all curves could be resolved
if len(unresolved_curves) != 0:
raise Exception(f"Failed to resolve loaded road"
+ f"\n Resolved: {[x.id for x in resolved_curves]}"
+ f"\n Unresolved: {[x.id for x in unresolved_curves]}")
road.curves = resolved_curves
for curve in road.curves:
if curve.id is not None:
road.curve_table[curve.id] = curve
# Link curves
for link in self.links:
link_from, link_to = link[0], link[1]
if link_from not in road.curve_table:
raise Exception('Link from inexistent source: {}'.format(
link_from))
if link_to not in road.curve_table:
raise Exception('Link to inexistent target: {}'.format(
link_to))
curve_from = road.curve_table[link_from]
curve_to = road.curve_table[link_to]
curve_from.add_outgoing_curve(curve_to)
curve_to.add_incoming_curve(curve_from)
# Lane curves
for link in self.lanes:
link_from, link_to = link[0], link[1]
if link_from not in road.curve_table:
raise Exception('Lane from inexistent source: {}'.format(
link_from))
if link_to not in road.curve_table:
raise Exception('Lane to inexistent target: {}'.format(
link_to))
curve_from = road.curve_table[link_from]
curve_to = road.curve_table[link_to]
curve_from.add_outgoing_lane_curve(curve_to)
curve_to.add_incoming_lane_curve(curve_from)
return road
|
# function definitions
def aire_carre(cote):
aire = cote * cote
return aire
def aire_rectangle(hauteur, largeur):
aire = hauteur * largeur
return aire
def aire_carre2(cote):
aire = aire_rectangle(cote, cote)
return aire
def aire_rectangle2(hauteur, largeur=None):
if largeur is None or largeur == hauteur:
aire = aire_carre(hauteur)
else:
aire = hauteur * largeur
return aire
def coords_carre(cote, x0y0):
x0, y0 = x0y0
L = [(x0, y0)]
L.append((x0+cote, y0))
L.append((x0+cote, y0+cote))
L.append((x0, y0+cote))
return L
# function calls
print(aire_carre(4))
print(aire_rectangle(3, 2))
print(aire_carre2(4))
print(aire_rectangle2(4, 2))
print(aire_rectangle2(4, 4))
print(aire_rectangle2(4))
print(coords_carre(1, (0, 0)))
print(coords_carre(4, (-2, 3)))
|
# ==BEGIN LICENSE==
#
# MIT License
#
# Copyright (c) 2018 SRI Lab, ETH Zurich
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ==END LICENSE==
from abc import ABC, abstractmethod
import tensorflow as tf
import numpy as np
from typing import List
from dpfinder.utils.tf.tf_wrapper import TensorFlowWrapper
from dpfinder.logging import logger
# precision
precision_tf = tf.float64
precision_np = np.float64
class State:
def __init__(self, a, b, d, o, est_a, est_b, pas, pbs, eps):
self.a = a
self.b = b
self.d = d
self.o = o
self.est_a = est_a
self.est_b = est_b
self.pas = pas
self.pbs = pbs
self.eps = eps
def __repr__(self):
ret = "\ta:\t{}\n\tb:\t{}\n\td:\t{}\n\to:\t{}\n\teps:\t{}\n\tpa/pb:\t{}/{}".format(
self.a, self.b, self.d, self.o, self.eps, self.est_a, self.est_b)
return ret
def __eq__(self, other):
return self.__dict__ == other.__dict__
def get_list(self):
return [self.a, self.b, self.d, self.o, self.est_a, self.est_b, self.pas, self.pbs, self.eps]
class TensorFlowImplementation(ABC):
a_var = None; b_var = None; d_var = None; o_var = None
randomness_placeholders = None; n_samples_placeholder = None
est_a = None; est_b = None; pas = None; pbs = None
eps = None; loss = None
randomness = None; n_samples = None
tf_wrapper = None
def __init__(self, alg_name, input_shape, d_shape, output_shape, output_dtype):
self.alg_name = alg_name
self.input_shape = input_shape
self.output_shape = output_shape
self.d_shape = d_shape
self.output_dtype = output_dtype
self.o_var = None
@abstractmethod
def get_randomness(self, n_samples):
"""
:param n_samples:
:return: a dictionary, where keys are the placeholders from prepare_randomness_placeholders,
and values hold the randomness necessary to run the algorithm n_samples times
"""
pass
@abstractmethod
def estimate_internal(self, input, output):
"""
:param input:
:param output:
:return: the result for all n_samples runs of the checker function for input and output
"""
pass
@abstractmethod
def prepare_randomness_placeholders(self):
"""
prepare the tensorflow placeholders that hold the randomness needed to run the algorithm
:return:
"""
pass
@abstractmethod
def get_var_to_bounds(self, a, d, o):
"""
Get bounds for the variables limiting, e.g., the distance d between databases
"""
pass
def get_inequalities(self, a, d, o) -> List:
return []
@abstractmethod
def get_b(self, a, d) -> List:
"""
:param a: original database
:param d: distance
:return: the neighbouring database b from a and d
"""
pass
def build_fresh_graph(self):
# build graph
logger.info("Started building graph for algorithm")
self.tf_wrapper = TensorFlowWrapper(self.alg_name)
self.tf_wrapper.build_fresh_graph('eps', self.build_graph_internal)
logger.info("Finished building graph for algorithm")
def build_graph_internal(self):
"""Called internally upon construction. Do not call externally"""
# create tensors
self.a_var = tf.get_variable("a", shape=self.input_shape, dtype=precision_tf)
self.d_var = tf.get_variable("d", shape=self.d_shape, dtype=precision_tf)
self.o_var = tf.get_variable("o", shape=self.output_shape, trainable=False, dtype=self.output_dtype)
self.b_var = self.get_b(self.a_var, self.d_var)
# create placeholders
self.n_samples_placeholder = tf.placeholder(precision_tf)
self.prepare_randomness_placeholders()
# build network
with tf.name_scope("log-estimate-a"):
self.est_a, self.pas = self.estimate(self.a_var, self.o_var)
log_est_a = tf.log(self.est_a)
with tf.name_scope("log-estimate-b"):
self.est_b, self.pbs = self.estimate(self.b_var, self.o_var)
log_est_b = tf.log(self.est_b)
with tf.name_scope("eps"):
self.eps = tf.abs(log_est_a - log_est_b)
with tf.name_scope("loss"):
self.loss = -self.eps
return self.eps
def estimate(self, input, output):
"""
:param input:
:param output:
:return: an estimate of the P[P(input)=output] averaging over the probability estimates using the entries in randomness
"""
p = self.estimate_internal(input, output)
with tf.name_scope("prop-estimate"):
ret = tf.reduce_mean(p)
return ret, p
def fresh_randomness(self, n_samples):
self.n_samples = n_samples
self.randomness = self.get_randomness(n_samples)
def initialize(self, a_init, d_init, o_init):
vars_dict = {self.a_var: a_init, self.d_var: d_init, self.o_var: o_init}
vars_dict = {var: tf.constant(value) for var, value in vars_dict.items()}
feed_dict = self.get_feed_dict()
self.tf_wrapper.initialize(vars_dict, feed_dict)
def get_feed_dict(self):
return {**self.randomness, self.n_samples_placeholder: self.n_samples}
def run(self, x):
return self.tf_wrapper.run(x, self.get_feed_dict())
def run_all(self):
fetches = State(self.a_var, self.b_var, self.d_var, self.o_var,
self.est_a, self.est_b, self.pas, self.pbs, self.eps).get_list()
ret = self.run(fetches)
return State(*ret)
def close(self):
self.tf_wrapper.close()
def get_optimizer(self, n_opt_steps, min_p):
var_to_bounds = self.get_var_to_bounds(self.a_var, self.d_var, self.o_var)
inequalities = [self.est_a - min_p] + self.get_inequalities(self.a_var, self.d_var, self.o_var)
optimizer = self.tf_wrapper.get_optimizer(self.loss, n_opt_steps, var_to_bounds, inequalities)
return optimizer
def minimize(self, optimizer):
self.tf_wrapper.minimize(optimizer, self.get_feed_dict())
|
from flask import render_template, Blueprint, request
from conexao import start_connection_db, close_connection_db
cadastro = Blueprint('cadastro',__name__,static_folder='static',template_folder='templates')
@cadastro.route('', methods=['GET','POST'])
def cadastrar():
print(request.method)
if request.method == 'GET':
return render_template('cadastro.html')
else:
nome = request.form['nome']
email = request.form['email']
pass1 = request.form['pass1']
print('Dados',nome,email,pass1)
cursor, conexao = start_connection_db()
        cursor.execute('INSERT INTO usuario(nome_usuario,email,palavra_passe) VALUES(%s, %s, %s)', (nome, email, pass1))
conexao.commit()
close_connection_db(cursor,conexao)
return render_template('login.html')
|
#!/usr/bin/env python3
# Copyright (C) 2020-2021 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
"""Tests for the `btclib.sig_hash` module.
test vector at https://github.com/bitcoin/bitcoin/blob/master/src/test/data/sighash.json
"""
import json
from os import path
from btclib.ecc import dsa
from btclib.script import sig_hash
from btclib.script.script import serialize
from btclib.tx.tx import Tx
from btclib.tx.tx_in import OutPoint, TxIn
from btclib.tx.tx_out import TxOut
# block 170
def test_first_transaction() -> None:
tx_bytes = "0100000001c997a5e56e104102fa209c6a852dd90660a20b2d9c352423edce25857fcd3704000000004847304402204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181522ec8eca07de4860a4acdd12909d831cc56cbbac4622082221a8768d1d0901ffffffff0200ca9a3b00000000434104ae1a62fe09c5f51b13905f07f06b99a2f7159b2225f374cd378d71302fa28414e7aab37397f554a7df5f142c21c1b7303b8a0626f1baded5c72a704f7e6cd84cac00286bee0000000043410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac00000000"
tx = Tx.parse(tx_bytes)
utxo = TxOut(
value=5000000000,
script_pub_key=bytes.fromhex(
"410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac"
),
)
hash_ = sig_hash.from_tx([utxo], tx, 0, sig_hash.ALL)
assert hash_ == bytes.fromhex(
"7a05c6145f10101e9d6325494245adf1297d80f8f38d4d576d57cdba220bcb19"
)
pub_key = "0411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3"
signature = "304402204E45E16932B8AF514961A1D3A1A25FDF3F4F7732E9D624C6C61548AB5FB8CD410220181522EC8ECA07DE4860A4ACDD12909D831CC56CBBAC4622082221A8768D1D0901"
assert dsa.verify_(hash_, pub_key, bytes.fromhex(signature)[:-1])
# 8fea2a92db2940ebce62610b162bfe0ca13229e08cb384a886a6f677e2812e52
def test_legacy_p2pkh() -> None:
pub_key = "04280c8f66bf2ccaeb3f60a19ad4a06365f8bd6178aab0e709df2173df8f553366549aec336aae8742a84702b6c7c3052d89f5d76d535ec3716e72187956351613"
signature = "3045022100ea43c4800d1a860ec89b5273898a146cfb01d34ff4c364d24a110c480d0e3f7502201c82735577f932f1ca8e1c54bf653e0f8e74e408fe83666bc85cac4472ec950801"
script_sig = serialize([signature, pub_key])
out_point = OutPoint(
"d8343a35ba951684f2969eafe833d9e6fe436557b9707ae76802875952e860fc", 1
)
tx_in = TxIn(out_point, script_sig, 0xFFFFFFFF)
tx_out1 = TxOut(
2017682, bytes.fromhex("76a91413bd20236d0da56492c325dce289b4da35b4b5bd88ac")
)
tx_out2 = TxOut(
1049154982, bytes.fromhex("76a914da169b45781ca210f8c11617ba66bd843da76b1688ac")
)
tx = Tx(1, 0, [tx_in], [tx_out1, tx_out2])
script_pub_key = serialize(
[
"OP_DUP",
"OP_HASH160",
"82ac30f58baf99ec9d14e6181eee076f4e27f69c",
"OP_EQUALVERIFY",
"OP_CHECKSIG",
]
)
utxo = TxOut(1051173696, script_pub_key)
hash_ = sig_hash.from_tx([utxo], tx, 0, sig_hash.ALL)
assert dsa.verify_(hash_, pub_key, bytes.fromhex(signature)[:-1])
# the following tests are taken from python-bitcoinlib tests
def test_p2pk() -> None:
pub_key = "0479BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8"
signature = "304402200A5C6163F07B8D3B013C4D1D6DBA25E780B39658D79BA37AF7057A3B7F15FFA102201FD9B4EAA9943F734928B99A83592C2E7BF342EA2680F6A2BB705167966B742001"
script_pub_key = serialize([pub_key, "OP_CHECKSIG"])
script_sig = serialize([signature])
founding_tx_script = serialize(["OP_0", "OP_0"])
tx_in = TxIn(OutPoint(b"\x00" * 32, 0xFFFFFFFF), founding_tx_script, 0xFFFFFFFF)
funding_tx = Tx(1, 0, [tx_in], [TxOut(0, script_pub_key)])
tx_in = TxIn(OutPoint(funding_tx.id, 0), script_sig, 0xFFFFFFFF)
receiving_tx = Tx(1, 0, [tx_in], [TxOut(0, b"")])
hash_ = sig_hash.from_tx(funding_tx.vout, receiving_tx, 0, sig_hash.ALL)
assert dsa.verify_(hash_, pub_key, bytes.fromhex(signature)[:-1])
def test_p2pkh() -> None:
pub_key = "038282263212C609D9EA2A6E3E172DE238D8C39CABD5AC1CA10646E23FD5F51508"
signature = "304402206E05A6FE23C59196FFE176C9DDC31E73A9885638F9D1328D47C0C703863B8876022076FEB53811AA5B04E0E79F938EB19906CC5E67548BC555A8E8B8B0FC603D840C01"
script_pub_key = serialize(
[
"OP_DUP",
"OP_HASH160",
"1018853670F9F3B0582C5B9EE8CE93764AC32B93",
"OP_EQUALVERIFY",
"OP_CHECKSIG",
]
)
script_sig = serialize([signature, pub_key])
founding_tx_script = serialize(["OP_0", "OP_0"])
tx_in = TxIn(OutPoint(b"\x00" * 32, 0xFFFFFFFF), founding_tx_script, 0xFFFFFFFF)
funding_tx = Tx(1, 0, [tx_in], [TxOut(0, script_pub_key)])
tx_in = TxIn(OutPoint(funding_tx.id, 0), script_sig, 0xFFFFFFFF)
receiving_tx = Tx(1, 0, [tx_in], [TxOut(0, b"")])
hash_ = sig_hash.from_tx(funding_tx.vout, receiving_tx, 0, sig_hash.ALL)
assert dsa.verify_(hash_, pub_key, bytes.fromhex(signature)[:-1])
def test_p2pk_anyonecanpay() -> None:
pub_key = "048282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f5150811f8a8098557dfe45e8256e830b60ace62d613ac2f7b17bed31b6eaff6e26caf"
signature = "304402204710a85181663b32d25c70ec2bbd14adff5ddfff6cb50d09e155ef5f541fc86c0220056b0cc949be9386ecc5f6c2ac0493269031dbb185781db90171b54ac127790281"
script_pub_key = serialize([pub_key, "OP_CHECKSIG"])
script_sig = serialize([signature])
founding_tx_script = serialize(["OP_0", "OP_0"])
tx_in = TxIn(OutPoint(b"\x00" * 32, 0xFFFFFFFF), founding_tx_script, 0xFFFFFFFF)
funding_tx = Tx(1, 0, [tx_in], [TxOut(0, script_pub_key)])
tx_in = TxIn(OutPoint(funding_tx.id, 0), script_sig, 0xFFFFFFFF)
receiving_tx = Tx(1, 0, [tx_in], [TxOut(0, b"")])
hash_ = sig_hash.from_tx(
funding_tx.vout, receiving_tx, 0, sig_hash.ANYONECANPAY | sig_hash.ALL
)
assert dsa.verify_(hash_, pub_key, bytes.fromhex(signature)[:-1])
def test_sig_hashsingle_bug() -> None:
pub_key = "02D5C25ADB51B61339D2B05315791E21BBE80EA470A49DB0135720983C905AACE0"
signature = "3045022100C9CDD08798A28AF9D1BAF44A6C77BCC7E279F47DC487C8C899911BC48FEAFFCC0220503C5C50AE3998A733263C5C0F7061B483E2B56C4C41B456E7D2F5A78A74C07703"
script_pub_key = serialize(
[
"OP_DUP",
"OP_HASH160",
"5b6462475454710f3c22f5fdf0b40704c92f25c3",
"OP_EQUALVERIFY",
"OP_CHECKSIGVERIFY",
"OP_1",
]
)
utxo = TxOut(0, script_pub_key)
tx_bytes = "01000000020002000000000000000000000000000000000000000000000000000000000000000000000151ffffffff0001000000000000000000000000000000000000000000000000000000000000000000006b483045022100c9cdd08798a28af9d1baf44a6c77bcc7e279f47dc487c8c899911bc48feaffcc0220503c5c50ae3998a733263c5c0f7061b483e2b56c4c41b456e7d2f5a78a74c077032102d5c25adb51b61339d2b05315791e21bbe80ea470a49db0135720983c905aace0ffffffff010000000000000000015100000000"
tx = Tx.parse(tx_bytes)
hash_ = sig_hash.from_tx([TxOut(0, ""), utxo], tx, 1, sig_hash.SINGLE)
assert dsa.verify_(hash_, pub_key, bytes.fromhex(signature)[:-1])
def test_test_vectors() -> None:
fname = "sig_hash_legacy_test_vectors.json"
filename = path.join(path.dirname(__file__), "_data", fname)
with open(filename, "r", encoding="ascii") as file_:
data = json.load(file_)
data = data[1:] # skip column headers
for raw_tx, raw_script, input_index, hash_type, exp_hash in data:
script_ = sig_hash.legacy_script(raw_script)[0]
# FIXME: separate invalid transaction from the valid ones
tx = Tx.parse(raw_tx, check_validity=False)
if hash_type < 0:
hash_type += 0xFFFFFFFF + 1
actual_hash = sig_hash.legacy(script_, tx, input_index, hash_type)
assert actual_hash == bytes.fromhex(exp_hash)[::-1]
|
from flask import Flask, jsonify, request # import objects from the Flask module
from email_service import authenticate, send_message, create_message
app = Flask(__name__) # define app using Flask
@app.route('/', methods=['POST'])
def read_html():
html_message = request.data.decode("UTF-8")
recipient = request.headers.get('recipient')
subject = request.headers.get('subject')
service = authenticate()
send_message(service, 'me',
create_message('me', recipient, subject, html_message))
return "hi"
if __name__ == '__main__':
    app.run(debug=True) # run app in debug mode on Flask's default port (5000)
|
import pytorch_lightning as pl
import torch
import torchmetrics
# LightningModule that receives a PyTorch model as input
class LightningClassifier(pl.LightningModule):
def __init__(self, model, learning_rate, log_accuracy):
super().__init__()
self.log_accuracy = log_accuracy
# Note that the other __init__ parameters will be available as
# self.hparams.argname after calling self.save_hyperparameters below
# The inherited PyTorch module
self.model = model
if hasattr(model, "dropout_proba"):
self.dropout_proba = model.dropout_proba
# Save settings and hyperparameters to the log directory
# but skip the model parameters
self.save_hyperparameters(ignore=["model"])
# Set up attributes for computing the accuracy
self.train_acc = torchmetrics.Accuracy()
self.valid_acc = torchmetrics.Accuracy()
self.test_acc = torchmetrics.Accuracy()
# Defining the forward method is only necessary
# if you want to use a Trainer's .predict() method (optional)
def forward(self, x):
return self.model(x)
# A common forward step to compute the loss and labels
# this is used for training, validation, and testing below
def _shared_step(self, batch):
features, true_labels = batch
logits = self(features)
loss = torch.nn.functional.cross_entropy(logits, true_labels)
predicted_labels = torch.argmax(logits, dim=1)
return loss, true_labels, predicted_labels
def training_step(self, batch, batch_idx):
loss, true_labels, predicted_labels = self._shared_step(batch)
self.log("train_loss", loss)
# Do another forward pass in .eval() mode to compute accuracy
        # while accounting for Dropout, BatchNorm etc. behavior
# during evaluation (inference)
self.model.eval()
with torch.no_grad():
_, true_labels, predicted_labels = self._shared_step(batch)
if self.log_accuracy:
self.train_acc(predicted_labels, true_labels)
self.log("train_acc", self.train_acc, on_epoch=True, on_step=False)
self.model.train()
        return loss # this is passed to the optimizer for training
def validation_step(self, batch, batch_idx):
loss, true_labels, predicted_labels = self._shared_step(batch)
self.log("valid_loss", loss)
self.valid_acc(predicted_labels, true_labels)
if self.log_accuracy:
self.log(
"valid_acc",
self.valid_acc,
on_epoch=True,
on_step=False,
prog_bar=True,
)
def test_step(self, batch, batch_idx):
loss, true_labels, predicted_labels = self._shared_step(batch)
self.test_acc(predicted_labels, true_labels)
self.log("test_acc", self.test_acc, on_epoch=True, on_step=False)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
return optimizer
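# Minimal usage sketch (not part of the original module): drives the LightningClassifier
# above with a toy backbone and random data; the tiny model, dataset, and Trainer flags
# below are illustrative assumptions chosen only so the example is self-contained.
if __name__ == "__main__":
    from torch.utils.data import DataLoader, TensorDataset

    toy_backbone = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(16, 3))
    toy_data = TensorDataset(torch.randn(64, 16), torch.randint(0, 3, (64,)))
    toy_loader = DataLoader(toy_data, batch_size=8)

    lit_model = LightningClassifier(toy_backbone, learning_rate=1e-3, log_accuracy=True)
    trainer = pl.Trainer(max_epochs=1, accelerator="cpu", logger=False, enable_checkpointing=False)
    trainer.fit(lit_model, train_dataloaders=toy_loader, val_dataloaders=toy_loader)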
|
# utils/profdata_merge/server.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
# This file contains the server and handler definitions that pass files to
# the merge worker processes.
import SocketServer
import logging
import thread
from main import SERVER_ADDRESS, TESTS_FINISHED_SENTINEL
class ProfdataTCPHandler(SocketServer.StreamRequestHandler):
def report(self, msg):
"""Convenience method for reporting status from the workers."""
logging.info("[ProfdataTCPHandler]: %s" % msg)
def handle(self):
"""Receive a newline-separated list of filenames from a TCP connection
and add them to the shared merge queue, where the workers will
execute llvm-profdata merge commands.
"""
data = self.rfile.read()
self.report("received data (length %d): %s" % (len(data), repr(data)))
# Stop once we receive the sentinel
if data.startswith(TESTS_FINISHED_SENTINEL):
self.report("received sentinel; killing server...")
self.finish()
self.connection.close()
def kill_server(server):
server.shutdown()
# must be killed on another thread, or else deadlock
thread.start_new_thread(kill_server, (self.server,))
else:
# Add all the files to the queue
for f in data.splitlines():
f = f.strip()
if f in self.server.files_merged:
return
self.server.files_merged.add(f)
self.server.file_queue.put(f)
class ProfdataServer(SocketServer.TCPServer, object):
def __init__(self, file_queue):
super(ProfdataServer, self).__init__(SERVER_ADDRESS,
ProfdataTCPHandler)
self.file_queue = file_queue
self.files_merged = set()
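# Illustrative client-side sketch (not part of the original utility): the handler above
# reads newline-separated file paths from a plain TCP connection, and shuts the server
# down once a payload starting with TESTS_FINISHED_SENTINEL arrives.
#
#   import socket
#   sock = socket.create_connection(SERVER_ADDRESS)
#   sock.sendall("first.profraw\nsecond.profraw\n")
#   sock.close()
#   # ...and once every test has finished:
#   sock = socket.create_connection(SERVER_ADDRESS)
#   sock.sendall(TESTS_FINISHED_SENTINEL)
#   sock.close()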
|
# Task 9, Variant 12
# Create a game in which the computer picks a word and the player must guess it.
# The computer tells the player how many letters the word has and gives five attempts to find out whether
# a given letter occurs in the word; the program may only answer "Yes" or "No". After that the player must try to guess the word.
# Мамедов Р.А.
# 10.05.2016
import random
word = ("слава","победа","память","гордость","парад","салют","праздник")
varik=""
comp=random.choice(word)
quantity=5
attempt=1
print('у вас 5 попыток отгадать слово')
print("Угадайте заданное слово из ниже перечисленных")
print (word)
while varik != comp and quantity > 0:
if quantity == 5 :
if (input("нужны ли Вам подсказки?")) == "да" :
print("Длина заданного слова = :",len(comp))
else :
if quantity <5:
if (input("нужна ли Вам ещё подсказка?")) == "да" :
symbol=input("Назовите букву и я скажу - есть ли она в слове : ")
if symbol in comp :
print("Эта буква присутствует в слове")
else :
print ("буква отсутвует")
quantity=quantity-1
varik=input("Ваш вариант :")
print("Попытка :",attempt)
attempt=attempt+1
if varik==comp :
print("Вы угадали!")
else :
print('Вы проиграли!!! Правильный ответ: ', comp )
input ('Нажмите Enter для выхода')
|
# This file holds the base algorithms which manipulate the graph and do flow simulation
# It avoids dealing with UI and other concerns
from functools import partial
from collections import defaultdict
import tqdm
def check_nodes_equal_height(node_a, node_b, state, settings):
return (
settings.height_map[state.points[0][1] + node_a[1], state.points[0][0] + node_a[0]]
== settings.height_map[state.points[0][1] + node_b[1], state.points[0][0] + node_b[0]]
)
def equal_height_node_merge(state, settings, store_node_movements=True):
# If two nodes on the graph are the same height and adjacent then merge them into one node.
# The reason for this is to allow flow to cross large flat sections
    # otherwise the water wouldn't know which way to flow
from animations import _get_adjacent_nodes
node_merge_operations = []
skip_nodes = set()
node_check_func = partial(check_nodes_equal_height, state=state, settings=settings)
for x in tqdm.tqdm(range(state.selection_pixel_size[0]), desc='Processing rows for equal height merge'):
for y in range(state.selection_pixel_size[1]):
if (x, y) not in skip_nodes:
height = settings.height_map[state.points[0][1] + y, state.points[0][0] + x]
visited, queue = set(), [(x, y)]
while queue:
vertex = queue.pop(0)
if vertex not in visited:
visited.add(vertex)
queue.extend(set(_get_adjacent_nodes(vertex, state, node_check_func)) - visited - skip_nodes)
if visited != {(x, y)}:
node_merge_operations.append(visited)
for node in visited:
skip_nodes.add(node)
if not store_node_movements:
return None, skip_nodes, node_merge_operations
node_movements = {}
for merging_nodes in node_merge_operations:
new_location = (
sum(x for x, y in merging_nodes) / len(merging_nodes),
sum(y for x, y in merging_nodes) / len(merging_nodes),
)
for node in merging_nodes:
node_movements[node] = new_location
return node_movements, skip_nodes, node_merge_operations
def create_graph(node_merge_operations, skip_nodes, non_skip_nodes, state):
from animations import _get_adjacent_nodes
# For every original position
# Lookup the current positions new key
# Loop through the adjacent nodes
# Find the new keys for those adjacent nodes
# Filter out all adjacent nodes which have the same key as the current node
# Add an edge to the adjacent new nodes
graph = defaultdict(list)
new_key = {}
for merging_nodes in tqdm.tqdm(node_merge_operations, desc='Creating new merged nodes'):
sorted_merging_nodes = tuple(sorted(merging_nodes))
for node in merging_nodes:
new_key[node] = sorted_merging_nodes
for node in tqdm.tqdm(sorted(list(skip_nodes) + non_skip_nodes), desc='Processing other nodes'):
node_key = new_key.get(node, (node,))
adjacent_nodes = _get_adjacent_nodes(node, state)
for adjacent_node in adjacent_nodes:
adjacent_node_key = new_key.get(adjacent_node, (adjacent_node,))
if adjacent_node_key != node_key:
graph[node_key].append(adjacent_node_key)
return dict(graph)
def get_height_by_key(key, state):
return state.selected_area_height_map[key[0][1], key[0][0]]
def does_node_touch_border(node, state):
if node[0] == 0:
return True
if node[1] == 0:
return True
if node[0] == state.selection_pixel_size[0] - 1:
return True
if node[1] == state.selection_pixel_size[1] - 1:
return True
return False
def find_low_nodes(graph, state):
low_nodes = []
for node_key, adjacent_nodes in graph.items():
if any(does_node_touch_border(node, state) for node in node_key):
continue
height = get_height_by_key(node_key, state)
for adjacent_node_key in adjacent_nodes:
if height > get_height_by_key(adjacent_node_key, state):
break
else:
low_nodes.append(node_key)
return low_nodes
def calculate_watershed(state, source=None):
if source:
node_flows = defaultdict(float)
node_flows[source] = 1
else:
node_flows = {key: len(key) for key in state.graph}
for node in sorted(state.graph, key=lambda node: get_height_by_key(node, state), reverse=True):
if source is not None and node_flows[source] == 0:
continue
if node_flows[node] == 0:
continue
node_height = get_height_by_key(node, state)
if not any(does_node_touch_border(i, state) for i in node):
outflows = []
for neighbour in state.graph[node]:
neighbour_height = get_height_by_key(neighbour, state)
if neighbour_height < node_height:
outflows.append((neighbour, node_height - neighbour_height))
assert outflows, "Every node should have an outflow direction"
# Only send flow in multiple directions if it's fairly even. Try to avoid tiny fractional flows
largest_outflow_height = max(i[1] for i in outflows)
outflow_height_threshold = largest_outflow_height / 2
total_outflow_height = sum(i[1] for i in outflows if i[1] > outflow_height_threshold)
for neighbour, outflow_height in outflows:
if outflow_height > outflow_height_threshold:
node_flows[neighbour] += node_flows[node] * outflow_height / total_outflow_height
return node_flows, None
def calculate_flow(state, num_cycles, source=None):
if source:
node_flows = {source: 1}
else:
node_flows = {key: len(key) for key in state.graph}
yield node_flows
for i in range(num_cycles):
sorted_nodes = sorted(node_flows, key=lambda node: get_height_by_key(node, state))
for node in sorted_nodes:
node_height = get_height_by_key(node, state)
if not any(does_node_touch_border(i, state) for i in node):
outflows = []
for neighbour in state.graph[node]:
neighbour_height = get_height_by_key(neighbour, state)
if neighbour_height < node_height:
outflows.append((neighbour, node_height - neighbour_height))
assert outflows
total_outflow_height = sum(i[1] for i in outflows)
for neighbour, outflow_height in outflows:
if neighbour in node_flows:
node_flows[neighbour] += node_flows[node] * outflow_height / total_outflow_height
else:
node_flows[neighbour] = node_flows[node] * outflow_height / total_outflow_height
del node_flows[node]
if not node_flows:
break
yield node_flows
|
from django.shortcuts import render, redirect, reverse, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib.auth import get_user_model
from django.http import HttpResponse
from django.views.generic import CreateView, ListView, DetailView, TemplateView
from django.views import View
from .models import Post
from . import forms
# Create your views here.
class ProfileView(TemplateView):
template_name = 'social/profile.html'
class UserDetailView(DetailView):
model = get_user_model()
template_name = 'social/user_detail.html'
context_object_name = 'detail_user'
def get(self, request, pk):
if pk == request.user.pk:
return redirect('profile')
return super(UserDetailView, self).get(request, pk)
class UserListView(ListView):
model = get_user_model()
template_name = 'social/user_list.html'
context_object_name = 'users'
class PostView(DetailView):
model = Post
template_name = 'social/post_detail.html'
comment_model_form = forms.CreateCommentForm
def get_context_data(self, **kwargs):
context = super(PostView, self).get_context_data(**kwargs)
context['comment_form'] = self.comment_model_form
return context
def post(self):
form = self.comment_model_form(data=self.request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.post = self.request
print(comment.post)
return self.get(self.request)
else:
return self.get(self.request)
class PostDetailView(View):
model = Post
template_name = 'social/post_detail.html'
comment_model_form = forms.CreateCommentForm
def get(self, request, pk):
context = self.get_context_data(pk=pk)
return render(request, self.template_name, context=context)
def post(self, request, pk):
context = self.get_context_data(pk=pk)
form = self.comment_model_form(data=request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.author = request.user
comment.post = context['post']
comment.save()
return self.get(request, pk)
else:
context['comment_form'] = form
return self.get(request, pk)
def get_context_data(self, **kwargs):
context = {}
context['post'] = get_object_or_404(Post, pk=kwargs['pk'])
context['comment_form'] = self.comment_model_form
context['comments'] = context['post'].comments.all().order_by('-timestamp')
return context
class CreatePostView(CreateView):
model = Post
fields = ['title', 'body']
template_name = "social/create_post.html"
def form_valid(self, form):
post = form.save(commit=False)
post.author = self.request.user
post.save()
return redirect(post.get_absolute_url())
def get_form(self, form_class=None):
form = super().get_form(form_class)
for key in form.fields:
form.fields[key].widget.attrs = {'placeholder': key}
form.fields['title'].widget.attrs['class'] = 'title'
form.fields['body'].widget.attrs['class'] = 'resize'
return form
|
# Given a 2D list, let's call an element "smooth" if the index of the
# element in its 1D list plus the element is even. For example,
# given the 2D list [[0, 4], [2, 6]], the 1st element of each of the
# 1D lists is considered "smooth" because 0 + 0 is 0 and 0 + 2 is 2
# (both are even numbers). Find the maximum "smooth" element and
# print it. Using the example [[0, 4], [2, 6]] again, the maximum
# "smooth" element is 2 because 2 is bigger than 0.
two_d_list = [[425, 214, 412, 123], [312, 214, 123, 343]]
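# One possible solution sketch (the original snippet states only the problem and the data
# above; the loop below is an illustrative answer, not the original author's code).
max_smooth = None
for row in two_d_list:
    for index, element in enumerate(row):
        if (index + element) % 2 == 0:  # "smooth": index plus value is even
            if max_smooth is None or element > max_smooth:
                max_smooth = element
print(max_smooth)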
|
import os
import re
from cort.core.spans import Span
from cort.core.corpora import Corpus
from _collections import defaultdict
from nltk import cluster
import codecs
import subprocess
import shutil
import fileinput
# annotator = 'minh'
annotator = 'cltl'
metrics = 'ceafm'
scorer_path = 'reference-coreference-scorers/v8.01/scorer.pl'
dirs = [
('data/conll-2012-manipulated/mentions_instance_position-dev',
'manual-annotation/%s/mentions_instance_position' %annotator),
('data/conll-2012-manipulated/position_events-dev',
'manual-annotation/%s/position_events' %annotator),
]
def check_text_files(raw_dir, ann_dir):
raw_fnames = set(p for p in os.listdir(raw_dir) if re.search(r'\.txt$', p))
ann_fnames = set(p for p in os.listdir(ann_dir) if re.search(r'\.txt$', p))
assert ann_fnames.issubset(raw_fnames)
for fname in ann_fnames:
with open(os.path.join(ann_dir, fname)) as f: ann_txt = f.read()
with open(os.path.join(raw_dir, fname)) as f: raw_txt = f.read()
assert ann_txt == raw_txt
def build_coref_set(path):
clusters = defaultdict(set)
with open(path) as f:
for line in f:
            m = re.match(r'R\d+\tCoreference\s+\w+:(T\d+)\s+\w+:(T\d+)', line)
if m:
men1, men2 = m.group(1), m.group(2)
c1, c2 = clusters[men1], clusters[men2]
c1.update(c2) # merge into one cluster
for men in c2: # update mapping
clusters[men] = c1
c1.update({men1, men2})
last_set_id = 0
coref = {}
for c in set(tuple(c) for c in clusters.values()):
for men in c:
coref[men] = last_set_id
last_set_id += 1
return coref
def read_mapping_file(path):
mapping = {}
with open(path) as f:
for line in f:
fields = line.strip().split('\t')
mapping[fields[0]] = Span(int(fields[1]), int(fields[2]))
return mapping
def cat(inp_paths, out_path):
''' Similar to Linux's cat command '''
with open(out_path, 'w') as f_out,\
fileinput.input(inp_paths) as f_inp:
for line in f_inp: f_out.write(line)
def create_conll_files(raw_dir, ann_dir):
ann_fnames = (p for p in os.listdir(ann_dir) if re.search(r'\.ann$', p))
conll_files = []
for fname in ann_fnames:
coref = build_coref_set(os.path.join(ann_dir, fname))
mapping_path = os.path.join(raw_dir, re.sub(r'\.ann$', '.mapping', fname))
mapping = read_mapping_file(mapping_path)
coref2 = dict((mapping[key], val) for key, val in coref.items()
if key in mapping)
inp_file = os.path.join(raw_dir, re.sub(r'\.ann$', '', fname))
out_file = os.path.join(ann_dir, re.sub(r'\.ann$', '', fname))
with codecs.open(inp_file, 'r', "utf-8") as f: corpus = Corpus.from_file('', f)
assert len(corpus.documents) == 1
doc = corpus.documents[0]
doc.system_mentions = doc.annotated_mentions
for m in doc.system_mentions:
if m.span in coref2:
m.attributes['set_id'] = str(coref2[m.span])
with codecs.open(out_file, 'w', "utf-8") as f: corpus.write_to_file(f)
conll_files.append((inp_file, out_file))
return conll_files
if __name__ == '__main__':
for raw_dir, ann_dir in dirs:
check_text_files(raw_dir, ann_dir)
paths = create_conll_files(raw_dir, ann_dir)
# for gold, ann in paths:
# cmd = '%s %s %s %s' %(scorer_path, metrics, gold, ann)
# print('\n\n\n%s\n%s' %('='*80, cmd))
# subprocess.run(cmd, shell=True)
gold_paths, ann_paths = zip(*paths)
gold_all_path = 'output/exp-manual-annotation-gold.conll'
ann_all_path = 'output/exp-manual-annotation-ann.conll'
cat(gold_paths, gold_all_path)
cat(ann_paths, ann_all_path)
cmd = '%s %s %s %s' %(scorer_path, metrics, gold_all_path, ann_all_path)
print('\n\n\n%s\n%s' %('='*80, cmd))
subprocess.run(cmd, shell=True)
|
"""Tests for the ``signed_request`` module."""
from datetime import datetime, timedelta
from nose.tools import *
from aiofacepy import SignedRequest
TEST_ACCESS_TOKEN = '181259711925270|1570a553ad6605705d1b7a5f.1-499729129|8XqMRhCWDKtpG-i_zRkHBDSsqqk'
TEST_SIGNED_REQUEST = u'' \
'mnrG8Wc9CH_rh-GCqq97GFAPOh6AY7cMO8IYVKb6Pa4.eyJhbGdvcml0aG0iOi' \
'JITUFDLVNIQTI1NiIsImV4cGlyZXMiOjAsImlzc3VlZF9hdCI6MTMwNjE3OTkw' \
'NCwib2F1dGhfdG9rZW4iOiIxODEyNTk3MTE5MjUyNzB8MTU3MGE1NTNhZDY2MD' \
'U3MDVkMWI3YTVmLjEtNDk5NzI5MTI5fDhYcU1SaENXREt0cEctaV96UmtIQkRT' \
'c3FxayIsInVzZXIiOnsiY291bnRyeSI6Im5vIiwibG9jYWxlIjoiZW5fVVMiLC' \
'JhZ2UiOnsibWluIjoyMX19LCJ1c2VyX2lkIjoiNDk5NzI5MTI5In0'
TEST_SIGNED_REQUEST_UNKNOWN_ALGORITHM = u'' \
'HjPZBDNttKrX_DBxH-fD78wmqP5O7eDcvjE9ToayKb0=.eyJ1c2VyX2lkIjoiN' \
'Dk5NzI5MTI5IiwiYWxnb3JpdGhtIjoiVU5LTk9XTl9BTEdPUklUSE0iLCJleHB' \
'pcmVzIjowLCJvYXV0aF90b2tlbiI6IjE4MTI1OTcxMTkyNTI3MHwxNTcwYTU1M' \
'2FkNjYwNTcwNWQxYjdhNWYuMS00OTk3MjkxMjl8OFhxTVJoQ1dES3RwRy1pX3p' \
'Sa0hCRFNzcXFrIiwidXNlciI6eyJsb2NhbGUiOiJlbl9VUyIsImNvdW50cnkiO' \
'iJubyIsImFnZSI6eyJtYXgiOjk5LCJtaW4iOjIxfX0sImlzc3VlZF9hdCI6MTM' \
'wNjE3OTkwNH0='
TEST_SIGNED_REQUEST_MISSING_PAGE_DATA = u'' \
'9B19RL7tj3nvf_SA8_PSFxTZxc7xA3LEjl2ww-OGRlk=.eyJ1c2VyX2lkIjoiN' \
'Dk5NzI5MTI5IiwiYWxnb3JpdGhtIjoiSE1BQy1TSEEyNTYiLCJleHBpcmVzIjo' \
'wLCJvYXV0aF90b2tlbiI6IjE4MTI1OTcxMTkyNTI3MHwxNTcwYTU1M2FkNjYwN' \
'TcwNWQxYjdhNWYuMS00OTk3MjkxMjl8OFhxTVJoQ1dES3RwRy1pX3pSa0hCRFN' \
'zcXFrIiwidXNlciI6eyJsb2NhbGUiOiJlbl9VUyIsImNvdW50cnkiOiJubyIsI' \
'mFnZSI6eyJtYXgiOjk5LCJtaW4iOjIxfX0sImlzc3VlZF9hdCI6MTMwNjE3OTk' \
'wNCwicGFnZSI6e319'
TEST_FACEBOOK_APPLICATION_SECRET_KEY = '214e4cb484c28c35f18a70a3d735999b'
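# Each fixture above is a Facebook-style signed request: a base64url-encoded
# signature and a base64url-encoded JSON payload joined by a dot. The payloads
# decode to the dicts asserted in the tests below.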
def test_parse_signed_request():
signed_request = SignedRequest.parse(
signed_request=TEST_SIGNED_REQUEST,
application_secret_key=TEST_FACEBOOK_APPLICATION_SECRET_KEY
)
assert signed_request == {
'user_id': '499729129',
'algorithm': 'HMAC-SHA256',
'expires': 0,
'oauth_token': '181259711925270|1570a553ad6605705d1b7a5f.1-499729129|8XqMRhCWDKtpG-i_zRkHBDSsqqk',
'user': {
'locale': 'en_US',
'country': 'no',
'age': {'min': 21}
},
'issued_at': 1306179904
}
def test_parse_invalid_signed_request():
assert_raises(
SignedRequest.Error,
SignedRequest,
signed_request="<invalid signed request>",
application_secret_key=TEST_FACEBOOK_APPLICATION_SECRET_KEY
)
def test_initialize_signed_request():
signed_request = SignedRequest(
signed_request=TEST_SIGNED_REQUEST,
application_secret_key=TEST_FACEBOOK_APPLICATION_SECRET_KEY
)
assert signed_request.user.id == '499729129'
assert signed_request.user.oauth_token.token == TEST_ACCESS_TOKEN
assert signed_request.user.oauth_token.expires_at is None
assert signed_request.raw == {
'user_id': '499729129',
'algorithm': 'HMAC-SHA256',
'expires': 0,
'oauth_token': '181259711925270|1570a553ad6605705d1b7a5f.1-499729129|8XqMRhCWDKtpG-i_zRkHBDSsqqk',
'user': {
'locale': 'en_US',
'country': 'no',
'age': {'min': 21}
},
'issued_at': 1306179904
}
def test_signed_request_missing_page_data():
try:
SignedRequest(TEST_SIGNED_REQUEST_MISSING_PAGE_DATA, TEST_FACEBOOK_APPLICATION_SECRET_KEY)
except KeyError:
raise AssertionError('Missing page data in signed request')
def test_signed_request_page_url():
page = SignedRequest.Page(id=1)
assert page.url == 'http://facebook.com/1'
def test_signed_request_user_profile_url():
user = SignedRequest.User(id=1)
assert user.profile_url == 'http://facebook.com/1'
def test_signed_request_user_has_authorized_application():
oauth_token = SignedRequest.User.OAuthToken(
token='<token>',
issued_at=datetime.now(),
expires_at=None
)
user = SignedRequest.User(id=1, oauth_token=oauth_token)
assert user.has_authorized_application is True
user = SignedRequest.User(id=1, oauth_token=None)
assert user.has_authorized_application is False
def test_signed_request_user_oauth_token_has_expired():
today = datetime.now()
yesterday = today - timedelta(days=1)
tomorrow = today + timedelta(days=1)
oauth_token = SignedRequest.User.OAuthToken(
token='<token>',
issued_at=yesterday,
expires_at=None,
)
assert oauth_token.has_expired is False
oauth_token = SignedRequest.User.OAuthToken(
token='<token>',
issued_at=yesterday,
expires_at=tomorrow
)
assert oauth_token.has_expired is False
oauth_token = SignedRequest.User.OAuthToken(
token='<token>',
issued_at=yesterday,
expires_at=yesterday
)
assert oauth_token.has_expired is True
def test_generate_signed_request():
signed_request = SignedRequest(
signed_request=TEST_SIGNED_REQUEST,
application_secret_key=TEST_FACEBOOK_APPLICATION_SECRET_KEY
)
    signed_request = signed_request.generate()  # only verifies that generating a request from the parsed data does not raise
def test_parse_signed_request_unknown_algorithm():
assert_raises(
SignedRequest.Error,
SignedRequest.parse,
signed_request=TEST_SIGNED_REQUEST_UNKNOWN_ALGORITHM,
application_secret_key=TEST_FACEBOOK_APPLICATION_SECRET_KEY
)
def test_parse_signed_request_incorrect_signature():
encoded_signature, _ = (str(string) for string in TEST_SIGNED_REQUEST_UNKNOWN_ALGORITHM.split('.', 2))
_, encoded_payload = (str(string) for string in TEST_SIGNED_REQUEST.split('.', 2))
assert_raises(
SignedRequest.Error,
SignedRequest.parse,
signed_request=u"%s.%s" % (encoded_signature, encoded_payload),
application_secret_key=TEST_FACEBOOK_APPLICATION_SECRET_KEY
)
|
import inspect
import os
import sys
from glob import glob

import h5py
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pyspawn

import bokeh
from bokeh.io import curdoc, output_notebook
from bokeh.layouts import column, row, widgetbox
from bokeh.models import CategoricalColorMapper, CheckboxGroup, ColorMapper, ColumnDataSource, Slider
from bokeh.models.glyphs import MultiLine
from bokeh.models.widgets import TextInput
from bokeh.palettes import Category20, Paired, Spectral11
from bokeh.plotting import figure, show

import ipywidgets as widgets
from ipywidgets import interactive
from IPython.display import display
h5filename = "sim.hdf5"
trajfilename = "working.hdf5"
trajfile = h5py.File(trajfilename, "r")
#full_H = trajfile["traj_00"].attrs["full_H"]
#krylov_sub_n = trajfile["traj_00"]["krylov_sub_n"]
h5file = h5py.File(h5filename, "r")
#print h5file["sim"].attrs.keys()
an = pyspawn.fafile("sim.hdf5")
work = pyspawn.fafile("working.hdf5")
# create N.dat and store the data in times and N
an.fill_nuclear_bf_populations(column_filename = "N.dat")
an.fill_trajectory_populations(column_file_prefix = "Pop")
an.fill_labels()
# write files with energy data for each trajectory
an.fill_trajectory_energies(column_file_prefix="E")
labels = an.datasets["labels"]
ntraj = len(an.datasets["labels"])
total_el_pop = an.datasets["el_pop"]
N = an.datasets["nuclear_bf_populations"]
qm_time = an.datasets["quantum_times"]
def print_classes():
for name, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(obj):
print(obj)
nstates = 9
arrays = ("poten", "pop", "toten", "aven", "kinen", "time")
# Explicit per-trajectory dictionaries (keyed by trajectory label) instead of
# creating the names via exec().
poten, pop, toten, aven, kinen, time = ({} for _ in arrays)
for traj in an.datasets["labels"]:
poten[traj] = an.datasets[traj + "_poten"]
pop[traj] = an.datasets[traj + "_pop"]
toten[traj] = an.datasets[traj + "_toten"]
aven[traj] = an.datasets[traj +"_aven"]
kinen[traj] = an.datasets[traj + "_kinen"]
time[traj] = an.datasets[traj + "_time"]
mypalette=Category20[nstates]
qm_time_array = []
total_el_pop_array = []
for n in range(nstates):
total_el_pop_array.append(total_el_pop[:,n].tolist())
qm_time_array.append(qm_time.tolist())
data_qm = {'qm_time': qm_time_array,
'total_el_pop':total_el_pop_array,
'colors': mypalette,
'states':[str(n)+ ' state' for n in range(nstates)]}
source_qm = ColumnDataSource(data_qm)
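# data_qm holds one polyline per electronic state: the quantum-time axis is
# repeated nstates times so every column of total_el_pop has a matching x
# list for the multi_line glyph below.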
colors = ["red", "green", "blue", "magenta", "yellow", "purple", 'darkmagenta',"darlsalmon", 'gold', 'black']
color_mapper = CategoricalColorMapper(factors=labels, palette=Category20[20])
plot = figure(plot_height=450, plot_width=600, title="Total Energy")
plot2 = figure(plot_height=450, plot_width=600, title="Ehrenfest Energy")
plot3 = figure(plot_height=450, plot_width=600, title="Electronic population")
plot4 = figure(plot_height=450, plot_width=600, title="Energies Test")
times = []
e = []
av_en = []
elec_pop = []
pot_en = []
labels_full = []
labels_array = []
nstates_array = []
for key in labels:
for nstate in range(nstates):
times.append(time[key])
e.append(toten[key])
pot_en.append(poten[key][:,nstate])
av_en.append(aven[key])
elec_pop.append(pop[key])
labels_full.append(key +":"+ str(nstate)+'state'),
labels_array.append(key)
nstates_array.append(str(nstate))
data = {
'time': times,
'tot_e': e,
'av_en': av_en,
'pot_en': pot_en,
'labels' : labels_array,
'labels_full': labels_full,
'elec_pop' : elec_pop,
'states': nstates_array
}
color_mapper_qm = CategoricalColorMapper(factors= [str(n) for n in range(nstates)], palette=Category20[nstates])
source = ColumnDataSource(data)
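# Bokeh callback: whenever the set of active trajectories in the CheckboxGroup
# changes, rebuild the per-(trajectory, state) columns and push them into the
# shared ColumnDataSource so all multi_line glyphs update in place.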
def update_plot(attr, old, new):
new_labels = []
for n in checkbox.active:
new_labels.append(checkbox.labels[n])
times = []
tot_energies = []
av_en = []
elec_pop = []
pot_en = []
labels_full = []
new_labels_array = []
nstates_array = []
for key in new_labels:
for nstate in range(nstates):
times.append(time[key][:,0].tolist())
tot_energies.append(toten[key][:,0].tolist())
av_en.append(aven[key][:,0].tolist())
pot_en.append(poten[key][:,nstate])
elec_pop.append(pop[key][:,0].tolist())
new_labels_array.append(key),
labels_full.append(key +":"+ str(nstate)+'state')
nstates_array.append(str(nstate))
new_data = {
'time': times,
'tot_e': tot_energies,
'av_en': av_en,
'pot_en': pot_en,
'labels': new_labels_array,
'labels_full': labels_full,
'elec_pop' : elec_pop,
'states': nstates_array
}
source.data = new_data
plot.multi_line(xs='time', ys='tot_e', source=source, color=dict(field='labels', transform=color_mapper), legend='labels', line_width=2)
plot2.multi_line(xs='time', ys='av_en', source=source, color=dict(field='labels', transform=color_mapper), legend='labels', line_width=2)
plot3.multi_line(xs='qm_time', ys='total_el_pop', source=source_qm, line_width=2, color='colors', legend='states')
plot2.legend.location = 'bottom_left'
plot.x_range = plot2.x_range
plot3.x_range = plot.x_range
print('time shape = {}'.format(len(data['time'])))
#plot4.multi_line(xs='time', ys='pot_en', source=source, color=dict(field='states', transform=color_mapper_qm))
#print inspect.getmembers(bokeh.models.markers[__name__], inspect.isclass)
# Add the plot to the current document
checkbox = CheckboxGroup(labels=list(labels), active=list(range(labels.shape[0])))
checkbox.on_change('active', update_plot)
doc = column(widgetbox(checkbox), plot, plot2, plot3, plot4)
curdoc().add_root(doc)
#show(doc)
|
import os
import sys
def inject_index_html(filename):
with open(filename, "r") as f:
content = f.readlines()
PATTERNS = [
"/assets",
"/_dash",
]
for line_idx in range(len(content)):
for p in PATTERNS:
content[line_idx] = content[line_idx].replace(
p, "/0" + p
)
content[line_idx] = content[line_idx].replace(
'"requests_pathname_prefix": "/"',
'"requests_pathname_prefix": "/0/"'
)
with open(filename, "w") as f:
f.writelines(content)
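# Usage sketch (the script filename is assumed, not given in this file):
#   python inject_index_html.py path/to/build
# rewrites path/to/build/index.html so that "/assets" and "/_dash" URLs and
# the Dash requests_pathname_prefix are served under the "/0/" prefix.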
if __name__ == "__main__":
filename = os.path.join(sys.argv[1], "index.html")
inject_index_html(filename)
|