content stringlengths 5 1.05M |
|---|
r"""
Tools
#####
**Helper Functions**
This module contains a variety of functions for manipulating images in
ways that do NOT return a modified version of the original image.
.. currentmodule:: porespy
.. autosummary::
:template: mybase.rst
:toctree: generated/
tools.align_image_with_openpnm
tools.bbox_to_slices
tools.extend_slice
tools.extract_subsection
tools.extract_regions
tools.extract_cylinder
tools.extract_subsection
tools.find_outer_region
tools.get_border
tools.get_planes
tools.insert_cylinder
tools.insert_sphere
tools.in_hull
tools.isolate_object
tools.make_contiguous
tools.marching_map
tools.mesh_region
tools.norm_to_uniform
tools.overlay
tools.ps_ball
tools.ps_disk
tools.ps_rect
tools.ps_round
tools.randomize_colors
tools.recombine
tools.subdivide
tools.unpad
"""
# Public API of the tools subpackage.  Kept in sync with the autosummary
# listing in the module docstring above (plus a few utility helpers from
# ._utils that are exported but not documented there).
__all__ = [
    "align_image_with_openpnm",
    "bbox_to_slices",
    "extend_slice",
    "extract_cylinder",
    "extract_subsection",
    "extract_regions",
    "find_outer_region",
    "get_border",
    "get_planes",
    "insert_cylinder",
    "insert_sphere",
    "in_hull",
    "isolate_object",
    "make_contiguous",
    "marching_map",
    "mesh_region",
    "norm_to_uniform",
    "overlay",
    "ps_ball",
    "ps_disk",
    "ps_rect",
    "ps_round",
    "randomize_colors",
    "recombine",
    "subdivide",
    "unpad",
    "sanitize_filename",
    "get_tqdm",
    "show_docstring"]
from ._funcs import align_image_with_openpnm
from ._funcs import bbox_to_slices
from ._funcs import extend_slice
from ._funcs import extract_cylinder
from ._funcs import extract_subsection
from ._funcs import extract_regions
from ._funcs import find_outer_region
from ._funcs import get_border
from ._funcs import get_planes
from ._funcs import insert_cylinder
from ._funcs import insert_sphere
from ._funcs import in_hull
from ._funcs import isolate_object
from ._funcs import marching_map
from ._funcs import make_contiguous
from ._funcs import mesh_region
from ._funcs import norm_to_uniform
from ._funcs import overlay
from ._funcs import randomize_colors
from ._funcs import recombine
from ._funcs import ps_ball
from ._funcs import ps_disk
from ._funcs import ps_rect
from ._funcs import ps_round
from ._funcs import subdivide
from ._utils import sanitize_filename
from ._utils import get_tqdm
from ._utils import show_docstring
from ._utils import Results
from ._funcs import _check_for_singleton_axes
from ._unpad import unpad
|
import pytest
import os
from cclm.models import Embedder
def test_persist_embedder(tmp_path):
    """Round-trip an Embedder through save/load and check attributes survive."""
    target_dir = tmp_path / ".embedder_test"
    original = Embedder(max_len=5)
    original.save(target_dir)
    print(os.listdir(target_dir))  # aid debugging when the assertion fails
    restored = Embedder(load_from=target_dir)
    assert restored.max_len == 5, "did not properly persist attribute max_len"
|
# flake8: noqa
from .tests_discoverer import load_tests
|
import sys
import os
import time
from PIL import Image
from pygame import mixer
def resize_image(img, new_width=120, char_aspect=0.55):
    """Scale *img* down to ``new_width`` columns, preserving aspect ratio.

    The height is additionally squashed by ``char_aspect`` because terminal
    character cells are taller than they are wide, so an uncorrected image
    would look vertically stretched.

    Args:
        img: An image object exposing ``.size`` and ``.resize`` (PIL Image).
        new_width: Target width in characters (default 120, as before).
        char_aspect: Vertical correction factor for character cells.

    Returns:
        The resized image produced by ``img.resize``.
    """
    width, height = img.size
    aspect_ratio = height / width
    new_height = int(aspect_ratio * new_width * char_aspect)
    return img.resize((new_width, new_height))
def image_to_ascii(img):
    """Convert *img* into a single ANSI-colored ASCII-art string."""
    img = resize_image(img)
    glyphs = ['.', ':', '!', '*', '%', '$', '@', '&', '#', 'S', 'B']
    ansi_codes = [0, 4, 2, 6, 1, 5, 3, 7]
    cells = []
    for (r, g, b), luma in zip(img.convert('RGB').getdata(),
                               img.convert('L').getdata()):
        # Pack the on/off state of each channel into a 3-bit color index.
        idx = ((r > 127) << 2) | ((g > 127) << 1) | (b > 127)
        # Brightness picks the glyph; 0-255 // 25 maps onto the 11 glyphs.
        cells.append(f'\033[3{ansi_codes[idx]}m' + glyphs[luma // 25])
    width, _ = img.size
    rows = (cells[start:start + width]
            for start in range(0, len(cells), width))
    return '\n'.join(''.join(row) for row in rows)
def frames_to_ascii(directory_name):
    """Render every frame image in *directory_name* to ASCII art.

    Frames are processed in sorted filename order so playback order matches
    the ``img-%5d.png`` numbering produced by ffmpeg.

    Args:
        directory_name: Directory containing the extracted frame images.

    Returns:
        list[str]: one ASCII-art string per frame.
    """
    ascii_images = []
    for filename in sorted(os.listdir(directory_name)):
        # os.path.join works whether or not directory_name ends in a slash;
        # the old string concatenation silently built a wrong path otherwise.
        img = Image.open(os.path.join(directory_name, filename))
        ascii_images.append(image_to_ascii(img))
    return ascii_images
def play_music(video_name):
    """Start playback of the WAV soundtrack extracted for *video_name*."""
    mixer.init()
    audio_path = f'.{video_name}.wav'
    mixer.music.load(audio_path)
    mixer.music.play()
def play_video(ascii_frames):
    """Show each ASCII frame for ~0.1 s, clearing the screen between frames.

    Uses an absolute schedule rather than a fixed sleep so that time spent
    printing does not accumulate as playback drift relative to the audio.
    """
    next_frame_at = time.time()
    for frame in ascii_frames:
        print(frame)
        next_frame_at += 0.1
        time.sleep(max(next_frame_at - time.time(), 0))
        os.system('clear')
import subprocess

# Fail with a usage message instead of an IndexError when no file is given.
if len(sys.argv) < 2:
    sys.exit(f'usage: {sys.argv[0]} VIDEO_FILE')
video_name = sys.argv[1]
frames_directory = f'.{video_name}/'
audio_directory = f'.{video_name}.wav'
os.makedirs(frames_directory, exist_ok=True)
# List-form subprocess calls avoid the shell quoting/injection problems the
# previous os.system(f'...') invocations had with unusual filenames.
subprocess.run(['ffmpeg', '-i', video_name, '-r', '10',
                f'{frames_directory}img-%5d.png', '-loglevel', 'quiet'])
subprocess.run(['ffmpeg', '-i', video_name, '-ac', '2', '-f', 'wav',
                audio_directory, '-loglevel', 'quiet'])
ascii_frames = frames_to_ascii(frames_directory)
play_music(video_name)
play_video(ascii_frames)
|
# Version 0.2.0
# 2009/06/18
# Copyright Michael Foord 2005-2009
# akismet.py
# Python interface to the akismet API
# E-mail fuzzyman@voidspace.org.uk
# http://www.voidspace.org.uk/python/modules.shtml
# http://akismet.com
# Released subject to the BSD License
# See http://www.voidspace.org.uk/python/license.shtml
"""
A python interface to the `Akismet <http://akismet.com>`_ API.
This is a web service for blocking SPAM comments to blogs - or other online
services.
You will need a Wordpress API key, from `wordpress.com <http://wordpress.com>`_.
You should pass in the keyword argument 'agent' to the name of your program,
when you create an Akismet instance. This sets the ``user-agent`` to a useful
value.
The default is : ::
Python Interface by Fuzzyman | akismet.py/0.2.0
Whatever you pass in, will replace the *Python Interface by Fuzzyman* part.
**0.2.0** will change with the version of this interface.
Usage example::
from akismet import Akismet
api = Akismet(agent='Test Script')
# if akismet-apikey.txt is in place,
# the key will automatically be set
# or you can call api.setAPIKey()
#
if api.key is None:
print "No 'akismet-apikey.txt' file."
elif not api.verify_key():
print "The API key is invalid."
else:
# data should be a dictionary of values
# They can all be filled in with defaults
# from a CGI environment
if api.comment_check(comment, data):
print 'This comment is spam.'
else:
print 'This comment is ham.'
"""
import os, sys
from urllib import urlencode
import socket
if hasattr(socket, 'setdefaulttimeout'):
    # Set the default timeout on sockets to 5 seconds
    socket.setdefaulttimeout(5)

__version__ = '0.2.0'

__all__ = (
    '__version__',
    'Akismet',
    'AkismetError',
    'APIKeyError',
)

__author__ = 'Michael Foord <fuzzyman AT voidspace DOT org DOT uk>'

__docformat__ = "restructuredtext en"

# Template filled with (agent, __version__) to produce the User-Agent header.
user_agent = "%s | akismet.py/%s"
# Agent name used when the caller does not supply one.
DEFAULTAGENT = 'Python Interface by Fuzzyman/%s'

isfile = os.path.isfile

# Detect Google App Engine: urlfetch is only importable there.  When the
# import fails we are on normal CPython and fall back to urllib2; the
# sentinel `urllib2 is None` below selects the matching _fetch_url.
urllib2 = None
try:
    from google.appengine.api import urlfetch
except ImportError:
    import urllib2
if urllib2 is None:
    # Google App Engine: urllib2 is unavailable, use the urlfetch service.
    def _fetch_url(url, data, headers):
        """POST *data* to *url* via urlfetch and return the response body."""
        response = urlfetch.fetch(url=url, payload=data,
                                  method=urlfetch.POST, headers=headers)
        if response.status_code != 200:
            raise Exception('Could not fetch Akismet URL: %s Response code: %s' %
                            (url, response.status_code))
        return response.content
else:
    # Standard CPython: plain urllib2 POST request.
    def _fetch_url(url, data, headers):
        """POST *data* to *url* via urllib2 and return the response body."""
        return urllib2.urlopen(urllib2.Request(url, data, headers)).read()
class AkismetError(Exception):
    """Base class for all akismet exceptions.

    Catching this also catches ``APIKeyError`` and wrapped network failures.
    """
class APIKeyError(AkismetError):
    """Raised when no API key is set or the ``akismet-apikey.txt`` file is invalid."""
class Akismet(object):
    """A class for working with the akismet API."""

    baseurl = 'rest.akismet.com/1.1/'

    def __init__(self, key=None, blog_url=None, agent=None):
        """Automatically calls ``setAPIKey``."""
        if agent is None:
            agent = DEFAULTAGENT % __version__
        self.user_agent = user_agent % (agent, __version__)
        self.setAPIKey(key, blog_url)

    def _getURL(self):
        """
        Fetch the url to make requests to.

        This comprises the api key plus the baseurl.
        """
        return 'http://%s.%s' % (self.key, self.baseurl)

    def _safeRequest(self, url, data, headers):
        # Wrap any network failure in an AkismetError so callers only need to
        # handle this module's exception hierarchy.
        try:
            resp = _fetch_url(url, data, headers)
        except Exception as e:
            raise AkismetError(str(e))
        return resp

    def setAPIKey(self, key=None, blog_url=None):
        """
        Set the wordpress API key for all transactions.

        If you don't specify an explicit API ``key`` and ``blog_url`` it will
        attempt to load them from a file called ``akismet-apikey.txt`` in the
        current directory.

        This method is *usually* called automatically when you create a new
        ``Akismet`` instance.
        """
        if key is None and isfile('akismet-apikey.txt'):
            # The file holds the key on the first non-comment line and the
            # blog url on the second.  Use a context manager so the handle is
            # always closed (the previous code leaked it).
            with open('akismet-apikey.txt') as key_file:
                the_file = [l.strip() for l in key_file.readlines()
                            if l.strip() and not l.strip().startswith('#')]
            try:
                self.key = the_file[0]
                self.blog_url = the_file[1]
            except IndexError:
                raise APIKeyError("Your 'akismet-apikey.txt' is invalid.")
        else:
            self.key = key
            self.blog_url = blog_url

    def verify_key(self):
        """
        This equates to the ``verify-key`` call against the akismet API.

        It returns ``True`` if the key is valid.

        The docs state that you *ought* to call this at the start of the
        transaction.

        It raises ``APIKeyError`` if you have not yet set an API key.

        If the connection to akismet fails, an ``AkismetError`` wrapping the
        underlying network error is raised (see ``_safeRequest``).
        """
        if self.key is None:
            raise APIKeyError("You have not set an API key.")
        data = {'key': self.key, 'blog': self.blog_url}
        # this function *doesn't* use the key as part of the URL
        url = 'http://%sverify-key' % self.baseurl
        headers = {'User-Agent': self.user_agent}
        resp = self._safeRequest(url, urlencode(data), headers)
        if resp.lower() == 'valid':
            return True
        else:
            return False

    def _build_data(self, comment, data):
        """
        This function builds the data structure required by ``comment_check``,
        ``submit_spam``, and ``submit_ham``.

        It modifies the ``data`` dictionary you give it in place. (and so
        doesn't return anything)

        It raises an ``AkismetError`` if the user IP or user-agent can't be
        worked out.
        """
        data['comment_content'] = comment
        if not 'user_ip' in data:
            try:
                val = os.environ['REMOTE_ADDR']
            except KeyError:
                raise AkismetError("No 'user_ip' supplied")
            data['user_ip'] = val
        if not 'user_agent' in data:
            try:
                val = os.environ['HTTP_USER_AGENT']
            except KeyError:
                raise AkismetError("No 'user_agent' supplied")
            data['user_agent'] = val
        #
        data.setdefault('referrer', os.environ.get('HTTP_REFERER', 'unknown'))
        data.setdefault('permalink', '')
        data.setdefault('comment_type', 'comment')
        data.setdefault('comment_author', '')
        data.setdefault('comment_author_email', '')
        data.setdefault('comment_author_url', '')
        data.setdefault('SERVER_ADDR', os.environ.get('SERVER_ADDR', ''))
        data.setdefault('SERVER_ADMIN', os.environ.get('SERVER_ADMIN', ''))
        data.setdefault('SERVER_NAME', os.environ.get('SERVER_NAME', ''))
        data.setdefault('SERVER_PORT', os.environ.get('SERVER_PORT', ''))
        data.setdefault('SERVER_SIGNATURE', os.environ.get('SERVER_SIGNATURE',
                                                           ''))
        data.setdefault('SERVER_SOFTWARE', os.environ.get('SERVER_SOFTWARE',
                                                          ''))
        data.setdefault('HTTP_ACCEPT', os.environ.get('HTTP_ACCEPT', ''))
        data.setdefault('blog', self.blog_url)

    def comment_check(self, comment, data=None, build_data=True, DEBUG=False):
        """
        This is the function that checks comments.

        It returns ``True`` for spam and ``False`` for ham.

        If you set ``DEBUG=True`` then it will return the text of the response,
        instead of the ``True`` or ``False`` object.

        It raises ``APIKeyError`` if you have not yet set an API key.

        If the connection to Akismet fails, the error is wrapped in an
        ``AkismetError`` (see ``_safeRequest``).

        As a minimum it requires the body of the comment. This is the
        ``comment`` argument.

        Akismet requires some other arguments, and allows some optional ones.
        The more information you give it, the more likely it is to be able to
        make an accurate diagnosis.

        You supply these values using a mapping object (dictionary) as the
        ``data`` argument.

        If ``build_data`` is ``True`` (the default), then *akismet.py* will
        attempt to fill in as much information as possible, using default
        values where necessary. This is particularly useful for programs
        running in a CGI environment. A lot of useful information
        can be supplied from environment variables (``os.environ``). See below.

        You *only* need to supply values for which you don't want defaults
        filled in for. All values must be strings.

        There are a few required values. If they are not supplied, and
        defaults can't be worked out, then an ``AkismetError`` is raised.

        If you set ``build_data=False`` and a required value is missing an
        ``AkismetError`` will also be raised.

        The normal values (and defaults) are as follows : ::

            'user_ip':          os.environ['REMOTE_ADDR']       (*)
            'user_agent':       os.environ['HTTP_USER_AGENT']   (*)
            'referrer':         os.environ.get('HTTP_REFERER', 'unknown') [#]_
            'permalink':        ''
            'comment_type':     'comment' [#]_
            'comment_author':   ''
            'comment_author_email': ''
            'comment_author_url': ''
            'SERVER_ADDR':      os.environ.get('SERVER_ADDR', '')
            'SERVER_ADMIN':     os.environ.get('SERVER_ADMIN', '')
            'SERVER_NAME':      os.environ.get('SERVER_NAME', '')
            'SERVER_PORT':      os.environ.get('SERVER_PORT', '')
            'SERVER_SIGNATURE': os.environ.get('SERVER_SIGNATURE', '')
            'SERVER_SOFTWARE':  os.environ.get('SERVER_SOFTWARE', '')
            'HTTP_ACCEPT':      os.environ.get('HTTP_ACCEPT', '')

        (*) Required values

        You may supply as many additional 'HTTP_*' type values as you wish.
        These should correspond to the http headers sent with the request.

        .. [#] Note the spelling "referrer". This is a required value by the
            akismet api - however, referrer information is not always
            supplied by the browser or server. In fact the HTTP protocol
            forbids relying on referrer information for functionality in
            programs.
        .. [#] The `API docs <http://akismet.com/development/api/>`_ state that this value
            can be " *blank, comment, trackback, pingback, or a made up value*
            *like 'registration'* ".
        """
        if self.key is None:
            raise APIKeyError("You have not set an API key.")
        if data is None:
            data = {}
        if build_data:
            self._build_data(comment, data)
        if 'blog' not in data:
            data['blog'] = self.blog_url
        url = '%scomment-check' % self._getURL()
        headers = {'User-Agent': self.user_agent}
        resp = self._safeRequest(url, urlencode(data), headers)
        if DEBUG:
            return resp
        resp = resp.lower()
        if resp == 'true':
            return True
        elif resp == 'false':
            return False
        else:
            # NOTE: Happens when you get a 'howdy wilbur' response !
            raise AkismetError('missing required argument.')

    def submit_spam(self, comment, data=None, build_data=True):
        """
        This function is used to tell akismet that a comment it marked as ham,
        is really spam.

        It takes all the same arguments as ``comment_check``, except for
        *DEBUG*.
        """
        if self.key is None:
            raise APIKeyError("You have not set an API key.")
        if data is None:
            data = {}
        if build_data:
            self._build_data(comment, data)
        url = '%ssubmit-spam' % self._getURL()
        headers = {'User-Agent': self.user_agent}
        self._safeRequest(url, urlencode(data), headers)

    def submit_ham(self, comment, data=None, build_data=True):
        """
        This function is used to tell akismet that a comment it marked as spam,
        is really ham.

        It takes all the same arguments as ``comment_check``, except for
        *DEBUG*.
        """
        if self.key is None:
            raise APIKeyError("You have not set an API key.")
        if data is None:
            data = {}
        if build_data:
            self._build_data(comment, data)
        url = '%ssubmit-ham' % self._getURL()
        headers = {'User-Agent': self.user_agent}
        self._safeRequest(url, urlencode(data), headers)
|
# Column-name groups for a building-simulation dataset.  Judging by the
# names (presumably — TODO confirm against the data source):
#   X: simulated outputs (heating/cooling loads, internal temperature)
#   Z: time-varying inputs (HVAC setpoints/schedules, occupancy, weather)
#   R: static building/scenario parameters
labels = {
    "X": [
        "Q_AC_OFFICE",
        "Q_HEAT_OFFICE",
        "Q_PEOPLE",
        "Q_EQP",
        "Q_LIGHT",
        "Q_AHU_C",
        "Q_AHU_H",
        "T_INT_OFFICE"
    ],
    "Z": [
        "ac_t_conf",
        "ac_t_red",
        "ac_mask",
        "heat_t_conf",
        "heat_t_red",
        "heat_mask",
        "ventilation_t",
        "ventilation_vol",
        "ventilation_mask",
        "occupancy",
        "pc_on_mask",
        "DNI",
        "IBEAM_H",
        "IBEAM_N",
        "IDIFF_H",
        "IGLOB_H",
        "RHUM",
        "TAMB"
    ],
    "R": [
        "airchange_infiltration_vol_per_h",
        "capacitance_kJ_perdegreK_perm3",
        "power_VCV_kW_heat",
        "power_VCV_kW_clim",
        "nb_occupants",
        "nb_PCs",
        "facade_1_thickness_2",
        "facade_1_window_area_percent",
        "facade_2_thickness_2",
        "facade_2_window_area_percent",
        "facade_3_thickness_2",
        "facade_3_window_area_percent",
        "facade_4_thickness_2",
        "facade_4_window_area_percent",
        "roof_thickness_2",
        "ground_thickness_2",
        "init_day",
        "init_month",
        "init_year"
    ]
}
""" Convenience functions to find the database and other system locations
without the user having to specify full paths.
"""
# std
import os
import collections
import datetime
import pathlib
from functools import lru_cache
import shutil
from typing import Union, Optional
# ours
from ankipandas.util.log import log
@lru_cache(32)
def _find_db(
search_path,
maxdepth=6,
filename="collection.anki2",
break_on_first=False,
user: Optional[str] = None,
):
"""
Like find_database but only for one search path at a time. Also doesn't
raise any error, even if the search path doesn't exist.
Args:
search_path:
maxdepth: Maximum depth relative to search_path
filename:
break_on_first: Break on first search result
user: Only search for this user
Returns:
collection.defaultdict({user: [list of results]})
"""
search_path = pathlib.Path(search_path)
if not search_path.exists():
log.debug(
"_find_db: Search path '{}' does not "
"exist.".format(str(search_path))
)
return collections.defaultdict(list)
if search_path.is_file():
if search_path.name == filename:
return collections.defaultdict(
list, {search_path.parent.name: [search_path]}
)
else:
log.warning(
"_find_db: Search path '{}' is a file, but filename does not "
"match that of '{}'.".format(str(search_path), filename)
)
return collections.defaultdict(list)
found = collections.defaultdict(list)
for root, dirs, files in os.walk(str(search_path)):
if filename in files:
_user = os.path.basename(root)
if user and not _user == user:
continue
found[_user].append(pathlib.Path(root) / filename)
if break_on_first:
log.debug("_find_db: Breaking after first hit.")
break
depth = len(pathlib.Path(root).relative_to(search_path).parts)
if maxdepth and depth >= maxdepth:
# log.debug(
# "_find_db: Abort search at '{}'. "
# "Max depth exceeded.".format(str(root))
# )
del dirs[:]
return found
@lru_cache(32)
def find_db(
    search_paths=None,
    maxdepth=8,
    filename="collection.anki2",
    user=None,
    break_on_first=True,
) -> pathlib.Path:
    """
    Find path to anki2 database.

    Args:
        search_paths: Search path as string or pathlib object or list/iterable
            thereof. If None, some search paths are set by default.
            Note: must be hashable (e.g. a tuple, not a list) because this
            function is memoized with ``lru_cache``.
        maxdepth: Maximal search depth.
        filename: Filename of the collection (default: ``collection.anki2``)
        user: Username to which the collection belongs. If None, search for
            databases of any user.
        break_on_first: Stop searching once a database is found. This is
            obviously faster, but you will not get any errors if there are
            multiple databases matching your criteria.

    Raises:
        If none or more than one result is found: :class:`ValueError`

    Returns:
        pathlib.Path to the anki2 database
    """
    if not search_paths:
        log.info(
            "Searching for database. This might take some time. "
            "You can speed this up by specifying a search path or "
            "directly entering the path to your database."
        )
        # Default locations cover Linux/macOS XDG paths, the Windows APPDATA
        # folder, and (as a last resort) the whole home directory.  Note the
        # expanduser/resolve below only applies to these defaults, not to
        # caller-supplied paths.
        search_paths = [
            "~/.local/share/Anki2/",
            "~/Documents/Anki2",
            pathlib.Path(os.getenv("APPDATA", "~") + "/Anki2/"),
            "~/.local/share/Anki2",
            pathlib.Path.home(),
        ]
        search_paths = [
            pathlib.Path(sp).expanduser().resolve() for sp in search_paths
        ]
    if break_on_first:
        log.warning(
            "The search will stop at the first hit, so please verify that "
            "the result is correct (for example in case there might be more "
            "than one Anki installation)"
        )
    # Allow a single path to be passed without wrapping it in a tuple.
    if isinstance(search_paths, (str, pathlib.PurePath)):
        search_paths = [search_paths]
    found = {}
    for search_path in search_paths:
        # Merge per-path results; later search paths can add more users.
        found = {
            **found,
            **_find_db(
                search_path,
                maxdepth=maxdepth,
                filename=filename,
                user=user,
                break_on_first=break_on_first,
            ),
        }
        if break_on_first:
            if user is not None:
                if user in found:
                    break
            else:
                if found:
                    break
    # Narrow {user: [paths]} down to a single user's list of paths.
    if user:
        if user not in found:
            raise ValueError(
                f"Could not find database belonging to user {user}"
            )
        found = found[user]
    else:
        if len(found) >= 2:
            raise ValueError(
                "Found databases for more than one user: {}. Please specify "
                "the user.".format(", ".join(found))
            )
        elif not found:
            raise ValueError(
                "No database found. You might increase the search depth or "
                "specify search paths to find more."
            )
        else:
            # Exactly one user: take their list of database paths.
            found = found.popitem()[1]
    # `found` is now a list of paths; require exactly one.
    if len(found) >= 2:
        raise ValueError(
            "Found more than one database belonging to user {} at {}".format(
                user, ", ".join(map(str, found))
            )
        )
    found = found[0]
    log.debug(f"Database found at '{found}'.")
    return found
@lru_cache(32)
def db_path_input(
    path: Union[str, pathlib.PurePath] = None, user: str = None
) -> pathlib.Path:
    """ Helper function to interpret user input of path to database.

    1. If no path is given, we search through some default locations
    2. If path points to a file: Take that file
    3. If path points to a directory: Search in that directory

    Args:
        path: Path to database or search path or None
        user: User name of anki collection or None

    Returns:
        Path to anki database as :class:`pathlib.Path` object

    Raises:
        If path does not exist: :class:`FileNotFoundError`
        In various other cases: :class:`ValueError`
    """
    if path is None:
        result = find_db(user=user)
    else:
        path = pathlib.Path(path)
        if not path.exists():
            raise FileNotFoundError(
                "db_path_input: File '{}' does not exist.".format(str(path))
            )
        if path.is_file():
            log.debug(
                f"db_path_input: Database explicitly set to '{path}'."
            )
            result = path
        else:
            # search_paths must be a tuple: find_db is memoized with
            # lru_cache, which requires hashable arguments.
            result = find_db(
                search_paths=(path,), user=user, break_on_first=False
            )
        log.info(f"Database found at '{result}'.")
    if not result:
        raise ValueError("Database could not be found.")
    return result
def db_backup_file_name() -> str:
    """ Time based file name of the backup file. """
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d-%H.%M.%S.%f")
    return "backup-ankipandas-{}.anki2".format(timestamp)
def get_anki_backup_folder(
    path: Union[str, pathlib.PurePath], nexist="raise"
) -> pathlib.Path:
    """ Return path to Anki backup folder.

    Args:
        path: Path to Anki database as :class:`pathlib.Path`
        nexist: What to do if backup folder doesn't seem to exist: ``raise`` or
            ``ignore``.

    Returns:
        Path to Anki backup folder as :class:`pathlib.Path`.
    """
    db_file = pathlib.Path(path)
    if not db_file.is_file():
        raise FileNotFoundError(
            f"Database path {db_file} seems to be invalid."
        )
    # Anki keeps its automatic backups in a 'backups' directory that is a
    # sibling of the collection file.
    backup_folder = db_file.parent / "backups"
    if nexist == "raise" and not backup_folder.is_dir():
        raise ValueError(
            "Anki backup folder corresponding to database at {} doesn't seem"
            " to exist. Perhaps you can specify a custom back "
            "folder?".format(db_file)
        )
    return backup_folder
def backup_db(
    db_path: Union[str, pathlib.PurePath],
    backup_folder: Union[str, pathlib.PurePath] = None,
) -> pathlib.Path:
    """
    Back up database file.

    Args:
        db_path: Path to database
        backup_folder: Path to backup folder. If None is given, the backup is
            created in the Anki backup directory.

    Returns:
        Path to newly created backup file as :class:`pathlib.Path`.
    """
    source = pathlib.Path(db_path)
    if backup_folder:
        target_dir = pathlib.Path(backup_folder)
        if not target_dir.is_dir():
            log.debug("Creating backup directory {}".format(str(target_dir)))
            target_dir.mkdir(parents=True)
    else:
        # Fall back to Anki's own backup directory next to the database.
        target_dir = get_anki_backup_folder(source, nexist="raise")
    if not source.is_file():
        raise FileNotFoundError("Database does not seem to exist.")
    backup_path = target_dir / db_backup_file_name()
    shutil.copy2(str(source), str(backup_path))
    return backup_path
|
import asyncio
import json
import logging
from datetime import timedelta
from functools import partial
from typing import Optional
from collections import OrderedDict
import async_timeout
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from aiohttp import ClientSession
from homeassistant.components.humidifier import (
HumidifierEntity, PLATFORM_SCHEMA)
from homeassistant.const import *
from homeassistant.components.humidifier.const import *
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import aiohttp_client
from homeassistant.util import Throttle
from miio.exceptions import DeviceException
from .deps.miio_new import MiotDevice
from . import GenericMiotDevice, ToggleableMiotDevice, dev_info
from .deps.const import (
DOMAIN,
CONF_UPDATE_INSTANT,
CONF_MAPPING,
CONF_CONTROL_PARAMS,
CONF_CLOUD,
CONF_MODEL,
ATTR_STATE_VALUE,
ATTR_MODEL,
ATTR_FIRMWARE_VERSION,
ATTR_HARDWARE_VERSION,
SCHEMA,
MAP,
DUMMY_IP,
DUMMY_TOKEN,
)
import copy
# Home Assistant platform type handled by this module.
TYPE = 'humidifier'

_LOGGER = logging.getLogger(__name__)

DEFAULT_NAME = "Generic MIoT " + TYPE
# Key under hass.data for entities created by this platform.
DATA_KEY = TYPE + '.' + DOMAIN

# Extend the shared humidifier platform schema with the integration-wide
# options (SCHEMA comes from .deps.const).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    SCHEMA
)

# HA update interval for this platform — presumably the device polling
# cadence; confirm against GenericMiotDevice's update handling.
SCAN_INTERVAL = timedelta(seconds=10)
# pylint: disable=unused-argument
async def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the humidifier platform from YAML/config-entry data.

    NOTE: the legacy ``@asyncio.coroutine`` decorator was removed — it is
    redundant on a native ``async def`` and was dropped from asyncio in
    Python 3.11.
    """
    if DATA_KEY not in hass.data:
        hass.data[DATA_KEY] = {}
    host = config.get(CONF_HOST)
    token = config.get(CONF_TOKEN)
    mapping = config.get(CONF_MAPPING)
    params = config.get(CONF_CONTROL_PARAMS)

    mappingnew = {}
    main_mi_type = None
    this_mi_type = []
    # Determine which MIoT service type is marked as the "main" one for
    # this device; only a main device may host a humidifier entity.
    for t in MAP[TYPE]:
        if mapping.get(t):
            this_mi_type.append(t)
        if 'main' in (params.get(t) or ""):
            main_mi_type = t

    if main_mi_type or isinstance(params, OrderedDict):
        # Flatten {type: {prop: spec}} into {type10_prop: spec}.
        for k, v in mapping.items():
            for kk, vv in v.items():
                mappingnew[f"{k[:10]}_{kk}"] = vv
        _LOGGER.info("Initializing %s with host %s (token %s...)", config.get(CONF_NAME), host, token[:5])
        # isinstance() instead of type(...) == OrderedDict: same intent,
        # idiomatic and subclass-safe.
        if isinstance(params, OrderedDict):
            miio_device = MiotDevice(ip=host, token=token, mapping=mapping)
        else:
            miio_device = MiotDevice(ip=host, token=token, mapping=mappingnew)
        try:
            if host == DUMMY_IP and token == DUMMY_TOKEN:
                raise DeviceException
            device_info = miio_device.info()
            model = device_info.model
            _LOGGER.info(
                "%s %s %s detected",
                model,
                device_info.firmware_version,
                device_info.hardware_version,
            )
        except DeviceException as de:
            if not config.get(CONF_CLOUD):
                # Logger.warn() is deprecated; use warning(); chain the
                # original device error for easier debugging.
                _LOGGER.warning(de)
                raise PlatformNotReady from de
            if not (di := config.get('cloud_device_info')):
                _LOGGER.error(f"未能获取到设备信息,请删除 {config.get(CONF_NAME)} 重新配置。")
                raise PlatformNotReady from de
            device_info = dev_info(
                di['model'],
                di['mac'],
                di['fw_version'],
                ""
            )
        device = MiotHumidifier(miio_device, config, device_info, hass, main_mi_type)
        _LOGGER.info(f"{main_mi_type} is the main device of {host}.")
        hass.data[DOMAIN]['miot_main_entity'][f'{host}-{config.get(CONF_NAME)}'] = device
        hass.data[DOMAIN]['entities'][device.unique_id] = device
        async_add_devices([device], update_before_add=True)
    else:
        _LOGGER.error(f"加湿器只能作为主设备!请检查{config.get(CONF_NAME)}配置")
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the platform from a config entry by delegating to the YAML path."""
    stored = hass.data[DOMAIN]['configs'].get(
        config_entry.entry_id, dict(config_entry.data)
    )
    # Shallow copy so the platform setup cannot mutate the stored config.
    config = copy.copy(stored)
    await async_setup_platform(hass, config, async_add_entities)
class MiotHumidifier(ToggleableMiotDevice, HumidifierEntity):
    """Representation of a humidifier device."""

    def __init__(self, device, config, device_info, hass, main_mi_type):
        # Delegate common MIoT wiring (device handle, config, attrs) to the
        # toggleable base class.
        ToggleableMiotDevice.__init__(self, device, config, device_info, hass, main_mi_type)
        # Cached state; refreshed from _state_attrs in
        # _handle_platform_specific_attrs().
        self._target_humidity = None
        self._mode = None
        self._available_modes = None
        self._device_class = DEVICE_CLASS_HUMIDIFIER

    @property
    def supported_features(self):
        """Return the list of supported features."""
        s = 0
        # Advertise mode support only if the device mapping exposes a
        # '<prefix>mode' property.
        if self._did_prefix + 'mode' in self._mapping:
            s |= SUPPORT_MODES
        return s

    @property
    def min_humidity(self):
        """Lower bound of the settable humidity range.

        Returns None when the control params declare no
        'target_humidity' value range.
        """
        try:
            return (self._ctrl_params['target_humidity']['value_range'][0])
        except KeyError:
            return None

    @property
    def max_humidity(self):
        """Upper bound of the settable humidity range (None if undeclared)."""
        try:
            return (self._ctrl_params['target_humidity']['value_range'][1])
        except KeyError:
            return None

    @property
    def target_humidity(self):
        """Return the humidity we try to reach."""
        return self._target_humidity

    @property
    def mode(self):
        """Return current mode."""
        return self._mode

    @property
    def available_modes(self):
        """Return available modes."""
        # NOTE(review): unlike min/max_humidity this does not guard against a
        # missing 'mode' key and would raise KeyError for mode-less devices.
        return list(self._ctrl_params['mode'].keys())

    @property
    def device_class(self):
        """Return the device class of the humidifier."""
        return self._device_class

    async def async_set_humidity(self, humidity):
        """Set new humidity level."""
        # Convert the HA percentage into the device's own value range.
        hum = self.convert_value(humidity, "target_humidity", True, self._ctrl_params['target_humidity']['value_range'])
        result = await self.set_property_new(self._did_prefix + "target_humidity", hum)
        if result:
            # Optimistically update local state once the device accepted it.
            self._target_humidity = hum
            self.async_write_ha_state()

    async def async_set_mode(self, mode):
        """Update mode."""
        # Map the HA mode name to the device's raw mode value.
        result = await self.set_property_new(self._did_prefix + "mode", self._ctrl_params['mode'].get(mode))
        if result:
            self._mode = mode
            self.async_write_ha_state()

    def _handle_platform_specific_attrs(self):
        # Pull humidifier-specific values out of the generic state dict after
        # the base class has refreshed it.
        super()._handle_platform_specific_attrs()
        self._target_humidity = self._state_attrs.get(self._did_prefix + 'target_humidity')
        # Reverse-map the raw device mode value back to its HA mode name.
        self._mode = self.get_key_by_value(self._ctrl_params['mode'], self._state_attrs.get(self._did_prefix + 'mode'))
|
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020 FABRIC Testbed
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author Komal Thareja (kthare10@renci.org)
import configparser
from datetime import datetime
from typing import List
from fabric_cm.credmgr.common.exceptions import ConfigError
class Config:
    """Typed accessor around the credmgr ``ConfigParser`` configuration.

    The string constants below name the INI sections and the per-section
    parameter keys; the ``get_*`` methods read and convert them.
    """
    # Sections
    SECTION_RUNTIME = 'runtime'
    SECTION_LOGGING = 'logging'
    SECTION_OAUTH = 'oauth'
    SECTION_LDAP = 'ldap'
    SECTION_JWT = 'jwt'
    SECTION_PROJECT_REGISTRY = 'project-registry'
    SECTION_VOUCH = 'vouch'
    # Runtime parameters
    REST_PORT = 'rest-port'
    PROMETHEUS_PORT = 'prometheus-port'
    ENABLE_PROJECT_REGISTRY = 'enable-project-registry'
    ENABLE_VOUCH_COOKIE = 'enable-vouch-cookie'
    TOKEN_LIFETIME = 'token-lifetime'
    PROJECT_NAMES_IGNORE_LIST = 'project-names-ignore-list'
    ROLES_LIST = 'roles-list'
    ALLOWED_SCOPES = 'allowed-scopes'
    # Logging Parameters
    LOGGER = 'logger'
    LOG_DIR = 'log-directory'
    LOG_FILE = 'log-file'
    LOG_RETAIN = 'log-retain'
    LOG_SIZE = 'log-size'
    LOG_LEVEL = 'log-level'
    # Oauth Parameters
    PROVIDER = 'oauth-provider'
    TOKEN_URL = 'oauth-token-url'
    REVOKE_URL = 'oauth-revoke-url'
    JWKS_URL = 'oauth-jwks-url'
    KEY_REFRESH = 'oauth-key-refresh'
    CLIENT_ID = 'oauth-client-id'
    CLIENT_SECRET = 'oauth-client-secret'
    # LDAP Parameters
    LDAP_HOST = 'ldap-host'
    LDAP_USER = 'ldap-user'
    LDAP_PASSWORD = 'ldap-password'
    LDAP_SEARCH_BASE = 'ldap-search-base'
    # JWT Parameters
    JWT_PUBLIC_KEY = 'jwt-public-key'
    JWT_PUBLIC_KEY_KID = 'jwt-public-key-kid'
    JWT_PRIVATE_KEY = 'jwt-private-key'
    JWT_PRIVATE_KEY_PASS_PHRASE = 'jwt-pass-phrase'
    # Project Registry Parameters
    PROJECT_REGISTRY_URL = 'project-registry-url'
    SSL_VERIFY = 'ssl_verify'
    # Vouch Parameters
    VOUCH = 'vouch'
    SECRET = 'secret'
    COMPRESSION = 'compression'
    CUSTOM_CLAIMS = 'custom_claims'
    LIFETIME = 'lifetime'
    COOKIE_NAME = 'cookie-name'
    COOKIE_DOMAIN_NAME = 'cookie-domain-name'

    def __init__(self, config_parser: configparser.ConfigParser):
        """Wrap a pre-loaded ConfigParser; no file I/O happens here."""
        self.config_parser = config_parser
def _get_config_from_section(self, section_name: str, parameter_name: str) -> str:
try:
return self.config_parser.get(section_name, parameter_name)
except Exception as e:
raise ConfigError("Missing {} in section {} Error: {}".format(parameter_name, section_name, e))
def get_rest_port(self) -> int:
return int(self._get_config_from_section(self.SECTION_RUNTIME, self.REST_PORT))
def get_prometheus_port(self) -> int:
return int(self._get_config_from_section(self.SECTION_RUNTIME, self.PROMETHEUS_PORT))
def is_project_registry_enabled(self) -> bool:
value = self._get_config_from_section(self.SECTION_RUNTIME, self.ENABLE_PROJECT_REGISTRY)
if value.lower() == 'true':
return True
return False
def is_vouch_cookie_enabled(self) -> bool:
value = self._get_config_from_section(self.SECTION_RUNTIME, self.ENABLE_VOUCH_COOKIE)
if value.lower() == 'true':
return True
return False
def get_allowed_scopes(self) -> str:
return self._get_config_from_section(self.SECTION_RUNTIME, self.ALLOWED_SCOPES)
def get_roles(self) -> str:
return self._get_config_from_section(self.SECTION_RUNTIME, self.ROLES_LIST)
def get_project_ignore_list(self) -> str:
return self._get_config_from_section(self.SECTION_RUNTIME, self.PROJECT_NAMES_IGNORE_LIST)
def get_token_life_time(self) -> int:
return int(self._get_config_from_section(self.SECTION_RUNTIME, self.TOKEN_LIFETIME))
def get_logger_name(self) -> str:
return self._get_config_from_section(self.SECTION_LOGGING, self.LOGGER)
def get_logger_dir(self) -> str:
return self._get_config_from_section(self.SECTION_LOGGING, self.LOG_DIR)
def get_logger_file(self) -> str:
return self._get_config_from_section(self.SECTION_LOGGING, self.LOG_FILE)
def get_logger_level(self) -> str:
return self._get_config_from_section(self.SECTION_LOGGING, self.LOG_LEVEL)
def get_logger_retain(self) -> int:
return int(self._get_config_from_section(self.SECTION_LOGGING, self.LOG_RETAIN))
def get_logger_size(self) -> int:
return int(self._get_config_from_section(self.SECTION_LOGGING, self.LOG_SIZE))
def get_jwt_public_key(self) -> str:
return self._get_config_from_section(self.SECTION_JWT, self.JWT_PUBLIC_KEY)
def get_jwt_public_key_kid(self) -> str:
return self._get_config_from_section(self.SECTION_JWT, self.JWT_PUBLIC_KEY_KID)
def get_jwt_private_key(self) -> str:
return self._get_config_from_section(self.SECTION_JWT, self.JWT_PRIVATE_KEY)
def get_jwt_private_key_pass_phrase(self) -> str:
return self._get_config_from_section(self.SECTION_JWT, self.JWT_PRIVATE_KEY_PASS_PHRASE)
def get_oauth_provider(self) -> str:
return self._get_config_from_section(self.SECTION_OAUTH, self.PROVIDER)
def get_oauth_token_url(self) -> str:
return self._get_config_from_section(self.SECTION_OAUTH, self.TOKEN_URL)
def get_oauth_jwks_url(self) -> str:
return self._get_config_from_section(self.SECTION_OAUTH, self.JWKS_URL)
def get_oauth_revoke_url(self) -> str:
return self._get_config_from_section(self.SECTION_OAUTH, self.REVOKE_URL)
def get_oauth_client_id(self) -> str:
return self._get_config_from_section(self.SECTION_OAUTH, self.CLIENT_ID)
def get_oauth_client_secret(self) -> str:
return self._get_config_from_section(self.SECTION_OAUTH, self.CLIENT_SECRET)
def get_oauth_key_refresh(self) -> datetime:
value = self._get_config_from_section(self.SECTION_OAUTH, self.KEY_REFRESH)
return datetime.strptime(value, "%H:%M:%S")
def get_ldap_host(self):
return self._get_config_from_section(self.SECTION_LDAP, self.LDAP_HOST)
def get_ldap_user(self):
return self._get_config_from_section(self.SECTION_LDAP, self.LDAP_USER)
def get_ldap_pwd(self):
return self._get_config_from_section(self.SECTION_LDAP, self.LDAP_PASSWORD)
def get_ldap_search_base(self):
return self._get_config_from_section(self.SECTION_LDAP, self.LDAP_SEARCH_BASE)
def is_pr_ssl_verify(self) -> bool:
value = self._get_config_from_section(self.SECTION_PROJECT_REGISTRY, self.SSL_VERIFY)
if value.lower() == 'true':
return True
return False
def get_pr_url(self) -> str:
return self._get_config_from_section(self.SECTION_PROJECT_REGISTRY, self.PROJECT_REGISTRY_URL)
def get_vouch_secret(self) -> str:
return self._get_config_from_section(self.SECTION_VOUCH, self.SECRET)
def is_vouch_cookie_compressed(self) -> bool:
value = self._get_config_from_section(self.SECTION_VOUCH, self.COMPRESSION)
if value.lower() == 'true':
return True
return False
def get_vouch_custom_claims(self) -> List[str]:
value = self._get_config_from_section(self.SECTION_VOUCH, self.CUSTOM_CLAIMS)
return value.split(',')
def get_vouch_cookie_lifetime(self) -> int:
return int(self._get_config_from_section(self.SECTION_VOUCH, self.LIFETIME))
def get_vouch_cookie_name(self) -> str:
return self._get_config_from_section(self.SECTION_VOUCH, self.COOKIE_NAME)
def get_vouch_cookie_domain_name(self) -> str:
return self._get_config_from_section(self.SECTION_VOUCH, self.COOKIE_DOMAIN_NAME)
def get_providers(self) -> dict:
"""
Constructor providers dict based on the information provided in config file
"""
providers = {}
provider = self.get_oauth_provider()
providers[provider] = {}
providers[provider]['client_id'] = self.get_oauth_client_id()
providers[provider]['client_secret'] = self.get_oauth_client_secret()
providers[provider]['token_uri'] = self.get_oauth_token_url()
providers[provider]['revoke_uri'] = self.get_oauth_revoke_url()
return providers
|
from aiogram import types
from aiogram.dispatcher import FSMContext
from loader import dp, bot
from states import Request
from keyboards import create_kb_smart_choose_curr
# from keyboards import main_menu
from keyboards import create_kb_coustom_main_menu
@dp.message_handler(state=Request.how_much_give)
async def set_how_much_give(message:types.Message, state:FSMContext):
    """FSM step: parse the amount the user wants to give, clean up the chat,
    and prompt for the currency; on any failure the request is cancelled.

    NOTE(review): the broad ``except Exception`` also catches Telegram API
    errors (e.g. a failing delete_message), not only a non-numeric amount,
    in which case the "wrong amount format" reply is misleading — consider
    narrowing the try body to the int() conversion.
    """
    try:
        # Raises ValueError when the message text is not an integer amount.
        summ = int(message.text)
        await state.update_data(how_much_give=summ)
        data = await state.get_data()
        # Delete the bot's previous prompt (its id was stashed under '_del_message').
        await bot.delete_message (
            chat_id=message.chat.id,
            message_id=data['_del_message']
        )
        request_data = await state.get_data()
        # Delete the user's own message containing the amount.
        await bot.delete_message (
            chat_id=message.chat.id,
            message_id=message.message_id
        )
        # Ask which currency to give, offering only the currencies
        # collected earlier into the FSM state.
        await message.answer (
            f'Выберете валюту:',
            reply_markup=create_kb_smart_choose_curr(request_data['currencies__give'])
            # reply_markup=create_kb_choose_currency()
        )
        ### for logs ### delete later
        request_data = await state.get_data()
        print('=== state: ===')
        print(request_data)
        print('==============')
        ### for logs ### delete later
        await Request.currency__how_much__give.set()
        # currensy_for_how_much.py
    except Exception as e:
        # Cancellation path: log, restore the main-menu keyboard,
        # drop the FSM state and delete the offending message.
        print(e)
        print("EXEPTION HOW MACH GIVE")
        await message.answer (
            f'Формат суммы неправильный. Создание заявки отменено\n===========',
            reply_markup=create_kb_coustom_main_menu(message.chat.id)
        )
        await state.finish()
        await message.delete()
# ****数据预处理代码-第一步:数据清洗**** #
import pandas as pd
import numpy as np
import random
from collections import Counter
from sklearn import preprocessing
from matplotlib import pyplot as plt
import seaborn as sns
import missingno
from scipy import stats
import math
# Plot setup
plt.rcParams['font.sans-serif'] = ['SimHei'] # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False # render the minus sign correctly
# Reference data
# NOTE(review): some adcodes look wrong — '重庆' is normally 500000 and
# '内蒙古' 150000 ('320000' here duplicates 江苏); confirm before relying on them.
list_prov_code = { # <province, adcode>
    '河南': '410000', '广东': '440000', '山东': '370000', '河北': '130000', '江苏': '320000', '四川': '510000',
    '上海': '310000', '云南': '530000', '陕西': '610000', '山西': '140000', '广西': '450000', '重庆': '150000',
    '内蒙古': '320000', '湖南': '430000', '北京': '110000', '安徽': '340000','辽宁': '210000',
    '黑龙江': '230000', '江西': '360000', '福建': '350000', '浙江': '330000', '湖北': '420000'
}
# Quarter buckets covering 2016-2017; all totals start at 0.
list_season = {'season1-2016': 0, 'season2-2016': 0, 'season3-2016': 0, 'season4-2016': 0,
               'season1-2017': 0, 'season2-2017': 0, 'season3-2017': 0, 'season4-2017': 0}
list_province = list_prov_code.keys() # [province]
list_adcode = list_prov_code.values() # [adcode]
list_model = [] # [model], filled while traversing the data (see add_model)
dict_model_int = dict() # <model, int> integer id assigned to each model name
list_bodyType = ['SUV', 'Sedan', 'MPV', 'Hatchback'] # [bodyType]
sum_sales = 0 # running total of all sales (updated by body_type_sales_sum)
# 自定义函数
def is_digit(num): # check whether a value is a usable number
    """Return True if *num* is a real numeric value (int/float/numpy integer) and not NaN.

    BUG FIX: the original expression relied on operator precedence — ``and``
    binds tighter than ``or`` — so the NaN guard only applied to plain ints
    and ``float('nan')`` was accepted as a digit.
    """
    return isinstance(num, (int, float, np.integer)) and not math.isnan(num)
def is_str(string, this_list): # check whether a value is an accepted string
    """Return True if *string* is a str contained in *this_list*.

    BUG FIX: the original tested the builtin type ``str`` for membership
    (``if str in this_list``) instead of the *string* argument, so the
    function could never return True for real data.
    """
    return isinstance(string, str) and string in this_list
def add_model(df): # collect model names and build the <model, int> mapping
    """Append every unseen model in df['model'] to list_model and assign it
    the next integer id in dict_model_int (ids start at 1)."""
    next_id = 1
    for name in df['model']:
        if name in list_model:
            continue
        list_model.append(name)
        dict_model_int[name] = next_id
        next_id += 1
def sales_sum_prov(df): # sum sales per province, returns <province, sales>
    """Sum the sales column per province, skipping unknown province names and
    non-numeric sales values.

    Improvements: single pass over the rows instead of the original
    O(provinces x rows) nested scan, and no shadowing of the builtin ``str``.
    The returned dict keeps list_province order, matching the original.
    """
    known = set(list_province)
    totals = {}
    # NOTE(review): iteration starts at row index 1, skipping the first data
    # row (pandas already strips the header) — kept for behavioural parity.
    for i in range(1, df.shape[0]):
        row = df.iloc[i]
        prov, sales = row[0], row[6]
        if prov in known and is_digit(sales):
            totals[prov] = totals.get(prov, 0) + sales
    prov_data = {p: totals[p] for p in list_province if p in totals}
    print(prov_data)
    return prov_data
def sales_sum_season(df, str):  # noqa: A002 - parameter name kept for caller compatibility
    """Sum one province's sales per quarter; returns dict <quarter, sales>.

    Rows with a different province, non-numeric sales, or a month outside
    1..12 are skipped (the original's 16-branch ladder silently ignored
    such rows too).  Any year other than 2016 is bucketed as 2017, exactly
    as before.
    """
    province = str
    season_data = dict(list_season)
    for i in range(1, df.shape[0]):
        row = df.iloc[i]
        if row[0] != province or not is_digit(row[6]):
            continue
        year, month = row[4], row[5]
        if month not in (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12):
            continue  # NaN or out-of-range month: no quarter bucket, as before
        quarter = (int(month) - 1) // 3 + 1  # 1-3 -> Q1, 4-6 -> Q2, ...
        label = 2016 if year == 2016 else 2017
        season_data['season{}-{}'.format(quarter, label)] += row[6]
    return season_data
def body_type_sales_sum(df): # sum sales per body type, returns the totals
    """Sum sales per body type and accumulate the module-wide sum_sales total.

    Returns the dict's values view (same interface as before, same key order
    as list_bodyType).  Improvements: single pass instead of the original
    O(types x rows) scan, no shadowing of the builtin ``str``.
    """
    global sum_sales  # module-level running total, updated as a side effect
    totals = {'SUV': 0, 'Sedan': 0, 'MPV': 0, 'Hatchback': 0}
    wanted = set(list_bodyType)
    for i in range(1, df.shape[0]):
        row = df.iloc[i]
        btype, sales = row[3], row[6]
        if btype in wanted and is_digit(sales):
            totals[btype] += sales
            sum_sales += sales
    return totals.values()
def model_sales_sum(df): # sum sales per model, returns <model id, sales>
    """Sum sales per model; returns dict <model id, sales total>.

    Single pass over the rows (the original re-scanned the whole frame once
    per model) and no shadowing of the builtin ``str``.  Keys are created in
    list_model order, matching the original.
    """
    totals = {dict_model_int[m]: 0 for m in list_model}
    for i in range(1, df.shape[0]):
        row = df.iloc[i]
        name, sales = row[2], row[6]
        if name in dict_model_int and is_digit(sales):
            totals[dict_model_int[name]] += sales
    return totals
def search_sum_model(df): # sum search volume per model, returns <model id, searches>
    """Sum search popularity per model; returns dict <model id, search total>.

    Single pass over the rows (the original re-scanned the whole frame once
    per model) and no shadowing of the builtin ``str``.
    """
    totals = {dict_model_int[m]: 0 for m in list_model}
    for i in range(1, df.shape[0]):
        row = df.iloc[i]
        name, popularity = row[2], row[5]
        if name in dict_model_int and is_digit(popularity):
            totals[dict_model_int[name]] += popularity
    return totals
def comment_sum_model(df): # sum comment volume per model, returns <model id, comments>
    """Sum comment volume per model; returns dict <model id, comment total>.

    Single pass over the rows (the original re-scanned the whole frame once
    per model) and no shadowing of the builtin ``str``.  Note the model name
    lives in column 0 of this frame, unlike the sales/search frames.
    """
    totals = {dict_model_int[m]: 0 for m in list_model}
    for i in range(1, df.shape[0]):
        row = df.iloc[i]
        name, comments = row[0], row[3]
        if name in dict_model_int and is_digit(comments):
            totals[dict_model_int[name]] += comments
    return totals
def com_rep_sum_season(df, index): # quarterly comment/reply totals + comparison plot
    """Sum comment and reply volumes per quarter for the model with id *index*
    and draw a two-line comparison plot saved to ../result/com_rep_man_Plot2.png.

    The original 30-line quarter ladder is replaced with quarter arithmetic;
    rows whose month is NaN or outside 1..12 are skipped, which matches the
    ladder's fall-through behaviour.  Any year other than 2016 is bucketed
    as 2017, exactly as before.
    """
    season_data_com = dict(list_season)
    season_data_rep = dict(list_season)
    # Reverse-lookup the model name from its integer id.
    model = list(dict_model_int.keys())[list(dict_model_int.values()).index(index)]
    for i in range(1, df.shape[0]):
        row = df.iloc[i]
        if row[0] != model or not is_digit(row[3]) or not is_digit(row[4]):
            continue
        year, month = row[1], row[2]
        if month not in (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12):
            continue
        quarter = (int(month) - 1) // 3 + 1
        key = 'season{}-{}'.format(quarter, 2016 if year == 2016 else 2017)
        season_data_com[key] += row[3]  # comment volume
        season_data_rep[key] += row[4]  # reply volume
    graph_plot2(list(season_data_com.keys()), list(season_data_com.values()), '车型对应的季度评论',
                list(season_data_rep.keys()), list(season_data_rep.values()), '车型对应的季度回复',
                '../result/com_rep_man_Plot2.png', '季度', '数量', '最高评论量车型季度走势')
def model1_sales_list(df): # sales values of the model mapped to id 31
    """Return all valid sales figures for the model whose integer id is 31."""
    target = list(dict_model_int.keys())[list(dict_model_int.values()).index(31)]
    collected = []
    for i in range(1, df.shape[0]):
        row = df.iloc[i]
        if row[2] != target or not is_digit(row[6]):
            continue
        collected.append(row[6])
    return collected
def model1_search_list(df): # search-volume values of the model mapped to id 31
    """Return all valid search-popularity figures for the model whose id is 31."""
    target = list(dict_model_int.keys())[list(dict_model_int.values()).index(31)]
    collected = []
    for i in range(1, df.shape[0]):
        row = df.iloc[i]
        if row[2] != target or not is_digit(row[5]):
            continue
        collected.append(row[5])
    return collected
def model1_com_list(df): # comment-volume values of the model mapped to id 31
    """Return all valid comment-volume figures for the model whose id is 31."""
    target = list(dict_model_int.keys())[list(dict_model_int.values()).index(31)]
    collected = []
    for i in range(1, df.shape[0]):
        row = df.iloc[i]
        if row[0] != target or not is_digit(row[3]):
            continue
        collected.append(row[3])
    return collected
def model1_rep_list(df): # reply-volume values of the model mapped to id 31
    """Return all valid reply-volume figures for the model whose id is 31."""
    target = list(dict_model_int.keys())[list(dict_model_int.values()).index(31)]
    collected = []
    for i in range(1, df.shape[0]):
        row = df.iloc[i]
        if row[0] != target or not is_digit(row[4]):
            continue
        collected.append(row[4])
    return collected
def graph_bar(x, y, addr, x_name, y_name, title): # draw a bar chart
    """Draw a bar chart of *y* over *x*, save it to *addr*, then show it."""
    #plt.figure()
    plt.bar(x, y)
    plt.axis('tight')
    plt.xlabel(x_name)
    plt.ylabel(y_name)
    # plt.xlim((-3, 5)) explicit axis range, kept for reference
    plt.title(title)
    #plt.tight_layout(w_pad=3.0)
    plt.savefig(addr)
    plt.show()
def graph_plot(x, y, addr, x_name, y_name, title): # draw a line chart
    """Draw a line chart with dot markers, save it to *addr*, then show it."""
    plt.plot(x, y, marker='.', mec='r', mfc='w')
    plt.axis('tight')
    plt.xlabel(x_name)
    plt.ylabel(y_name)
    plt.title(title)
    plt.savefig(addr)
    plt.show()
def graph_plot2(x1, y1, name1, x2, y2, name2, addr, x_name, y_name, title): # draw two comparison lines
    """Draw two labelled line series on one axes for comparison, save to *addr*, then show."""
    plt.plot(x1, y1, marker='.', mec='r', mfc='w', label=name1)
    plt.plot(x2, y2, marker='+', ms=10, label=name2)
    plt.legend()
    plt.axis('tight')
    plt.xlabel(x_name)
    plt.ylabel(y_name)
    plt.title(title)
    plt.savefig(addr)
    plt.show()
def graph_pie(x, addr, title): # draw a pie chart
    """Draw a pie chart of *x* labelled with list_bodyType, save to *addr*, then show."""
    explode = [0, 0.05, 0, 0.02]
    # explode: per-slice fraction of the radius by which the slice is offset
    plt.pie(list(x), explode=explode,
            labels=list_bodyType, autopct='%3.1f%%',
            startangle=180, shadow=True, colors=['cyan', 'lightpink', 'green', 'yellow'])
    plt.title(title)
    plt.savefig(addr)
    plt.show()
def norm_fun(x, mu, sigma): # normal probability density function
    """Gaussian pdf evaluated at *x* for mean *mu* and standard deviation *sigma*."""
    two_var = 2 * sigma**2
    scale = sigma * np.sqrt(2 * np.pi)
    return np.exp(-((x - mu)**2) / two_var) / scale
def graph_f_plot(df, x_l, x_r, x_len, x_name, title, addr): # fitted normal curve over a histogram
    """Plot the normal pdf fitted to *df* (sample mean/std) over its density
    histogram on [x_l, x_r) with step *x_len*, save to *addr*, then show.

    BUG FIX: ``plt.hist(..., normed=True)`` — the ``normed`` keyword was
    removed in matplotlib 3.1; ``density=True`` is the equivalent.
    """
    mean = df.mean()  # sample mean
    std = df.std()  # sample standard deviation
    x = np.arange(x_l, x_r, x_len)
    # Evaluate the fitted normal density over the requested x range.
    y = norm_fun(x, mean, std)
    plt.plot(x, y)
    # density=True normalises the histogram so it is comparable with the pdf.
    plt.hist(df, bins=100, rwidth=0.5, density=True)
    plt.axis('tight')
    plt.title(title)
    plt.xlabel(x_name)
    plt.ylabel('Probability')
    plt.savefig(addr)
    plt.show()
def pier(a, b): # Pearson correlation coefficient
    """Return scipy's Pearson correlation result (coefficient, p-value) for *a* and *b*."""
    return stats.pearsonr(a, b)
# 从表格导入数据
data_sales = pd.read_excel('../data/train_sales_data.xls') # 读入csv文件
data_search = pd.read_excel('../data/train_search_data.xls')
data_user = pd.read_excel('../data/train_user_reply_data.xls')
data_test = pd.read_excel('../data/test.xls')
add_model(data_sales) # 获取model数据,并映射
# .shape获取表格行列数
print('数据行列数:')
print("\t销售数据:{}".format(data_sales.shape))
print("\t搜索数据:{}".format(data_search.shape))
print("\t评论数据:{}".format(data_user.shape))
print("\t测试数据:{}".format(data_test.shape))
'''
# 预处理前数据统计图
# #对省份总销量绘图
list_sales_prov = sales_sum_prov(data_sales)
print('各省份的总销量数据:')
print(list_sales_prov)
list_x_sales = list_sales_prov.keys() # 省份
list_y_sales = list_sales_prov.values() # 销量
graph_bar(list_x_sales, list_y_sales, '../result/sales_data_Bar.png', '省份', '销量', '各省份总销量条形图')
# #对销量最高和销量最低省份按季度绘图折线图
max_sales = max(list_y_sales) # 得到最大销量数
province_max_sales = list(list_x_sales)[list(list_y_sales).index(max_sales)] # 通过value在字典中的下标获取对应的键值
min_sales = min(list_y_sales) # 最小销量数
province_min_sales = list(list_x_sales)[list(list_y_sales).index(min_sales)]
list_sales_max = sales_sum_season(data_sales, province_max_sales)
list_sales_min = sales_sum_season(data_sales, province_min_sales)
print('最高销售量省份分季度销量数据:')
print(list_sales_max)
print('最低销售量省份分季度销量数据:')
print(list_sales_min)
graph_plot(list_sales_max.keys(), list_sales_max.values(), '../result/sales_max_Plot.png', '季度', '销量',
province_max_sales + '省各季度销量')
graph_plot(list_sales_min.keys(), list_sales_min.values(), '../result/sales_min_Plot.png', '季度', '销量',
province_min_sales + '省各季度销量')
graph_plot2(list_sales_max.keys(), list_sales_max.values(), province_max_sales,
list_sales_min.keys(), list_sales_min.values(), province_min_sales,
'../result/sales_min_Plot.png', '季度', '销量', '最高最低省份季度销量对比')
# #对销售量按车身类型绘图饼图
list_bodyType_sales = body_type_sales_sum(data_sales)
graph_pie(list_bodyType_sales, '../result/bodyType_sales_Pie.png', '车身类型销量图')
# #对销售量求和按车型列表并排序
dict_model_sales = model_sales_sum(data_sales)
print("销售量按车型列表并排序:{}".format(sorted(dict_model_sales.items(), key=lambda x: x[1], reverse=True)))
# #搜索量求和按车型列表并排序
dict_search_sum_model = search_sum_model(data_search)
print("搜索量按车型列表并排序:{}".format(sorted(dict_search_sum_model.items(), key=lambda x: x[1], reverse=True)))
'''
# #评论量求和按车型列表并排序
dict_comment_sum_model = comment_sum_model(data_user)
print("评论量按车型列表并排序:{}".format(sorted(dict_comment_sum_model.items(), key=lambda x: x[1], reverse=True)))
# #某个车型的评论量与回复量对比折线图
max_comment_model = max(dict_comment_sum_model, key=dict_comment_sum_model.get) # 获取评论量最高的车型
com_rep_sum_season(data_user, max_comment_model) # 对最高评论量车型绘制评论和回复量季度走势
# 数据清洗部分
# 统计重复记录数
'''
print('数据重复记录行数:')
print("\t销售数据:{:d}".format(data_sales.duplicated().sum()))
print("\t搜索数据:{:d}".format(data_search.duplicated().sum()))
print("\t评论数据:{:d}".format(data_user.duplicated().sum()))
print("\t测试数据:{:d}".format(data_test.duplicated().sum()))
# 删除重复记录行
print('正在删除重复记录数据……')
data_sales = data_sales.drop_duplicates()
data_search = data_search.drop_duplicates()
data_user = data_user.drop_duplicates()
print("删除结束!")
# 删除4个以上缺失值的整行以及空行
print('删除空行以及缺失值有4个及以上的行……')
data_sales.dropna(axis=0, how='all')
data_search.dropna(axis=0, how='all')
data_user.dropna(axis=0, how='all')
data_sales = data_sales.dropna(thresh=4) # 删除4个以上缺失值的行
data_search = data_search.dropna(thresh=4)
data_user = data_user.dropna(thresh=4)
print('删除结束')
print('新数据行列数:')
print("\t销售数据:{}".format(data_sales.shape))
print("\t搜索数据:{}".format(data_search.shape))
print("\t评论数据:{}".format(data_user.shape))
'''
# 根据已有数据补充部分缺失值以及异常值
# 提取出需要统计的行
'''cat_col = ['regYear', 'province']
d = data_test[cat_col]
c = d['上海']
print(c)
ave_regYear = data_test[data_test['province'].isin('上海')].mean() # 获取该列的均值
print(ave_regYear)
data_test = data_test.fillna(ave_regYear) # 用该均值去填充缺失值
print(data_test)'''
# 随机抽样10%数据
# n抽取的行数,frac抽取的比列,replace=True时为有放回抽样,axis=0的时是抽取行,axis=1时是抽取列
sample_sales = data_sales.sample(frac=0.1, replace=True, axis=0)
sample_search = data_search.sample(frac=0.1, replace=True, axis=0) # frac=0.1
sample_user = data_user.sample(frac=0.1, replace=True, axis=0)
'''
# 求解正态分布、方差、均值、极大极小值
print('样本方差:')
print("\t销售量:{:.2f}".format(sample_sales['salesVolume'].var()))
print("\t搜索量:{:.2f}".format(sample_search['popularity'].var()))
print("\t评论量:{:.2f}".format(sample_user['carCommentVolum'].var()))
print("\t回复量:{:.2f}".format(sample_user['newsReplyVolum'].var()))
print('样本均值:')
print("\t销售量:{:.2f}".format(sample_sales['salesVolume'].mean()))
print("\t搜索量:{:.2f}".format(sample_search['popularity'].mean()))
print("\t评论量:{:.2f}".format(sample_user['carCommentVolum'].mean()))
print("\t回复量:{:.2f}".format(sample_user['newsReplyVolum'].mean()))
print('样本极值:')
print("\t销售量极大值:{0}, 极小值:{1}".format(data_sales['salesVolume'].max(), data_sales['salesVolume'].min()))
print("\t搜索量极大值:{0}, 极小值:{1}".format(data_search['popularity'].max(), data_search['popularity'].min()))
print("\t评论量极大值:{0}, 极小值:{1}".format(data_user['carCommentVolum'].max(), data_user['carCommentVolum'].min()))
print("\t回复量极大值:{0}, 极小值:{1}".format(data_user['newsReplyVolum'].max(), data_user['newsReplyVolum'].min()))'''
# #画正态分布图
'''graph_f_plot(data_sales['salesVolume'], -1000, 3000, 0.1, '销售量', '销售量正态分布图', '../result/data_sales_f.png')
graph_f_plot(data_search['popularity'], -5000, 20000, 1, '搜索量', '搜索量正态分布图', '../result/data_search_f.png')'''
'''
# 数据相关性,另确定一定的事故发生率导致的评论数上升,即(1 - 评论与销量相关性)/2
# #验证搜索量与销量的相关性,必须为同一车型,这里取车型31,抽取500个数据
data_sea_sales = pd.DataFrame({'搜索量': model1_search_list(data_search), '销售量': model1_sales_list(data_sales)})
sample_sea_sales = data_sea_sales.sample(n=500, replace=False, axis=0)
search_a1 = sample_sea_sales['搜索量']
sales_a1 = sample_sea_sales['销售量']
sea_rel_sales_rate = pier(sales_a1, search_a1)
print("搜索量与销售量的相关性:{}".format(sea_rel_sales_rate))
# #验证评论与回复量的相关性
data_com_rep = pd.DataFrame({'评论量': model1_com_list(data_user), '回复量': model1_rep_list(data_user)})
sample_com_rep = data_com_rep.sample(n=20, replace=False, axis=0)
com_c1 = sample_com_rep['评论量']
rep_c1 = sample_com_rep['回复量']
com_rel_rep_rate = pier(com_c1, rep_c1)
print("评论量与回复量的相关性:{}".format(com_rel_rep_rate))
accidence_rel_rate = (1 - sea_rel_sales_rate[0]) / 2
print("事故导致搜索量增加率:{:.4f}".format(accidence_rel_rate))
'''
|
from radical.ensemblemd import Kernel
from radical.ensemblemd import Pipeline
from radical.ensemblemd import EnsemblemdError
from radical.ensemblemd import SingleClusterEnvironment
#Used to register user defined kernels
from radical.ensemblemd.engine import get_engine
#Import our new kernel
from new_kernel import MyUserDefinedKernel
# Register the user-defined kernel with Ensemble MD Toolkit.
get_engine().add_kernel_plugin(MyUserDefinedKernel)
#Now carry on with your application as usual !
class Sleep(Pipeline):
    """A single-step pipeline whose only step runs the 'sleep' kernel."""

    def __init__(self, steps, instances):
        # BUG FIX: the argument was previously passed as 'instaces', a name
        # that does not exist, raising NameError at construction time.
        Pipeline.__init__(self, steps, instances)

    def step_1(self, instance):
        """This step sleeps for 10 seconds (the kernel's --interval argument)."""
        k = Kernel(name="sleep")
        k.arguments = ["--interval=10"]
        return k
# ------------------------------------------------------------------------------
# NOTE(review): this script uses Python 2 syntax (print statement,
# `except Exc, name`) and will not run under Python 3 without porting.
if __name__ == "__main__":
    try:
        # Create a new static execution context with one resource and a fixed
        # number of cores and runtime.
        cluster = SingleClusterEnvironment(
            resource="local.localhost",
            cores=1,
            walltime=15,
            username=None,
            project=None
        )
        # Allocate the resources.
        cluster.allocate()
        # Set the 'instances' of the pipeline to 16. This means that 16 instances
        # of each pipeline step are executed.
        #
        # Execution of the 16 pipeline instances can happen concurrently or
        # sequentially, depending on the resources (cores) available in the
        # SingleClusterEnvironment.
        sleep = Sleep(steps=1,instances=16)
        cluster.run(sleep)
        cluster.deallocate()
    except EnsemblemdError, er:
        print "Ensemble MD Toolkit Error: {0}".format(str(er))
        raise # Just raise the exception again to get the backtrace
|
import os
import tqdm
import pandas as pd
def load_records(path, results_fname='results', depth=1):
    """Load pickled result DataFrames under *path* and concatenate them.

    Parameters
    ----------
    path : str
        Directory containing the pickle(s).
    results_fname : str
        Base name of the pickle file; must be 'results' or 'meters'.
    depth : int
        0 -> read ``path/<results_fname>.pkl`` directly;
        1 -> read ``path/<subdir>/<results_fname>.pkl`` for every entry of *path*.

    Returns
    -------
    pandas.DataFrame
        All loaded records concatenated with a fresh index.

    Raises
    ------
    ValueError
        If *results_fname* or *depth* is invalid.
    """
    # Was an assert; asserts are stripped under `python -O`, so validate explicitly.
    if results_fname not in ('results', 'meters'):
        raise ValueError(f"results_fname must be 'results' or 'meters', got {results_fname!r}")

    records = []

    def add_record(results_path):
        """Append the pickle at results_path, silently skipping unreadable files."""
        try:
            records.append(pd.read_pickle(results_path))
        except IOError:
            # Deliberate best-effort: e.g. ignore a stray results.txt or missing file.
            pass

    if depth == 0:
        add_record(os.path.join(path, f'{results_fname}.pkl'))
    elif depth == 1:
        # The original wrapped enumerate() here but never used the index.
        for subdir in tqdm.tqdm(os.listdir(path), ncols=80, leave=False):
            add_record(os.path.join(path, subdir, f'{results_fname}.pkl'))
    else:
        raise ValueError(f'Depth {depth} is invalid.')
    return pd.concat(records, ignore_index=True)
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
import subprocess
import time
from collections import Counter, defaultdict
from datetime import datetime, timedelta
from enum import Enum
from random import choice
import numpy as np
import requests
from retry import retry
from metadata.backend.interface import RawBackendType
from metadata.exc import StateManageError
from metadata.runtime import rt_context, rt_g
from metadata.state.state import BackendHAState, CommonState
from metadata.util.common import StrictABCMeta
from metadata.util.i18n import lazy_selfish as _
class StateManager(object, metaclass=StrictABCMeta):
    """Base class for state-manipulation helpers.

    Acts as a context manager that serialises access through the shared
    ``state_manager_lock`` on the state object.
    """

    __abstract__ = True

    def __init__(self, state, check_cnt=None, config_collection=None):
        """
        :param state: shared state object exposing locks and dict-style access
        :param check_cnt: number of 0.1s polling ticks for lock/barrier waits (default 20)
        :param config_collection: overrides rt_context.config_collection when provided
        """
        self.state = state
        self.check_cnt = check_cnt or 20
        self.logger = logging.getLogger(__name__)
        self.config_collection = config_collection if config_collection else rt_context.config_collection
        self.normal_conf = self.config_collection.normal_config
        self.logging_conf = self.config_collection.logging_config

    def __enter__(self):
        # Wait up to 0.1 * check_cnt seconds for the exclusive manager lock.
        self.state.state_manager_lock.acquire(timeout=0.1 * self.check_cnt)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.state.state_manager_lock.release()
class BackendSwitch(StateManager):
    """Swap the master/slave roles of an HA backend pair (Dgraph by default).

    Entering the context requires both clusters to be in the ON state;
    leaving it restores both HA flags to ON regardless of the outcome.
    """

    def __init__(self, state, backend_type=RawBackendType.DGRAPH, check_cnt=20):
        super(BackendSwitch, self).__init__(state, check_cnt)
        self.backend_type = backend_type
        self.master_backend_id, self.slave_backend_id = None, None

    def __enter__(self):
        # Only allow the switch when both master and slave are ON.
        if (
            self.state['backends_ha_state'][self.backend_type.value]['master']['state'] == BackendHAState.ON.value
            and self.state['backends_ha_state'][self.backend_type.value]['slave']['state'] == BackendHAState.ON.value
        ):
            return super(BackendSwitch, self).__enter__()
        else:
            raise StateManageError(
                _('The backends ha state {} is not allowed to switch.'.format(self.state['backends_ha_state']))
            )

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always restore both HA flags to ON before releasing the manager lock.
        backends_ha_state = self.state['backends_ha_state']
        backends_ha_state[self.backend_type.value]['master']['state'] = BackendHAState.ON.value
        backends_ha_state[self.backend_type.value]['slave']['state'] = BackendHAState.ON.value
        self.state['backends_ha_state'] = backends_ha_state
        super(BackendSwitch, self).__exit__(exc_type, exc_val, exc_tb)

    def switch(self):
        """Perform the full master/slave switch, raising StateManageError on failure."""
        self.master_backend_id, self.slave_backend_id = (
            self.state['backends_ha_state'][self.backend_type.value]['master']['id'],
            self.state['backends_ha_state'][self.backend_type.value]['slave']['id'],
        )
        if self.switch_master():
            if self.switch_slave():
                # Swap the master/slave roles in backends_ha_state
                backends_ha_state = self.state['backends_ha_state']
                backends_ha_state[self.backend_type.value]['master']['id'] = self.slave_backend_id
                backends_ha_state[self.backend_type.value]['slave']['id'] = self.master_backend_id
                self.state['backends_ha_state'] = backends_ha_state
                # Swap the master/slave roles in backend_configs
                backend_configs = self.state['backend_configs']
                backend_configs[self.backend_type.value][self.slave_backend_id]['ha_state'] = 'master'
                backend_configs[self.backend_type.value][self.master_backend_id]['ha_state'] = 'slave'
                self.state['backend_configs'] = backend_configs
            else:
                raise StateManageError(_('Fail to switch slave {} backend.'.format(self.backend_type.value)))
        else:
            raise StateManageError(_('Fail to switch master {} backend.'.format(self.backend_type.value)))

    def switch_master(self):
        """Move the master to SWITCHING once no interactor commit is in flight.

        Polls up to check_cnt times, 0.1s apart; returns True on success.
        """
        backends_ha_state = self.state['backends_ha_state']
        backends_ha_state[self.backend_type.value]['master']['state'] = BackendHAState.SWITCH_PREPARE.value
        self.state['backends_ha_state'] = backends_ha_state
        for i in range(self.check_cnt):
            if not self.state.interactor_commit_lock.contenders():
                backends_ha_state[self.backend_type.value]['master']['state'] = BackendHAState.SWITCHING.value
                self.state['backends_ha_state'] = backends_ha_state
                return True
            time.sleep(0.1)
        return False

    def switch_slave(self):
        """Move the slave to SWITCHING after the replay barrier has been passed.

        Returns True on success, False if the barrier wait times out.
        """
        backends_ha_state = self.state['backends_ha_state']
        backends_ha_state[self.backend_type.value]['slave']['state'] = BackendHAState.SWITCH_PREPARE.value
        self.state['backends_ha_state'] = backends_ha_state
        self.state.replay_switch_barrier.create()
        ret = self.state.replay_switch_barrier.wait(0.1 * self.check_cnt)
        if ret:
            backends_ha_state[self.backend_type.value]['slave']['state'] = BackendHAState.SWITCHING.value
            self.state['backends_ha_state'] = backends_ha_state
            return True
        return False
class DgraphBESwitch(StateManager):
    """Toggle Dgraph's BestEffort (BE) mode flag on the current master backend."""

    def turn(self, status=CommonState.ON.value):
        """Set the master backend's 'be' flag to *status* and persist the config."""
        dgraph = RawBackendType.DGRAPH.value
        master_id = self.state['backends_ha_state'][dgraph]['master']['id']
        configs = self.state['backend_configs']
        configs[dgraph][master_id]['be'] = status
        self.state['backend_configs'] = configs
        self.logger.info('BE status is turned. The status now is {}'.format(self.state['backend_configs']))
class RecoverDgraphCluster(StateManager):
    """Reset a Dgraph cluster's HA flag from CHAOTIC back to ON."""

    def __init__(self, state, backend_type=RawBackendType.DGRAPH, check_cnt=20):
        super(RecoverDgraphCluster, self).__init__(state, check_cnt)
        self.backend_type = backend_type

    def recover(self, name='slave'):
        """Mark the *name* ('master' or 'slave') cluster ON if it is currently CHAOTIC."""
        backends_ha_state = self.state['backends_ha_state']
        if backends_ha_state[self.backend_type.value][name]['state'] == BackendHAState.CHAOTIC.value:
            backends_ha_state[self.backend_type.value][name]['state'] = BackendHAState.ON.value
            self.state['backends_ha_state'] = backends_ha_state
            # BUG FIX: the log message previously hard-coded 'slave' even when
            # recovering the master cluster.
            self.logger.info('the state of {} cluster is recovered: {}'.format(name, self.state['backends_ha_state']))
class DgraphVersionManager(StateManager):
    """Read/write the version tag ('ver') of Dgraph backends kept in shared state."""

    def __init__(self, state, backend_type=RawBackendType.DGRAPH, check_cnt=20):
        super(DgraphVersionManager, self).__init__(state, check_cnt)
        self.backend_type = backend_type
        self.master_backend_id, self.slave_backend_id = None, None

    def get_backend_version(self, backend_id=None):
        """Return the stored version of *backend_id* (None when the tag is unset).

        Raises StateManageError when the backend id is missing or unknown.
        """
        if backend_id:
            backend_configs = self.state['backend_configs']
            target_backend = backend_configs[self.backend_type.value].get(backend_id, None)
            if target_backend:
                return target_backend.get('ver', None)
        # BUG FIX: the error object was constructed here but never raised,
        # so invalid ids silently returned None.
        raise StateManageError(_('fail to get backend version, because {} is invalid.'.format(backend_id)))

    def set_backend_version(self, backend_id=None, ver=None):
        """Set the version tag of *backend_id*; it must be the current master or slave.

        Returns True on success; raises StateManageError for an invalid id/version.
        """
        self.master_backend_id, self.slave_backend_id = (
            self.state['backends_ha_state'][self.backend_type.value]['master']['id'],
            self.state['backends_ha_state'][self.backend_type.value]['slave']['id'],
        )
        if backend_id and ver and backend_id in (self.master_backend_id, self.slave_backend_id):
            backend_configs = self.state['backend_configs']
            backend_configs[self.backend_type.value][backend_id]['ver'] = ver
            self.state['backend_configs'] = backend_configs
        else:
            raise StateManageError(
                _(
                    'fail to set backend version, because {} is invalid. should be one of[{}, {}]'.format(
                        backend_id, self.master_backend_id, self.slave_backend_id
                    )
                )
            )
        return True
class NodeHealthy(Enum):
    """Possible outcomes of a backend-node health evaluation.

    HEALTHY / NOT_HEALTHY deliberately carry boolean member values; the two
    string members are sentinels for "not enough cached samples yet" and
    "could not fetch the node's metrics at all".
    """
    STATUS_NOT_ENOUGH = 'status_not_enough'
    FAIL_TO_GET_STATUS = 'fail_to_get_status'
    NOT_HEALTHY = False
    HEALTHY = True
class BackendNodesHealthChecker(StateManager):
    """Collect, cache and evaluate health metrics of Dgraph backend nodes.

    Health samples are cached process-wide in
    ``rt_g.backend_nodes_health_status`` as
    ``{'dgraph': {node_url: {sample_time: metrics_or_sentinel}}}`` so that
    successive checks can reason over a per-node time series.
    """

    def __init__(self, *args, **kwargs):
        super(BackendNodesHealthChecker, self).__init__(*args, **kwargs)
        # Lazily create the shared sample cache on first instantiation.
        if not getattr(rt_context, 'backend_nodes_health_status', None):
            rt_g.backend_nodes_health_status = {'dgraph': defaultdict(dict)}
        self.backend_nodes_health_status = rt_g.backend_nodes_health_status
        self.last_maintain_time = None  # when maintain_status() last ran
        self.last_get_time = None  # timestamp of the most recent sample

    def get_status(self, backend_type=RawBackendType.DGRAPH):
        """
        Fetch the current status of every configured backend node and cache it.
        :param backend_type: cluster type (only DGRAPH is supported)
        :return:
        """
        if backend_type is not RawBackendType.DGRAPH:
            raise NotImplementedError('This manager only support dgraph now.')
        backend_type = backend_type.value
        ha_state, config, master_config, slave_config = self.get_config_in_state(backend_type)
        self.logger.info(
            {'backends_ha_state': ha_state, 'metric_type': 'backend_config', 'backend_configs': config},
            extra={'metric': True},
        )
        servers = set()
        # Emit one metric record per configured node (active and off lists alike).
        for n, config_to_detect in enumerate([master_config, slave_config]):
            for k in ['SERVERS', 'OFF_SERVERS']:
                for item in config_to_detect.get(k, []):
                    self.logger.info(
                        {
                            'node': item,
                            'backend_type': 'dgraph',
                            'metric_type': 'backend_healthy_config',
                            'config_healthy_status': k,
                            'config_ha_status': 'master' if n == 0 else 'slave',
                        },
                        extra={'metric': True},
                    )
                    servers.add(item)
        for server in servers:
            status, get_time = self.get_dgraph_node_status(server)
            self.backend_nodes_health_status['dgraph'][server][get_time] = status
            self.last_get_time = get_time

    def maintain_status(self):
        """
        Prune the cached node status series, both by length and by age.
        :return:
        """
        self.last_maintain_time = datetime.now()
        for server, series in self.backend_nodes_health_status['dgraph'].items():
            # Newest samples first.
            sorted_series = sorted(iter(series.items()), key=lambda item: item[0], reverse=True)
            for n, (get_time, status) in enumerate(sorted_series):
                if n > self.normal_conf.BACKEND_STATUS_CACHE_LENGTH:
                    series.pop(get_time)
                elif self.last_maintain_time - get_time >= timedelta(
                    seconds=self.normal_conf.BACKEND_STATUS_CACHE_TIME
                ):
                    series.pop(get_time)

    def get_dgraph_node_status(self, node_url):
        """
        Fetch the prometheus metrics of one dgraph node.
        :param node_url: node url
        :return: (metrics dict, sample time) on success, or
                 (NodeHealthy.FAIL_TO_GET_STATUS, sample time) on failure
        """
        @retry(tries=3, delay=0.1, backoff=2)
        def _check():
            # Labelled series that must additionally be summed into one
            # aggregate key ('dgraph_num_queries_total').
            require_metrics_mapping = {
                'dgraph_num_queries_total{method="Server.Mutate",status=""}': 'dgraph_num_queries_total',
                'dgraph_num_queries_total{method="Server.Query",status=""}': 'dgraph_num_queries_total',
            }
            r = requests.get(node_url + '/debug/prometheus_metrics', timeout=3)
            r.raise_for_status()
            metrics_dict = dict()
            lines = r.text.split('\n')
            for line in lines:
                # Skip prometheus comment/help lines.
                if line.startswith('#'):
                    continue
                if ' ' in line:
                    metric_key, metric_val = line.split(' ')
                    metrics_dict[metric_key] = float(metric_val)
                    if metric_key in require_metrics_mapping:
                        upper_metric_key = require_metrics_mapping[metric_key]
                        if upper_metric_key not in metrics_dict:
                            metrics_dict[upper_metric_key] = 0
                        metrics_dict[upper_metric_key] += float(metric_val)
            return metrics_dict, datetime.now()
        try:
            return _check()
        except Exception:
            self.logger.exception('Fail to check dgraph health.')
            return NodeHealthy.FAIL_TO_GET_STATUS, datetime.now()

    def get_config_in_state(self, backend_type_value):
        """
        Resolve the HA state and master/slave node configs from shared state.
        :param backend_type_value: cluster type value
        :return: (ha_state, config, master_config, slave_config)
        """
        ha_state, config = self.state['backends_ha_state'], self.state['backend_configs']
        master_id = ha_state[backend_type_value]['master']['id']
        slave_id = ha_state[backend_type_value]['slave']['id']
        master_config = config[backend_type_value][master_id]['config']
        slave_config = config[backend_type_value][slave_id]['config']
        return ha_state, config, master_config, slave_config

    def check(self, backend_type=RawBackendType.DGRAPH):
        """
        Evaluate node health for both clusters and compute config changes.
        :param backend_type: cluster type
        :return: (whether the node state changed, full config,
                  changed master config to warn about, or None)
        """
        if backend_type is not RawBackendType.DGRAPH:
            raise NotImplementedError('This manager only support dgraph now.')
        ha_state, config, master_config, slave_config = self.get_config_in_state(backend_type.value)
        nodes_state_change = False
        not_enough = False
        online_warning_config = None
        for (env, config_to_detect) in list({'master': master_config, 'slave': slave_config}.items()):
            active_servers = set()
            off_services = set()
            for k in ['SERVERS', 'OFF_SERVERS']:
                for node_url in config_to_detect.get(k, []):
                    # A node currently listed in SERVERS is assumed healthy,
                    # one in OFF_SERVERS is assumed not.
                    state_now = NodeHealthy.HEALTHY if k == 'SERVERS' else NodeHealthy.NOT_HEALTHY
                    ret, node_static = self.check_dgraph_node_health(node_url, state_now=state_now)
                    if isinstance(node_static, dict):
                        # Drop the bulkiest metric payloads before logging.
                        for k_ in ['dgraph_predicate_stats', 'memstats']:
                            if k_ in node_static:
                                node_static.pop(k_)
                        self.logger.info(
                            {
                                'node_url': node_url,
                                'backend_type': 'dgraph',
                                'metric_type': 'backend_node_static',
                                'static': node_static,
                            },
                            extra={'output_metric': True},
                        )
                    if ret is NodeHealthy.HEALTHY:
                        active_servers.add(node_url)
                    elif ret is NodeHealthy.STATUS_NOT_ENOUGH:
                        # NOTE(review): not_enough is shared across master and
                        # slave iterations; one starved node suppresses config
                        # changes for both clusters — confirm this is intended.
                        not_enough = True
                        self.logger.warning('Status series of node {} is not enough.'.format(node_url))
                    else:
                        self.logger.info(
                            {
                                'node_url': node_url,
                                'backend_type': 'dgraph',
                                'metric_type': 'backend_unhealthy',
                                'static': node_static,
                                'status': ret,
                            },
                            extra={'output_metric': True},
                        )
                        off_services.add(node_url)
            # Never leave a cluster with zero active nodes: re-enable a random one.
            if len(active_servers) == 0 and not not_enough:
                item = choice(list(off_services))
                active_servers.add(item)
                off_services.remove(item)
            if active_servers != set(config_to_detect['SERVERS']) and not not_enough:
                config_to_detect['SERVERS'] = list(active_servers)
                config_to_detect['OFF_SERVERS'] = list(off_services)
                nodes_state_change = True
            if env == 'master' and nodes_state_change:
                online_warning_config = config_to_detect
        return nodes_state_change, config, online_warning_config

    def maintain(self, config, warning=None):
        """
        Persist the configuration changes reported by ``check``.
        :param config: changed config
        :param warning: warning payload
        :return:
        """
        self.state['backend_configs'] = config
        self.logger.info('Backend configs is changing, the status is {}'.format(config))
        self.logger.info(
            {
                'config': config,
                'backend_type': 'dgraph',
                'metric_type': 'maintain_unhealthy_backend',
            },
            extra={'metric': True},
        )
        if warning is not None:
            self.state['backend_warning'] = warning

    def check_dgraph_node_health(self, node_url, state_now=NodeHealthy.HEALTHY):
        """
        Health-evaluation logic for one Dgraph backend node.
        :param node_url: node url
        :param state_now: state the node currently holds in the config
        :return: (NodeHealthy verdict, latest metrics sample or None)
        """
        series = self.backend_nodes_health_status['dgraph'][node_url]
        range_cnt = self.normal_conf.BACKEND_STATUS_DETECT_RANGE
        # Unreachable-node handling:
        # [Logic] count recent failed health fetches; if there was more than one
        # recent failure, or the newest sample is a failure, report FAIL_TO_GET_STATUS.
        sorted_series = sorted(iter(series.items()), key=lambda item: item[0], reverse=True)
        last_fail_get_counter = Counter(
            s for t, s in sorted_series[: range_cnt * 2] if s in (NodeHealthy.FAIL_TO_GET_STATUS,)
        )
        if (
            last_fail_get_counter[NodeHealthy.FAIL_TO_GET_STATUS] > 1
            or sorted_series[0][1] == NodeHealthy.FAIL_TO_GET_STATUS
        ):
            return NodeHealthy.FAIL_TO_GET_STATUS, None
        # Abnormal-node handling:
        # [Logic] not enough valid samples yet -> STATUS_NOT_ENOUGH.
        sorted_gotten_series = sorted(
            ((k, v) for k, v in series.items() if v is not NodeHealthy.FAIL_TO_GET_STATUS),
            key=lambda item: item[0],
            reverse=True,
        )
        if len(sorted_gotten_series) < range_cnt * 2 + 1:
            return NodeHealthy.STATUS_NOT_ENOUGH, sorted_series[0][1]
        # Recovery policy for a failed node:
        # [Logic] while the node is marked unhealthy, keep it off as long as its
        # pending-query count stays above the recovery threshold.
        if state_now is NodeHealthy.NOT_HEALTHY:
            if (
                sorted_gotten_series[0][1]['dgraph_pending_queries_total']
                > self.normal_conf.BACKEND_NODE_RECOVERY_PENDING_QUERIES_CNT
            ):
                return NodeHealthy.NOT_HEALTHY, sorted_gotten_series[0][1]
        # Failure detection:
        # [Logic] compare the mean per-sample query throughput of the current
        # window with the previous one; if throughput dropped by more than the
        # configured percentage while pending queries remain above the
        # configured count, declare the node NOT_HEALTHY.
        concurrent_query_num_series = [
            s['dgraph_num_queries_total'] - sorted_gotten_series[n + 1][1]['dgraph_num_queries_total']
            for n, (t, s) in enumerate(sorted_gotten_series)
            if n < len(sorted_gotten_series) - 1
        ]
        average_concurrent_nums_recently = np.average(concurrent_query_num_series[range_cnt : range_cnt * 2])
        average_concurrent_nums_now = np.average(concurrent_query_num_series[0:range_cnt])
        self.logger.info(
            {
                'recently': average_concurrent_nums_recently,
                'now': average_concurrent_nums_now,
                'backend_type': 'dgraph',
                'metric_type': 'backend_concurrent_queries',
                'node': node_url,
            },
            extra={'metric': True},
        )
        if average_concurrent_nums_recently:
            current_query_num_dropped = (
                average_concurrent_nums_recently - average_concurrent_nums_now
            ) / average_concurrent_nums_recently > self.normal_conf.BACKEND_QUERY_DROP_PERCENT
        else:
            # No recent traffic at all also counts as a drop.
            current_query_num_dropped = True
        if (
            current_query_num_dropped
            and sorted_gotten_series[0][1]['dgraph_pending_queries_total']
            >= self.normal_conf.BACKEND_NODE_PENDING_QUERIES_CNT
        ):
            return NodeHealthy.NOT_HEALTHY, sorted_gotten_series[0][1]
        # Node is fine.
        return NodeHealthy.HEALTHY, sorted_gotten_series[0][1]
class ServiceLoadChecker(StateManager):
    """
    Load detection for the metadata service.

    Parses the RPC server access log over a short sliding window, summarizes
    per-query-type success/failure counts and durations, and derives request
    filtering rules when the service looks overloaded.
    """

    def __init__(self, *args, **kwargs):
        super(ServiceLoadChecker, self).__init__(*args, **kwargs)
        self.config_collection = rt_context.config_collection
        self.normal_conf = self.config_collection.normal_config
        self.db_conf = self.config_collection.db_config
        self.logger = logging.getLogger(__name__)
        # Maps query-type name -> list of RPC method names belonging to it.
        self.summarize_method = self.normal_conf.SUMMARIZE_METHOD

    def statistic(self, stat_win=5, delay=2):
        """
        Check the server load according to the configured policy.
        :param stat_win: window size in seconds (default 5s)
        :param delay: window delay in seconds (default 2s)
        :return: tuple (boolean{whether filtering should trigger}, dict{load info})
        """
        now = datetime.now().replace(microsecond=0)
        load_info = dict(info={}, rules={})
        try:
            nearest_summaries = self.get_nearest_summaries(now, stat_win, delay)
            nearest_metric = self.get_nearest_metric(nearest_summaries, stat_win)
            rules = self.get_filter_rules(nearest_metric)
            load_info['info'] = nearest_metric
            load_info['rules'] = rules
        except Exception as e:
            self.logger.error('get load info failed, detail: {}'.format(e))
            return False, None
        # Filtering triggers whenever at least one rule fired.
        filter_flag = bool(rules)
        return filter_flag, load_info

    def get_nearest_summaries(self, now, stat_win=5, delay=2):
        """
        Summarise the most recent access-log records.
        :param now: current time
        :param stat_win: statistics window, at most 10 (s)
        :param delay: window delay (s)
        :return: dict of per-query-type / per-r_state statistics
        """
        now = now - timedelta(seconds=delay)
        stat_win = 10 if stat_win > 10 else stat_win
        min_prefix_set = set()
        sec_range_dict = dict()
        cmd_list = []
        # grep+perl pipeline extracting (thread, timestamp, pid, method, cost,
        # r_state) tuples from the RPC server access log for one second range.
        cmd_proto = (
            "grep \"{grep_pattern}\" {log_path} | grep '\"msg\": \"Executed' | grep '\"level\": \"INFO\"' | "
            "perl -ne 'print \"$1\\t$2\\t$3\\t$4\\t$5\\t$6\\n\" "
            "if /\"thread_name\": \"([^\"]+)\".+\"timestamp\": \"({perl_pattern}).+"
            "\"process_id\": \"([\\d]+)\".+Executed RPC Call ([\\S]+) in ([\\.0-9]+) with r_state ([^,]+)/'"
        )
        rpc_server_log_path = self.logging_conf.file_path.replace(
            self.logging_conf.file_name, 'metadata_access_rpc_server.common.log'
        )
        # Group the window's seconds by minute prefix, remembering the second
        # range covered within each minute (iterating backwards in time).
        for i in range(stat_win):
            stat_time = now - timedelta(seconds=i)
            min_prefix = stat_time.strftime('%Y-%m-%dT%H:%M')
            sec_range = stat_time.strftime('%S')
            min_prefix_set.add(min_prefix)
            if min_prefix not in sec_range_dict:
                sec_range_dict[min_prefix] = dict(sec_from=sec_range, sec_to=sec_range)
            else:
                sec_range_dict[min_prefix]['sec_from'] = sec_range
        # Build one grep command per minute with a character class matching the
        # covered seconds, splitting at the tens boundary when necessary.
        for min_prefix in sorted(min_prefix_set):
            sec_from = sec_range_dict[min_prefix]['sec_from']
            sec_to = sec_range_dict[min_prefix]['sec_to']
            from_tens, from_unit = sec_from[0:1], sec_from[1:2]
            to_tens, to_unit = sec_to[0:1], sec_to[1:2]
            if from_tens == to_tens:
                tens_str = to_tens
                unit_str = to_unit if from_unit == to_unit else '[{}-{}]'.format(from_unit, to_unit)
                cmd_list.append(
                    cmd_proto.format(
                        grep_pattern=min_prefix,
                        perl_pattern='{}:{}{}'.format(min_prefix, tens_str, unit_str),
                        log_path=rpc_server_log_path,
                    )
                )
            else:
                # Lower part of the range: sec_from up to x9.
                tens_str = from_tens
                unit_str = from_unit if from_unit == '9' else '[{}-{}]'.format(from_unit, 9)
                cmd_list.append(
                    cmd_proto.format(
                        grep_pattern=min_prefix,
                        perl_pattern='{}:{}{}'.format(min_prefix, tens_str, unit_str),
                        log_path=rpc_server_log_path,
                    )
                )
                # Upper part of the range: x0 up to sec_to.
                if int(sec_to) > int(sec_from):
                    tens_str = to_tens
                    unit_str = to_unit if to_unit == '0' else '[{}-{}]'.format(0, to_unit)
                    cmd_list.append(
                        cmd_proto.format(
                            grep_pattern=min_prefix,
                            perl_pattern='{}:{}{}'.format(min_prefix, tens_str, unit_str),
                            log_path=rpc_server_log_path,
                        )
                    )
        # Run the commands and aggregate counts/durations per query type and r_state.
        nearest_summaries = dict()
        for cmd in cmd_list:
            status, capture = subprocess.getstatusoutput(cmd)
            if int(status) == 0:
                capture_lines = capture.split("\n")
                for line in capture_lines:
                    if not line:
                        continue
                    c_name, tm_str, p_id, method_name, cost, r_state = line.split("\t")
                    query_type = None
                    for query_type_name, query_item_list in list(self.summarize_method.items()):
                        if method_name in query_item_list:
                            query_type = query_type_name
                            break
                    if not query_type:
                        continue
                    if query_type not in nearest_summaries:
                        nearest_summaries[query_type] = dict()
                    if r_state not in nearest_summaries[query_type]:
                        nearest_summaries[query_type][r_state] = dict(cnt=0, dur_ms=0)
                    nearest_summaries[query_type][r_state]['cnt'] += 1
                    nearest_summaries[query_type][r_state]['dur_ms'] += int(float(cost) * 1000)
        return nearest_summaries

    @staticmethod
    def get_nearest_metric(nearest_summaries, stat_win=5):
        """
        Compute per-query-type metrics from the raw summaries.
        :param nearest_summaries: statistics detail
        :param stat_win: statistics window
        :return: dict of metrics
        """
        metric_info = dict()
        for query_type, item in list(nearest_summaries.items()):
            if not metric_info.get(query_type, {}):
                metric_info[query_type] = dict()
            metric_info[query_type]['succ_cnt'] = int(item.get('success', {}).get('cnt', 0))
            metric_info[query_type]['fail_cnt'] = int(item.get('fail', {}).get('cnt', 0))
            metric_info[query_type]['tot_cnt'] = int(
                metric_info[query_type]['succ_cnt'] + metric_info[query_type]['fail_cnt']
            )
            metric_info[query_type]['succ_qps'] = int(metric_info[query_type]['succ_cnt'] / stat_win)
            metric_info[query_type]['fail_qps'] = int(metric_info[query_type]['fail_cnt'] / stat_win)
            # +1 on every divisor to guard against division by zero.
            metric_info[query_type]['succ_dur_ms'] = round(
                float(item.get('success', {}).get('dur_ms', 0) / (metric_info[query_type]['succ_cnt'] + 1)), 2
            )
            # BUG FIX: the average failure duration was divided by the success
            # count (copy-paste error); it must be divided by the fail count.
            metric_info[query_type]['fail_dur_ms'] = round(
                float(item.get('fail', {}).get('dur_ms', 0) / (metric_info[query_type]['fail_cnt'] + 1)), 2
            )
            metric_info[query_type]['fail_rate'] = 100 * round(
                float(
                    metric_info[query_type]['fail_cnt']
                    / (metric_info[query_type]['succ_cnt'] + metric_info[query_type]['fail_cnt'] + 1)
                ),
                2,
            )
        return metric_info

    def get_filter_rules(self, metric_info):
        """
        Derive filtering rules from the load metrics.
        :param metric_info: metric info
        :return: filtering rules
        """
        warning_conf = self.normal_conf.FILTER_METRIC_WARNING_CONF
        rules = dict()
        for query_type in list(warning_conf.keys()):
            metric_item = metric_info.get(query_type, {})
            if metric_item and metric_item.get('tot_cnt', 0) > warning_conf[query_type]['enable_cnt']:
                # Failure-rate check
                if (
                    metric_item.get('fail_cnt', 0) >= warning_conf[query_type]['fail_cnt']
                    and metric_item.get('fail_rate', 0) >= warning_conf[query_type]['fail_rate']
                ):
                    filter_rate = round(metric_item['fail_rate'] * warning_conf[query_type]['rate_factor'], 2)
                    # Cap the filter rate at the configured limit.
                    filter_rate = (
                        filter_rate
                        if filter_rate < warning_conf[query_type]['filter_limit']
                        else warning_conf[query_type]['filter_limit']
                    )
                    if query_type not in rules:
                        rules[query_type] = {}
                    rules[query_type]['fail_rate_check'] = {
                        'rate': filter_rate,
                        'metric': {'fail_rate': metric_item['fail_rate'], 'fail_cnt': metric_item['fail_cnt']},
                    }
            # Average-duration check
            if metric_item and metric_item.get('succ_dur_ms', 0) > warning_conf[query_type]['cost_time_ms']:
                filter_rate = round(
                    (metric_item['succ_dur_ms'] - warning_conf[query_type]['cost_time_ms'])
                    / metric_item['succ_dur_ms'],
                    2,
                )
                filter_rate = (
                    filter_rate
                    if filter_rate < warning_conf[query_type]['filter_limit']
                    else warning_conf[query_type]['filter_limit']
                )
                if query_type not in rules:
                    rules[query_type] = {}
                rules[query_type]['avg_dur_check'] = {
                    'rate': filter_rate,
                    'metric': {'succ_dur_ms': metric_item['succ_dur_ms']},
                }
        return rules

    def maintain(self, load_info=None):
        """
        Persist the load info to zk.
        :param load_info: load info
        :return: None
        """
        if isinstance(load_info, dict):
            self.state['service_load_info'] = load_info
            self.logger.info('service load info is changed, detail:{}'.format(load_info))
class ServiceSwitch(StateManager):
    """Manage the service's feature switches.

    Managed feature scope: shunt (traffic splitting); filter (rate limiting);
    kafka (data synchronisation).
    """

    def switch(self, service_name, sub_switch_name, switch_value=False):
        """Set one sub-switch of a service.

        :param service_name: top-level switch group, must be configured
        :param sub_switch_name: switch within the group, must be configured
        :param switch_value: value to store (default False)
        :return: True when the switch was applied, False for unknown names
        """
        switch_conf = self.normal_conf.SERVICE_SWITCHES
        # Reject names that are not declared in the configuration.
        if service_name not in switch_conf.keys():
            return False
        if sub_switch_name not in switch_conf[service_name].keys():
            return False
        service_switches = self.state['service_switches']
        service_switches.setdefault(service_name, dict())[sub_switch_name] = switch_value
        self.state['service_switches'] = service_switches
        self.logger.info('service switch status is turned. The status now is {}'.format(self.state['service_switches']))
        return True
|
from core.base_page import BasePage
class SwaggerPage(BasePage):
    """
    SwaggerPage logic
    """
    templates_dir = "core/sys_modules/swagger/"
    template = "Swagger_template.html"
    group_access = ['ADMIN', 'DEVELOPER']

    def get_data(self, req):
        """Build the template context for the swagger page.

        Args:
            req: request object (not used by this page)

        Returns:
            dict with the page title and welcome text
        """
        return {
            "title": "PyStack framework",
            "text": "Welcome to PyStack!",
        }
|
# coding=utf-8
from django.contrib.sitemaps import Sitemap
from blog.models import Entry
class BlogSitemap(Sitemap):
    """Sitemap over all publicly visible blog entries."""
    changefreq = "daily"
    priority = 0.5

    def items(self):
        # Only entries marked public are exposed in the sitemap.
        return Entry.objects.filter(public=True)

    def lastmod(self, obj):
        # Per-entry <lastmod> value: the entry's modification time.
        return obj.modified
|
# -*- coding: utf-8 -*-
# 09 April 2020
# Nicolò Stevanato - Politecnico di Milano - Fondazione Eni Enrico Mattei
#%% Definition of the inputs
from core import User, np
User_list = []
'''
This File contains the average appliances that charcterize a Primary School in Kenya, according to PoliMi field campaign - 2020
'''
#Create new user classes
School = User("school",10)
User_list.append(School)
#Create new appliances
S_indoor_bulb = School.Appliance(School,90,12,2,360,0.2,120,wd_we_type = 0)
S_indoor_bulb.windows([420,600],[840,1020],0.2)
S_indoor_tubes = School.Appliance(School,130,30,2,360,0.2,120,wd_we_type = 0)
S_indoor_tubes.windows([420,600],[840,1020],0.2)
S_outdoor_bulb = School.Appliance(School,10,40,2,720,0,720, flat = 'yes')
S_outdoor_bulb.windows([0,360],[1080,1440],0)
S_Phone_charger = School.Appliance(School,14,7,2,300,0.2,60,wd_we_type = 0)
S_Phone_charger.windows([480,720],[840,1020],0.35)
S_PC = School.Appliance(School,50,100,2,180,0.2,120, occasional_use = 0.7,wd_we_type = 0)
S_PC.windows([600,720],[840,1020],0.35)
S_PC = School.Appliance(School,1,100,1,540,0.2,360,wd_we_type = 0)
S_PC.windows([480,1020],[0,0],0.2)
S_laptop = School.Appliance(School,10,65,1,240,0.2,120,wd_we_type = 0)
S_laptop.windows([480,990],[0,0],0.2)
S_printer = School.Appliance(School,1,40,2,30,0.2,1,wd_we_type = 0)
S_printer.windows([600,720],[840,1020],0.35)
S_photocopy = School.Appliance(School,1,400,2,30,0.2,1,wd_we_type = 0)
S_photocopy.windows([600,720],[840,1020],0.35)
S_projector = School.Appliance(School,6,250,2,120,0.2,60, occasional_use = 0.7,wd_we_type = 0)
S_projector.windows([600,720],[840,1020],0.35)
S_tablets = School.Appliance(School,150,10,2,120,0.2,60, occasional_use = 0.7,wd_we_type = 0)
S_tablets.windows([600,720],[840,1020],0.35)
S_TV = School.Appliance(School,2,60,2,120,0.2,30, occasional_use = 0.7,wd_we_type = 0)
S_TV.windows([600,720],[840,1020],0.35)
S_radio = School.Appliance(School,4,5,1,90,0.2,60,wd_we_type = 0)
S_radio.windows([720,840],[0,0],0.35)
S_router = School.Appliance(School,2,6,1,1440,0,1440, flat = 'yes')
S_router.windows([0,1440],[0,0],0)
S_pump = School.Appliance(School,1,750,1,120,0.1,30,wd_we_type = 0)
S_pump.windows([360,480],[0,0],0.2)
# S_heater = School.Appliance(School,2,1000,1,30,0.3,10, thermal_P_var = 0.3, wd_we_type = 0)
# S_heater.windows([480,600],[0,0],0.35)
|
import os
import torch
import numpy as np
from torch.utils.data import Dataset
from glob import glob
from copy import deepcopy
import data_util
import matplotlib.pyplot as plt
class NovelViewTriplets():
    """Dataset of (two target views, one nearest-neighbour view) triplets.

    Scans ``root_dir`` for per-scene directories containing an ``rgb`` image
    folder and a ``pose`` folder of text files, optionally subsamples the
    trajectories, buffers every view in memory, and precomputes a
    nearest-neighbour ranking over the camera poses.
    """

    def __init__(self,
                 root_dir,
                 img_size,
                 sampling_pattern):
        super().__init__()
        self.img_size = img_size
        # One sub-directory of root_dir per scene.
        images_dir = [os.path.join(root_dir, o) for o in os.listdir(root_dir)
                      if os.path.isdir(os.path.join(root_dir,o))]
        self.color_dir = {}
        self.pose_dir = {}
        self.all_color = {}
        self.all_poses = {}
        self.all_views = {}
        self.nn_idcs = {}
        for image_dir in images_dir:
            self.color_dir[image_dir] = os.path.join(image_dir, 'rgb')
            self.pose_dir[image_dir] = os.path.join(image_dir, 'pose')
            if not os.path.isdir(self.color_dir[image_dir]):
                print("Error! root dir is wrong")
                return
            self.all_color[image_dir] = sorted(data_util.glob_imgs(self.color_dir[image_dir]))
            self.all_poses[image_dir] = sorted(glob(os.path.join(self.pose_dir[image_dir], '*.txt')))
            # Subsample the trajectory for training / test set split as well as the result matrix
            file_lists = [self.all_color[image_dir], self.all_poses[image_dir]]
            if sampling_pattern != 'all':
                if sampling_pattern.split('_')[0] == 'skip':
                    # 'skip_N' keeps every (N+1)-th frame; the lists are mutated
                    # in place so self.all_color / self.all_poses see the change.
                    skip_val = int(sampling_pattern.split('_')[-1])
                    for i in range(len(file_lists)):
                        dummy_list = deepcopy(file_lists[i])
                        file_lists[i].clear()
                        file_lists[i].extend(dummy_list[::skip_val + 1])
                else:
                    print("Unknown sampling pattern!")
                    return None
        # Buffer files
        print("Buffering files...")
        for image_dir in images_dir:
            self.all_views[image_dir] = []
            for i in range(len(self.all_color[image_dir])):
                if not i % 10:
                    print(i)
                self.all_views[image_dir].append(self.read_view_tuple(image_dir, i))
            # Calculate the ranking of nearest neigbors
            # print(len(data_util.get_nn_ranking([data_util.load_pose(pose) for pose in self.all_poses[image_dir]])[0]))
            self.nn_idcs[image_dir], _ = data_util.get_nn_ranking([data_util.load_pose(pose) for pose in self.all_poses[image_dir]])
        print("*" * 100)
        print("Sampling pattern ", sampling_pattern)
        print("Image size ", self.img_size)
        print("*" * 100)
        # Cumulative view counts per scene, used to map a flat index to a scene.
        sizes = []
        self.images_dir = images_dir
        curr = 0
        for image_dir in images_dir:
            curr += len(self.all_color[image_dir])
            sizes.append(curr)
        self.sizes = np.array(sizes)

    def load_rgb(self, path):
        """Load an image, square-crop/resize to ``img_size``, normalise to [-0.5, 0.5], CHW layout."""
        img = data_util.load_img(path, square_crop=True, downsampling_order=1, target_size=self.img_size)
        img = img[:, :, :3].astype(np.float32) / 255. - 0.5
        img = img.transpose(2,0,1)
        return img

    def read_view_tuple(self, image_dir, idx):
        """Return a dict with the ground-truth RGB tensor and camera pose of one view."""
        gt_rgb = self.load_rgb(self.all_color[image_dir][idx])
        pose = data_util.load_pose(self.all_poses[image_dir][idx])
        this_view = {'gt_rgb': torch.from_numpy(gt_rgb),
                     'pose': torch.from_numpy(pose)}
        return this_view

    def idx2imgdir(self, idx):
        """Map a flat dataset index to (scene directory, index within that scene)."""
        img_dir_idx = np.argmax(self.sizes>idx)
        if img_dir_idx == 0:
            return self.images_dir[img_dir_idx], idx
        else:
            # Subtract the cumulative count of all preceding scenes.
            return self.images_dir[img_dir_idx], idx - self.sizes[img_dir_idx-1]

    def __len__(self):
        a = [len(self.all_color[i]) for i in self.all_color]
        return sum(a)

    def __getitem__(self, idx):
        """Return ([target view, random second target], nearest-neighbour view, scene dir)."""
        trgt_views = []
        # Read one target pose and its nearest neighbor
        image_dir, iidx = self.idx2imgdir(idx)
        trgt_views.append(self.all_views[image_dir][iidx])
        # One of the last 1..4 entries of the ranking, chosen at random.
        # NOTE(review): negative indexing assumes the ranking is ordered
        # farthest-first so that [-1] is the nearest view — confirm against
        # data_util.get_nn_ranking.
        nearest_view = self.all_views[image_dir][self.nn_idcs[image_dir][iidx][-np.random.randint(low=1, high=5)]]
        # The second target pose is a random one
        trgt_views.append(self.all_views[image_dir][np.random.choice(len(self.all_views[image_dir]))])
        return trgt_views, nearest_view, image_dir
class TestDataset():
    """Dataset of camera poses only, loaded from ``pose_dir/*.txt``."""

    def __init__(self,
                 pose_dir):
        super().__init__()
        # Sorted for a deterministic pose order.
        pose_paths = sorted(glob(os.path.join(pose_dir, '*.txt')))
        self.all_poses = [torch.from_numpy(data_util.load_pose(p)) for p in pose_paths]

    def __len__(self):
        return len(self.all_poses)

    def __getitem__(self, idx):
        return self.all_poses[idx]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management.base import BaseCommand
from ...sync import sync_plans
class Command(BaseCommand):
    """Management command that pushes the local plan definitions to Stripe."""
    help = "Make sure your Stripe account has the plans"

    def handle(self, *args, **options):
        # All the work is delegated to the sync helper.
        sync_plans()
|
class Solution:
    """LeetCode 9 — Palindrome Number."""

    def isPalindrome(self, x: int) -> bool:
        """Return True if the decimal representation of ``x`` reads the same
        forwards and backwards.

        Negative numbers are never palindromes because of the leading '-'.
        Fixed to return real booleans (the annotation says ``bool`` but the
        original returned 0/1) and simplified to a single reversed-slice
        comparison instead of a manual index loop.
        """
        s = str(x)
        return s == s[::-1]
|
from Topsis_Harsimran_101903288.topsis import rank
__version__='v1.2'
|
# -*- coding: utf-8 -*-
"""
All database interactions are performed by Manager
"""
import logging
import datetime
import os
from sqlalchemy.orm import joinedload
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy import or_
from sqlalchemy import orm, func, over, Integer
from sqlalchemy.dialects.postgres import ARRAY
from newrelic.agent import BackgroundTask
from rtrss.scraper import Scraper
from rtrss.models import Topic, User, Category, Torrent
from rtrss.exceptions import (TopicException, OperationInterruptedException,
CaptchaRequiredException, TorrentFileException,
ItemProcessingFailedException,
DownloadLimitException)
from rtrss.database import session_scope
from rtrss import util, storage
from rtrss.caching import DiskCache
from rtrss.stats import get_stats
# Minimum and maximum number of torrents to store, per category
KEEP_TORRENTS_MIN = 25
KEEP_TORRENTS_MAX = 75
_logger = logging.getLogger(__name__)
class Manager(object):
    def __init__(self, config):
        # Storage backend is created lazily via the ``storage`` property.
        self._storage = None
        self.config = config
        # Category ids whose topic/torrent sets changed; consumed by invalidate_cache().
        self.changed_categories = set()
@property
def storage(self):
if self._storage:
return self._storage
else:
self._storage = storage.make_storage(
self.config.FILESTORAGE_SETTINGS,
self.config.DATA_DIR
)
return self._storage
    def run_task(self, task_name, *args, **kwargs):
        """Run task, catching all exceptions.

        When a New Relic application handle is available the task is recorded
        as a background transaction; otherwise it runs unwrapped.
        """
        app = util.get_newreilc_app('worker', 10.0)
        if app:
            with BackgroundTask(app, name=task_name, group='Task'):
                return self.task_wrapper(task_name, *args, **kwargs)
        else:
            return self.task_wrapper(task_name, *args, **kwargs)
def task_wrapper(self, task_name, *args, **kwargs):
try:
getattr(self, task_name)(*args, **kwargs)
except OperationInterruptedException as e:
_logger.warn("Operation interrupted: {}".format(str(e)))
    def update(self):
        """Scrape the latest topics and add/update their torrents."""
        _logger.debug('Starting update')
        torrents_changed = 0
        for item in self.make_pending_list():
            try:
                torrents_changed += self.process_pending_topic(item)
            except (TopicException, TorrentFileException) as e:
                # Per-topic failures are non-fatal; skip and keep going.
                _logger.debug('Failed to proces topic: %s', e)
        _logger.info('%d torrents added/updated', torrents_changed)
        self.invalidate_cache()
    def cleanup(self):
        """Delete old torrents so each category keeps only recent ones.

        Ranks topics-with-torrents per category by recency and removes every
        topic older than the rank-25 cut-off row of its category, together
        with its torrent row and the stored ``.torrent`` file.
        """
        to_delete = list()
        with session_scope() as db:
            # Topics that actually have a torrent attached.
            twc = (
                db.query(Topic.category_id, Topic.updated_at, Torrent.id)
                .join(Torrent, Torrent.id == Topic.id)
                .subquery(name='twc')
            )
            # Rank rows per category by recency.
            # NOTE(review): func.rank().label('rnk') is passed as over()'s first
            # positional argument — confirm this emits the intended
            # RANK() OVER (PARTITION BY ... ORDER BY ...) expression.
            s2 = (
                db.query(
                    twc.c.category_id,
                    twc.c.updated_at,
                    over(
                        func.rank().label('rnk'),
                        partition_by=twc.c.category_id,
                        order_by=twc.c.updated_at.desc()
                    ).label('rank')
                )
                .subquery(name='s2')
            )
            t1 = orm.aliased(Topic, name='t1')
            # Aggregate ids of topics older than each category's rank-25 row.
            # NOTE(review): the hard-coded 25 presumably mirrors
            # KEEP_TORRENTS_MIN — confirm and consider using the constant.
            query = (
                db.query(
                    s2.c.category_id,
                    func.array_agg(t1.id, type_=ARRAY(Integer)).label('ids')
                )
                .join(t1, t1.category_id == s2.c.category_id)
                .filter(t1.updated_at < s2.c.updated_at)
                .group_by(s2.c.rank, s2.c.category_id)
                .having(s2.c.rank == 25)
            )
            for (cat_id, topic_ids) in query.all():
                self.changed_categories.add(cat_id)
                to_delete.extend(topic_ids)
            if to_delete:
                # Delete torrents first, then their topics.
                db.query(Torrent).filter(Torrent.id.in_(to_delete)) \
                    .delete(synchronize_session=False)
                db.expire_all()
                db.query(Topic).filter(Topic.id.in_(to_delete)) \
                    .delete(synchronize_session=False)
        if to_delete:
            # Remove the stored .torrent files matching the deleted rows.
            keys = ['{}.torrent'.format(tid) for tid in to_delete]
            self.storage.bulk_delete(keys)
        message = 'Cleanup removed {} torrents from {} categories'.format(
            len(to_delete), len(self.changed_categories))
        _logger.info(message)
        self.invalidate_cache()
    def daily_reset(self):
        """Reset user download counters"""
        with session_scope() as db:
            # Snapshot stats before zeroing the counters.
            stats = get_stats(db)
            db.query(User).update({User.downloads_today: 0})
        # Log the collected stats as key=value pairs.
        stats_values = ["{}={}".format(k, v) for k, v in stats.items()]
        _logger.info('stats {}'.format(' '.join(stats_values)))
    def make_pending_list(self):
        """
        Returns list of topics to process
        """
        scraper = Scraper(self.config)
        latest = scraper.get_latest_topics()
        # load_topics is a module-level helper (defined elsewhere in this
        # file); maps topic id -> stored infohash.
        existing = load_topics(latest.keys())
        existing_ids = existing.keys()
        pending = list()
        for tid, topic in latest.items():
            # Unchanged, already-known topics need no work.
            if tid in existing_ids and not topic['changed']:
                continue
            topic['id'] = tid
            # NOTE(review): 'new' is derived purely from the 'changed' flag, so
            # a topic absent from the DB that reports changed=True would get
            # new=False — confirm the scraper's semantics for 'changed'.
            topic['new'] = not topic['changed']
            topic['old_infohash'] = existing.get(tid)
            pending.append(topic)
        return pending
    def process_pending_topic(self, item):
        """Process new or updated torrent/topic. Returns 1 if torrent was added
        or updated, 0 otherwise

        :returns int
        """
        tid = item['id']
        user = select_user()
        scraper = Scraper(self.config)
        parsed = scraper.get_topic(tid, user)
        # Persist the user's updated download counters.
        with session_scope() as db:
            db.add(user)
        title = item['title']
        is_new_topic = item['new']
        updated_at = item['updated_at']
        categories = parsed['categories']
        infohash = parsed['infohash']
        old_infohash = item.get('old_infohash')
        # Deepest category first; missing ancestors are created on demand.
        category_id = self.ensure_category(categories.pop(), categories)
        # Save topic only if it is new or infohash changed (but not removed)
        if is_new_topic or (infohash and infohash != old_infohash):
            save_topic(tid, category_id, updated_at, title)
        # do not save torrent if no infohash
        if not infohash:
            return 0
        # Torrent new or changed
        if is_new_topic or old_infohash != infohash:
            self.process_torrent(tid, infohash, old_infohash)
            return 1
        return 0
    def ensure_category(self, c_dict, parents):
        """
        Check if category exists, create if not. Create all parent
        categories if needed. Returns category id
        """
        category = find_category(c_dict['tracker_id'], c_dict['is_subforum'])
        if category:
            return category.id
        if parents:
            p_dict = parents.pop()
            parent = find_category(p_dict['tracker_id'], p_dict['is_subforum'])
            if parent:
                parent_id = parent.id
            else:
                # Recursively create the missing ancestor chain.
                parent_id = self.ensure_category(p_dict, parents)
        elif c_dict['tracker_id'] == 0:
            # Tracker id 0 is the root category and has no parent.
            parent_id = None
        else:
            msg = u'No parent category for {}({})'.format(c_dict['tracker_id'],
                                                          c_dict['title'])
            _logger.warn(msg)
            # Skip topic if we can't add its category
            raise TopicException(msg)
        category = Category(
            tracker_id=c_dict['tracker_id'],
            is_subforum=c_dict['is_subforum'],
            title=c_dict['title'],
            parent_id=parent_id,
        )
        with session_scope() as db:
            db.add(category)
        # Re-read to obtain the DB-assigned id after the session committed.
        category = find_category(c_dict['tracker_id'], c_dict['is_subforum'])
        _logger.info('Added category %s (%d)', category.title, category.id)
        # The cached category tree is now stale — drop it.
        # NOTE(review): `del cache[key]` may raise if the key is absent —
        # confirm DiskCache.__delitem__ tolerates missing keys.
        cache = DiskCache(os.path.join(self.config.DATA_DIR, 'cache'))
        cache_key = 'category_tree.json'
        del cache[cache_key]
        return category.id
def process_torrent(self, tid, infohash, old_infohash=None):
scraper = Scraper(self.config)
user = select_user()
torrent_dict = None
retry_count = 0
while torrent_dict is None and retry_count < 3:
try:
# This call can raise TopicException, CaptchaRequiredException
# or TorrentFileException
torrent_dict = scraper.get_torrent(tid, user)
except CaptchaRequiredException:
# Retry with different user
user = select_user()
except DownloadLimitException: # User reached download limit
user.downloads_today = user.downloads_limit
with session_scope() as db:
db.add(user)
user.downloads_today += 1
with session_scope() as db:
db.add(user)
torrentfile = torrent_dict['torrentfile']
real_infohash = torrent_dict['infohash']
download_size = torrent_dict['download_size']
if infohash.lower() != real_infohash.lower():
msg = 'Torrent {} hash mismatch: {}/{}'.format(tid, infohash,
real_infohash)
_logger.error(msg)
raise TopicException(msg)
with session_scope() as db:
torrent = (
db.query(Torrent)
.filter(Torrent.infohash == infohash)
.first()
)
if torrent:
msg = 'Torrent with infohash {} already exists: {}'.format(
infohash, torrent)
_logger.error(msg)
raise TopicException(msg)
torrent = Torrent(
id=tid,
infohash=infohash,
size=download_size,
tfsize=len(torrentfile)
)
with session_scope() as db:
db.merge(torrent)
filename = '{}.torrent'.format(tid)
if old_infohash:
self.storage.delete(filename)
self.storage.put(
filename,
torrentfile,
mimetype='application/x-bittorrent'
)
    def invalidate_cache(self):
        """Invalidates cache for all changed categories. Should be called after
        all operations that may add, change or delete topics/torrents"""
        for category_id in self.changed_categories:
            pass  # TODO implement this
def sync_categories(self):
    """Import all existing tracker categories into DB.

    Creates the root category if missing, then walks every subforum id
    reported by the tracker and imports its category chain.
    """
    _logger.info('Syncing tracker categories')
    user = select_user(False)
    scraper = Scraper(self.config)
    with session_scope() as db:
        old = db.query(func.count(Category.id)).scalar()
        root = db.query(Category).get(0)
        if not root:  # Create root category
            root = Category(
                id=0,
                title=u'Все разделы',
                parent_id=None,
                tracker_id=0,
                is_subforum=False,
            )
            db.add(root)
        for forum_id in scraper.get_forum_ids(user):
            category = find_category(forum_id, True)
            if category:
                continue
            try:
                cat_list = scraper.get_forum_categories(forum_id, user)
            except ItemProcessingFailedException as e:
                # BUG FIX: previously fell through with cat_list undefined
                # (NameError on first failure, stale list afterwards);
                # skip this forum instead.
                _logger.error("Failed to import category: {}".format(e))
                continue
            if not cat_list:
                # warning() -- warn() is a deprecated alias
                _logger.warning('Unable to get category list for %d', forum_id)
                continue
            self.ensure_category(cat_list.pop(), cat_list)
        new = db.query(func.count(Category.id)).scalar()
    _logger.info("Category sync completed, %d old, %d added", old,
                 new - old)
def populate_categories(self, count=1, total=None):
    """
    Add torrents to every subforum category that currently has fewer
    than ``count`` torrents, downloading no more than ``total`` in all.

    :param count: minimum number of torrents per category
    :param total: overall download budget; defaults to one per sparse
        category when count == 1, otherwise it is required
    :raises ValueError: when count > 1 and total is not given
    """
    with session_scope() as db:
        # Subforum categories with fewer than `count` torrents, paired
        # with their current torrent counts.
        query = (
            db.query(Category, func.count(Torrent.id))
            .outerjoin(Topic)
            .outerjoin(Torrent)
            .filter(Category.is_subforum)
            .group_by(Category.id)
            .having(func.count(Torrent.id) < count)
            .order_by(Category.id)
        )
        categories = query.all()
        # Detach results so they stay usable after the session closes.
        db.expunge_all()
    if total is None:
        if count == 1:
            total = len(categories)
        else:
            raise ValueError('total parameter is required if count > 1')
    message = (
        'Found {} categories with <{} torrents, '
        'going to download up to {} torrents'
        .format(len(categories), count, total)
    )
    _logger.info(message)
    if not len(categories):
        return
    total_added = 0
    for cat, num_torrents in categories:
        # If this category has some torrents - only add missing amount
        to_add = count - num_torrents
        # Do not add more than total
        if total_added + to_add > total:
            to_add = total - total_added
        added = self.populate_category(cat.tracker_id, to_add)
        total_added += added
        _logger.debug('Added %d torrents to %s ', added, cat.title)
        if total_added >= total:
            break
    _logger.info('Populate task added %d torrents', total_added)
def populate_category(self, forum_id, count):
    """
    Add up to ``count`` torrents from subforum ``forum_id``.

    :param forum_id: tracker id of the subforum to scan
    :param count: maximum number of torrents to add
    :returns: int -- number of torrents actually added
    """
    if count <= 0:
        # BUG FIX: with the old `added == count` check a non-positive
        # budget never matched and the whole forum was imported.
        return 0
    scraper = Scraper(self.config)
    user = select_user()
    try:
        torrents = scraper.find_torrents(user, forum_id)
    except ItemProcessingFailedException as e:
        msg = "Failed to populate category {}: {}".format(forum_id, e)
        _logger.error(msg)
        torrents = []
    with session_scope() as db:
        db.add(user)
    if not torrents:
        _logger.debug('No torrents found in category %d', forum_id)
        return 0
    added = 0
    for tdict in torrents:
        with session_scope() as db:
            exists = db.query(Topic).get(tdict['id'])
        # Skip torrents that are already in database
        if exists:
            continue
        tdict['new'] = True
        try:
            added += self.process_pending_topic(tdict)
        except TopicException:
            _logger.debug('Failed to add topic %d', tdict['id'])
        # BUG FIX: `==` could be stepped over when a single call adds
        # more than one torrent; `>=` always terminates at the budget.
        if added >= count:
            break
    return added
def daily_populate_task(self):
    """Daily entry point: top up sparse categories within today's
    estimated remaining download budget."""
    dlslots = estimate_free_download_slots()
    # _logger.info("Daily populate going to download %d torrents", dlslots)
    self.populate_categories(KEEP_TORRENTS_MIN, dlslots)
def select_user(with_dlslots=True):
    """
    Select one random enabled user.

    :param with_dlslots: when True, only consider users that still have
        download slots available today (or have no limit at all)
    :returns: User, detached from the session
    :raises OperationInterruptedException: when no suitable user exists
    """
    with session_scope() as db:
        try:
            query = db.query(User).filter(User.enabled.is_(True))
            if with_dlslots:
                query = query.filter(or_(
                    User.downloads_limit > User.downloads_today,
                    User.downloads_limit.is_(None)
                ))
            user = query.order_by(func.random()).limit(1).one()
        except NoResultFound:
            raise OperationInterruptedException('No suitable users found')
        else:
            # Detach so the caller can use the object outside the session.
            db.expunge(user)
            return user
def load_topics(ids):
    """Fetch existing topics together with their torrents.

    :param ids: iterable of topic ids to look up
    :returns: dict mapping topic_id -> infohash (None when the topic has
        no torrent attached)
    """
    with session_scope() as db:
        rows = (
            db.query(Topic)
            .options(joinedload(Topic.torrent))
            .filter(Topic.id.in_(ids))
            .all()
        )
        return {
            row.id: (row.torrent.infohash if row.torrent else None)
            for row in rows
        }
def save_topic(tid, category_id, updated_at, title):
    """
    Insert or update topic.

    merge() updates an existing row with the same id in place, so this is
    an upsert.
    """
    topic = Topic(
        id=tid,
        category_id=category_id,
        title=title,
        updated_at=updated_at
    )
    _logger.debug('Saving topic %s', topic)
    with session_scope() as db:
        topic = db.merge(topic)
        # NOTE(review): session_scope presumably commits on exit -- this
        # explicit commit looks redundant; confirm before removing.
        db.commit()
def find_category(tracker_id, is_subforum):
    """Look up a Category by tracker id and subforum flag.

    :returns: detached Category instance, or None when not found
    """
    with session_scope() as db:
        query = db.query(Category)
        query = query.filter(Category.tracker_id == tracker_id)
        query = query.filter(Category.is_subforum == is_subforum)
        result = query.first()
        # Detach so the result survives the session.
        db.expunge_all()
    return result
def estimate_free_download_slots(days=7):
    """Estimate how many torrents can still be downloaded today.

    Based on the average number of torrents downloaded per day over the
    past ``days`` days, capped by the slots actually left today and a
    hard ceiling of 1000; a 10% safety margin is applied.

    :param days: size of the look-back window in days
    :returns: int -- estimated free download slots
    """
    today = datetime.datetime.utcnow().date()
    week = (datetime.datetime.utcnow() - datetime.timedelta(days)).date()
    with session_scope() as db:
        num_downloads = (
            db.query(func.count(Torrent.id))
            .join(Topic)
            .filter(func.date(Topic.updated_at) >= week)
            .filter(func.date(Topic.updated_at) < today)
            .scalar()
        )
        # BUG FIX: SUM() yields NULL (None) when there are no enabled
        # users; treat that as zero capacity instead of raising TypeError.
        daily_slots = (
            db.query(func.sum(User.downloads_limit))
            .filter(User.enabled)
            .scalar()
        ) or 0
        slots_left_today = (
            db.query(func.sum(User.downloads_limit - User.downloads_today))
            .filter(User.enabled)
            .scalar()
        ) or 0
    estimate = daily_slots - (num_downloads / days)
    if estimate > slots_left_today:
        estimate = slots_left_today
    if estimate > 1000:
        estimate = 1000
    return int(estimate * 0.9)
|
from ._CarlaActorInfo import *
from ._CarlaActorList import *
from ._CarlaCollisionEvent import *
from ._CarlaControl import *
from ._CarlaEgoVehicleControl import *
from ._CarlaEgoVehicleInfo import *
from ._CarlaEgoVehicleInfoWheel import *
from ._CarlaEgoVehicleStatus import *
from ._CarlaLaneInvasionEvent import *
from ._CarlaMapInfo import *
from ._CarlaStatus import *
from ._CarlaWalkerControl import *
from ._CarlaWorldInfo import *
|
import unittest
from enum import Enum
from typing import List
class State(Enum):
    # Final cell states on the board.
    DEATH = 0
    LIVE = 1
class Transition(Enum):
    # Encodes (previous -> next) state for the in-place update.  Every
    # value >= LIVE_TO_DEATH (1) means the cell WAS alive, so the first
    # pass can still count original live neighbours after overwriting.
    DEATH_TO_DEATH = -1
    DEATH_TO_LIVE = 0
    LIVE_TO_DEATH = 1
    LIVE_TO_LIVE = 2
class Solution:
    @staticmethod
    def gameOfLife(board: List[List[int]]) -> None:
        """Advance a Conway's Game of Life board one generation, in place.

        Pass 1 rewrites each cell as a Transition value encoding both its
        old and new state; pass 2 collapses those transitions back to the
        plain 0/1 State values.  Rules: a live cell survives with two or
        three live neighbours; a dead cell revives with exactly three.
        """
        rows, cols = len(board), len(board[0])
        offsets = [(di, dj)
                   for di in (-1, 0, 1)
                   for dj in (-1, 0, 1)
                   if (di, dj) != (0, 0)]
        for r in range(rows):
            for c in range(cols):
                # Cells holding a value >= 1 were alive before this pass:
                # untouched live cells hold 1, processed ones 1 or 2.
                alive = sum(
                    1
                    for di, dj in offsets
                    if 0 <= r + di < rows and 0 <= c + dj < cols
                    and board[r + di][c + dj] >= Transition.LIVE_TO_DEATH.value
                )
                if board[r][c] == State.DEATH.value:
                    board[r][c] = (Transition.DEATH_TO_LIVE.value
                                   if alive == 3
                                   else Transition.DEATH_TO_DEATH.value)
                elif 2 <= alive <= 3:
                    board[r][c] = Transition.LIVE_TO_LIVE.value
                else:
                    board[r][c] = Transition.LIVE_TO_DEATH.value
        dying = (Transition.DEATH_TO_DEATH.value,
                 Transition.LIVE_TO_DEATH.value)
        for r in range(rows):
            for c in range(cols):
                board[r][c] = (State.DEATH.value
                               if board[r][c] in dying
                               else State.LIVE.value)
class GameOfLife(unittest.TestCase):
    """Unit tests for Solution.gameOfLife (in-place board update)."""
    def test_case_1(self):
        # 4x3 fixture from the LeetCode problem statement.
        board = [[0, 1, 0], [0, 0, 1], [1, 1, 1], [0, 0, 0]]
        Solution.gameOfLife(board)
        expected = [[0, 0, 0], [1, 0, 1], [0, 1, 1], [0, 1, 0]]
        self.assertListEqual(board, expected)
    def test_case_2(self):
        # 2x2 block: three live neighbours revive the dead corner.
        board = [[1, 1], [1, 0]]
        Solution.gameOfLife(board)
        expected = [[1, 1], [1, 1]]
        self.assertListEqual(board, expected)
|
import re
import json
import copy
import mimetypes
import collections
from ashiba import utils
from lxml import etree
class Dom(collections.defaultdict):
    """Maps element ids to DomElement objects; unknown ids default to an
    empty DomElement.  A deep-copied snapshot of the initial state lets
    changes() report only what was modified."""
    def __init__(self, *args, **kwargs):
        super(Dom, self).__init__(lambda: DomElement(''), *args, **kwargs)
        # Re-wrap plain dict values as typed DomElement instances based
        # on their recorded nodeName.
        for k in self:
            self[k] = DomElement(self[k]['_meta']['nodeName'], self[k])
        # Should this be in the to_dict method?
        self.init_state = copy.deepcopy(self.to_dict())
    def __repr__(self):
        return "Dom(%s)" % dict(self.items())
    def changes(self):
        # Diff the current state against the construction-time snapshot.
        cur_state = self.to_dict()
        return utils.dict_diff(cur_state, self.init_state)
    def to_dict(self):
        return {k:v.to_dict() for k,v in self.items()}
class GenericDomElement(dict):
    """Base class for DOM elements: a dict of attributes plus a '_meta'
    sub-dict holding nodeName, class list, style and innerHTML."""
    def __init__(self, *args, **kwargs):
        super(GenericDomElement, self).__init__(*args, **kwargs)
        # Every element carries a metadata dict, even if empty.
        if self.get('_meta') is None:
            self['_meta'] = {}
    def __getitem__(self, key):
        # Missing attributes read as None instead of raising KeyError.
        try:
            return super(GenericDomElement, self).__getitem__(key)
        except KeyError:
            return None
    def __repr__(self):
        return "{}({})".format(self.__class__.__name__,
                               dict(self.items()))
    def inner_html(self):
        # Subclasses override this to render their innerHTML.
        return None
    def add_class(self, class_name):
        # NOTE(review): entries are stored with a '+' prefix (apparently a
        # change directive for the client), so this membership check never
        # matches a previously added class -- confirm this is intended.
        if class_name not in self['_meta'].setdefault('class', []):
            self['_meta']['class'].append('+' + class_name)
    def remove_class(self, class_name):
        # '-' prefix marks a class-removal directive.
        self['_meta'].setdefault('class', []).append('-' + class_name)
    def style(self, prop=None, val=None):
        """Get all styles, get one property, or set one property."""
        if prop is None:
            return self['_meta'].get('style', {})
        elif val is None:
            return self['_meta'].get('style', {}).get(prop)
        else:
            self['_meta'].setdefault('style', {})[prop] = val
    def attr(self, name=None, value=None):
        """Set attribute ``name`` to ``value``; both are required."""
        if name is None or value is None:
            raise TypeError("attr requires two values, name and value")
        self[name] = value
    @classmethod
    def from_dict(cls, in_dict):
        return cls(in_dict)
    @classmethod
    def from_json(cls, in_json):
        return cls.from_dict(json.loads(in_json))
    def to_dict(self):
        # Fold the rendered innerHTML (if any) into the metadata.
        inner_html = self.inner_html()
        if inner_html:
            self['_meta']['innerHTML'] = inner_html
        return dict(self)
    def to_json(self):
        # BUG FIX: to_dict was referenced without calling it, so this
        # serialized a bound-method object and raised TypeError.
        return json.dumps(self.to_dict())
# Registry mapping lowercased node names / class names to DomElement
# subclasses; populated by the @nodeName decorator below.
_translate_node_name = {}
def DomElement(node_name, *args, **kwargs):
    """Factory: build the most specific registered element class.

    Resolution order: the node name itself, then any class name found in
    the element's metadata, finally the generic fallback.
    """
    attrs = dict(*args, **kwargs)
    key = node_name.lower()
    if key in _translate_node_name:
        element_cls = _translate_node_name[key]
    else:
        element_cls = None
        for css_class in attrs.get('_meta', {}).get('class', []):
            if css_class in _translate_node_name:
                element_cls = _translate_node_name[css_class]
                break
        if element_cls is None:
            element_cls = GenericDomElement
    obj = element_cls(*args, **kwargs)
    if node_name:
        obj['_meta']['nodeName'] = node_name
    return obj
def nodeName(node_name):
    """Class decorator: register the class under ``node_name`` so the
    DomElement factory can dispatch to it."""
    key = node_name.lower()
    def register(cls):
        _translate_node_name[key] = cls
        return cls
    return register
@nodeName('SELECT')
class Select(GenericDomElement):
    """
    A dropdown box. Add items to the dropdown with add_item/add_items.
    Recommended events to bind:
    change
    """
    # Superclass methods
    def __init__(self, *args, **kwargs):
        super(Select, self).__init__(*args, **kwargs)
        # Could probably be replaced with attr later
        self._list_items = []
        ## Use lxml etrees in the future for this stuff
        #self.list_items = [(x.attrib['value'], x.text) for x in e.findall('option')]
        # Parse (value, text) pairs out of the rendered <option> tags.
        # NOTE(review): assumes self['_meta']['innerHTML'] is always
        # present and a string -- confirm against the DOM producer.
        list_item_template = '<option value=["\'](.*?)["\']>(.*?)</option>'
        self._list_items = re.findall(list_item_template,
                                      self['_meta']['innerHTML'])
    def inner_html(self):
        # Re-render the option list back into HTML for serialization.
        list_item_template = '<option value="{}">{}</option>'
        inner_html = ""
        for pair in self._list_items:
            inner_html += list_item_template.format(*pair)
        return inner_html
    # Non-superclass methods
    def add_item(self, value, text=None):
        """
        Add an item to the dropdown.
        Args:
            value: This is the value that the select object will take on.
                May also be a (value, text) pair or a 1-tuple.
            text : (Optional) This is the display text.
        Example:
            >>> s.add_item('benz', 'Mercedes Benz')
        """
        if isinstance(value, (list, tuple)):
            if len(value) == 2:
                value, text = value
            elif len(value) == 1:
                value = text = value[0]
            else:
                raise TypeError("add_item takes at most 2 values (%i given)" \
                                % len(value))
        elif text is None:
            # Single bare value: display text defaults to the value.
            text = value
        self._list_items.append((value, text))
    def add_items(self, items):
        """
        Add multiple items to a dropdown. Accepts a list of items, either
        singletons or pairs.
        Example:
            >>> s.add_items([('benz', 'Mercedes Benz'), 'ford'])
        """
        for item in items:
            self.add_item(item)
    def empty(self):
        """
        Remove all items from the dropdown.
        """
        self._list_items = []
    def remove_item(self, value):
        """
        Remove an item from the dropdown list by specifying its value.
        This will fail silently if that item is not in the list.
        """
        self._list_items = [x for x in self._list_items if x[0] != value]
    def list_items(self):
        """Return the current (value, text) pairs."""
        return self._list_items
@nodeName('jqui-dialog')
class Dialog(GenericDomElement):
    """jQueryUI dialog element exposing title and body as properties."""
    def __init__(self, *args, **kwargs):
        super(Dialog, self).__init__(*args, **kwargs)
    @property
    def title(self):
        # The title lives in a plain attribute, not under _meta.
        return self.get('title', '')
    @title.setter
    def title(self, t):
        self['title'] = t
    @property
    def body(self):
        # The body is the element's innerHTML.
        return self['_meta'].get('innerHTML', '')
    @body.setter
    def body(self, b):
        self['_meta']['innerHTML'] = b
def get_tab_info(root):
    """
    Returns the prefix, separator, and max index for a set of jQueryUI tabs

    :param root: element whose <ul><li><a href="#prefix-N"> children
        name the tabs
    :returns: (prefix, separator, max_index) -- max_index is the highest
        numeric suffix, still as a string
    :raises ValueError: if the tab links have no single common prefix
    """
    hrefs = [y.attrib['href'] for y in
             [x.find('a') for x in root.find('ul').findall('li')]
             ]
    # Raw string avoids the invalid '\d' escape in a plain literal.
    splits = [r.groups() for r in
              [re.match(r'#(.*)([-_]+)(\d+)$', h) for h in hrefs]
              if r]
    names = {s[0] for s in splits}
    seps = {s[1] for s in splits}
    indices = [s[2] for s in splits]
    if len(names) > 1:
        raise ValueError("Tab names lack a common prefix")
    elif len(names) == 0:
        raise ValueError("Tab names seem to be missing")
    # BUG FIX: compare indices numerically -- plain max() on strings is
    # lexicographic, so '9' would beat '10'.
    return names.pop(), seps.pop(), max(indices, key=int)
@nodeName('jqui-tabs')
class TabSet(GenericDomElement):
    """jQueryUI tab container parsed from its rendered innerHTML."""
    def __init__(self, *args, **kwargs):
        super(TabSet, self).__init__(*args, **kwargs)
        # Wrap in a synthetic root so the fragment parses as one element.
        root = etree.fromstring(
            '<div>' + self['_meta']['innerHTML'] + '</div>')
        # Titles come from <ul><li><a>, bodies from sibling <div><p>.
        titles = [x.findtext('a') for x in root.find('ul').findall('li')]
        bodies = [x.findtext('p') for x in root.findall('div')]
        self.tabs = [Tab(t, b) for t, b in zip(titles, bodies)]
        self.id_prefix, self.id_sep, self.id_index = get_tab_info(root)
        # innerHTML is only re-rendered (and the client refreshed) when
        # the tab list was actually modified.
        self.changed = False
    def add_tab(self, title="", body=""):
        """Append a new tab."""
        self.tabs.append(Tab(title, body))
        self.changed = True
    def del_tab(self, idx):
        """Delete the tab at position ``idx``."""
        del self.tabs[idx]
        self.changed = True
    def tab(self, idx):
        """Return the Tab at position ``idx``."""
        return self.tabs[idx]
    def empty(self):
        """Remove all tabs."""
        self.tabs = []
        self.changed = True
    def inner_html(self):
        if not self.changed:
            # Unchanged: reuse the HTML we were constructed from.
            return self['_meta']['innerHTML']
        else:
            # Re-number tab ids sequentially with the original prefix/sep.
            li_template = '\t<li><a href="#{}{}{}">{}</a></li>'
            li_list = [li_template.format(
                self.id_prefix, self.id_sep, idx, tab.title)
                for idx, tab in enumerate(self.tabs)]
            ul_section = '<ul>\n{}\n</ul>'.format('\n'.join(li_list))
            body_template = '<div id="{}{}{}"><p>{}</p></div>'
            body_list = [body_template.format(
                self.id_prefix, self.id_sep, idx, tab.body)
                for idx, tab in enumerate(self.tabs)]
            body_section = '\n'.join(body_list)
            return ul_section + '\n' + body_section
    def to_dict(self):
        if self.changed:
            # Ask the client to re-scan the tab markup we just rewrote.
            self['_meta']['eval'] = "$(this).tabs('refresh');"
        return super(TabSet, self).to_dict()
class Tab(object):
    """Plain value object holding one tab's title and body text."""
    def __init__(self, title="", body=""):
        self.title = title
        self.body = body
    def __repr__(self):
        # Truncate long bodies so reprs stay readable.
        preview = (self.body if len(self.body) <= 16
                   else self.body[:16] + '...')
        return "Tab({}, {})".format(repr(self.title), repr(preview))
@nodeName('IMG')
class Image(GenericDomElement):
    """<img> element; set_image points src at a URL or inlines the data
    as a data: URI."""
    def set_image(self, image, tipo='', encoding=None):
        """Set the image source.

        :param image: a URL (when tipo == '') or raw/encoded image data
        :param tipo: file extension used to look up the mimetype
        :param encoding: e.g. 'base64'; None for raw data
        :raises ValueError: when no mimetype is known for ``tipo``
        """
        if tipo == '':
            self['src'] = image
            # BUG FIX: without this return the URL case fell through,
            # looked up '.' in the mimetype table and raised ValueError.
            return
        if not tipo.startswith('.'):
            tipo = '.' + tipo
        if tipo not in mimetypes.types_map:
            raise ValueError(
                "Mimetype not found for extension '{}'".format(tipo))
        mimetype = mimetypes.types_map[tipo]
        if encoding is None:
            self['src'] = "data:{},{}".format(mimetype, image)
        else:
            self['src'] = "data:{};{},{}".format(mimetype, encoding, image)
@nodeName('TABLE')
class DataTable(GenericDomElement):
    """Table element backed by an optional (pandas) DataFrame passed as
    ``data=``; it is rendered to HTML and hooked up to the jQuery
    DataTables plugin on serialization."""
    def __init__(self, *args, **kwargs):
        # Pop our custom kwarg before the dict constructor sees it.
        self.data = kwargs.pop('data', None)
        super(DataTable, self).__init__(*args, **kwargs)
    def inner_html(self):
        # Duck-typed DataFrame check avoids importing pandas here.
        if 'dataframe' in str(type(self.data)).lower():
            html = self.data.to_html(index=False)
            html = re.sub('border="1"', '', html)
            # Raw string: '\s' in a plain literal is an invalid escape.
            return re.sub(r'\s+', ' ', html)
        else:
            return ''
    def to_dict(self):
        """
        This is a hack here. We should be able to take care of it in init.
        """
        if self.data is not None:
            dt_options = {'bDestroy': True,
                          'bAutoWidth': True,
                          'bLengthChange': False,
                          }
            self['_meta']['eval'] = "$(this).dataTable({});".format(
                json.dumps(dt_options))
        return super(DataTable, self).to_dict()
|
#Ejercicio GIT_DPG
#M.Carmen Delgado Muñoz y Ángela González Martín
def sumacuadrados(a, b):
    """Return the sum of the squares of a and b."""
    return sum(x ** 2 for x in (a, b))
def multcuadrados(a, b):
    """Return the product of the squares of a and b, i.e. (a^2)*(b^2)."""
    cuadrado_a = a ** 2
    cuadrado_b = b ** 2
    return cuadrado_a * cuadrado_b
#Ahora vamos a añadir otra función
def sumacubos(a, b):
    """Return the sum of the cubes of a and b."""
    return sum(x ** 3 for x in (a, b))
|
import os
import shutil
from collections import defaultdict
from statistics import mean, stdev
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor, requests, DocumentArrayMemmap
from pympler import asizeof, tracker
from .pages import Pages
from .utils.timecontext import TimeContext
NUM_REPETITIONS = 5  # how many times the whole benchmark is repeated
NUM_REQUESTS = 100  # search requests issued per repetition
TARGET_FILE = 'searchers_compare.json'  # output file for json_writer
def _get_docs(number_of_documents, embedding_size):
    """Build ``number_of_documents`` Documents with random embeddings."""
    return [
        Document(id=str(doc_id), embedding=np.random.rand(embedding_size))
        for doc_id in range(number_of_documents)
    ]
def _get_dam(number_of_documents, embedding_size, dir_path, **kwargs):
    """Build (or reuse) an on-disk DocumentArrayMemmap; return its path.

    A pristine copy is built once under a ``_tmp`` path and then copied,
    so each benchmark run can start from an untouched memmap directory.
    """
    tmp_path = f'{dir_path}/memmap_{number_of_documents}_{embedding_size}_tmp'
    path = f'{dir_path}/memmap_{number_of_documents}_{embedding_size}'
    if os.path.exists(path):
        return path
    da = DocumentArrayMemmap(tmp_path)
    docs = _get_docs(number_of_documents, embedding_size)
    da.extend(docs)
    da.save()
    shutil.copytree(tmp_path, path)
    da.clear()
    # NOTE(review): pokes a private attribute to drop the mmap handle --
    # presumably to release the file after the copy; confirm.
    da._last_mmap = None
    return path
def _get_da(number_of_documents, embedding_size, dir_path, **kwargs):
    """Create (or reuse) a binary DocumentArray dump; return its path."""
    target = f'{dir_path}/docs.bin'
    if not os.path.exists(target):
        array = DocumentArray()
        array.extend(_get_docs(number_of_documents, embedding_size))
        array.save(target, file_format='binary')
        array.clear()
    return target
def _get_document_array(dam_index, **kwargs):
    """Dispatch to the memmap or in-memory index builder; returns a path."""
    if dam_index:
        return _get_dam(**kwargs)
    return _get_da(**kwargs)
class DocumentArraySearcher(Executor):
    """Jina Executor matching query docs against a pre-built index.

    The index is loaded either as an in-memory DocumentArray (binary
    dump) or as a DocumentArrayMemmap, depending on ``dam_index``.
    """
    def __init__(
        self,
        indexed_docs_path,
        dam_index,
        warmup=False,
        top_k: int = 50,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.indexed_docs_path = indexed_docs_path
        self._index_docs = (
            DocumentArray.load(indexed_docs_path, file_format='binary')
            if not dam_index
            else DocumentArrayMemmap(indexed_docs_path)
        )
        if warmup:
            # Touching all embeddings up-front pages the data into memory
            # before timing starts.
            self._index_docs.get_attributes('embedding')
        self._top_k = top_k
    @requests
    def search(self, docs, **kwargs):
        # Attach the top_k nearest index docs (cosine) as matches.
        docs.match(
            self._index_docs,
            metric='cosine',
            use_scipy=False,
            limit=self._top_k,
        )
@pytest.mark.skipif(
    'JINA_BENCHMARK_SEARCHERS' not in os.environ,
    reason='This test take a lot of time, to be run explicitly and isolated from the rest',
)
@pytest.mark.parametrize(
    'name,indexed_docs,docs_per_request,emb_size',
    [
        ('Tiny Index', 100, 1, 128),
        ('Small Index', 10000, 1, 128),
        ('Medium Index', 100000, 1, 128),
        # ('Big Index', 1000000, 1, 128),
        ('Batch requesting', 100000, 32, 128),
        ('Big embeddings', 100000, 1, 512),
    ],
)
@pytest.mark.parametrize(
    'dam_index,warmup', [(False, False), (True, False), (True, True)]
)
def test_search_compare(
    name,
    indexed_docs,
    docs_per_request,
    emb_size,
    dam_index,
    warmup,
    ephemeral_tmpdir,
    json_writer,
):
    """Benchmark DocumentArray vs DocumentArrayMemmap search.

    For each scenario, measures index build/load time and memory, issues
    NUM_REQUESTS search requests recording latency and memory growth,
    and appends the aggregated results to TARGET_FILE.
    """
    def _get_indexer():
        # Build (or reuse) the on-disk index, then wrap it in a searcher.
        path = _get_document_array(
            dam_index=dam_index,
            number_of_documents=indexed_docs,
            embedding_size=emb_size,
            dir_path=str(ephemeral_tmpdir),
        )
        return DocumentArraySearcher(
            indexed_docs_path=path, dam_index=dam_index, warmup=warmup
        )
    query_docs = [
        DocumentArray(_get_docs(docs_per_request, embedding_size=emb_size))
    ] * NUM_REQUESTS
    data_points = defaultdict(list)
    all_search_timings = []
    def _func():
        # One benchmark repetition: build/load index, run all requests.
        with TimeContext() as indexer_context:
            indexer = _get_indexer()
        print(f' indexer created/loaded in {indexer_context.duration / 1e6} ms')
        data_points['index_time'].append(indexer_context.duration)
        data_points['index_memory'].append(asizeof.asizeof(indexer))
        # Snapshot memory before/after searching to attribute growth.
        tr = tracker.SummaryTracker()
        sum1 = tr.create_summary()
        timings = []
        for i in range(NUM_REQUESTS):
            with TimeContext() as seach_context:
                indexer.search(query_docs[i])
            timings.append(seach_context.duration)
        sum2 = tr.create_summary()
        diff = tr.diff(sum1, sum2)
        print(f' search finished in {sum(timings) / 1e6} ms')
        data_points['search_time'].append(sum(timings))
        all_search_timings.extend(timings)
        data_points['search_memory'].append(sum([ob_sum[2] for ob_sum in diff]))
        # Wipe the index dir so the next repetition rebuilds from scratch.
        shutil.rmtree(str(ephemeral_tmpdir), ignore_errors=True)
        os.makedirs(str(ephemeral_tmpdir))
    for i in range(NUM_REPETITIONS):
        _func()
    results = {}
    for field in ['index_time', 'index_memory', 'search_time', 'search_memory']:
        results[f'mean_{field}'], results[f'std_{field}'] = get_mean_and_std(
            data_points[field]
        )
    # Latency percentiles over every request of every repetition.
    results['p90'] = get_percentile(all_search_timings, 90)
    results['p99'] = get_percentile(all_search_timings, 99)
    json_writer.append_raw(
        target_file=TARGET_FILE,
        dict_=dict(
            name=name,
            page=Pages.INDEXER_COMPARISON,
            iterations=NUM_REPETITIONS,
            results=results,
            metadata=dict(
                indexed_docs=indexed_docs,
                embedding_size=emb_size,
                docs_per_request=docs_per_request,
                num_requests=NUM_REQUESTS,
                dam_index=dam_index,
                warmup_embeddings=warmup,
            ),
        ),
    )
def get_mean_and_std(data):
    """Return (mean, stdev) of ``data``; stdev is None for one sample."""
    if len(data) > 1:
        return mean(data), stdev(data)
    return mean(data), None
def get_percentile(timings, percentile):
    """Return the given percentile of ``timings`` via numpy."""
    return np.percentile(np.asarray(timings), percentile)
|
def book(k, i, a):
    """Assign seat value ``k`` to slot ``i`` of seat list ``a``, unless
    that value is already present anywhere in the list."""
    if k in a:
        print("Seat Booked already")
    else:
        a[i] = k
def print_ticket(a):
    # Dump the raw seat list to stdout.
    print (a)
def cancel_ticket(t, a):
    """Cancel every booking whose value equals ``t``, printing a
    confirmation per removal.

    BUG FIX: the old loop used range(len(a) - 1), silently skipping the
    last seat, and deleted elements while stepping fixed indices, which
    could raise IndexError once the list shrank.
    """
    i = 0
    while i < len(a):
        if a[i] == t:
            del a[i]
            print("ticket cancelled successfully and the ticket is", t)
        else:
            i += 1
def availablity(a):
    """Print every seat number in 0..5 whose value is absent from ``a``.

    NOTE(review): the range is hard-coded to 6 while the demo list has 5
    slots -- confirm the intended seat numbering.
    """
    for i in range(0, 6):
        if i not in a:
            # BUG FIX: `print i` is Python 2 statement syntax and a
            # SyntaxError under Python 3.
            print(i)
def chart(a):
    """Print the seating chart, skipping unbooked (zero) slots."""
    print("<-------------chart prepared------------->")
    for customer, seat in enumerate(a):
        if seat != 0:
            print("Customer-", customer, "Seat No-", seat)
# Demo run: 5 seats, all initially unbooked (0).
a=[0]*5
book(2,0,a)
book(3,1,a)
#book(4,2,a)
#book(5,3,a)
#book(1,4,a)
# Duplicate: seat value 2 is already taken, so this prints a warning.
book(2,0,a)
print_ticket(a)
#cancel_ticket(3,a)
#cancel_ticket(4,a)
#cancel_ticket(5,a)
#print_ticket(a)
chart(a)
availablity(a)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
from docutils import nodes
class category(nodes.Element):
    """Custom docutils node rendered as <span class="category"> in HTML
    and as sans-serif text in LaTeX."""
    pass
def category_role(name, rawtext, text, lineno, inliner,
                  options={}, content=[]):
    """Docutils role handler: wrap ``text`` in a category node.

    Standard docutils role signature; the mutable defaults follow the
    docutils convention and are never mutated here.
    """
    node = category(text)
    return [node], []
def visit_category_node_html(self, node):
    # Open the span and emit the raw text; depart_ closes it.
    self.body.extend(['<span class="category">', node.rawsource])
def depart_category_node_html(self, node):
    # Close the <span> opened in the matching visit_ handler.
    self.body.append('</span>')
def visit_category_node_latex(self, node):
    # Typeset category names in sans-serif, as their own paragraph.
    self.body.append('\n\n\\textsf{{{}}}'.format(node.rawsource))
def depart_category_node_latex(self, node):
    # Nothing to close in the LaTeX writer.
    pass
class entry(nodes.Element):
    """Custom docutils node rendered as <span class="entry"> in HTML and
    as typewriter text (with escaped underscores) in LaTeX."""
    pass
def entry_role(name, rawtext, text, lineno, inliner,
               options={}, content=[]):
    """Docutils role handler: wrap ``text`` in an entry node.

    Standard docutils role signature; the mutable defaults follow the
    docutils convention and are never mutated here.
    """
    node = entry(text)
    return [node], []
def visit_entry_node_html(self, node):
    # Open the span and emit the raw text; depart_ closes it.
    self.body.extend(['<span class="entry">', node.rawsource])
def depart_entry_node_html(self, node):
    # Close the <span> opened in the matching visit_ handler.
    self.body.append('</span>')
def visit_entry_node_latex(self, node):
    # Escape underscores, which would otherwise be LaTeX subscripts.
    escaped = node.rawsource.replace('_', r'\_')
    self.body.append('\n\n\\texttt{{{}}}'.format(escaped))
def depart_entry_node_latex(self, node):
    # Nothing to close in the LaTeX writer.
    pass
class soft(nodes.Element):
    """Custom docutils node rendered as <span class="soft"> in HTML and
    in 50% gray in LaTeX."""
    pass
def soft_role(name, rawtext, text, lineno, inliner,
              options={}, content=[]):
    """Docutils role handler: wrap ``text`` in a soft node.

    Standard docutils role signature; the mutable defaults follow the
    docutils convention and are never mutated here.
    """
    node = soft(text)
    return [node], []
def visit_soft_node_html(self, node):
    # Open the span and emit the raw text; depart_ closes it.
    self.body.extend(['<span class="soft">', node.rawsource])
def depart_soft_node_html(self, node):
    # Close the <span> opened in the matching visit_ handler.
    self.body.append('</span>')
def visit_soft_node_latex(self, node):
    # Render "soft" entries in 50% gray (inline, no paragraph break).
    self.body.append('\\textcolor[gray]{{0.5}}{{{}}}'.format(node.rawsource))
def depart_soft_node_latex(self, node):
    # Nothing to close in the LaTeX writer.
    pass
def setup(app):
    """Sphinx extension hook: register the custom nodes and their roles."""
    registrations = [
        ('category', category,
         (visit_category_node_html, depart_category_node_html),
         (visit_category_node_latex, depart_category_node_latex)),
        ('entry', entry,
         (visit_entry_node_html, depart_entry_node_html),
         (visit_entry_node_latex, depart_entry_node_latex)),
        ('soft', soft,
         (visit_soft_node_html, depart_soft_node_html),
         (visit_soft_node_latex, depart_soft_node_latex)),
    ]
    role_handlers = {'category': category_role,
                     'entry': entry_role,
                     'soft': soft_role}
    for role_name, node_cls, html_visitors, latex_visitors in registrations:
        app.add_node(node_cls, html=html_visitors, latex=latex_visitors)
        app.add_role(role_name, role_handlers[role_name])
|
import os
import re
import time
import requests
from bs4 import BeautifulSoup
import crower_table
def get_html(url):
    """
    Fetch ``url`` and cache the raw HTML on disk for one day.

    The cache file name combines the URL (stripped to word characters)
    with today's date, so a fresh copy is downloaded at most once a day.

    :param url: page to fetch
    :return: the page HTML as text
    """
    out_time = time.strftime("%Y%m%d", time.localtime())
    path_url = ''.join(re.findall(r'\w+', url))
    path = 'temp/' + path_url + '-' + out_time
    if not os.path.isfile(path):
        print('writing file')
        content = requests.get(url=url, headers={
            'user-agent': 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.3; Win64; x64)'}).text
        crower_table.write_file_content(path, content)
        return content
    else:
        # Cache hit: serve the previously stored copy.
        return crower_table.get_file_content(path)
def get_proxy_ip(x, validated=False):
    """Scrape page ``x`` of the xicidaili proxy list and test each row.

    :returns: dict with the scraped rows and per-row validation results
    """
    url = 'http://www.xicidaili.com/nn/{0}'.format(x)
    soup = BeautifulSoup(get_html(url), 'lxml')
    rows = crower_table.get_table_content(soup.table)
    statuses = []
    for row in rows:
        statuses.append(validate_ip(row, validated))
    return {'results': rows, 'status': statuses}
def validate_ip(one, validated):
    """Check whether proxy row ``one`` works by fetching a test URL.

    Mutates ``one['status']`` to SUCCESS/FAILED/ERROR as a side effect.

    :param one: dict scraped from the proxy table; keys are the site's
        Chinese column headers (IP地址 = address, 端口 = port, 类型 = type)
    :param validated: when True, skip the network check and mark SUCCESS
    :returns: True / False, or the string 'error' on exception.
        NOTE(review): the truthy 'error' return is inconsistent with the
        boolean results -- confirm callers only display this value.
    """
    if validated:
        one['status'] = 'SUCCESS'
        return True
    ip = one['IP地址'] + ':' + one['端口']
    protocol = one['类型']
    test_url = 'http://baidu.com'
    try:
        proxy_host = {protocol: protocol + "://" + ip}
        html = requests.get(test_url, proxies=proxy_host, timeout=3,
                            headers={'user-agent': 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.3; Win64; x64)'})
        if html.status_code == 200:
            one['status'] = 'SUCCESS'
            print('success', proxy_host)
            return True
        else:
            one['status'] = 'FAILED'
            print('Failed', proxy_host)
            return False
    except Exception:
        # Connection errors / timeouts count as broken proxies.
        one['status'] = 'ERROR'
        return 'error'
def concat_sql(results, title_dicts):
    """Build a multi-row INSERT statement for the proxy `pool` table.

    WARNING(security): values are interpolated straight into the SQL
    string with %-formatting.  The rows come from scraped web pages
    (untrusted input), so this is SQL-injection prone -- prefer a
    parameterized query (executemany) at the call site.

    :param results: list of row dicts
    :param title_dicts: maps logical names (IP/PORT/PROTOCOL/STATUS) to
        the row keys used by the scraper
    """
    statements = ''
    for result in results:
        statements += '("%s", "%s", "%s", "%s"),'% (
            result[title_dicts['IP']], result[title_dicts['PORT']], result[title_dicts['PROTOCOL']],
            result[title_dicts['STATUS']])
    print(statements)
    # [:-1] drops the trailing comma from the VALUES list.
    return 'insert into `pool` (ip, port, protocol, status) VALUES ' + statements[:-1]
# concat_sql(get_proxy_ip(1)["results"], title_dicts)
|
import unittest
from project.factory.factory import Factory
from project.factory.paint_factory import PaintFactory
class TestPainFactory(unittest.TestCase):
    """Unit tests for PaintFactory: construction, ingredient add/remove
    validation and the products view."""
    def setUp(self):
        # Fresh factory with a fixed capacity and whitelist per test.
        self.paint = PaintFactory('Paint', 10)
        self.paint.valid_ingredients = ["white", "yellow", "blue", "green", "red"]
    def test_set_up_corect(self):
        self.assertEqual(self.paint.name, 'Paint')
        self.assertEqual(self.paint.capacity, 10)
        self.assertListEqual(self.paint.valid_ingredients, ["white", "yellow", "blue", "green", "red"])
        self.assertEqual(self.paint.ingredients, {})
    def test_paint_is_inherit_from_factory(self):
        self.assertTrue(issubclass(PaintFactory, Factory))
    def test_add_ingredient_when_type_not_in_valid_should_raise_error(self):
        with self.assertRaises(TypeError) as ex:
            self.paint.add_ingredient('black', 1)
        self.assertEqual(str(ex.exception), "Ingredient of type black not allowed in PaintFactory")
    def test_add_ingredient_when_type_is_good_but_capacity_is_less_should_raise_error(self):
        with self.assertRaises(ValueError) as ex:
            self.paint.add_ingredient('white', 20)
        self.assertEqual(str(ex.exception), "Not enough space in factory")
    def test_add_ingredient_when_all_is_ok(self):
        self.paint.add_ingredient('white', 10)
        self.assertEqual(self.paint.ingredients['white'], 10)
        # Capacity is a fixed limit; adding does not consume it.
        self.assertEqual(self.paint.capacity, 10)
    def test_add_ingr_when_values_and_capcity_is_less_then_value(self):
        self.paint.add_ingredient('white', 5)
        with self.assertRaises(ValueError) as ex:
            self.paint.add_ingredient('white', 6)
        self.assertEqual(str(ex.exception), "Not enough space in factory")
    def test_add_infredient_for_second_time(self):
        # Quantities for the same ingredient accumulate.
        self.paint.add_ingredient('white', 3)
        self.assertEqual(self.paint.ingredients['white'], 3)
        self.paint.add_ingredient('white', 3)
        self.assertEqual(self.paint.ingredients['white'], 6)
    def test_remove_ingredient_when_type_not_iningredients_should_raise_error(self):
        with self.assertRaises(KeyError) as ex:
            self.paint.remove_ingredient('black', 10)
        self.assertEqual(str(ex.exception), "'No such ingredient in the factory'")
    def test_remove_ingredient_when_quanity_is_more_then_ingredianets_quantity_shoud_raise_error(self):
        self.paint.add_ingredient('white', 5)
        with self.assertRaises(ValueError) as ex:
            self.paint.remove_ingredient('white', 11)
        self.assertEqual(str(ex.exception), 'Ingredients quantity cannot be less than zero')
    def test_rempve_ingredient_all_param_is_good(self):
        self.paint.add_ingredient('white', 10)
        self.paint.remove_ingredient('white', 5)
        self.assertEqual(self.paint.ingredients['white'], 5)
    def test_products(self):
        self.paint.add_ingredient('white', 10)
        self.assertEqual(self.paint.products, {'white': 10})
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
__author__ = 'NovikovII'
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from xml.etree import ElementTree
# Demo: read a FAST configuration XML, add an element, write a copy.
print(dir(ElementTree))

# Raw strings: backslashes in Windows paths must not be treated as
# escape sequences (the old plain literals relied on them being
# unrecognized escapes, which is deprecated).
CONFIG_PATH = r'C:\share\TestFAST\ptest_internet\configuration.xml'
OUTPUT_PATH = r'C:\share\TestFAST\ptest_internet\configuration_1.xml'

# Context manager guarantees the handle is closed even on parse errors
# (the old code only closed it at the very end of the script).
with open(CONFIG_PATH, 'r') as config_file:
    tree = ElementTree.parse(config_file)
root = tree.getroot()

# Build a small standalone element and dump it to stdout for inspection.
connections = ElementTree.Element('new_cont')
# BUG FIX: SubElement(...).set(...) returns None, and the old code bound
# that None to a variable named `type`, shadowing the builtin.  Keep a
# reference to the element itself instead.
new_c = ElementTree.SubElement(connections, 'new_c')
new_c.set('bb', 'aa')
ElementTree.dump(connections)

tree.write(OUTPUT_PATH)
# -*- coding: utf-8 -*-
import os
import logging
from dotenv import find_dotenv, load_dotenv
from constants import *
from utils import json_from_file
from src.features.build_features import collect_feature
import shutil
from settings import *
def prepare_processed_training_data():
    """
    Generate all features into processed/... folder from interim/...

    For every event in DATASET_EVENTS, reads each tweet JSON file from the
    interim folder, extracts its feature vector with collect_feature(), and
    writes one tab-separated line per tweet to train.txt plus the matching
    veracity label to train_label.txt.  Any existing processed folder is
    recreated from scratch.
    """
    logger = logging.getLogger(__name__)
    logger.info('Making processed training data set from interim data')

    # Init absolute path of folders
    processed_folder_path = os.path.join(DATA_PROCESSED_ROOT, DATASET_NAME)
    interim_folder_path = os.path.join(DATA_INTERIM_ROOT, DATASET_NAME)

    # Start from a clean output folder so stale files never survive.
    if os.path.exists(processed_folder_path):
        shutil.rmtree(processed_folder_path)
    os.makedirs(processed_folder_path)

    for event_name in DATASET_EVENTS:
        event_folder_path = os.path.join(interim_folder_path, event_name)
        list_tweet_ids = [name for name in os.listdir(event_folder_path) if
                          os.path.isfile(os.path.join(event_folder_path, name))]

        processed_event_folder_path = os.path.join(processed_folder_path, event_name)
        os.makedirs(processed_event_folder_path)

        tweet_count = len(list_tweet_ids)
        # `with` guarantees both files are closed even if feature
        # extraction raises part-way through an event.
        with open(os.path.join(processed_event_folder_path, 'train.txt'), "w") as train_processed_file, \
                open(os.path.join(processed_event_folder_path, 'train_label.txt'), "w") as train_processed_label_file:
            for index, tweet_id in enumerate(list_tweet_ids):
                # Progress indicator.  The old `print event_name , '+', index`
                # was a Python 2 print statement — a SyntaxError on Python 3.
                print('{} + {}'.format(event_name, index))
                source_tweet = json_from_file(os.path.join(event_folder_path, tweet_id))
                features = collect_feature(source_tweet)
                features_str = "\t".join([str(i) for i in features])
                train_processed_file.write(features_str)
                train_processed_label_file.write(
                    str(VERACITY_LABELS_MAPPING[source_tweet['veracity']]))
                # No trailing newline after the final tweet of an event.
                if index != tweet_count - 1:
                    train_processed_file.write('\n')
                    train_processed_label_file.write('\n')
if __name__ == '__main__':
    # Configure root logging before any module logger emits records.
    fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=fmt)

    # Walk up the directory tree until a .env file is found, then export
    # its entries as environment variables before building the data set.
    load_dotenv(find_dotenv())

    prepare_processed_training_data()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: illuz <iilluzen[at]gmail.com>
# File: AC_simulation_n.py
# Create Date: 2015-07-26 11:35:19
# Usage: AC_simulation_n.py
# Descripton:
class Solution:
    # @param {integer[]} nums
    # @return {string[]}
    def summaryRanges(self, nums):
        """Collapse a sorted integer list into range summaries.

        Example: [0, 1, 2, 4, 5, 7] -> ["0->2", "4->5", "7"].

        Unlike the previous version, the caller's list is never mutated
        (the old code appended a sentinel element and popped it again,
        which is surprising for callers and unsafe under concurrency).
        """
        res = []
        if not nums:
            return res
        start = 0
        for i in range(len(nums)):
            # Close the current run at the last element or at a gap.
            if i == len(nums) - 1 or nums[i] + 1 != nums[i + 1]:
                if start == i:
                    res.append(str(nums[i]))
                else:
                    res.append(str(nums[start]) + '->' + str(nums[i]))
                start = i + 1
        return res
|
from itertools import product
import numpy as np
import scipy.sparse
import numba
from ..utils import isscalar, PositinalArgumentPartial, _zero_of_dtype
from ..compatibility import range, zip, zip_longest
def elemwise(func, *args, **kwargs):
    """
    Apply a function to any number of arguments.

    Parameters
    ----------
    func : Callable
        The function to apply. Must support broadcasting.
    args : tuple, optional
        The arguments to the function. Can be :obj:`SparseArray` objects
        or :obj:`scipy.sparse.spmatrix` objects.
    kwargs : dict, optional
        Any additional arguments to pass to the function.

    Returns
    -------
    COO
        The result of applying the function.

    Raises
    ------
    ValueError
        If the operation would result in a dense matrix, or if the operands
        don't have broadcastable shapes.

    See Also
    --------
    :obj:`numpy.ufunc` : A similar Numpy construct. Note that any :code:`ufunc` can be used
        as the :code:`func` input to this function.

    Notes
    -----
    Previously, operations with Numpy arrays were sometimes supported. Now,
    it is necessary to convert Numpy arrays to :obj:`COO` objects.
    """
    # Imported here to avoid a circular import at module load time.
    from .core import COO
    from ..sparse_array import SparseArray

    operands = list(args)
    scalar_positions = []
    scalar_values = []
    for idx, operand in enumerate(operands):
        if isinstance(operand, scipy.sparse.spmatrix):
            operands[idx] = COO.from_scipy_sparse(operand)
        elif isscalar(operand) or (isinstance(operand, np.ndarray)
                                   and not operand.shape):
            # Faster and more reliable to pass ()-shaped ndarrays as scalars.
            operands[idx] = np.asarray(operand)[()]
            scalar_positions.append(idx)
            scalar_values.append(operands[idx])
        elif isinstance(operand, SparseArray) and not isinstance(operand, COO):
            operands[idx] = COO(operand)
        elif not isinstance(operand, COO):
            return NotImplemented

    # Scalars are 'baked' into the function; only sparse operands remain.
    func = PositinalArgumentPartial(func, scalar_positions, scalar_values)
    sparse_operands = [op for op in operands if not isscalar(op)]

    if not sparse_operands:
        return func(**kwargs)

    return _elemwise_n_ary(func, *sparse_operands, **kwargs)
@numba.jit(nopython=True)
def _match_arrays(a, b): # pragma: no cover
    """
    Finds all indexes into a and b such that a[i] = b[j]. The outputs are sorted
    in lexographical order.

    Parameters
    ----------
    a, b : np.ndarray
        The input 1-D arrays to match. If matching of multiple fields is
        needed, use np.recarrays. These two arrays must be sorted.

    Returns
    -------
    a_idx, b_idx : np.ndarray
        The output indices of every possible pair of matching elements.
    """
    # Nothing can match against an empty array.
    if len(a) == 0 or len(b) == 0:
        return np.empty(0, dtype=np.uintp), np.empty(0, dtype=np.uintp)
    a_ind, b_ind = [], []
    nb = len(b)
    ib = 0
    # `match` is the resume point in b: because both inputs are sorted, the
    # scan for each element of `a` can restart there instead of at index 0.
    match = 0
    for ia, j in enumerate(a):
        if j == b[match]:
            ib = match
        # Walk b while its values do not exceed j, recording exact matches.
        while ib < nb and j >= b[ib]:
            if j == b[ib]:
                a_ind.append(ia)
                b_ind.append(ib)
            # Advance the resume point past values already known to be < j.
            if b[match] < b[ib]:
                match = ib
            ib += 1
    return np.array(a_ind, dtype=np.uintp), np.array(b_ind, dtype=np.uintp)
def _elemwise_n_ary(func, *args, **kwargs):
    """
    Apply a function to any number of arguments with broadcasting.

    Parameters
    ----------
    func : Callable
        The function to apply to arguments. Must support broadcasting.
    args : list
        Input :obj:`COO` or :obj:`numpy.ndarray`s.
    kwargs : dict
        Additional arguments to pass to the function.

    Returns
    -------
    COO
        The output array.

    Raises
    ------
    ValueError
        If the input shapes aren't compatible or the result will be dense.
    """
    from .core import COO

    args = list(args)
    # Evaluate func on all-zero inputs: if zeros do not map to zero, the
    # result would be dense, which this sparse implementation rejects.
    # NOTE(review): np.dtype(arg) is applied to the operands directly —
    # presumably COO supports the dtype protocol; confirm.
    args_zeros = tuple(_zero_of_dtype(np.dtype(arg)) for arg in args)

    func_value = func(*args_zeros, **kwargs)
    func_zero = _zero_of_dtype(func_value.dtype)
    if func_value != func_zero:
        raise ValueError("Performing this operation would produce "
                         "a dense result: %s" % str(func))
    data_list = []
    coords_list = []
    # Shared cache so _match_coo does not re-match the same operand pairs
    # across the 2**n - 1 mask combinations below.
    cache = {}
    # Every non-empty subset of operands: the mask marks which operands are
    # treated as nonzero for that contribution.
    for mask in product([True, False], repeat=len(args)):
        if not any(mask):
            continue

        ci, di = _unmatch_coo(func, args, mask, cache, **kwargs)

        coords_list.extend(ci)
        data_list.extend(di)

    result_shape = _get_nary_broadcast_shape(*[arg.shape for arg in args])

    # Concatenate matches and mismatches
    data = np.concatenate(data_list) if len(data_list) else np.empty((0,), dtype=func_value.dtype)
    coords = np.concatenate(coords_list, axis=1) if len(coords_list) else \
        np.empty((0, len(result_shape)), dtype=np.min_scalar_type(max(result_shape) - 1))

    # Drop explicit zeros the function may have produced.
    nonzero = data != func_zero
    data = data[nonzero]
    coords = coords[:, nonzero]

    return COO(coords, data, shape=result_shape, has_duplicates=False)
def _match_coo(*args, **kwargs):
    """
    Matches the coordinates for any number of input :obj:`COO` arrays.
    Equivalent to "sparse" broadcasting for all arrays.

    Parameters
    ----------
    args : Tuple[COO]
        The input :obj:`COO` arrays.
    return_midx : bool
        Whether to return matched indices or matched arrays. Matching
        only supported for two arrays. ``False`` by default.
    cache : dict
        Cache of things already matched. No cache by default.

    Returns
    -------
    matched_idx : List[ndarray]
        The indices of matched elements in the original arrays. Only returned if
        ``return_midx`` is ``True``.
    matched_arrays : List[COO]
        The expanded, matched :obj:`COO` objects. Only returned if
        ``return_midx`` is ``False``.
    """
    from .core import COO
    from .common import linear_loc

    return_midx = kwargs.pop('return_midx', False)
    cache = kwargs.pop('cache', None)

    if kwargs:
        raise ValueError('Unknown kwargs %s' % kwargs.keys())

    if return_midx and (len(args) != 2 or cache is not None):
        raise NotImplementedError('Matching indices only supported for two args, and no cache.')

    # Fold operands in one at a time: match the accumulated result against
    # the next operand.  The cache key is the tuple of operand ids folded
    # so far.
    matched_arrays = [args[0]]
    cache_key = [id(args[0])]
    for arg2 in args[1:]:
        cache_key.append(id(arg2))
        key = tuple(cache_key)
        if cache is not None and key in cache:
            matched_arrays = cache[key]
            continue

        cargs = [matched_arrays[0], arg2]
        current_shape = _get_broadcast_shape(matched_arrays[0].shape, arg2.shape)
        params = [_get_broadcast_parameters(arg.shape, current_shape) for arg in cargs]
        # A dimension is kept ("reduced") only when no operand broadcasts it.
        reduced_params = [all(p) for p in zip(*params)]
        reduced_shape = _get_reduced_shape(arg2.shape,
                                           reduced_params[-arg2.ndim:])

        reduced_coords = [_get_reduced_coords(arg.coords, reduced_params[-arg.ndim:])
                          for arg in cargs]

        # Linearize reduced coordinates so the 1-D matcher can be used;
        # _match_arrays requires sorted input, hence the argsort.
        linear = [linear_loc(rc, reduced_shape) for rc in reduced_coords]
        sorted_idx = [np.argsort(idx) for idx in linear]
        linear = [idx[s] for idx, s in zip(linear, sorted_idx)]
        matched_idx = _match_arrays(*linear)

        if return_midx:
            # Map positions in the sorted order back to positions in the
            # original (unsorted) arrays.
            matched_idx = [sidx[midx] for sidx, midx in zip(sorted_idx, matched_idx)]
            return matched_idx

        # Gather matched coordinates/data and rebuild expanded COO arrays.
        coords = [arg.coords[:, s] for arg, s in zip(cargs, sorted_idx)]
        mcoords = [c[:, idx] for c, idx in zip(coords, matched_idx)]
        mcoords = _get_matching_coords(mcoords, params, current_shape)
        mdata = [arg.data[sorted_idx[0]][matched_idx[0]] for arg in matched_arrays]
        mdata.append(arg2.data[sorted_idx[1]][matched_idx[1]])
        matched_arrays = [COO(mcoords, md, shape=current_shape) for md in mdata]

        if cache is not None:
            cache[key] = matched_arrays

    return matched_arrays
def _unmatch_coo(func, args, mask, cache, **kwargs):
    """
    Matches the coordinates for any number of input :obj:`COO` arrays.

    First computes the matches, then filters out the non-matches.

    Parameters
    ----------
    func : Callable
        The function to compute matches
    args : tuple[COO]
        The input :obj:`COO` arrays.
    mask : tuple[bool]
        Specifies the inputs that are zero and the ones that are
        nonzero.
    cache : dict
        Cache of already-matched operand combinations, forwarded to
        :obj:`_match_coo`.
    kwargs: dict
        Extra keyword arguments to pass to func.

    Returns
    -------
    matched_coords : list[ndarray]
        The matched coordinates.
    matched_data : list[ndarray]
        The matched data.
    """
    from .core import COO

    # Split operands by the mask: "matched" operands are treated as
    # nonzero; the rest are baked into `func` as zero scalars.
    matched_args = [a for a, m in zip(args, mask) if m]
    unmatched_args = [a for a, m in zip(args, mask) if not m]

    matched_arrays = _match_coo(*matched_args, cache=cache)

    pos = tuple(i for i, m in enumerate(mask) if not m)
    posargs = [_zero_of_dtype(arg.dtype) for arg, m in zip(args, mask) if not m]
    result_shape = _get_nary_broadcast_shape(*[arg.shape for arg in args])

    partial = PositinalArgumentPartial(func, pos, posargs)
    matched_func = partial(*[a.data for a in matched_arrays], **kwargs)

    # Drop entries where the function produced a zero anyway.
    unmatched_mask = matched_func != _zero_of_dtype(matched_func.dtype)

    if not unmatched_mask.any():
        return [], []

    func_data = matched_func[unmatched_mask]
    func_coords = matched_arrays[0].coords[:, unmatched_mask]
    func_array = COO(func_coords, func_data, shape=matched_arrays[0].shape).broadcast_to(result_shape)

    if all(mask):
        return [func_array.coords], [func_array.data]

    # BUGFIX: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `bool` is the documented replacement and is equivalent.
    unmatched_mask = np.ones(func_array.nnz, dtype=bool)

    # Remove positions that also occur in any operand treated as zero —
    # those are handled by a different mask combination.
    for arg in unmatched_args:
        matched_idx = _match_coo(func_array, arg, return_midx=True)[0]
        unmatched_mask[matched_idx] = False

    coords = np.asarray(func_array.coords[:, unmatched_mask], order='C')
    data = np.asarray(func_array.data[unmatched_mask], order='C')

    return [coords], [data]
def _get_nary_broadcast_shape(*shapes):
    """
    Broadcast any number of shapes to a single result shape.

    Parameters
    ----------
    shapes : tuple[tuple[int]]
        The shapes to broadcast.

    Returns
    -------
    tuple[int]
        The output shape.

    Raises
    ------
    ValueError
        If the input shapes cannot be broadcast to a single shape.
    """
    broadcast = ()
    for current in shapes:
        try:
            # Fold each shape pairwise into the running result.
            broadcast = _get_broadcast_shape(current, broadcast)
        except ValueError:
            raise ValueError(
                'operands could not be broadcast together with shapes %s'
                % ', '.join(str(s) for s in shapes))
    return broadcast
def _get_broadcast_shape(shape1, shape2, is_result=False):
"""
Get the overall broadcasted shape.
Parameters
----------
shape1, shape2 : tuple[int]
The input shapes to broadcast together.
is_result : bool
Whether or not shape2 is also the result shape.
Returns
-------
result_shape : tuple[int]
The overall shape of the result.
Raises
------
ValueError
If the two shapes cannot be broadcast together.
"""
# https://stackoverflow.com/a/47244284/774273
if not all((l1 == l2) or (l1 == 1) or ((l2 == 1) and not is_result) for l1, l2 in
zip(shape1[::-1], shape2[::-1])):
raise ValueError('operands could not be broadcast together with shapes %s, %s' %
(shape1, shape2))
result_shape = tuple(max(l1, l2) for l1, l2 in
zip_longest(shape1[::-1], shape2[::-1], fillvalue=1))[::-1]
return result_shape
def _get_broadcast_parameters(shape, broadcast_shape):
"""
Get the broadcast parameters.
Parameters
----------
shape : tuple[int]
The input shape.
broadcast_shape
The shape to broadcast to.
Returns
-------
params : list
A list containing None if the dimension isn't in the original array, False if
it needs to be broadcast, and True if it doesn't.
"""
params = [None if l1 is None else l1 == l2 for l1, l2
in zip_longest(shape[::-1], broadcast_shape[::-1], fillvalue=None)][::-1]
return params
def _get_reduced_coords(coords, params):
"""
Gets only those dimensions of the coordinates that don't need to be broadcast.
Parameters
----------
coords : np.ndarray
The coordinates to reduce.
params : list
The params from which to check which dimensions to get.
Returns
-------
reduced_coords : np.ndarray
The reduced coordinates.
"""
reduced_params = [bool(param) for param in params]
return coords[reduced_params]
def _get_reduced_shape(shape, params):
"""
Gets only those dimensions of the coordinates that don't need to be broadcast.
Parameters
----------
coords : np.ndarray
The coordinates to reduce.
params : list
The params from which to check which dimensions to get.
Returns
-------
reduced_coords : np.ndarray
The reduced coordinates.
"""
reduced_shape = tuple(l for l, p in zip(shape, params) if p)
return reduced_shape
def _get_expanded_coords_data(coords, data, params, broadcast_shape):
    """
    Expand coordinates/data to broadcast_shape. Does most of the heavy lifting for broadcast_to.
    Produces sorted output for sorted inputs.

    Parameters
    ----------
    coords : np.ndarray
        The coordinates to expand.
    data : np.ndarray
        The data corresponding to the coordinates.
    params : list
        The broadcast parameters.
    broadcast_shape : tuple[int]
        The shape to broadcast to.

    Returns
    -------
    expanded_coords : np.ndarray
        List of 1-D arrays. Each item in the list has one dimension of coordinates.
    expanded_data : np.ndarray
        The data corresponding to expanded_coords.
    """
    first_dim = -1
    expand_shapes = []
    # Build the per-axis iteration sizes: the first non-broadcast dimension
    # contributes one entry per existing nonzero (coords.shape[1]); every
    # broadcast dimension (falsy p) contributes its full axis length.
    # NOTE(review): if every param is falsy, first_dim stays -1 and indexes
    # the last axis below — presumably unreachable here; confirm.
    for d, p, l in zip(range(len(broadcast_shape)), params, broadcast_shape):
        if p and first_dim == -1:
            expand_shapes.append(coords.shape[1])
            first_dim = d

        if not p:
            expand_shapes.append(l)

    # Cross product of nonzero positions with every index of each
    # broadcast axis.
    all_idx = _cartesian_product(*(np.arange(d, dtype=np.min_scalar_type(d - 1)) for d in expand_shapes))
    # Smallest integer dtype able to index every axis of the result.
    dt = np.result_type(*(np.min_scalar_type(l - 1) for l in broadcast_shape))

    false_dim = 0
    dim = 0

    expanded_coords = np.empty((len(broadcast_shape), all_idx.shape[1]), dtype=dt)
    # Replicate each data value once per combination of broadcast indices.
    expanded_data = data[all_idx[first_dim]]
    for d, p, l in zip(range(len(broadcast_shape)), params, broadcast_shape):
        if p:
            # Existing dimension: replicate the original coordinate row.
            expanded_coords[d] = coords[dim, all_idx[first_dim]]
        else:
            # Broadcast dimension: take the enumerated axis positions.
            expanded_coords[d] = all_idx[false_dim + (d > first_dim)]
            false_dim += 1

        if p is not None:
            dim += 1

    return np.asarray(expanded_coords), np.asarray(expanded_data)
# (c) senderle
# Taken from https://stackoverflow.com/a/11146645/774273
# License: https://creativecommons.org/licenses/by-sa/3.0/
def _cartesian_product(*arrays):
"""
Get the cartesian product of a number of arrays.
Parameters
----------
arrays : Tuple[np.ndarray]
The arrays to get a cartesian product of. Always sorted with respect
to the original array.
Returns
-------
out : np.ndarray
The overall cartesian product of all the input arrays.
"""
broadcastable = np.ix_(*arrays)
broadcasted = np.broadcast_arrays(*broadcastable)
rows, cols = np.prod(broadcasted[0].shape), len(broadcasted)
dtype = np.result_type(*arrays)
out = np.empty(rows * cols, dtype=dtype)
start, end = 0, rows
for a in broadcasted:
out[start:end] = a.reshape(-1)
start, end = end, end + rows
return out.reshape(cols, rows)
def _get_matching_coords(coords, params, shape):
    """
    Get the matching coords across a number of broadcast operands.

    Parameters
    ----------
    coords : list[numpy.ndarray]
        The input coordinates.
    params : list[Union[bool, none]]
        The broadcast parameters.
    shape : tuple[int]
        The broadcast result shape; used only to size the output dtype.

    Returns
    -------
    numpy.ndarray
        The broacasted coordinates
    """
    matching_coords = []
    # Per-operand cursor into its coordinate rows (uint8: ndim is small).
    dims = np.zeros(len(coords), dtype=np.uint8)

    # For each result dimension, take the coordinate row from the first
    # operand that has this dimension unbroadcast.
    for p_all in zip(*params):
        for i, p in enumerate(p_all):
            if p:
                matching_coords.append(coords[i][dims[i]])
                break
        else:
            # No operand matched this dimension.
            # NOTE(review): this indexes the *list* of operand coords with
            # dims[0] instead of picking a row of one operand — looks
            # suspicious; confirm whether this branch is reachable.
            matching_coords.append(coords[dims[0]])

        # Advance the cursor of every operand that has this dimension.
        for i, p in enumerate(p_all):
            if p is not None:
                dims[i] += 1

    dtype = np.min_scalar_type(max(shape) - 1)

    return np.asarray(matching_coords, dtype=dtype)
def broadcast_to(x, shape):
    """
    Performs the equivalent of :obj:`numpy.broadcast_to` for :obj:`COO`. Note that
    this function returns a new array instead of a view.

    Parameters
    ----------
    shape : tuple[int]
        The shape to broadcast the data to.

    Returns
    -------
    COO
        The broadcasted sparse array.

    Raises
    ------
    ValueError
        If the operand cannot be broadcast to the given shape.

    See also
    --------
    :obj:`numpy.broadcast_to` : NumPy equivalent function
    """
    from .core import COO

    # Broadcasting to the identical shape is a no-op.
    if x.shape == shape:
        return x

    target_shape = _get_broadcast_shape(x.shape, shape, is_result=True)
    bparams = _get_broadcast_parameters(x.shape, target_shape)
    new_coords, new_data = _get_expanded_coords_data(x.coords, x.data, bparams, target_shape)

    # Expansion preserves ordering and introduces no duplicate coordinates.
    return COO(new_coords, new_data, shape=target_shape, has_duplicates=False,
               sorted=True)
|
from pydantic import BaseModel
from typing import Optional
from tottle.types.objects.location import Location
class Venue(BaseModel):
    """A named place: a geographic location plus human-readable metadata
    and optional Foursquare identifiers."""

    # NOTE(review): unlike every other field, `location` has no explicit
    # `= None` default; under pydantic v2 this makes the field required
    # (it was implicitly optional under v1) — confirm intent.
    location: Optional[Location]
    title: Optional[str] = None
    address: Optional[str] = None
    foursquare_id: Optional[str] = None
    foursquare_type: Optional[str] = None
|
# import psycopg2 as psql
# import time
from abc import ABC
from typing import List, Optional

from ultitrackerapi import models
class Backend(ABC):
    """Abstract storage interface for the ultitracker API.

    Concrete backends (see :class:`InMemoryBackend`) provide user
    management, game retrieval/creation and annotation insertion.

    NOTE(review): the methods are plain ``pass`` stubs rather than
    ``@abstractmethod``s, so incomplete subclasses still instantiate
    (e.g. InMemoryBackend does not implement all of them) — confirm this
    is intentional before tightening.
    """

    def get_user(self, username: str) -> models.User:
        """Return the user record for ``username`` (or a falsy value if absent)."""
        pass
    def add_user(self, user: models.UserInDB) -> bool:
        """Store a new user; return whether the user was added."""
        pass
    def username_exists(self, username: str) -> bool:
        """Return whether ``username`` is already registered."""
        pass
    def authenticate_user(self, username: str, password: str) -> models.User:
        """Check credentials and return the matching user on success."""
        pass
    def get_game(self, game_id: str, user: models.User) -> models.GameResponse:
        """Return the game ``game_id`` as visible to ``user``."""
        pass
    def get_game_list(self, user: models.User) -> models.GameListResponse:
        """Return all games visible to ``user``."""
        pass
    # NOTE(review): the mutable defaults ([] / {}) below are created once
    # and shared across calls; implementations should not mutate them.
    def add_game(
        self,
        user: models.User,
        game_id: str,
        authorized_users: List[str] = [],
        data: dict = {},
    ) -> bool:
        """Create a game owned by ``user``; return whether it was created."""
        pass
    def insert_annotation(
        self,
        user: models.User,
        img_id: str,
        annotation_table: models.AnnotationTable,
        annotation_data: dict
    ) -> bool:
        """Attach annotation data for an image; return whether it was stored."""
        pass
class InMemoryBackend(Backend):
    """Dict-backed :class:`Backend` for tests and local development.

    State lives only in process memory: ``_user_db`` maps username ->
    user model; ``_game_db`` maps username -> :class:`models.GameList`.
    """

    def __init__(self, game_db: Optional[dict] = None, user_db: Optional[dict] = None):
        # BUGFIX: the previous mutable defaults ({}) were created once at
        # function definition time and therefore shared by every backend
        # instance constructed without arguments.  None sentinels give each
        # instance its own fresh dictionaries; passing explicit dicts
        # behaves exactly as before.
        self._game_db = {} if game_db is None else game_db
        self._user_db = {} if user_db is None else user_db

    def get_user(self, username: str) -> models.User:
        """Return the stored user record for ``username`` or ``None``."""
        return self._user_db.get(username, None)

    def initialize_user(self, user: models.User):
        """Ensure ``user`` has an (initially empty) game list."""
        if user.username not in self._game_db:
            self._game_db[user.username] = models.GameList(game_list=[])

    def add_user(self, user: models.UserInDB) -> bool:
        """Add ``user``; return False when the username is already taken."""
        if user.username in self._user_db:
            return False
        user.disabled = False
        self._user_db.update({user.username: user})
        return True

    def username_exists(self, username: str) -> bool:
        """Return True when ``username`` is registered."""
        return username in self._user_db

    def get_game(self, game_id: str, user: models.User) -> models.GameResponse:
        """Return the user's game with ``game_id``, or ``None`` if absent."""
        self.initialize_user(user)
        # Linear scan is acceptable for an in-memory/dev backend.
        for game in self._game_db[user.username].game_list:
            if game_id == game.game_id:
                return models.GameResponse(
                    data=game.data, game_id=game.game_id
                )
        return None

    def get_game_list(self, user: models.User) -> models.GameListResponse:
        """Return every game of ``user`` wrapped in a GameListResponse."""
        self.initialize_user(user)
        return models.GameListResponse(
            game_list=[
                models.GameResponse(data=game.data, game_id=game.game_id)
                for game in self._game_db[user.username].game_list
            ]
        )

    def add_game(
        self,
        user: models.User,
        game_id: str,
        additional_authorized_users: Optional[List[str]] = None,
        data: Optional[dict] = None,
    ) -> bool:
        """Create a game owned by ``user``; always returns True.

        BUGFIX: the previous ``[]``/``{}`` defaults were shared across
        calls; None sentinels preserve the per-call behaviour safely.
        """
        self.initialize_user(user)
        self._game_db[user.username].add_game(
            models.Game(
                authorized_users=[user.username] + (additional_authorized_users or []),
                data={} if data is None else data,
                game_id=game_id,
            )
        )
        return True
|
def day1(nums=None):
    """Find two distinct entries summing to 2020; print and return their product.

    Parameters
    ----------
    nums : list[int], optional
        Numbers to search.  Defaults to the module-level ``numbers`` read
        from the input file, preserving the original script behaviour.

    Returns
    -------
    int or None
        Product of the first matching pair, or None when no pair sums to 2020.
    """
    if nums is None:
        nums = numbers
    for i in range(len(nums) - 1):
        # Start at i + 1 so an entry is never paired with itself; the old
        # range started at i, allowing x + x == 2020.
        for j in range(i + 1, len(nums)):
            if nums[i] + nums[j] == 2020:
                print("day 1\nNumbers: {},{}\nProduct: {}".format(
                    nums[i], nums[j], nums[i] * nums[j]))
                # `return` stops the whole search; the old `break` only
                # left the inner loop, so scanning continued needlessly.
                return nums[i] * nums[j]
    return None
def day2(nums=None):
    """Find three distinct entries summing to 2020; print and return their product.

    Parameters
    ----------
    nums : list[int], optional
        Numbers to search.  Defaults to the module-level ``numbers``.

    Returns
    -------
    int or None
        Product of the first matching triple, or None when none sums to 2020.
    """
    if nums is None:
        nums = numbers
    for i in range(len(nums) - 2):
        # j and k start past the previous index so no entry is reused.
        for j in range(i + 1, len(nums) - 1):
            # Prune: keep the original "< 2020" filter (assumes the third
            # number is positive, as in the puzzle input).
            if nums[i] + nums[j] < 2020:
                for k in range(j + 1, len(nums)):
                    if nums[i] + nums[j] + nums[k] == 2020:
                        print("day 2\nNumbers: {},{},{}\nProduct: {}".format(
                            nums[i], nums[j], nums[k],
                            nums[i] * nums[j] * nums[k]))
                        # Return exits all three loops; the old `break`
                        # only left the innermost one.
                        return nums[i] * nums[j] * nums[k]
    return None
# Read the puzzle input (one integer per line) and run both parts.
# `with` closes the file even on error; the old try/finally raised a
# NameError from `f.close()` whenever open() itself failed, because `f`
# was never bound.
with open('inputs/day1') as f:
    numbers = [int(line.strip()) for line in f]
print(numbers)
day1()
day2()
|
import unittest
from urwid.compat import B
from pyfx.view.json_lib import NodeFactory, DEFAULT_NODE_IMPLS
class ArrayNodeTest(unittest.TestCase):
    """
    unit tests for :py:class:`pyfx.view.json_lib.array.array_node.ArrayNode`
    """

    # render width shared by every test case
    _WIDTH = 18

    def setUp(self):
        self._node_factory = NodeFactory(DEFAULT_NODE_IMPLS)

    @staticmethod
    def _texts(contents):
        # Keep only the text runs (third tuple element) of each canvas row.
        return [[[t[2] for t in row] for row in content]
                for content in contents]

    def _walk_forward(self, data):
        """Expand every node while walking widgets in-order from the root;
        return the rendered texts, one entry per widget."""
        node = self._node_factory.create_root_node(data)
        widget = node.get_widget()
        contents = []
        while widget is not None:
            node = widget.get_node()
            if not node.is_expanded():
                node.toggle_expanded()
                # re-fetch: toggling may invalidate the cached widget
                widget = node.get_widget()
            contents.append(widget.render((self._WIDTH,)).content())
            widget = widget.next_inorder()
        return self._texts(contents)

    def _walk_backward(self, data):
        """Walk widgets from the end node via prev_inorder (no expansion)
        and return the rendered texts in document order."""
        node = self._node_factory.create_root_node(data)
        widget = node.get_end_node().get_widget()
        contents = []
        while widget is not None:
            contents.append(widget.render((self._WIDTH,)).content())
            widget = widget.prev_inorder()
        texts = self._texts(contents)
        texts.reverse()
        return texts

    def test_empty_list(self):
        """ test rendering of an empty JSON object"""
        data = []

        # act: forward scan (with expansion), then a fresh backward scan
        texts = self._walk_forward(data)
        texts_from_end = self._walk_backward(data)

        # verify
        self.assertEqual(2, len(texts))
        expected = [
            [[B("[ ")]],
            [[B("] ")]],
        ]
        self.assertEqual(expected, texts)
        self.assertEqual(expected, texts_from_end)

    def test_simple_array(self):
        """
        test rendering a not-nested array
        """
        data = [
            1,
            2,
            "str"
        ]

        # act
        texts = self._walk_forward(data)

        # verify
        self.assertEqual(5, len(texts))
        expected = [
            [[B("[ ")]],
            [[B(" "), B('1'), B(' ')]],
            [[B(" "), B('2'), B(' ')]],
            [[B(" "), B('"str"'), B(' ')]],
            [[B("] ")]]
        ]
        self.assertEqual(expected, texts)

    def test_nested_array(self):
        """
        test rendering a nested array
        """
        data = [
            1,
            2,
            [
                "str",
                True
            ]
        ]

        # act
        texts = self._walk_forward(data)

        # verify
        self.assertEqual(8, len(texts))
        expected = [
            [[B("[ ")]],
            [[B(" "), B('1'), B(' ')]],
            [[B(" "), B('2'), B(' ')]],
            [[B(" "), B("[ ")]],
            [[B(" "), B('"str"'), B(' ')]],
            [[B(" "), B('true'), B(' ')]],
            [[B(" "), B("] ")]],
            [[B("] ")]]
        ]
        self.assertEqual(expected, texts)

    def test_array_with_object_child(self):
        """
        test rendering an array with object as a child
        """
        data = [
            1,
            2,
            {
                "test": True
            }
        ]

        # act
        texts = self._walk_forward(data)

        # verify
        self.assertEqual(7, len(texts))
        expected = [
            [[B("[ ")]],
            [[B(" "), B('1'), B(' ')]],
            [[B(" "), B('2'), B(' ')]],
            [[B(" "), B("{ ")]],
            [[B(" "), B('"test"'), B(": "), B("true")]],
            [[B(" "), B("} ")]],
            [[B("] ")]]
        ]
        self.assertEqual(expected, texts)

    def test_prev_order(self):
        data = [
            1,
            2
        ]

        # act: scan from the end without expanding anything first
        texts = self._walk_backward(data)

        # verify
        self.assertEqual(4, len(texts))
        expected = [
            [[B("[ ")]],
            [[B(" "), B('1'), B(' ')]],
            [[B(" "), B('2'), B(' ')]],
            [[B("] ")]]
        ]
        self.assertEqual(expected, texts)
|
import sys
sys.path.append(".")
from unittest import TestCase
from mykrobe.variants.schema.models import VariantCall
from mykrobe.variants.schema.models import Variant
from mykrobe.predict import TBPredictor
class AMRPredictTest(TestCase):
    """Tests for TBPredictor's coverage-threshold handling."""

    def setUp(self):
        self.variant_snp = Variant.create(start=0, end=1, reference_bases="A",
                                          alternate_bases=["T"])
        self.predictor = TBPredictor(variant_calls={},
                                     called_genes={})

    def tearDown(self):
        # BUGFIX: renamed from `teardown` — unittest only invokes the
        # camelCase `tearDown` hook, so the old spelling never ran.
        pass

    def test_wt_vars(self):
        # A call whose alternate-allele depth is far below the reference
        # depth, so the coverage threshold check must reject it.
        call = {
            "variant": None,
            "genotype": [
                0,
                1],
            "genotype_likelihoods": [
                0.1,
                0.9,
                0.12],
            "info": {
                "contamination_depths": [],
                "coverage": {
                    "alternate": {
                        "percent_coverage": 100.0,
                        "median_depth": 15,
                        "min_depth": 2},
                    "reference": {
                        "percent_coverage": 100.0,
                        "median_depth": 139,
                        "min_depth": 128}},
                "expected_depths": [152]}}
        # assertFalse reports failures more clearly than `== False`.
        self.assertFalse(
            self.predictor._coverage_greater_than_threshold(call, [""]))
|
### rpg_queries.py
#
# runs exploratory queries on the MongoDB rpg database,
# answers questions from module 1

### ----- Connect to the database ------

# load environment variables for the MongoDB login
# (credentials come from .env so they never live in source control)
import os
from dotenv import load_dotenv
load_dotenv()

DB_USER = os.getenv("MONGO_USER", default="OOPS")
DB_PASSWORD = os.getenv("MONGO_PASSWORD", default="OOPS")
CLUSTER_NAME = os.getenv("MONGO_CLUSTER_NAME", default="OOPS")

# open a connection to the MongoDB database (Atlas SRV connection string)
import pymongo
connection_uri = f"mongodb+srv://{DB_USER}:{DB_PASSWORD}@{CLUSTER_NAME}.mongodb.net/test?retryWrites=true&w=majority"
client = pymongo.MongoClient(connection_uri)

### ----- Questions -----
# Using the original schema copied from sqlite
print("----- Using the old sqlite schema -----")
db = client.rpg_database

# How many total characters are there?
print("How many total characters are there?")
count = db.charactercreator_character.count_documents({})
print(count, 'characters')

# How many of each specific subclass?
# (each subclass is its own collection in the sqlite-shaped schema)
print('\nHow many of each specific subclass?')
print(db.charactercreator_cleric.count_documents({}), 'clerics')
print(db.charactercreator_fighter.count_documents({}), 'fighters')
print(db.charactercreator_mage.count_documents({}), 'mages')
print(db.charactercreator_necromancer.count_documents({}), 'necromancers')
print(db.charactercreator_thief.count_documents({}), 'thieves')

# How many total items?
print('\nHow many total items?')
item_count = db.armory_item.count_documents({})
print(item_count, 'items')

# How many of the items are weapons? How many are not?
print('\nHow many of the items are weapons? How many are not?')
weapon_count = db.armory_weapon.count_documents({})
print(weapon_count, 'are weapons')
print(item_count - weapon_count, 'are not weapons')

# The last four questions required aggregates,
# and to be honest I could not get them working
# (the attempted aggregation pipeline is kept below for reference)

# # How many items does each character have? (1st 20 rows)
# print('\nHow many items does each character have? (1st 20 rows)')
# pipeline = [
#     {"$group": {"_id": "$Character_id", "count": {"$sum": 1}}}
# ]
# for line in list(db.charactercreator_inventory.aggregate(pipeline)):
#     print(line)

# # How many weapons does each character have? (1st 20 rows)

# # On average, how many items does each character have?

# # On average, how many weapons does each character have?

### ----- Questions -----
# Using the schema I modified yesterday (only 2 collections)
print('\n\n----- Now using the new schema -----')
db = client.rpg_database_2

# How many total characters are there?
print("How many total characters are there?")
count = db.characters.count_documents({})
print(count, 'characters')

# How many of each specific subclass?
print("\nHow many of each specific class?")
classes = db.characters.distinct('class') #get all distinct classes
for c in classes:
    # count each distinct class
    count = db.characters.count_documents({"class":c})
    print(c + 's:', count)

print("\nHow many of each specific subclass?")
classes = db.characters.distinct('subclass') #get all distinct classes
for c in classes:
    # count each distinct subclass
    count = db.characters.count_documents({"subclass":c})
    # pick the first character with this subclass and get its base class
    base_class = db.characters.find({"subclass":c}, limit=1)[0]['class']
    print(f'{c}s: {count} (base class: {base_class})')

# How many total items?
print('\nHow many total items?')
count = db.items.count_documents({})
print(count, 'items')

# How many of the items are weapons? How many are not?
print('\nHow many of the items are weapons? How many are not?')
count = db.items.count_documents({"type":"weapon"})
print(count, 'weapons')
count = db.items.count_documents({"type":{"$ne":"weapon"}})
print(count, 'are not weapons')

# The last 4 questions cannot be answered here because I did not
# migrate inventory data in the new schema
# (I wasn't sure how to store a many to many relationship in MongoDB)

# How many weapons does each character have? (1st 20 rows)
# How many weapons does each character have? (1st 20 rows)
# On average, how many items does each character have?
# On average, how many weapons does each character have?

# close the connection once we're done
client.close()
|
from itertools import combinations
from cpu import cpu, read, write
# Scripted walk through the ship that collects the safe items and ends
# just before the pressure-sensitive floor; every "take ..." line also
# defines the starting inventory parsed in aoc() below.
PRE = """south
take fixed point
north
west
west
west
take hologram
east
east
east
north
take candy cane
west
take antenna
south
take whirled peas
north
west
take shell
east
east
north
north
take polygon
south
west
take fuel cell
west"""
def aoc(data):
    """Brute-force the item combination that passes the pressure plate.

    `data` is the intcode program; `cpu`, `read` and `write` come from the
    local `cpu` module.  Returns an int parsed from the success message
    (presumably the airlock password).
    """
    c = cpu(data)
    s, _ = read(c)
    # Inventory collected by the scripted walk: every "take <item>" line.
    inv = [
        line.split(maxsplit=1)[1] for line in PRE.split("\n") if line.startswith("take")
    ]
    print(inv)
    print(s)
    # Replay the scripted walk up to the security checkpoint.
    for line in PRE.split("\n"):
        s, _ = write(c, line + "\n")
    print(s)
    # Try every subset of the inventory, largest first; only take/drop the
    # items that differ from the previously held set to minimize commands.
    prev = inv
    for l in range(len(inv), 0, -1):
        for want in combinations(inv, l):
            for i in inv:
                if i in want and not i in prev:
                    write(c, f"take {i}\n")
                if i in prev and not i in want:
                    write(c, f"drop {i}\n")
            prev = want
            s, _ = write(c, "west\n")
            print(s)
            if "airlock" in s:
                # The success message embeds the numeric code as the
                # 8th-from-last word.
                return int(s.split()[-8])
|
# -*- coding: utf-8 -*-
import re
from facebook import *
import str_formater
# Trigger keywords (Polish) that route an utterance to this module.
WORDS = ["FACEBOOK", "FEJSBUK", "POWIADOMIENIE", "POWIADOMIENIA"]
def handle(text, mic, profile, logger, modules):
    """
    Responds to user-input, typically speech text, with a summary of
    the user's Facebook notifications, including a count and details
    related to each individual notification.

    Arguments:
        text -- user-input, typically transcribed speech
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone number)
        logger -- application logger
        modules -- loaded brain modules (unused here)
    """
    oauth_access_token = profile['keys']['FB_TOKEN']
    graph = GraphAPI(oauth_access_token)
    try:
        results = graph.request("me/notifications")
    except GraphAPIError:
        logger.error("error getting response form facebook api, for key: %s" % oauth_access_token, exc_info=True)
        mic.say(
            "Nie mam uprawnienia do twojego konta na Fejsbuku. Sprawdź ustawienia.")
        return
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate.
        logger.error("error getting response form facebook api, for key: %s" % oauth_access_token, exc_info=True)
        mic.say(
            "Wybacz, ale ta usługa jest chwilowo niedostępna.")
        # BUGFIX: without this return the code below dereferenced the
        # unbound `results` and crashed with NameError.
        return

    if not len(results['data']):
        mic.say("Brak nowych powiadomień na Fejsbuku")
        return

    updates = []
    logger.debug(results)
    # The guard above guarantees results['data'] is non-empty, so the old
    # redundant `if results['data']:` wrapper is gone.
    for notification in results['data']:
        title = str_formater.unicodeToUTF8(notification['title'], logger)
        updates.append(title)
        logger.debug("from:" + repr(notification['from']) + " to:" + repr(notification['to']) + " created_time:" + repr(notification['created_time']) + " unread:" + repr(notification['unread']) )

    count = len(results['data'])
    mic.say("Masz " + str(count) +
            " nowych powiadomień na Fejsbuku.|" + "| ".join(updates) )
    return
def isValid(text):
    """
    Returns True if the input is related to Facebook notifications.
    Arguments:
    text -- user-input, typically transcribed speech
    """
    pattern = re.compile(r'\b(powiadomienie|powiadomienia|fejsbuk|Facebook)\b', re.IGNORECASE)
    return pattern.search(text) is not None
|
import pandas as pd
import pytest
import os
from fuzzydata.core.generator import generate_schema
from tests.conftest import artifact_fixtures
@pytest.mark.dependency()
@pytest.mark.parametrize('artifact', artifact_fixtures)
def test_generate(artifact, request):
    """Generate 100 rows from a fresh 20-column schema for each artifact fixture."""
    schema = generate_schema(20)
    fixture = request.getfixturevalue(artifact)
    fixture.generate(100, schema)
@pytest.mark.dependency(depends=["test_generate"])
@pytest.mark.parametrize('artifact', artifact_fixtures)
def test_serialize_deserialize(artifact, request):
    """Round-trip each artifact through serialize -> destroy -> deserialize."""
    fixture = request.getfixturevalue(artifact)
    path = fixture.filename
    fixture.serialize()
    assert os.path.exists(path)
    fixture.destroy()
    fixture.deserialize()
    # Each artifact carries its own dataframe module as `.pd`
    # (e.g. pandas vs. modin), so compare against that, not the top import.
    assert isinstance(fixture.to_df(), fixture.pd.DataFrame)
from datetime import datetime, timedelta
from airflow.hooks import S3_hook
from typing import Optional
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class DataQualityOperator(BaseOperator):
    """
    This operator executes 2 quality checks on the current database:

    - For every table in ``tables``, the number of rows loaded for the
      execution date must equal the number of lines in the provider files
      read from S3.
    - For every table in ``tables_with_rows``, at least one row must exist
      for the execution date.

    Raises ValueError (failing the task) if any check does not pass.
    """
    # Restricts every count to the rows belonging to the execution date.
    query_format = "date > '{start}' and date < '{end}' "
    ui_color = '#e67e22'

    @apply_defaults
    def __init__(self,
                 conn_id: str = "",
                 aws_con: str = "",
                 aws_bucket_name: str = "",
                 tables: Optional[dict] = None,
                 tables_with_rows: Optional[dict] = None,
                 *args, **kwargs):
        super(DataQualityOperator, self).__init__(*args, **kwargs)
        self.conn_id = conn_id
        # Normalize BOTH optional collections; the original only defaulted
        # `tables`, so a missing `tables_with_rows` crashed execute() with
        # "'NoneType' object is not iterable".
        self.tables = tables if tables is not None else dict()
        self.tables_with_rows = tables_with_rows if tables_with_rows is not None else dict()
        self.aws_bucket_name = aws_bucket_name
        self.aws_con = aws_con

    def execute(self, context):
        redshift_hook = PostgresHook(postgres_conn_id=self.conn_id)
        end = datetime.strptime(context["ds"], "%Y-%m-%d") + timedelta(days=1)
        filter_query = self.query_format.format(start=context["ds"],
                                                end=end.strftime("%Y-%m-%d"))
        remote_providers = ["bisq", "paxful"]
        # Sum the line counts of every provider file for this execution date.
        hook = S3_hook.S3Hook(self.aws_con)  # hoisted: hook is loop-invariant
        total = 0
        for provider in remote_providers:
            filename = f"{provider}({context['ds']}).json"
            total += hook.read_key(filename, self.aws_bucket_name).count('\n')
        failed_tests = []
        for table in self.tables:
            self.log.info(f"Starting data quality on table with total : {table}")
            records = redshift_hook.get_records(f"SELECT count(*) FROM {table} where {filter_query} ;")
            # Guard the index: the original dereferenced records[0][0] even
            # when it had just established len(records) < 1.
            count = records[0][0] if records else None
            if count != total:
                self.log.error(f"Data quality failed for table : {table}. count {count}, total file:{total}")
                failed_tests.append(f"SELECT count(*) FROM {table} where {filter_query} ;")
            else:
                self.log.info(f"Data quality Passed on table : {table}!!!")
        for table in self.tables_with_rows:
            self.log.info(f"Starting data quality on table : {table}")
            records = redshift_hook.get_records(f"SELECT count(*) FROM {table} where {filter_query};")
            count = records[0][0] if records else 0
            if count < 1:
                self.log.error(f"Data quality failed for table : {table}. count {count}, total file:{total}")
                failed_tests.append(f"SELECT count(*) FROM {table} where {filter_query} ;")
            else:
                self.log.info(f"Data quality Passed on table : {table}!!!")
        if failed_tests:
            self.log.info(failed_tests)
            raise ValueError('Data quality check failed')
        self.log.info("Data quality done")
|
import sys
import sqlite3
import binascii
import re
def check_task(task_db, info_hash_or_uri):
    """Return 0 if a BT task with the given info hash exists, 1 otherwise.

    Parameters
    ----------
    task_db : str
        Path to the TaskDb.dat sqlite database containing table BtTask.
    info_hash_or_uri : str
        A bare hex info hash, or a 'urn:btih:<hash>' URI.
    """
    m = re.match(r'urn:btih:(\w+)', info_hash_or_uri)
    # BUG FIX: the original used m.groups(1), which returns the tuple of ALL
    # groups (1 being the default for unmatched ones) -- not the first group.
    info_hash = m.group(1) if m else info_hash_or_uri
    bin_info_hash = binascii.unhexlify(info_hash)
    conn = sqlite3.connect(task_db)
    try:
        c = conn.cursor()
        c.execute(
            "SELECT COUNT(*) FROM BtTask WHERE InfoId = ?;", (bin_info_hash,)
        )
        # fetchone() yields one row tuple like (count,). The original iterated
        # over that tuple and tried to unpack the bare integer, which raised.
        (count,) = c.fetchone()
        return 0 if int(count) > 0 else 1
    finally:
        # Always release the connection (the original leaked it when the
        # task was found, returning before close()).
        conn.close()
if __name__ == '__main__':
    # CLI entry point: expects the database path and an info hash (or
    # 'urn:btih:' URI). Exits with status 2 on usage error; otherwise the
    # process exit status is check_task's result (0 = found, 1 = not found).
    if len(sys.argv) != 3:
        print("USAGE python ./check_task.py TaskDb.dat INFO_HASH_OR_URI")
        sys.exit(2)
    task_db, info_hash_or_uri = sys.argv[1:3]
    sys.exit(check_task(task_db, info_hash_or_uri))
|
from ._dispatch_wrapper import DispatchWrapper
class Page(DispatchWrapper):
    """ The Page class manages properties for the document print out.

    Thin wrapper: every property forwards to the underlying WellCAD COM
    dispatch object (``self._dispatch``); the class holds no state itself.

    Example
    -------
    >>> import wellcad.com
    >>> app = wellcad.com.Application()
    >>> app.new_borehole()
    <wellcad.com._borehole.Borehole object at 0x0000018B3DAF9D30>
    >>> borehole = app.get_active_borehole()
    >>> page = borehole.page
    """
    @property
    def depth_range(self):
        """int : Identify the depth range mode
        The available modes are the following:
        * 0 = depth range not defined (maximum depth range to be printed)
        * 1 = depth range defined by the user
        """
        return self._dispatch.DepthRange
    @depth_range.setter
    def depth_range(self, mode):
        self._dispatch.DepthRange = mode
    # NOTE: document_height is read-only -- no setter is exposed here.
    @property
    def document_height(self):
        """float: The document height in mm."""
        return self._dispatch.DocumentHeight
    @property
    def document_width(self):
        """float: The document width in mm."""
        return self._dispatch.DocumentWidth
    @document_width.setter
    def document_width(self, value):
        self._dispatch.DocumentWidth = value
    # NOTE: read-only; ranges are managed via add/remove_depth_range below.
    @property
    def nb_of_depth_range(self):
        """The number of defined depth ranges (numeric; exact COM type
        unverified -- presumably an integer count)."""
        return self._dispatch.NbOfDepthRange
    @property
    def paper_mode(self):
        """int: 0 for page-by-page and 1 for fanfold."""
        return self._dispatch.PaperMode
    @paper_mode.setter
    def paper_mode(self, mode):
        self._dispatch.PaperMode = mode
    @property
    def print_titles_on_top(self):
        """bool: Show the log titles at the top of the printout."""
        return self._dispatch.PrintTitlesOnTop
    @print_titles_on_top.setter
    def print_titles_on_top(self, show):
        self._dispatch.PrintTitlesOnTop = show
    @property
    def print_titles_on_bottom(self):
        """bool: Show the log titles at the bottom of the printout."""
        return self._dispatch.PrintTitlesOnBottom
    @print_titles_on_bottom.setter
    def print_titles_on_bottom(self, show):
        self._dispatch.PrintTitlesOnBottom = show
    @property
    def print_titles_on_bottom_on_each_page(self):
        """bool: Repeat the log titles at the bottom of each printed page."""
        return self._dispatch.PrintTitlesOnBottomOnEachPage
    @print_titles_on_bottom_on_each_page.setter
    def print_titles_on_bottom_on_each_page(self, show):
        self._dispatch.PrintTitlesOnBottomOnEachPage = show
    @property
    def print_titles_on_top_on_each_page(self):
        """bool: Repeat the log titles at the top of each printed page."""
        return self._dispatch.PrintTitlesOnTopOnEachPage
    @print_titles_on_top_on_each_page.setter
    def print_titles_on_top_on_each_page(self, flag):
        self._dispatch.PrintTitlesOnTopOnEachPage = flag
    # All four margins below are expressed in mm.
    @property
    def top_margin(self):
        """int: The top margin of the page to print in mm."""
        return self._dispatch.TopMargin
    @top_margin.setter
    def top_margin(self, value):
        self._dispatch.TopMargin = value
    @property
    def bottom_margin(self):
        """int: The bottom margin of the page to print in mm."""
        return self._dispatch.BottomMargin
    @bottom_margin.setter
    def bottom_margin(self, value):
        self._dispatch.BottomMargin = value
    @property
    def left_margin(self):
        """int: The left margin of the page to print in mm."""
        return self._dispatch.LeftMargin
    @left_margin.setter
    def left_margin(self, value):
        self._dispatch.LeftMargin = value
    @property
    def right_margin(self):
        """int: The right margin of the page to print in mm."""
        return self._dispatch.RightMargin
    @right_margin.setter
    def right_margin(self, value):
        self._dispatch.RightMargin = value
    @property
    def numbering(self):
        """int: The page numbering mode
        The available modes are the following:
        * 0 = none
        * 1 = left
        * 2 = right
        * 3 = center
        * 4 = alternating
        """
        return self._dispatch.Numbering
    @numbering.setter
    def numbering(self, mode):
        self._dispatch.Numbering = mode
    @property
    def print_header(self):
        """bool: Option to print the document header or not."""
        return self._dispatch.PrintHeader
    @print_header.setter
    def print_header(self, flag):
        self._dispatch.PrintHeader = flag
    def add_depth_range(self, top, bottom):
        """Adds a new depth range to be printed in current master depth units.
        Parameters
        ----------
        top : float
            Top depth of the interval that will be added to the print list.
        bottom : float
            Bottom depth of the interval that will be added to the print list.
        """
        return self._dispatch.AddDepthRange(top, bottom)
    def remove_depth_range(self, index):
        """Remove an entry from the list of depth ranges.
        Parameters
        ----------
        index : int
            Zero based index of the entry to be removed from the list.
        """
        return self._dispatch.RemoveDepthRange(index)
|
import paddle
import paddle.nn.functional as F
from paddle.nn import Conv2D, ReLU, Linear, Layer
import gym_super_mario_bros
from gym.spaces import Box
from gym import Wrapper
from nes_py.wrappers import JoypadSpace
from gym_super_mario_bros.actions import SIMPLE_MOVEMENT
import cv2
import math
import numpy as np
import subprocess as sp
import random
import time
from pyglet import clock
from nes_py._image_viewer import ImageViewer
from pynput.keyboard import Listener
# Keys currently held down by the human player (ASCII codes, max two).
record_key = []


def on_press(key):
    """Track a/d/o style keys being pressed (case-sensitive codes, max two)."""
    global record_key
    # pynput renders character keys as "'a'"; index 1 is the character itself.
    key = str(key)[1]
    if key in ('a', 'A', 'd', 'D', 'o', 'O'):
        record_key.append(ord(key))
        # Collapse auto-repeat duplicates from the OS key-repeat.
        record_key = list(set(record_key))
        if len(record_key) > 2:
            record_key.pop(0)


def on_release(key):
    """Stop tracking a key when it is released."""
    global record_key
    key = str(key)[1]
    if key in ('a', 'A', 'd', 'D', 'o', 'O'):
        try:
            record_key.remove(ord(key))
        except ValueError:
            # Key was never tracked (e.g. evicted by the two-key limit).
            # FIX: narrowed from a bare `except:` that also swallowed
            # KeyboardInterrupt and friends.
            pass
# the sentinel value for "No Operation"
_NOP = 0


class Monitor:
    """Pipes raw RGB frames into an ffmpeg subprocess to record a video file."""

    def __init__(self, width, height, saved_path):
        # rawvideo frames in on stdin, mpeg4 out at 60 fps.
        self.command = ["ffmpeg", "-y", "-f", "rawvideo", "-vcodec", "rawvideo", "-s", "{}X{}".format(width, height),
                        "-pix_fmt", "rgb24", "-r", "60", "-i", "-", "-an", "-vcodec", "mpeg4", saved_path]
        try:
            self.pipe = sp.Popen(self.command, stdin=sp.PIPE, stderr=sp.PIPE)
        except FileNotFoundError:
            # ffmpeg is not installed: disable recording instead of leaving
            # `pipe` unset (the original then crashed in record() with an
            # AttributeError).
            self.pipe = None

    def record(self, image_array):
        """Append one RGB frame (numpy array) to the video stream."""
        if self.pipe is None:
            return
        # tobytes() replaces ndarray.tostring(), which numpy 2.0 removed.
        self.pipe.stdin.write(image_array.tobytes())
def process_frame(frame):
    """Convert an RGB frame to a normalized (1, 84, 84) grayscale array.

    A missing frame (None) yields an all-zero observation of the same shape.
    """
    if frame is None:
        return np.zeros((1, 84, 84))
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    small = cv2.resize(gray, (84, 84))
    return small[None, :, :] / 255.
class CustomReward(Wrapper):
    """Gym wrapper that converts observations to (1, 84, 84) grayscale and
    shapes the reward with in-game score deltas plus a terminal bonus/penalty.
    """

    def __init__(self, env=None, monitor=None):
        super(CustomReward, self).__init__(env)
        self.observation_space = Box(low=0, high=255, shape=(1, 84, 84))
        self.curr_score = 0
        self.monitor = monitor if monitor else None
        self.img = None

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        # if self.monitor:
        #     self.monitor.record(obs)
        # Keep the raw RGB frame for external rendering.
        self.img = obs
        obs = process_frame(obs)
        # Reward shaping: score gains, then +/-50 at episode end for the flag.
        reward += (info["score"] - self.curr_score) / 40.
        self.curr_score = info["score"]
        if done:
            reward += 50 if info["flag_get"] else -50
        return obs, reward / 10., done, info

    def reset(self):
        self.curr_score = 0
        self.img = self.env.reset()
        return process_frame(self.img)
class CustomSkipFrame(Wrapper):
    # Frame-skip wrapper: repeats each action `skip` times and keeps a rolling
    # (skip, 84, 84) stack of the processed frames as the observation.
    def __init__(self, env, skip=4):
        super(CustomSkipFrame, self).__init__(env)
        self.observation_space = Box(low=0, high=255, shape=(skip, 84, 84))
        self.skip = skip
        # Rolling buffer of the most recent `skip` processed frames.
        self.states = np.zeros((skip, 84, 84), dtype=np.float32)
    def step(self, action):
        total_reward = 0
        last_states = []
        for i in range(self.skip):
            state, reward, done, info = self.env.step(action)
            total_reward += reward
            # Only the second half of the skipped frames feed the max-pool
            # below (mitigates sprite flicker between alternate frames).
            if i >= self.skip / 2:
                last_states.append(state)
            if done:
                # NOTE(review): reset() mutates self.states, so the tuple
                # returned here contains the freshly reset stack rather than
                # the terminal frames -- presumably intentional; confirm.
                self.reset()
                return self.states[None, :, :, :].astype(np.float32), total_reward, done, info
        # Element-wise max over the retained frames, pushed onto the stack.
        max_state = np.max(np.concatenate(last_states, 0), 0)
        self.states[:-1] = self.states[1:]
        self.states[-1] = max_state
        return self.states[None, :, :, :].astype(np.float32), total_reward, done, info
    def reset(self):
        # Fill the whole stack with copies of the first frame.
        state = self.env.reset()
        self.states = np.concatenate([state for _ in range(self.skip)], 0)
        return self.states[None, :, :, :].astype(np.float32)
def create_train_env(world, stage, actions, output_path=None):
    """Build the fully wrapped Super Mario Bros environment for one level.

    When `output_path` is given, an ffmpeg Monitor (256x240) is attached for
    optional recording; otherwise no monitor is created.
    """
    env = gym_super_mario_bros.make("SuperMarioBros-{}-{}-v0".format(world, stage))
    monitor = Monitor(256, 240, output_path) if output_path else None
    env = JoypadSpace(env, actions)
    env = CustomReward(env, monitor)
    return CustomSkipFrame(env)
def conv_out(In, kernel=3, stride=2, padding=1):
    """Return the spatial output size of a convolution layer.

    Formula: (input - kernel_size + 2*padding) // stride + 1.
    Defaults match the 3x3 / stride-2 / pad-1 convolutions used by MARIO,
    so existing single-argument calls behave exactly as before.
    """
    return (In - kernel + 2 * padding) // stride + 1
class MARIO(Layer):
    """Actor-critic CNN: four 3x3 stride-2 convolutions, a shared 512-unit
    dense layer, then a policy head (`linear1`) and a value head (`linear2`).
    Attribute names (conv0..conv3, linear0..linear2) are part of the saved
    parameter dictionary and must not change.
    """

    def __init__(self, actions, obs_dim):
        super(MARIO, self).__init__()
        self.channels = 32
        self.kernel = 3
        self.stride = 2
        self.padding = 1
        # Flattened feature count after four conv layers shrink the input side.
        side = obs_dim[-1]
        for _ in range(4):
            side = conv_out(side)
        self.fc = self.channels * math.pow(side, 2)

        # All four convolutions share every hyper-parameter except in_channels.
        def _conv(in_channels):
            return Conv2D(out_channels=self.channels,
                          kernel_size=self.kernel,
                          stride=self.stride,
                          padding=self.padding,
                          dilation=[1, 1],
                          groups=1,
                          in_channels=in_channels)

        self.conv0 = _conv(obs_dim[1])
        self.relu0 = ReLU()
        self.conv1 = _conv(self.channels)
        self.relu1 = ReLU()
        self.conv2 = _conv(self.channels)
        self.relu2 = ReLU()
        self.conv3 = _conv(self.channels)
        self.relu3 = ReLU()
        self.linear0 = Linear(in_features=int(self.fc), out_features=512)
        self.linear1 = Linear(in_features=512, out_features=actions)
        self.linear2 = Linear(in_features=512, out_features=1)

    def forward(self, x):
        """Return (policy logits, state value) for a single observation."""
        x = paddle.to_tensor(data=x)
        for conv, relu in ((self.conv0, self.relu0), (self.conv1, self.relu1),
                           (self.conv2, self.relu2), (self.conv3, self.relu3)):
            x = relu(conv(x))
        # NOTE: hard-coded batch size of 1, matching how main() calls the model.
        x = paddle.reshape(x, shape=[1, -1])
        features = self.linear0(x)
        return self.linear1(features), self.linear2(features)
def main(world, stage, callback=None, listener=None):
    """Run one human-vs-agent round on the given world/stage.

    Two identical environments are created: `env` is driven by the trained
    paddle model, `env_human` by the keyboard (via the global `record_key`).
    Both are rendered side by side in one OpenCV window; the round ends when
    either episode finishes or Escape is pressed.

    callback -- optional hook receiving the human env's (obs, reward, done,
                info) each frame.
    listener -- accepted for symmetry with the caller but unused here.
    """
    actions = SIMPLE_MOVEMENT
    obs_dim = [1, 4, 84, 84]
    env = create_train_env(world, stage, actions,"./video/mario_{}_{}.avi".format(world, stage))
    env_human = create_train_env(world, stage, actions,"./video/mario_{}_{}.avi".format(world, stage))
    # get the mapping of keyboard keys to actions in the environment
    if hasattr(env_human, 'get_keys_to_action'):
        keys_to_action = env_human.get_keys_to_action()
    elif hasattr(env_human.unwrapped, 'get_keys_to_action'):
        keys_to_action = env_human.unwrapped.get_keys_to_action()
    else:
        raise ValueError('env has no get_keys_to_action method')
    # create the image viewer (only used below to poll the Escape key)
    viewer = ImageViewer(
        env_human.spec.id if env_human.spec is not None else env_human.__class__.__name__,
        env_human.observation_space.shape[0], # height
        env_human.observation_space.shape[1], # width
        monitor_keyboard=True,
        relevant_keys=set(sum(map(list, keys_to_action.keys()), []))
    )
    # prepare frame rate limiting
    target_frame_duration = 1 / env_human.metadata['video.frames_per_second']
    last_frame_time = 0
    # Load the pretrained policy for this level and switch to inference mode.
    paddle.disable_static()
    params = paddle.load('./models/mario_{}_{}.pdparams'.format(world, stage))
    model = MARIO(len(actions), obs_dim)
    model.set_dict(params)
    model.eval()
    state = env.reset()
    state_human = env_human.reset()
    # Countdown splash: READY? then 3, 2, 1 over the side-by-side frames.
    env_human.img = cv2.cvtColor(env_human.unwrapped.screen, cv2.COLOR_BGR2RGB)
    start_img = cv2.resize(np.concatenate([env_human.img, env.img], axis=1), (960, 512))
    start_img_0 = cv2.putText(start_img, "READY?", (60,256), cv2.FONT_HERSHEY_COMPLEX_SMALL, 10, (255, 255, 255), 5)
    cv2.imshow('mario challenge', start_img_0)
    cv2.waitKey(1000)
    start_img = cv2.resize(np.concatenate([env_human.img, env.img], axis=1), (960, 512))
    start_img_3 = cv2.putText(start_img, "3", (380,256), cv2.FONT_HERSHEY_COMPLEX_SMALL, 15, (255, 255, 255), 5)
    cv2.imshow('mario challenge', start_img_3)
    cv2.waitKey(1000)
    start_img = cv2.resize(np.concatenate([env_human.img, env.img], axis=1), (960, 512))
    start_img_2 = cv2.putText(start_img, "2", (380,256), cv2.FONT_HERSHEY_COMPLEX_SMALL, 15, (255, 255, 255), 5)
    cv2.imshow('mario challenge', start_img_2)
    cv2.waitKey(1000)
    start_img = cv2.resize(np.concatenate([env_human.img, env.img], axis=1), (960, 512))
    start_img_1 = cv2.putText(start_img, "1", (380,256), cv2.FONT_HERSHEY_COMPLEX_SMALL, 15, (255, 255, 255), 5)
    cv2.imshow('mario challenge', start_img_1)
    cv2.waitKey(1000)
    human_reward = 0
    agent_reward = 0
    while True:
        current_frame_time = time.time()
        # limit frame rate
        if last_frame_time + target_frame_duration > current_frame_time:
            continue
        # save frame beginning time for next refresh
        last_frame_time = current_frame_time
        # clock tick
        clock.tick()
        # Agent: greedy action from the policy head.
        logits, value = model(state)
        policy = F.softmax(logits).numpy()
        action = np.argmax(policy)
        state, reward, done, info = env.step(action)
        state = np.array(state).astype('float32')
        # Human: map currently held keys (global record_key) to an action.
        action = keys_to_action.get(tuple(record_key), _NOP)
        _s, _r, _d, _i = env_human.step(action)
        human_reward += _r
        agent_reward += reward
        # Render both raw frames side by side (human left, agent right).
        env_human.img = cv2.cvtColor(env_human.unwrapped.screen, cv2.COLOR_BGR2RGB)
        cv2.imshow('mario challenge', cv2.resize(np.concatenate([env_human.img, env.img], axis=1), (960, 512)))
        cv2.waitKey(15)
        # viewer.show(env.unwrapped.screen)
        # pass the observation data through the callback
        if callback is not None:
            callback(_s, _r, _d, _i)
        # shutdown if the escape key is pressed
        if viewer.is_escape_pressed:
            break
        # Round over when either player's episode ends; ties favor the human.
        if done or _d:
            if human_reward >= agent_reward:
                end_img = cv2.putText(cv2.resize(np.concatenate([env_human.img, env.img], axis=1), (960, 512))
                                      , "WIN!!!", (170,256), cv2.FONT_HERSHEY_COMPLEX_SMALL, 10, (255, 255, 255), 5)
            else:
                end_img = cv2.putText(cv2.resize(np.concatenate([env_human.img, env.img], axis=1), (960, 512))
                                      , "LOSE~~", (40,256), cv2.FONT_HERSHEY_COMPLEX_SMALL, 10, (255, 255, 255), 5)
            cv2.imshow('mario challenge', end_img)
            cv2.waitKey(2000)
            done, _d = False, False
            break
    viewer.close()
    env.close()
    env_human.close()
if __name__ == "__main__":
    # The pynput listener runs in a background thread and feeds the global
    # `record_key` so the human player can steer while main() loops.
    with Listener(on_press = on_press, on_release = on_release) as listener:
        cv2.namedWindow('mario challenge', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('mario challenge', (960, 512))
        # Levels with a pretrained model available under ./models/.
        env_list = [(1, 1), (1, 2), (1, 3), (1, 4), (2, 1), (2, 2), (2, 3), (2, 4), (3, 1), (3, 2), (3, 3), (3, 4), (4, 1), (4, 2), (4, 3)]
        # Endless play: pick a random world/stage for each round.
        while True:
            world , stage = random.choice(env_list)
            main(world, stage, listener = listener)
        # NOTE(review): unreachable -- the loop above never exits normally.
        listener.join()
#NAME: NIRZARI IYER
#Assignment-3
#ID NUMBER: 1001117633
#BATCH TIME- 6:00 to 8:00 p.m.
import MySQLdb
import io
import os
import cloudstorage as gcs
import csv
import timeit
import json
from bottle import Bottle
from google.appengine.api import app_identity
from StringIO import StringIO
from bottle import route, request, response, template, get, HTTPResponse
bottle = Bottle()
#location of file into default bucket on google cloud storage
# Uploads are stored as <default GCS bucket>/earthquake.csv.
bucket_name = os.environ.get('BUCKET_NAME', app_identity.get_default_gcs_bucket_name())
bucket = '/' + bucket_name
filename = bucket + '/earthquake.csv'
#declare cursor globally
# Module-level Cloud SQL connection and cursor shared by every request
# handler below. NOTE(review): a single shared cursor is not safe under
# concurrent requests -- confirm this app runs single-threaded on GAE.
connobj = MySQLdb.connect(unix_socket='/cloudsql/cloudcomp2-979:simple' ,user='root')
c = connobj.cursor()
#Get filename from user
@bottle.route('/uploadform')
def uploadform():
    # Render the HTML form that lets the user choose the earthquake CSV file.
    return template('upload_form')
#Upload file into bucket on google cloud storage
@bottle.route('/uploadfile', method='POST')
def uploadfile():
    """Store the uploaded CSV in the GCS bucket and report the elapsed time."""
    start = timeit.default_timer()
    upload = request.files.get('filecontent')
    raw = upload.file.read()
    retry = gcs.RetryParams(backoff_factor=1.1)
    gcs_file = gcs.open(filename, 'w', content_type='text/plain', retry_params=retry)
    gcs_file.write(raw)
    gcs_file.close()
    elapsed = timeit.default_timer() - start
    return template('upload_file', time_taken=elapsed)
#Read data from bucket and Insert data into google MySQLdb
def parse(filename, delimiter, c):
    """Read the earthquake CSV from GCS and bulk-insert its rows into MySQL.

    Returns the elapsed insert time in seconds, or None if a row failed.
    (`delimiter` is accepted for interface compatibility but the reader is
    fixed to ',' as in the original.)
    """
    with gcs.open(filename, 'r') as gcs_file:
        csv_reader = csv.reader(StringIO(gcs_file.read()), delimiter=',',
                                quotechar='"')
        # Skip the header line (next() works on both Python 2 and 3 readers).
        next(csv_reader)
        # FIX: parameterized INSERT. The original concatenated raw CSV fields
        # into the SQL text, which broke on embedded quotes (hence the ad-hoc
        # apostrophe stripping), skipped column 14 in its empty-field fixup,
        # and was SQL-injectable.
        insert = ("INSERT INTO earthquake (time, latitude, longitude, depth, mag,"
                  " magType, nst, gap, dmin, rms, net, id, updated, place, type)"
                  " values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
        try:
            start = timeit.default_timer()
            for row in csv_reader:
                time = timestamp(row[0])
                updated = timestamp(row[12])
                # Empty CSV fields become SQL NULLs.
                values = ([time]
                          + [f if f != '' else None for f in row[1:12]]
                          + [updated, row[13], row[14]])
                c.execute(insert, values)
            stop = timeit.default_timer()
            return stop - start
        except Exception as e:
            print ("Data can't be inserted" + str(e))
#converts ISO-8601 timestamps to the MySQL 'YYYY-MM-DD HH:MM:SS' format
def timestamp(string):
    """Return 'YYYY-MM-DD HH:MM:SS' extracted from an ISO-8601 string."""
    date_part = string[:10]
    time_part = string[11:19]
    return date_part + ' ' + time_part
#query to get result for different magnitude for each week
def query(mag, c):
    """Return weekly earthquake counts for rows whose magnitude equals `mag`.

    FIX: the magnitude is now bound as a query parameter instead of being
    concatenated into the SQL text.
    """
    sql = ('SELECT week(time) as week, count(*) as count, mag as mag '
           'FROM earthquake WHERE mag = %s GROUP BY week(time), mag')
    c.execute(sql, (mag,))
    return c.fetchall()
#query for magnitude greater than the given threshold (used with 5)
def bigquery(mag, c):
    """Return weekly earthquake counts for rows with magnitude above `mag`.

    FIX: the threshold is now bound as a query parameter instead of being
    concatenated into the SQL text.
    """
    sql = ('SELECT week(time) as week, count(*) as count, mag as mag '
           'FROM earthquake WHERE mag > %s GROUP BY week(time), mag')
    c.execute(sql, (mag,))
    return c.fetchall()
#function to format generated result
def ans_format(mag):
    """Render (week, count, magnitude) rows as an HTML table string."""
    header = "<table border='2'><tr><th>Week</th><th>Number of quakes</th><th>Magnitude</th></tr>"
    rows = "".join(
        "<tr><td>%s</td><td>%s</td><td>%s</td></tr>" % (str(r[0]), str(r[1]), str(r[2]))
        for r in mag
    )
    return header + rows + "</table>"
#Displays the webinterface for user to enter magnitude and location
@bottle.route('/webinterface')
def webinterface():
    # Render the magnitude/location search form that posts to /dynamic_query.
    return template('webinterface')
@bottle.route('/dynamic_query', method = "POST")
def dynamic_query():
    # AJAX endpoint: builds a SELECT statement from the posted form values
    # and returns the matching earthquake rows as an HTML table fragment.
    # Only answers XMLHttpRequest posts; other requests fall through to None.
    # NOTE(review): create_query() interpolates the user-supplied magnitude
    # and location directly into the SQL text -- SQL injection risk; this
    # should be migrated to parameterized queries.
    if request.headers.get('X-Requested-With') == 'XMLHttpRequest':
        dict_data = request.forms.dict
        print dict_data
        query_final = create_query(dict_data)
        connectdb = 'USE db'
        c.execute(connectdb)
        query_ans = c.execute(query_final)
        query_result = c.fetchall()
        print query_result
        query_output = query_format(query_result)
        print query_output
        return HTTPResponse(body=str(query_output), status=200)
#function to create dynamic query
def create_query(dict_data):
    """Assemble the earthquake SELECT statement from the posted form values.

    Expects bottle's forms dict: each key maps to a one-element list holding
    the comparison operator (param1), magnitude (mag), boolean connector
    (param2) and location substring (loc).

    WARNING: the magnitude and location are interpolated straight into the
    SQL text, so this remains vulnerable to SQL injection.
    """
    comparisons = {"eq": "= ", "gt": "> ", "gte": ">= ", "lt": "< ", "lte": "<= "}
    connectors = {"or": " or ", "and": " and "}
    comparison = comparisons.get(dict_data["param1"][0], "")
    connector = connectors.get(dict_data["param2"][0], "")
    mag = dict_data["mag"][0]
    loc = dict_data["loc"][0]
    return str("SELECT * FROM earthquake WHERE " + "mag " + comparison + mag
               + connector + "place LIKE " + "'%" + loc + "%'")
def query_format(query_result):
    """Render full earthquake rows as an HTML table in schema column order.

    FIX: replaces fifteen hand-written per-column lines with a loop (the
    original duplicated the cell markup for every column) and uses a
    version-neutral print call for the per-row debug output.
    """
    table = "<table border='2'><tr><th>time</th><th>latitude</th><th>longitude</th><th>depth</th><th>mag</th><th>magType</th><th>nst</th>"\
            "<th>gap</th><th>dmin</th><th>rms</th><th>net</th><th>id</th><th>updated</th><th>place</th><th>type</th></tr>"
    ans = ""
    for x in query_result:
        print(x)
        cells = []
        for i, value in enumerate(x):
            # Columns 0 (time) and 12 (updated) are datetimes; format them.
            if i in (0, 12):
                cells.append("<td>" + value.strftime("%d/%m/%Y %H:%M:%S") + "</td>")
            else:
                cells.append("<td>" + str(value) + "</td>")
        ans += "<tr>" + "".join(cells) + "</tr>"
    return table + ans + "</table>"
@bottle.route('/')
def main():
    # Bootstraps the database and table, loads the previously uploaded CSV
    # into MySQL, then renders per-magnitude weekly summaries as HTML.
    # On any failure the exception text is printed and returned as the body.
    try:
        createdb = 'CREATE DATABASE IF NOT EXISTS db'
        c.execute(createdb)
        connectdb = 'USE db'
        c.execute(connectdb)
        table = 'CREATE TABLE IF NOT EXISTS earthquake '\
        '(time TIMESTAMP,'\
        'latitude DOUBLE,'\
        'longitude DOUBLE,'\
        'depth DOUBLE,'\
        'mag DOUBLE,'\
        'magType varchar(500),'\
        'nst DOUBLE,'\
        'gap DOUBLE,'\
        'dmin DOUBLE,'\
        'rms DOUBLE,'\
        'net varchar(500),'\
        'id varchar(500),'\
        'updated TIMESTAMP,'\
        'place VARCHAR(500),'\
        'type VARCHAR(500))'
        c.execute(table)
        # Import the CSV stored in GCS and time the insert.
        insert_time = parse(filename,',',c)
        # Weekly counts for magnitudes 2-5 plus everything above 5.
        mag2 = query(2,c)
        mag3 = query(3,c)
        mag4 = query(4,c)
        mag5 = query(5,c)
        maggt5 = bigquery(5,c)
        ans_mag2 = ans_format(mag2)
        ans_mag3 = ans_format(mag3)
        ans_mag4 = ans_format(mag4)
        ans_mag5 = ans_format(mag5)
        ans_maggt5 = ans_format(maggt5)
        ans = "Final Result: <br><br> Time taken to Insert data into MySQL database is: <br>" +str(insert_time)+"<br><br>" \
              "Earthquake of magnitude 2: <br> "+str(ans_mag2)+"<br><br> Earthquake of magnitude 3: <br>" \
              +str(ans_mag3)+ "<br><br> Earthquake of magnitude 4: <br>" +str(ans_mag4)+ "<br><br> Earthquake" \
              "of magnitude 5: <br>" +str(ans_mag5)+ "<br><br> Earthquake of magnitude greater than 5: <br>" +str(ans_maggt5)
        return ans
    except Exception as e:
        print str(e)
        return e
# Define a handler for 404 errors.
@bottle.error(404)
def error_404(error):
    """Return a custom error 404 message."""
    message = 'Sorry, nothing at this URL.'
    return message
# [END all]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 5 16:30:52 2019
@author: Ramon Barros
CE-QUAL-W2 Calibration Tool v0.0.1
MODEL ZOOPLANCTON GROUPS RATES & CONSTANTS
paramcontrol(name, calibrate, value, low, high, guess)
name = Parameter or setting name,
calibrate = boolean, True if the parameter must be calibrated (value will not be used)
value = if calibrate = False, value will be written to the parameter field
low = minimum value for calibration purposes
high = maximum value for calibration purposes
guess = optimum guess for calibration purposes
"""
from cqw2_calibrate.paramcontrol import paramcontrol
###############################################################################
#//////////////// ZOOPLANCTON GROUPS RATES & CONSTANTS ////////////////////////
###############################################################################
# Lookup of CE-QUAL-W2 zooplankton parameters. Values are given as the
# fixed-width strings written to the W2 control file; low/high/guess of 1
# are placeholders because none of these entries is currently calibrated.
zooplankton_rates_constants = {
    'number_of_zooplankton_groups' : paramcontrol('NZP', False," 0",1,1,1),
    #respiration and nutrient rates
    'zooplankton_growth_rate': [ #The number of entries must be equal to the number_of_zooplankton_groups!
        paramcontrol('ZG_1', False," 1.5",1,1,1),
    ],
    'zooplankton_respiration_rate': [ #The number of entries must be equal to the number_of_zooplankton_groups!
        paramcontrol('ZR_1', False," 0.1",1,1,1),
    ],
    'zooplankton_mortality_rate': [ #The number of entries must be equal to the number_of_zooplankton_groups!
        paramcontrol('ZM_1', False," 0.01",1,1,1),
    ],
    'zooplankton_assimilation_eff' : [ #The number of entries must be equal to the number_of_zooplankton_groups!
        paramcontrol('ZEFF_1', False," 0.500",1,1,1),
    ],
    'zooplankton_preference_POM' : [ #The number of entries must be equal to the number_of_zooplankton_groups!
        paramcontrol('PREFP_1', False," 0.500",1,1,1),
    ],
    'zooplankton_min_feeding' : [ #The number of entries must be equal to the number_of_zooplankton_groups!
        paramcontrol('ZOOMIN_1', False," 0.010",1,1,1),
    ],
    'zooplankton_halfsaturation_food' : [ #The number of entries must be equal to the number_of_zooplankton_groups!
        paramcontrol('ZS2P_1', False," 0.3",1,1,1),
    ],
    #zooplankton temperature rates
    'zooplankton_lower_temp_growth' : [ #The number of entries must be equal to the number_of_zooplankton_groups!
        paramcontrol('ZT1_1', False," 0",1,1,1),
    ],
    'zooplankton_lower_temp_max_growth': [ #The number of entries must be equal to the number_of_zooplankton_groups!
        paramcontrol('ZT2_1', False," 15",1,1,1),
    ],
    'zooplankton_upper_temp_max_growth' : [ #The number of entries must be equal to the number_of_zooplankton_groups!
        paramcontrol('ZT3_1', False," 20",1,1,1),
    ],
    'zooplankton_upper_temp_growth' : [ #The number of entries must be equal to the number_of_zooplankton_groups!
        paramcontrol('ZT4_1', False," 36",1,1,1),
    ],
    'zooplankton_fraction_algal_growth_T1' : [ #The number of entries must be equal to the number_of_zooplankton_groups!
        paramcontrol('ZK1_1', False," 0.01",1,1,1),
    ],
    'zooplankton_fraction_algal_growth_T2' : [ #The number of entries must be equal to the number_of_zooplankton_groups!
        paramcontrol('ZK2_1', False," 0.9",1,1,1),
    ],
    'zooplankton_fraction_algal_growth_T3' : [ #The number of entries must be equal to the number_of_zooplankton_groups!
        paramcontrol('ZK3_1', False," 0.99",1,1,1),
    ],
    'zooplankton_fraction_algal_growth_T4' : [ #The number of entries must be equal to the number_of_zooplankton_groups!
        paramcontrol('ZK4_1', False," 0.1",1,1,1),
    ],
    #zooplankton Stoichiometry
    'zooplankton_fraction_P' : [ #The number of entries must be equal to the number_of_zooplankton_groups!
        paramcontrol('ZP_1', False," 0.015",1,1,1),
    ],
    'zooplankton_fraction_N' : [ #The number of entries must be equal to the number_of_zooplankton_groups!
        paramcontrol('ZN_1', False," 0.08",1,1,1),
    ],
    'zooplankton_fraction_C' : [ #The number of entries must be equal to the number_of_zooplankton_groups!
        paramcontrol('ZC_1', False," 0.45",1,1,1),
    ],
    'zooplankton_preference_algae_1' : [
        #The number of entries must be equal to the number_of_zooplankton_groups!
        #add "zooplankton_preference_algae_2" if there are 2 groups of algae being simulated!
        paramcontrol('PREFA_1', False," 1.1",1,1,1),
    ],
    'zooplankton_preference_zooplankton_1' : [
        #The number of entries must be equal to the number_of_zooplankton_groups!
        #add "zooplankton_preference_zooplankton_2" if there are 2 groups of zooplankton being simulated!
        paramcontrol('PREFZ_1', False," 1.1",1,1,1),
    ],
    }
from . import Plugin
class UdpPlugin(Plugin):
    """Tags ``servers.<host>.udp.*`` graphite series as rates.

    Splits the UDP counters into In/Out datagram totals, per-type error
    counters and NoPorts events.
    FIX: regex patterns are now raw strings; the original plain strings
    contained ``\\.`` escapes, which are invalid escape sequences that newer
    Python versions warn about (the values themselves are unchanged).
    """
    targets = [
        {
            'targets': [
                {
                    # In/Out datagram totals.
                    'match': r'^servers\.(?P<server>[^\.]+)\.(?P<protocol>udp)\.(?P<type>In|Out)(?P<unit>Datagrams)$',
                    'configure': lambda self, target: self.fix_underscores(target, ['type', 'unit'])
                },
                {
                    # Per-type error counters (e.g. InErrors, SndbufErrors).
                    'match': r'^servers\.(?P<server>[^\.]+)\.(?P<protocol>udp)\.(?P<type>[^\.]+)Errors$',
                    'tags': {'unit': 'Err/s'},
                    'configure': lambda self, target: self.fix_underscores(target, 'type'),
                },
                {
                    # Datagrams received on ports with no listener.
                    'match': r'^servers\.(?P<server>[^\.]+)\.(?P<protocol>udp)\.(?P<type>NoPorts)$',
                    'tags': {'unit': 'Event/s'},
                    'configure': lambda self, target: self.fix_underscores(target, 'type')
                }
            ],
            'target_type': 'rate'
        }
    ]
|
#
# Copyright (c) 2019 UCT Prague.
#
# test_id_acl_unittests.py is part of Invenio Explicit ACLs
# (see https://github.com/oarepo/invenio-explicit-acls).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from helpers import create_record
from invenio_indexer.api import RecordIndexer
from invenio_search import current_search_client
from invenio_explicit_acls.acls import IdACL
from invenio_explicit_acls.record import SchemaEnforcingRecord
# JSON Schema path attached to the records created by these tests.
RECORD_SCHEMA = 'records/record-v1.0.0.json'
# A second, different schema used to prove schema-based ACL filtering.
ANOTHER_SCHEMA = 'records/blah-v1.0.0.json'
def test_id_acl_get_record_acl(app, db, es, es_acl_prepare, test_users):
    """Only the IdACL bound to the record's own id is returned."""
    pid, record = create_record({'$schema': RECORD_SCHEMA}, clz=SchemaEnforcingRecord)
    pid1, record1 = create_record({'$schema': RECORD_SCHEMA}, clz=SchemaEnforcingRecord)

    with db.session.begin_nested():
        matching_acl = IdACL(name='test', schemas=[RECORD_SCHEMA],
                             priority=0, operation='get', originator=test_users.u1,
                             record_id=str(record.id))
        other_acl = IdACL(name='test 2', schemas=[ANOTHER_SCHEMA],
                          priority=0, operation='get', originator=test_users.u1,
                          record_id=str(record1.id))
        db.session.add(matching_acl)
        db.session.add(other_acl)

    found = list(IdACL.get_record_acls(record))
    assert len(found) == 1
    assert isinstance(found[0], IdACL)
    assert found[0].id == matching_acl.id
def test_id_acl_prepare_schema_acl(app, db, es, es_acl_prepare, test_users):
    """IdACL.prepare_schema_acls is a no-op, so calling it must not raise."""
    IdACL.prepare_schema_acls(RECORD_SCHEMA)
def test_id_acl_get_matching_resources(app, db, es, es_acl_prepare, test_users):
    """Each IdACL matches exactly the single record it points to."""
    pid_a, rec_a = create_record({'$schema': RECORD_SCHEMA}, clz=SchemaEnforcingRecord)
    pid_b, rec_b = create_record({'$schema': RECORD_SCHEMA}, clz=SchemaEnforcingRecord)

    indexer = RecordIndexer()
    indexer.index(rec_a)
    indexer.index(rec_b)
    current_search_client.indices.refresh()
    current_search_client.indices.flush()

    with db.session.begin_nested():
        acl_a = IdACL(name='test', schemas=[RECORD_SCHEMA],
                      priority=0, operation='get', originator=test_users.u1,
                      record_id=str(rec_a.id))
        acl_b = IdACL(name='test', schemas=[RECORD_SCHEMA],
                      priority=0, operation='get', originator=test_users.u1,
                      record_id=str(rec_b.id))
        db.session.add(acl_a)
        db.session.add(acl_b)

    for acl, pid in ((acl_a, pid_a), (acl_b, pid_b)):
        matched = list(acl.get_matching_resources())
        assert matched == [str(pid.object_uuid)]
def test_id_acl_update(app, db, es, es_acl_prepare, test_users):
    """IdACL.update() is a no-op and must not raise."""
    with db.session.begin_nested():
        noop_acl = IdACL(name='test', schemas=[RECORD_SCHEMA],
                         priority=0, operation='get', originator=test_users.u1,
                         record_id='1111-11111111-11111111-1111')
        db.session.add(noop_acl)
    noop_acl.update()
def test_id_acl_delete(app, db, es, es_acl_prepare, test_users):
    """IdACL.delete() is a no-op and must not raise."""
    with db.session.begin_nested():
        noop_acl = IdACL(name='test', schemas=[RECORD_SCHEMA],
                         priority=0, operation='get', originator=test_users.u1,
                         record_id='1111-11111111-11111111-1111')
        db.session.add(noop_acl)
    noop_acl.delete()
def test_id_acl_repr(app, db, es, es_acl_prepare, test_users):
    """repr() names the ACL kind and the targeted record id."""
    with db.session.begin_nested():
        acl = IdACL(name='test', schemas=[RECORD_SCHEMA],
                    priority=0, operation='get', originator=test_users.u1,
                    record_id='1111-11111111-11111111-1111')
        db.session.add(acl)
    rendered = repr(acl)
    assert rendered == "ID ACL on 1111-11111111-11111111-1111"
def test_id_acl_record_str(app, db, es, es_acl_prepare, test_users):
    """record_str renders a readable title (or a fallback) per record."""
    pid1, record1 = create_record({})
    pid2, record2 = create_record({'title': 'blah'})
    pid3, record3 = create_record({'title': {'_': 'blah', 'cs': 'blah cs', 'en': 'blah en'}})

    def make_acl(record_id):
        # All ACLs share the same metadata; only the targeted record differs.
        return IdACL(name='test', schemas=[RECORD_SCHEMA],
                     priority=0, operation='get', originator=test_users.u1,
                     record_id=record_id)

    acl1 = make_acl(str(record1.id))
    acl2 = make_acl(str(record2.id))
    acl3 = make_acl(str(record3.id))
    acl4 = make_acl('1111-11111111-11111111-1111')

    assert acl1.record_str == "%s: {'control_number': '1'}" % record1.id
    assert acl2.record_str == "%s: blah" % record2.id
    assert acl3.record_str == "%s: blah" % record3.id
    assert acl4.record_str == "No record for ID ACL on 1111-11111111-11111111-1111"
|
import json
import urllib
from django.contrib.auth.decorators import user_passes_test
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from crits.actors.forms import AddActorForm, AddActorIdentifierForm
from crits.actors.handlers import generate_actor_csv, generate_actor_jtable
from crits.actors.handlers import generate_actor_identifier_jtable
from crits.actors.handlers import generate_actor_identifier_csv
from crits.actors.handlers import get_actor_details, add_new_actor, actor_remove
from crits.actors.handlers import create_actor_identifier_type
from crits.actors.handlers import get_actor_tags_by_type, update_actor_tags
from crits.actors.handlers import add_new_actor_identifier, actor_identifier_types
from crits.actors.handlers import actor_identifier_type_values
from crits.actors.handlers import attribute_actor_identifier
from crits.actors.handlers import set_identifier_confidence, remove_attribution
from crits.actors.handlers import set_actor_name, set_actor_description
from crits.actors.handlers import update_actor_aliases
from crits.core import form_consts
from crits.core.data_tools import json_handler
from crits.core.user_tools import user_can_view_data, is_admin
@user_passes_test(user_can_view_data)
def actor_identifiers_listing(request, option=None):
    """
    Render the Actor Identifier listing page.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', 'csv', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    # CSV export short-circuits; everything else is a jtable action.
    if option == "csv":
        return generate_actor_identifier_csv(request)
    return generate_actor_identifier_jtable(request, option)
@user_passes_test(user_can_view_data)
def actors_listing(request, option=None):
    """
    Render the Actor listing page.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', 'csv', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    # CSV export short-circuits; everything else is a jtable action.
    if option == "csv":
        return generate_actor_csv(request)
    return generate_actor_jtable(request, option)
@user_passes_test(user_can_view_data)
def actor_search(request):
    """
    Redirect to the Actor listing filtered by the requested search.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponseRedirect`
    """
    search_type = request.GET.get('search_type', '')
    term = request.GET.get('q', '').strip()
    query = {search_type: term}
    listing_url = reverse('crits.actors.views.actors_listing')
    return HttpResponseRedirect(listing_url + "?%s" % urllib.urlencode(query))
@user_passes_test(user_can_view_data)
def actor_detail(request, id_):
    """
    Render the Actor details page.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param id_: The Actor ObjectId to get details for.
    :type id_: str
    :returns: :class:`django.http.HttpResponse`
    """
    analyst = request.user.username
    (new_template, args) = get_actor_details(id_, analyst)
    # Fall back to the default template unless the handler chose one.
    template = new_template if new_template else "actor_detail.html"
    return render_to_response(template, args, RequestContext(request))
@user_passes_test(user_can_view_data)
def add_actor(request):
    """
    Add an Actor. Should be an AJAX POST.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    if not (request.method == "POST" and request.is_ajax()):
        return render_to_response("error.html",
                                  {'error': 'Expected AJAX/POST'},
                                  RequestContext(request))
    form = AddActorForm(request.user, request.POST)
    if not form.is_valid():
        # Hand the rendered form (with validation errors) back to the client.
        return HttpResponse(json.dumps({'success': False,
                                        'form': form.as_table()}),
                            mimetype="application/json")
    data = form.cleaned_data
    result = add_new_actor(data['name'],
                           aliases=data['aliases'],
                           description=data['description'],
                           source=data['source'],
                           source_method=data['source_method'],
                           source_reference=data['source_reference'],
                           campaign=data['campaign'],
                           confidence=data['confidence'],
                           analyst=request.user.username,
                           bucket_list=data.get(form_consts.Common.BUCKET_LIST_VARIABLE_NAME),
                           ticket=data.get(form_consts.Common.TICKET_VARIABLE_NAME))
    return HttpResponse(json.dumps(result, default=json_handler),
                        mimetype='application/json')
@user_passes_test(user_can_view_data)
def remove_actor(request, id_):
    """
    Remove an Actor. Requires a POST from an admin user.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param id_: The ObjectId of the Actor to remove.
    :type id_: str
    :returns: :class:`django.http.HttpResponse`
    """
    if request.method != "POST":
        return render_to_response('error.html',
                                  {'error': 'Expected AJAX/POST'},
                                  RequestContext(request))
    if not is_admin(request.user):
        # Only admins may delete top-level objects.
        return render_to_response("error.html",
                                  {'error': 'You do not have permission to remove this item.'},
                                  RequestContext(request))
    actor_remove(id_, request.user.username)
    return HttpResponseRedirect(reverse('crits.actors.views.actors_listing'))
@user_passes_test(user_can_view_data)
def get_actor_identifier_types(request):
    """
    Get Actor Identifier types. Should be an AJAX POST.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponseRedirect`
    """
    if not (request.method == "POST" and request.is_ajax()):
        return render_to_response("error.html",
                                  {"error": "Expected AJAX POST"},
                                  RequestContext(request))
    return HttpResponse(json.dumps(actor_identifier_types(True)),
                        mimetype="application/json")
@user_passes_test(user_can_view_data)
def get_actor_identifier_type_values(request):
    """
    Get the values for one Actor Identifier type. Should be an AJAX POST.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponseRedirect`
    """
    if not (request.method == "POST" and request.is_ajax()):
        return render_to_response("error.html",
                                  {"error": "Expected AJAX POST"},
                                  RequestContext(request))
    requested_type = request.POST.get('type', None)
    result = actor_identifier_type_values(requested_type, request.user.username)
    return HttpResponse(json.dumps(result),
                        mimetype="application/json")
@user_passes_test(user_can_view_data)
def new_actor_identifier_type(request):
    """
    Create a new Actor Identifier type. Should be an AJAX POST.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponseRedirect`
    """
    if not (request.method == "POST" and request.is_ajax()):
        return render_to_response("error.html",
                                  {"error": "Expected AJAX POST"},
                                  RequestContext(request))
    identifier_type = request.POST.get('identifier_type', None)
    if not identifier_type:
        return HttpResponse(json.dumps({'success': False,
                                        'message': 'Need a name.'}),
                            mimetype="application/json")
    result = create_actor_identifier_type(request.user.username, identifier_type)
    return HttpResponse(json.dumps(result),
                        mimetype="application/json")
@user_passes_test(user_can_view_data)
def actor_tags_modify(request):
    """
    Update tags for an Actor for a given tag type. Should be an AJAX POST.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponseRedirect`
    """
    if not (request.method == "POST" and request.is_ajax()):
        return render_to_response("error.html",
                                  {"error": "Expected AJAX POST"},
                                  RequestContext(request))
    tag_type = request.POST.get('tag_type', None)
    if not tag_type:
        return HttpResponse(json.dumps({'success': False,
                                        'message': 'Need a tag type.'}),
                            mimetype="application/json")
    result = update_actor_tags(request.POST.get('oid', None),
                               tag_type,
                               request.POST.get('tags', None),
                               request.user.username)
    return HttpResponse(json.dumps(result),
                        mimetype="application/json")
@user_passes_test(user_can_view_data)
def get_actor_tags(request):
    """
    Get the available tags for a given tag type. Should be an AJAX POST.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponseRedirect`
    """
    if not (request.method == "POST" and request.is_ajax()):
        return render_to_response("error.html",
                                  {"error": "Expected AJAX POST"},
                                  RequestContext(request))
    tag_type = request.POST.get('type', None)
    if not tag_type:
        return HttpResponse(json.dumps({'success': False,
                                        'message': 'Need a tag type.'}),
                            mimetype="application/json")
    result = get_actor_tags_by_type(tag_type)
    return HttpResponse(json.dumps(result),
                        mimetype="application/json")
@user_passes_test(user_can_view_data)
def add_identifier(request):
    """
    Create a new Actor Identifier. Should be an AJAX POST.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponseRedirect`
    """
    if not (request.method == "POST" and request.is_ajax()):
        return render_to_response("error.html",
                                  {"error": "Expected AJAX POST"},
                                  RequestContext(request))
    username = request.user.username
    form = AddActorIdentifierForm(username, request.POST)
    if not form.is_valid():
        # Hand the rendered form (with validation errors) back to the client.
        return HttpResponse(json.dumps({'success': False,
                                        'form': form.as_table()}),
                            mimetype="application/json")
    identifier_type = request.POST.get('identifier_type', None)
    identifier = request.POST.get('identifier', None)
    if not identifier_type or not identifier:
        return HttpResponse(json.dumps({'success': False,
                                        'message': 'Need a name.'}),
                            mimetype="application/json")
    result = add_new_actor_identifier(identifier_type,
                                      identifier,
                                      request.POST.get('source', None),
                                      request.POST.get('method', None),
                                      request.POST.get('reference', None),
                                      username)
    return HttpResponse(json.dumps(result),
                        mimetype="application/json")
@user_passes_test(user_can_view_data)
def attribute_identifier(request):
    """
    Attribute an Actor Identifier to an Actor. Should be an AJAX POST.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponseRedirect`
    """
    if not (request.method == "POST" and request.is_ajax()):
        return render_to_response("error.html",
                                  {"error": "Expected AJAX POST"},
                                  RequestContext(request))
    identifier_type = request.POST.get('identifier_type', None)
    identifier = request.POST.get('identifier', None)
    if not identifier_type or not identifier:
        return HttpResponse(json.dumps({'success': False,
                                        'message': 'Not all info provided.'}),
                            mimetype="application/json")
    result = attribute_actor_identifier(request.POST.get('id', None),
                                        identifier_type,
                                        identifier,
                                        request.POST.get('confidence', 'low'),
                                        request.user.username)
    return HttpResponse(json.dumps(result),
                        mimetype="application/json")
@user_passes_test(user_can_view_data)
def edit_attributed_identifier(request):
    """
    Edit an attributed Identifier (change confidence). Should be an AJAX POST.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponseRedirect`
    """
    if not (request.method == "POST" and request.is_ajax()):
        return render_to_response("error.html",
                                  {"error": "Expected AJAX POST"},
                                  RequestContext(request))
    identifier = request.POST.get('identifier_id', None)
    if not identifier:
        return HttpResponse(json.dumps({'success': False,
                                        'message': 'Not all info provided.'}),
                            mimetype="application/json")
    result = set_identifier_confidence(request.POST.get('id', None),
                                       identifier,
                                       request.POST.get('confidence', 'low'),
                                       request.user.username)
    return HttpResponse(json.dumps(result),
                        mimetype="application/json")
@user_passes_test(user_can_view_data)
def remove_attributed_identifier(request):
    """
    Remove an Identifier attribution. Should be an AJAX POST.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponseRedirect`
    """
    if not (request.method == "POST" and request.is_ajax()):
        return render_to_response("error.html",
                                  {"error": "Expected AJAX POST"},
                                  RequestContext(request))
    # NOTE(review): the actor id arrives as 'object_type' and the identifier
    # as 'key' — unusual parameter names; confirm against the client-side form.
    identifier = request.POST.get('key', None)
    if not identifier:
        return HttpResponse(json.dumps({'success': False,
                                        'message': 'Not all info provided.'}),
                            mimetype="application/json")
    result = remove_attribution(request.POST.get('object_type', None),
                                identifier,
                                request.user.username)
    return HttpResponse(json.dumps(result),
                        mimetype="application/json")
@user_passes_test(user_can_view_data)
def edit_actor_name(request, id_):
    """
    Set an Actor's name. Should be an AJAX POST.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param id_: The ObjectId of the Actor.
    :type id_: str
    :returns: :class:`django.http.HttpResponseRedirect`
    """
    if not (request.method == "POST" and request.is_ajax()):
        return render_to_response("error.html",
                                  {"error": "Expected AJAX POST"},
                                  RequestContext(request))
    name = request.POST.get('name', None)
    if not name:
        return HttpResponse(json.dumps({'success': False,
                                        'message': 'Not all info provided.'}),
                            mimetype="application/json")
    result = set_actor_name(id_, name, request.user.username)
    return HttpResponse(json.dumps(result),
                        mimetype="application/json")
@user_passes_test(user_can_view_data)
def edit_actor_description(request, id_):
    """
    Set an Actor's description. Should be an AJAX POST.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param id_: The ObjectId of the Actor.
    :type id_: str
    :returns: :class:`django.http.HttpResponseRedirect`
    """
    if not (request.method == "POST" and request.is_ajax()):
        return render_to_response("error.html",
                                  {"error": "Expected AJAX POST"},
                                  RequestContext(request))
    description = request.POST.get('description', None)
    if not description:
        return HttpResponse(json.dumps({'success': False,
                                        'message': 'Not all info provided.'}),
                            mimetype="application/json")
    result = set_actor_description(id_, description, request.user.username)
    return HttpResponse(json.dumps(result),
                        mimetype="application/json")
#TODO:
@user_passes_test(user_can_view_data)
def edit_actor_aliases(request):
    """
    Update the alias list of an Actor. Should be an AJAX POST.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponseRedirect`
    """
    if not (request.method == "POST" and request.is_ajax()):
        return render_to_response("error.html",
                                  {"error": "Expected AJAX POST"},
                                  RequestContext(request))
    result = update_actor_aliases(request.POST.get('oid', None),
                                  request.POST.get('aliases', None),
                                  request.user.username)
    return HttpResponse(json.dumps(result),
                        mimetype="application/json")
|
""" MPI job submission
"""
from __future__ import absolute_import
import os, subprocess, logging
from threading import Thread
def sync_dir(local_dir, slave_node, slave_dir, port=22):
    """Sync the working directory from the root node to a slave node.

    Creates the destination directory over ssh first, then pushes the tree
    with rsync. Generalized: the ssh port was a hard-coded literal 22 inside
    the format tuple; it is now a parameter (default 22, backward-compatible).

    :param local_dir: local directory to copy (rsync trailing-slash rules apply).
    :param slave_node: hostname of the destination node.
    :param slave_dir: destination directory on ``slave_node``.
    :param port: ssh port used by rsync's transport (default 22).
    :raises subprocess.CalledProcessError: if ssh or rsync exits non-zero.
    """
    remote = slave_node + ":" + slave_dir
    logging.info('rsync %s -> %s', local_dir, remote)
    mkdir = 'ssh %s "mkdir -p %s"' % (slave_node, slave_dir)
    prog = 'rsync -az --rsh="ssh -o StrictHostKeyChecking=no -p %s" %s %s' % (
        port, local_dir, remote)
    # Ensure the target directory exists before rsync writes into it.
    subprocess.check_call([mkdir], shell=True)
    subprocess.check_call([prog], shell=True)
def get_hosts(host_file):
    """Parse an MPI host file and return the list of host names.

    Each non-empty line starts with a host name, optionally followed by
    whitespace-separated options (e.g. ``slots=4``); only the host name is kept.

    Fixes: the file handle was never closed (now a ``with`` block), and
    ``line.split(' ')`` left the trailing newline on single-column lines
    (``"host\\n"`` became the host ``"host\\n"``) and produced junk entries
    for blank lines; ``str.split()`` with no argument handles both.

    :param host_file: path to the MPI host file.
    :returns: list of host name strings.
    """
    hosts = []
    with open(host_file) as fh:
        for line in fh:
            parts = line.split()
            if parts:  # skip blank lines
                hosts.append(parts[0])
    return hosts
def submit(args):
    """Launch the distributed job on all hosts with mpirun.

    Syncs the current working directory to every host listed in the MPI
    host file, then starts ``args.num_workers`` workers via ``mpirun``.

    Fix: the two RuntimeError messages were ungrammatical
    ('can not none', 'must > 0'); command construction is unchanged.

    :param args: parsed command-line arguments; must provide ``host_file``,
        ``num_workers`` and the ``command`` list to execute.
    :raises RuntimeError: if ``args.host_file`` is missing or
        ``args.num_workers`` is not positive.
    """
    def mpi_submit(nworker):
        # A host file is mandatory for this launcher.
        if args.host_file is None:
            raise RuntimeError('--host-file cannot be None')
        # NOTE(review): 'spwan' looks like a typo for 'spawn' in the MCA
        # option name; kept as-is since changing it alters the command line.
        cmd = ' --mca plm_rsh_no_tree_spwan 1 --mca btl_tcp_if_include eno4 -hostfile %s' % (args.host_file)
        cmd += ' ' + ' '.join(args.command)
        # Sync the program directory to every host before launching.
        local_dir = os.getcwd() + '/'
        hosts = get_hosts(args.host_file)
        for h in hosts:
            sync_dir(local_dir, h, local_dir)
        # Start the workers.
        if nworker > 0:
            prog = 'mpirun -np %d %s' % (nworker, cmd)
            logging.info('Start %d workers by mpirun: %s' % (nworker, prog))
            subprocess.check_call(prog, shell=True)
        else:
            raise RuntimeError('--num-workers must be > 0')

    mpi_submit(args.num_workers)
|
import pytest
from dagster_graphql import DagsterGraphQLClientError, ReloadRepositoryLocationStatus
from .conftest import MockClient, python_client_test_suite
@python_client_test_suite
def test_reload_repo_location_success(mock_client: MockClient):
    """A RepositoryLocation payload maps to SUCCESS status."""
    mock_client.mock_gql_client.execute.return_value = {
        "reloadRepositoryLocation": {"__typename": "RepositoryLocation"}
    }
    result = mock_client.python_client.reload_repository_location("foo")
    assert result.status == ReloadRepositoryLocationStatus.SUCCESS
@python_client_test_suite
def test_reload_repo_location_failure(mock_client: MockClient):
    """A RepositoryLocationLoadFailure payload maps to FAILURE plus message."""
    expected_msg = "some reason"
    mock_client.mock_gql_client.execute.return_value = {
        "reloadRepositoryLocation": {
            "__typename": "RepositoryLocationLoadFailure",
            "error": {"message": expected_msg},
        }
    }
    result = mock_client.python_client.reload_repository_location("foo")
    assert result.status == ReloadRepositoryLocationStatus.FAILURE
    assert result.message == expected_msg
@python_client_test_suite
def test_reload_repo_location_fails_with_query_error(mock_client: MockClient):
    """Transport-level exceptions surface as DagsterGraphQLClientError."""
    mock_client.mock_gql_client.execute.side_effect = Exception("foo")
    with pytest.raises(DagsterGraphQLClientError):
        mock_client.python_client.reload_repository_location("foo")
|
# Hindi postposition ("preposition") lexicon for POS tagging: every key is a
# Devanagari token or multi-word phrase, every value is the tag 'Preposition'.
# NOTE(review): "को" appears twice (near the start and as the last entry) —
# duplicate dict keys are legal but redundant. Some keys carry a trailing
# space (e.g. "अधीन ", "दौर ") — confirm whether lookups are
# whitespace-sensitive before normalizing them.
dict_preposition={"अगला" :'Preposition',"ने":'Preposition',"ले":'Preposition',"टू":'Preposition',"इससे":'Preposition',
"अंदर" :'Preposition',"को":'Preposition',"वाले":'Preposition',
"अधीन " :'Preposition',
"अपेक्षा" :'Preposition',
"आगे" :'Preposition',
"आर-पार" :'Preposition',
"आस-पास" :'Preposition',
"उल्टा" :'Preposition',
"ऊपर" :'Preposition',
"ऐसा" :'Preposition',
"ओर" :'Preposition',
"का" :'Preposition',
"कारण" :'Preposition',
"की" :'Preposition',
"की ओर" :'Preposition',
"की ओर से" :'Preposition',
"की वजह से" :'Preposition',
"के" :'Preposition',
"के अतिरिक्त" :'Preposition',
"के अंदर" :'Preposition',
"के अनुसार" :'Preposition',
"के अलावा" :'Preposition',
"के ऊपर" :'Preposition',
"के कारण" :'Preposition',
"के खिलाफ" :'Preposition',
"के पार" :'Preposition',
"के पास" :'Preposition',
"के नज़दीक" :'Preposition',
"के पीछे" :'Preposition',
"के बजाय" :'Preposition',
"के बाद" :'Preposition',
"के बाद से" :'Preposition',
"के बारे में" :'Preposition',
"के बावजूद" :'Preposition',
"के बाहर" :'Preposition',
"के बीच" :'Preposition',
"के बीच में" :'Preposition',
"के माध्यम से" :'Preposition',
"के लिए" :'Preposition',
"के शीर्ष पर" :'Preposition',
"के सामने" :'Preposition',
"चारों ओर" :'Preposition',
"जब तक" :'Preposition',
"जरिए" :'Preposition',
"जहाँ तक" :'Preposition',
"जैसा" :'Preposition',
"जोड़" :'Preposition',
"तक" :'Preposition',
"तथा" :'Preposition',
"तरफ" :'Preposition',
"तरह" :'Preposition',
"तीन शब्द" :'Preposition',
"दूर" :'Preposition',
"दूर से" :'Preposition',
"दो शब्दों" :'Preposition',
"दौर " :'Preposition',
"दौरान" :'Preposition',
"द्वारा" :'Preposition',
"निकट" :'Preposition',
"नीचे" :'Preposition',
"पर" :'Preposition',
"परे" :'Preposition',
"पहले" :'Preposition',
"पहले से" :'Preposition',
"पार" :'Preposition',
"पास" :'Preposition',
"पीछे" :'Preposition',
"पूर्व" :'Preposition',
"प्रति" :'Preposition',
"बगल में" :'Preposition',
"बराबर" :'Preposition',
"बलबूते" :'Preposition',
"बाद" :'Preposition',
"बाहर" :'Preposition',
"बिना" :'Preposition',
"भर" :'Preposition',
"भांति" :'Preposition',
"भीतर" :'Preposition',
"मात्र" :'Preposition',
"मारे" :'Preposition',
"में" :'Preposition',
"योग्य" :'Preposition',
"लिए" :'Preposition',
"लेकिन" :'Preposition',
"विपरीत" :'Preposition',
"विरूद्ध" :'Preposition',
"संग" :'Preposition',
"समान" :'Preposition',
"समेत" :'Preposition',
"सहारे" :'Preposition',
"साथ" :'Preposition',
"सामने" :'Preposition',
"सिवा" :'Preposition',
"सिवाय" :'Preposition',
"से" :'Preposition',
"से पहले" :'Preposition',
"हेतु" :'Preposition',
"को":'Preposition',
}
from argparse import ArgumentParser
from bbox_handler import BoundingBoxHandler
import json
import sys
import os
# Command-line interface.
# Fix: the conditional requirements for --max_woh / --overlap used
# `'both' in sys.argv[-1]`, which inspects only the LAST argv token — so when
# `-d both` was not the final argument the two options were silently optional.
# Scan the whole argv (substring match also covers `--direction=both`).
ap = ArgumentParser()
ap.add_argument('-i', '--input', required=True,
                help='File that contains PPOCR rotated bboxes')
ap.add_argument('-o', '--output', required=True,
                help='File name after combine rotated bboxes')
ap.add_argument(
    '-d',
    '--direction',
    required = True,
    choices = ['+90', '-90', 'both'],
    help = 'Current right angle direction of input images'
)
_needs_both_args = any('both' in arg for arg in sys.argv[1:])
ap.add_argument(
    '--max_woh',
    required = _needs_both_args,
    type = float,
    help = '(Required if direction == "both") Maximum ratio width over height to filter'
)
ap.add_argument(
    '--overlap',
    required = _needs_both_args,
    type = float,
    help = '(Required if direction == "both") Overlap threshold to suppress'
)
args = vars(ap.parse_args())
'''Example:
python unrotated_convertor.py \
-i "../../Dataset/Tale of Kieu version 1871 - Rotate/Cache.cach" \
-o "../../Dataset/Tale of Kieu version 1871/Cache.cach" \
-d "both" \
--max_woh 0.25 \
--overlap 0.5
'''
# Resolve input/output relative to this script's own directory so the
# converter works regardless of the current working directory.
script_dir = os.path.dirname(os.path.abspath(__file__))
input_path = os.path.join(script_dir, args['input'])
output_path = os.path.join(script_dir, args['output'])
def rotate_bboxes_to_0deg(image_idx, file_path, bboxes):
    """Rotate all bounding boxes of one image back to the 0-degree frame.

    Fixes: the user-facing print said "bouding" (typo for "bounding"), and
    ``absolute_path`` was recomputed on every loop iteration although it is
    loop-invariant — it is now hoisted.

    :param image_idx: index of the image inside the dataset (used to check
        the +90/-90 alternation when direction == "both").
    :param file_path: image path whose stem ends with the rotation angle
        (the last 3 characters, e.g. "+90" or "-90").
    :param bboxes: list of bbox dicts, each with a 'points' entry.
    :returns: the same list with each bbox rotated to 0 degrees.
    :raises Exception: when the file's angle contradicts the requested
        direction or the even/odd +90/-90 naming convention.
    """
    angle = int(os.path.splitext(file_path)[0][-3:])
    if args['direction'] == 'both':
        # Even indices must be the +90 scans, odd indices the -90 scans.
        if (image_idx % 2 == 0 and angle != 90) or \
           (image_idx % 2 == 1 and angle != -90):
            raise Exception('''
            \nImage must have the following format:
            \n- "+90" postfix in name for even index
            \n- "-90" postfix in name for odd index
            ''')
    elif int(args['direction']) != angle:
        raise Exception('Image not meet current right angle direction')
    # The image location is identical for every bbox: compute it once.
    absolute_path = os.path.join(
        os.path.dirname(input_path),
        os.path.basename(file_path)  # Get file name
    )
    for idx, bbox in enumerate(bboxes):
        bboxes[idx] = BoundingBoxHandler.RotateOneBox(absolute_path, bbox, -angle)
        bboxes[idx]['points'] = BoundingBoxHandler.RectangleTransform(bboxes[idx]['points'])
    print('Rotated', file_path, 'bounding boxes to 0 degree')
    return bboxes
# Pass 1: read the cache file; each line is "<image path>\t<json bbox list>".
with open(input_path, 'r', encoding='utf-8') as file:
    dataset_bboxes = {}
    for line in file:
        file_path, bboxes = line.rstrip('\n').split('\t')
        dataset_bboxes[file_path] = json.loads(bboxes)

# Pass 2: rotate every image's boxes back to 0 degrees and write them out.
with open(output_path, 'w', encoding='utf-8') as file:
    if args['direction'] in ['+90', '-90']:
        # Single-direction mode: one output line per input image.
        for image_idx, item in enumerate(dataset_bboxes.items()):
            file_path, bboxes = item
            # Output path drops the angle suffix and the " - Rotate" folder.
            final_path = file_path.replace(args['direction'], '').replace(' - Rotate', '')
            bboxes = rotate_bboxes_to_0deg(image_idx, file_path, bboxes)
            # NOTE(review): max_woh is only *required* for direction == "both",
            # so it may be None here — confirm the filter tolerates that.
            bboxes = BoundingBoxHandler.WidthOverHeightFilter(bboxes, max_ratio=args['max_woh'])
            file.write(f'{final_path}\t{bboxes}\n')
    elif args['direction'] == 'both':
        # Paired mode: images alternate +90 / -90; merge each consecutive pair.
        dataset_length = len(dataset_bboxes)
        if dataset_length % 2 != 0:
            raise Exception('Number of images to rotate must be even')
        items = list(dataset_bboxes.items())
        for image_idx in range(0, dataset_length, 2):
            file_path_1, bboxes_1 = items[image_idx]  # for +90 degree
            file_path_2, bboxes_2 = items[image_idx + 1]  # for -90 degree
            final_path = file_path_1.replace('+90', '').replace(' - Rotate', '')
            bboxes_1 = rotate_bboxes_to_0deg(image_idx, file_path_1, bboxes_1)
            bboxes_2 = rotate_bboxes_to_0deg(image_idx + 1, file_path_2, bboxes_2)
            # Filter overly wide boxes, then suppress overlapping duplicates.
            final_bboxes = BoundingBoxHandler.WidthOverHeightFilter(
                bboxes_1 + bboxes_2,
                max_ratio = args['max_woh']
            )
            final_bboxes = BoundingBoxHandler.NonMaximumSuppression(
                final_bboxes,
                threshold = args['overlap'],
            )
            print('=> Merged', 'rotated bouding boxes for', final_path)
            file.write(f'{final_path}\t{final_bboxes}\n')
|
from __future__ import division
import difflib
import collections
import traceback
import sys
import ast
import re
from StringIO import StringIO
import sympy
from sympy.core.relational import Relational
import sympy.parsing.sympy_tokenize as sympy_tokenize
from token import NAME
# Lowercase sympy callables that should NOT fall through to the generic
# \mathrm{name}(...) rendering in LatexVisitor.visit_Call.
OTHER_SYMPY_FUNCTIONS = ('sqrt',)

# Parsed call signature: the function plus its positional and keyword args.
Arguments = collections.namedtuple('Arguments', 'function args kwargs')
class Eval(object):
    """Evaluate user-supplied Python/sympy code inside a kept namespace.

    NOTE(review): the mutable default ``namespace={}`` is shared between all
    instances constructed without an explicit namespace — confirm this is
    intentional before changing it.
    """

    def __init__(self, namespace={}):
        self._namespace = namespace

    def get(self, name):
        # Read a name from the namespace (None when absent).
        return self._namespace.get(name)

    def set(self, name, value):
        # Bind a name in the namespace.
        self._namespace[name] = value

    def eval_node(self, node):
        # Evaluate a single ast expression node against the namespace.
        tree = ast.fix_missing_locations(ast.Expression(node))
        return eval(compile(tree, '<string>', 'eval'), self._namespace)

    def eval(self, x, use_none_for_exceptions=False, repr_expression=True):
        """Execute *x*; return captured stdout plus the last line's value.

        All lines but the last are exec'd; the last line, when it compiles
        as an expression, is eval'd and (when *repr_expression*) repr()'d.
        On any exception the formatted traceback is returned as a string,
        or None when *use_none_for_exceptions* is set.
        """
        globals = self._namespace
        try:
            x = x.strip()
            x = x.replace("\r", "")
            y = x.split('\n')
            if len(y) == 0:
                return ''
            # s = everything except the last line; t = the last line.
            s = '\n'.join(y[:-1]) + '\n'
            t = y[-1]
            try:
                # Does the last line compile as an expression on its own?
                z = compile(t + '\n', '', 'eval')
            except SyntaxError:
                # No: treat it as a statement and exec it with the rest.
                s += '\n' + t
                z = None
            try:
                # Capture anything the user code prints.
                old_stdout = sys.stdout
                sys.stdout = StringIO()
                eval(compile(s, '', 'exec', division.compiler_flag), globals, globals)
                if not z is None:
                    r = eval(z, globals)
                    if repr_expression:
                        r = repr(r)
                else:
                    r = ''
                if repr_expression:
                    # Prepend the captured stdout to the expression value.
                    sys.stdout.seek(0)
                    r = sys.stdout.read() + r
            finally:
                # Always restore stdout, even when user code raises.
                sys.stdout = old_stdout
            return r
        except:
            # Bare except is deliberate: any user-code failure becomes the
            # returned traceback string instead of propagating.
            if use_none_for_exceptions:
                return
            etype, value, tb = sys.exc_info()
            # If we decide in the future to remove the first frame fromt he
            # traceback (since it links to our code, so it could be confusing
            # to the user), it's easy to do:
            #tb = tb.tb_next
            s = "".join(traceback.format_exception(etype, value, tb))
            return s
class LatexVisitor(ast.NodeVisitor):
    """AST visitor that renders a parsed user expression as LaTeX.

    ``self.evaluator`` (an :class:`Eval`) is not assigned here — presumably
    set by the caller before visiting (TODO confirm at the call site). The
    result is stored in ``self.latex`` and also returned by visit_Call.
    """

    # Calls rewritten to their unevaluated sympy counterparts so the output
    # shows e.g. an integral sign instead of the evaluated result.
    EXCEPTIONS = {'integrate': sympy.Integral, 'diff': sympy.Derivative}
    # Registry: function name -> formatter(node, visitor) -> LaTeX string.
    formatters = {}

    @staticmethod
    def formats_function(name):
        # Decorator factory registering *f* as the formatter for *name*.
        def _formats_function(f):
            LatexVisitor.formatters[name] = f
            return f
        return _formats_function

    def format(self, name, node):
        # Apply the registered formatter for *name*, or None when absent.
        formatter = LatexVisitor.formatters.get(name)
        if not formatter:
            return None
        return formatter(node, self)

    def visit_Call(self, node):
        """Render a call node to LaTeX; store and return the string."""
        buffer = []
        fname = node.func.id
        # Only apply to lowercase names (i.e. functions, not classes)
        if fname in self.__class__.EXCEPTIONS:
            # Rewrite the call in place to the unevaluated sympy class.
            node.func.id = self.__class__.EXCEPTIONS[fname].__name__
            self.latex = sympy.latex(self.evaluator.eval_node(node))
        else:
            result = self.format(fname, node)
            if result:
                self.latex = result
            elif fname[0].islower() and fname not in OTHER_SYMPY_FUNCTIONS:
                # Unknown lowercase callable: typeset as \mathrm{name}(args).
                buffer.append("\\mathrm{%s}" % fname.replace('_', '\\_'))
                buffer.append('(')
                latexes = []
                for arg in node.args:
                    # Recurse into nested lowercase calls so they get the
                    # same treatment; evaluate everything else directly.
                    if isinstance(arg, ast.Call) and getattr(arg.func, 'id', None) and arg.func.id[0].lower() == arg.func.id[0]:
                        latexes.append(self.visit_Call(arg))
                    else:
                        latexes.append(sympy.latex(self.evaluator.eval_node(arg)))
                buffer.append(', '.join(latexes))
                buffer.append(')')
                self.latex = ''.join(buffer)
            else:
                self.latex = sympy.latex(self.evaluator.eval_node(node))
        return self.latex
@LatexVisitor.formats_function('solve')
def format_solve(node, visitor):
    """Render solve(expr, *vars) as 'solve <expr> [=0] [for v1, v2, ...]'."""
    expr = visitor.evaluator.eval_node(node.args[0])
    pieces = [r'\mathrm{solve}\;', sympy.latex(expr)]
    if not isinstance(expr, Relational):
        # A bare expression is implicitly equated to zero.
        pieces.append('=0')
    extra = node.args[1:]
    if extra:
        pieces.append(r'\;\mathrm{for}\;')
        rendered = [sympy.latex(visitor.evaluator.eval_node(a)) for a in extra]
        pieces.append(r',\, '.join(rendered))
    return ''.join(pieces)
@LatexVisitor.formats_function('limit')
def format_limit(node, visitor):
    """Render limit(expr, var, point[, dir]) via sympy.Limit; needs >= 3 args."""
    if len(node.args) < 3:
        return None
    evaluated = [visitor.evaluator.eval_node(arg) for arg in node.args]
    return sympy.latex(sympy.Limit(*evaluated))
@LatexVisitor.formats_function('prime')
def format_prime(node, visitor):
    """Describe prime(n) as 'n-th prime number' with an ordinal superscript."""
    index = sympy.latex(visitor.evaluator.eval_node(node.args[0]))
    suffix = r'^\mathrm{' + ordinal(int(index)) + r'}\; \mathrm{prime~number}'
    return index + suffix
@LatexVisitor.formats_function('isprime')
def format_isprime(node, visitor):
    """Describe isprime(n) as the question 'Is n prime?'."""
    candidate = sympy.latex(visitor.evaluator.eval_node(node.args[0]))
    return r'\mathrm{Is~}' + candidate + r'\mathrm{~prime?}'
@LatexVisitor.formats_function('nextprime')
def format_nextprime(node, visitor):
    """Describe nextprime(n) in words."""
    value = sympy.latex(visitor.evaluator.eval_node(node.args[0]))
    return ''.join([r'\mathrm{Least~prime~greater~than~}', value])
@LatexVisitor.formats_function('factorint')
def format_factorint(node, visitor):
    """Describe factorint(n) in words."""
    value = sympy.latex(visitor.evaluator.eval_node(node.args[0]))
    return ''.join([r'\mathrm{Prime~factorization~of~}', value])
@LatexVisitor.formats_function('factor')
def format_factor(node, visitor):
    """Describe factor(expr) in words."""
    rendered = sympy.latex(visitor.evaluator.eval_node(node.args[0]))
    return ''.join([r'\mathrm{Factorization~of~}', rendered])
@LatexVisitor.formats_function('solve_poly_system')
def format_solve_poly_system(node, visitor):
    """Render solve_poly_system(eqs, *vars) as a LaTeX cases environment.

    Renamed from a duplicate ``format_factorint`` definition, which silently
    clobbered the factorint formatter's module-level name (the decorator
    registration still worked, but the module namespace was wrong).
    """
    equations = visitor.evaluator.eval_node(node.args[0])
    variables = tuple(map(visitor.evaluator.eval_node, node.args[1:]))
    # Unwrap a single variable so it prints without tuple parentheses.
    if len(variables) == 1:
        variables = variables[0]
    return ''.join([r'\mathrm{Solve~} \begin{cases} ',
                    r'\\'.join(map(sympy.latex, equations)),
                    r'\end{cases} \mathrm{~for~}',
                    sympy.latex(variables)])
@LatexVisitor.formats_function('plot')
def format_plot(node, visitor):
    """Describe a plot() call; falls back to keyword args if no positional ones."""
    if node.args:
        target = sympy.latex(visitor.evaluator.eval_node(node.args[0]))
    else:
        kwargs = {kw.arg: visitor.evaluator.eval_node(kw.value)
                  for kw in node.keywords}
        target = sympy.latex(kwargs)
    return r'\mathrm{Plot~}' + target
@LatexVisitor.formats_function('rsolve')
def format_rsolve(node, visitor):
    """Describe an rsolve() call, including initial conditions when given."""
    recurrence = sympy.latex(sympy.Eq(visitor.evaluator.eval_node(node.args[0]), 0))
    if len(node.args) != 3:
        return r'\mathrm{Solve~the~recurrence~}' + recurrence
    conds = visitor.evaluator.eval_node(node.args[2])
    initconds = '\\\\\n'.join('&' + sympy.latex(sympy.Eq(eqn, val))
                              for eqn, val in conds.items())
    return ''.join([r'\begin{align}',
                    r'&\mathrm{Solve~the~recurrence~}' + recurrence + r'\\',
                    r'&\mathrm{with~initial~conditions}\\',
                    initconds,
                    r'\end{align}'])
# Template wrapping a diophantine-equation description in an align environment
# and listing which symbols range over the integers. The doubled braces are
# literal LaTeX braces that survive str.format().
diophantine_template = (r"\begin{{align}}&{}\\&\mathrm{{where~}}"
                        r"{}\mathrm{{~are~integers}}\end{{align}}")
@LatexVisitor.formats_function('diophantine')
def format_diophantine(node, visitor):
    """Describe a diophantine() call, noting its integer-valued symbols."""
    expression = visitor.evaluator.eval_node(node.args[0])
    free = expression.free_symbols if isinstance(expression, sympy.Basic) else None
    description = (r'\mathrm{Solve~the~diophantine~equation~}'
                   + sympy.latex(sympy.Eq(expression, 0)))
    if free:
        description = diophantine_template.format(description, tuple(free))
    return description
@LatexVisitor.formats_function('summation')
@LatexVisitor.formats_function('product')
def format_sum_product(node, visitor):
    """Render summation()/product() via sympy.Sum / sympy.Product.

    Renamed from a duplicate ``format_diophantine`` definition, which
    clobbered the diophantine formatter's module-level name.
    """
    klass = sympy.Sum if node.func.id == 'summation' else sympy.Product
    return sympy.latex(klass(*map(visitor.evaluator.eval_node, node.args)))
@LatexVisitor.formats_function('help')
def format_help(node, visitor):
    """Describe a help() call by the target function's name."""
    if not node.args:
        return r'\mathrm{Show~documentation~(requires~1~argument)}'
    target = visitor.evaluator.eval_node(node.args[0])
    return r'\mathrm{Show~documentation~for~}' + target.__name__
class TopCallVisitor(ast.NodeVisitor):
    """Record the outermost call (or, failing that, bare name) in a parse tree."""

    def __init__(self):
        super(TopCallVisitor, self).__init__()
        self.call = None

    def visit_Call(self, node):
        # Deliberately no generic_visit: only the top-level call is captured.
        self.call = node

    def visit_Name(self, node):
        # A bare name counts only if nothing was captured before it.
        self.call = self.call or node
# From http://stackoverflow.com/a/739301/262727
def ordinal(n):
    """Return the English ordinal suffix ('st'/'nd'/'rd'/'th') for *n*."""
    # 11th, 12th, 13th are irregular: the teens always take 'th'.
    if 10 <= n % 100 < 20:
        return 'th'
    suffixes = {1: 'st', 2: 'nd', 3: 'rd'}
    return suffixes.get(n % 10, 'th')
# TODO: modularize all of this
def latexify(string, evaluator):
    """Parse *string* and return its LaTeX rendering via LatexVisitor."""
    visitor = LatexVisitor()
    visitor.evaluator = evaluator
    visitor.visit(ast.parse(string))
    return visitor.latex
def topcall(string):
    """Return the name of the outermost call (or bare name) in *string*.

    Returns None when the expression contains neither. Fixes the original,
    which dereferenced ``a.call.func`` even when no call was found
    (``call`` is None) or when the captured node was a bare ``ast.Name``
    (which has no ``func`` attribute), raising AttributeError either way.
    """
    visitor = TopCallVisitor()
    visitor.visit(ast.parse(string))
    node = visitor.call
    if isinstance(node, ast.Call):
        return getattr(node.func, 'id', None)
    if isinstance(node, ast.Name):
        return node.id
    return None
def arguments(string_or_node, evaluator):
    """Extract an ``Arguments(name, args, kwargs)`` record from a call.

    *string_or_node* may be source text (parsed, then scanned for the top
    call) or an ``ast.Call`` node. Returns None when neither a call nor a
    bare name is found.
    """
    node = None
    if not isinstance(string_or_node, ast.Call):
        a = TopCallVisitor()
        a.visit(ast.parse(string_or_node))
        if hasattr(a, 'call'):
            node = a.call
    else:
        node = string_or_node
    if node:
        if isinstance(node, ast.Call):
            name = getattr(node.func, 'id', None)  # when is it undefined?
            args, kwargs = None, None
            if node.args:
                args = list(map(evaluator.eval_node, node.args))
            # Keyword arguments are evaluated into a plain dict when present.
            kwargs = node.keywords
            if kwargs:
                kwargs = {kwarg.arg: evaluator.eval_node(kwarg.value) for kwarg in kwargs}
            return Arguments(name, args, kwargs)
        elif isinstance(node, ast.Name):
            # A bare name is reported with empty args/kwargs.
            return Arguments(node.id, [], {})
    return None
# Matches SymPy wrapper calls like Integer(5), Symbol('x'), Float("1.5"),
# Rational(3), capturing the wrapped value in group 2 (see re_calls_sub).
re_calls = re.compile(r'(Integer|Symbol|Float|Rational)\s*\([\'\"]?([a-zA-Z0-9\.]+)[\'\"]?\s*\)')
def re_calls_sub(match):
    """Substitution callback for re_calls: keep only the wrapped value."""
    return match.group(2)
def removeSymPy(string):
    """Strip SymPy wrapper calls (Integer(...), Symbol('x'), ...) from *string*."""
    try:
        stripped = re_calls.sub(re_calls_sub, string)
    except IndexError:
        return string
    return stripped
from sympy.parsing.sympy_parser import (
AppliedFunction, implicit_multiplication, split_symbols,
function_exponentiation, implicit_application, OP, NAME,
_group_parentheses, _apply_functions, _flatten, _token_callable)
def _implicit_multiplication(tokens, local_dict, global_dict):
    """Insert explicit '*' tokens between adjacent tokens that imply
    multiplication (e.g. ')(', 'x y', '2 sin(x)').

    Local variant of SymPy's pass of the same name: it additionally lets an
    applied ``Symbol(...)`` followed by '(' pass through unchanged so that
    ``f(x)`` can implicitly create a function symbol. The elif chain is
    order-sensitive; each branch handles one adjacency pattern.
    """
    result = []
    # Walk consecutive token pairs; the last token is appended afterwards.
    for tok, nextTok in zip(tokens, tokens[1:]):
        result.append(tok)
        if (isinstance(tok, AppliedFunction) and
              isinstance(nextTok, AppliedFunction)):
            result.append((OP, '*'))
        elif (isinstance(tok, AppliedFunction) and
              nextTok[0] == OP and nextTok[1] == '('):
            # Applied function followed by an open parenthesis
            if (tok.function[1] == 'Symbol' and
                len(tok.args[1][1]) == 3):
                # Allow implicit function symbol creation
                # TODO XXX need some way to offer alternative parsing here -
                # sometimes we want this and sometimes not, hard to tell when
                # (making it context-sensitive based on input function best)
                continue
            result.append((OP, '*'))
        elif (tok[0] == OP and tok[1] == ')' and
              isinstance(nextTok, AppliedFunction)):
            # Close parenthesis followed by an applied function
            result.append((OP, '*'))
        elif (tok[0] == OP and tok[1] == ')' and
              nextTok[0] == NAME):
            # Close parenthesis followed by an implicitly applied function
            result.append((OP, '*'))
        elif (tok[0] == nextTok[0] == OP
              and tok[1] == ')' and nextTok[1] == '('):
            # Close parenthesis followed by an open parenthesis
            result.append((OP, '*'))
        elif (isinstance(tok, AppliedFunction) and nextTok[0] == NAME):
            # Applied function followed by implicitly applied function
            result.append((OP, '*'))
        elif (tok[0] == NAME and
              not _token_callable(tok, local_dict, global_dict) and
              nextTok[0] == OP and nextTok[1] == '('):
            # Constant followed by parenthesis
            result.append((OP, '*'))
        elif (tok[0] == NAME and
              not _token_callable(tok, local_dict, global_dict) and
              nextTok[0] == NAME and
              not _token_callable(nextTok, local_dict, global_dict)):
            # Constant followed by constant
            result.append((OP, '*'))
        elif (tok[0] == NAME and
              not _token_callable(tok, local_dict, global_dict) and
              (isinstance(nextTok, AppliedFunction) or nextTok[0] == NAME)):
            # Constant followed by (implicitly applied) function
            result.append((OP, '*'))
    if tokens:
        result.append(tokens[-1])
    return result
def implicit_multiplication(result, local_dict, global_dict):
    """Makes the multiplication operator optional in most cases.

    Use this before :func:`implicit_application`, otherwise expressions like
    ``sin 2x`` will be parsed as ``x * sin(2)`` rather than ``sin(2*x)``.

    NOTE: this deliberately shadows the ``implicit_multiplication`` imported
    from ``sympy.parsing.sympy_parser`` above, substituting the local
    ``_implicit_multiplication`` pass into the pipeline.

    Example:
    >>> from sympy.parsing.sympy_parser import (parse_expr,
    ... standard_transformations, implicit_multiplication)
    >>> transformations = standard_transformations + (implicit_multiplication,)
    >>> parse_expr('3 x y', transformations=transformations)
    3*x*y
    """
    # Group parenthesized spans, fold applied functions, insert '*' tokens,
    # then flatten the grouped structure back into a token list.
    for step in (_group_parentheses(implicit_multiplication),
                 _apply_functions,
                 _implicit_multiplication):
        result = step(result, local_dict, global_dict)
    result = _flatten(result)
    return result
def custom_implicit_transformation(result, local_dict, global_dict):
    """Allows a slightly relaxed syntax.
    - Parentheses for single-argument method calls are optional.
    - Multiplication is implicit.
    - Symbol names can be split (i.e. spaces are not needed between
      symbols).
    - Functions can be exponentiated.
    Example:
    >>> from sympy.parsing.sympy_parser import (parse_expr,
    ... standard_transformations, implicit_multiplication_application)
    >>> parse_expr("10sin**2 x**2 + 3xyz + tan theta",
    ... transformations=(standard_transformations +
    ... (implicit_multiplication_application,)))
    3*x*y*z + 10*sin(x**2)**2 + tan(theta)
    """
    pipeline = (split_symbols, implicit_multiplication,
                implicit_application, function_exponentiation)
    for transform in pipeline:
        result = transform(result, local_dict, global_dict)
    return result
# User-facing aliases mapped to the SymPy function name that implements
# them; applied at the token level by synonyms() so the displayed output
# shows the canonical name.
SYNONYMS = {
    u'derivative': 'diff',
    u'derive': 'diff',
    u'integral': 'integrate',
    u'antiderivative': 'integrate',
    u'factorize': 'factor',
    u'graph': 'plot',
    u'draw': 'plot'
}
def synonyms(tokens, local_dict, global_dict):
    """Replace synonym names (e.g. 'derive') with their canonical form ('diff').

    Works at the token level so the "stringified" output that Gamma displays
    shows the correct function name. Must be applied before auto_symbol.
    """
    out = []
    for tok in tokens:
        if tok[0] == NAME and tok[1] in SYNONYMS:
            out.append((NAME, SYNONYMS[tok[1]]))
        else:
            out.append(tok)
    return out
def close_matches(s, global_dict):
    """
    Checks undefined names to see if they are close matches to a defined name.

    Returns the corrected source string if any name was replaced, otherwise
    None (meaning: no suggestion to offer).
    """
    tokens = sympy_tokenize.generate_tokens(StringIO(s.strip()).readline)
    result = []
    has_result = False
    all_names = set(global_dict).union(SYNONYMS)
    # strip the token location info to avoid strange untokenize results
    tokens = [(tok[0], tok[1]) for tok in tokens]
    for token in tokens:
        # Only consider multi-character names that are not already defined.
        if (token[0] == NAME and
            token[1] not in all_names and
            len(token[1]) > 1):
            matches = difflib.get_close_matches(token[1], all_names)
            # Defensive: drop an exact first match so we only ever suggest
            # a *different* name.
            if matches and matches[0] == token[1]:
                matches = matches[1:]
            if matches:
                result.append((NAME, matches[0]))
                has_result = True
                continue
        result.append(token)
    if has_result:
        return sympy_tokenize.untokenize(result).strip()
    return None
|
'''
# The GIL (Global Interpreter Lock)
Essentially similar to an OS-level mutex.
1. CPython introduced the GIL for two main reasons:
   - to sidestep complex race conditions in areas such as memory management;
   - because CPython relies heavily on C libraries, most of which are not
     natively thread-safe (thread safety costs performance and complexity).
2. How the GIL works
   1. A thread acquires the GIL when it starts executing, blocking other
      threads; after running for a while it releases the GIL so other
      threads get a chance to use the CPU.
   2. check_interval: the CPython interpreter periodically forces the
      current thread to release the GIL so other threads can run.
3. Python "thread safety" (NOT guaranteed at the application level)
   A single statement compiles to several bytecode steps, which are not
   atomic as a whole:
   >>> import dis
   >>> dis.dis(foo)
   LOAD_GLOBAL 0 (n)
   LOAD_CONST 1 (1)
   INPLACE_ADD
   STORE_GLOBAL 0 (n)
The GIL exists mainly for the convenience of CPython interpreter authors,
not for Python application programmers. As Python users we still need
locks and similar tools to guarantee thread safety.
'''
import time
import concurrent.futures
import threading
import asyncio
def CountDown(n):
    """CPU-bound busy loop: count *n* steps (GIL benchmark workload)."""
    for _ in range(n):
        pass
def multipleThread(n):
    """Split the countdown across two threads and wait for both to finish.

    Fixed: the original started and immediately joined each thread inside
    the spawn loop, so the two threads ran one after another instead of
    concurrently — defeating the point of the two-thread comparison.
    """
    workers = [threading.Thread(target=CountDown, args=[n // 2])
               for _ in range(2)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
async def asyCountDown(n):
    """Async variant of CountDown: a pure CPU loop that never yields."""
    for _ in range(n):
        pass
async def asyncThread(n):
    """Run the async countdown as a task and wait for it to complete.

    Fixed: the original created the task but returned without awaiting it,
    so ``asyncio.run()`` could tear the loop down before (or while) the
    countdown ran, making the timing below meaningless.
    """
    await asyncio.create_task(asyCountDown(n))
# Thread safety must also be ensured at the application level:
# shared counter plus the lock that protects it.
n = 0
lock = threading.Lock()
def foo():
    """Increment the shared global counter *n* by one, under *lock*.

    ``n += 1`` compiles to separate load/add/store bytecodes and is not
    atomic, so unsynchronized increments can be lost. The original left the
    lock commented out despite its own docstring requiring thread safety;
    this enables it.
    """
    global n
    with lock:
        n += 1
def test():
    """Spawn 100 threads that each increment n once, then print the total."""
    workers = [threading.Thread(target=foo) for _ in range(100)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print(n)
# Benchmark entry point: time the plain single-threaded countdown, then the
# asyncio-task variant. Both are CPU-bound, so (per the GIL notes above)
# neither gains from concurrency.
if __name__ == "__main__":
    start_time = time.perf_counter()
    CountDown(50000000)
    end_time = time.perf_counter()
    print('thread run time: ', end_time-start_time)
    # start_time = time.perf_counter()
    # multipleThread(100000000)
    # end_time = time.perf_counter()
    # print('multipleProcess run time: ', end_time-start_time)
    start_time = time.perf_counter()
    asyncio.run(asyncThread(50000000))
    end_time = time.perf_counter()
    print('asyncThread run time: ', end_time-start_time)
|
from skills_ml.job_postings.corpora import Word2VecGensimCorpusCreator
import json
# Fixture: one schema.org JobPosting document; the corpus creator test below
# expects its description + experience + qualifications + skills text.
sample_documents = [{
    "incentiveCompensation": "",
    "experienceRequirements": "Here are some experience and requirements",
    "baseSalary": {
        "maxValue": 0.0,
        "@type": "MonetaryAmount",
        "minValue": 0.0
    },
    "description": "We are looking for a person to fill this job",
    "title": "Bilingual (Italian) Customer Service Rep (Work from Home)",
    "employmentType": "Full-Time",
    "industry": "Call Center / SSO / BPO, Consulting, Sales - Marketing",
    "occupationalCategory": "",
    "onet_soc_code": "41-1011.00",
    "qualifications": "Here are some qualifications",
    "educationRequirements": "Not Specified",
    "skills": "Customer Service, Consultant, Entry Level",
    "validThrough": "2014-01-02T00:00:00",
    "jobLocation": {
        "@type": "Place",
        "address": {
            "addressLocality": "Salisbury",
            "addressRegion": "PA",
            "@type": "PostalAddress"
        }
    },
    "@context": "http://schema.org",
    "alternateName": "Customer Service Representative",
    "datePosted": "2013-05-12",
    "@type": "JobPosting"
}]
class FakeJobPostingGenerator(object):
    """Minimal stand-in for a job-posting generator: yields the fixture docs."""

    def __iter__(self):
        return iter(sample_documents)

    @property
    def metadata(self):
        return {'entity_type': 'text corpus'}
def test_word2vec_corpus_creator():
    """The corpus creator should emit one tokenized document for the fixture."""
    postings = FakeJobPostingGenerator()
    corpus = list(Word2VecGensimCorpusCreator(postings))
    expected = ('we are looking for a person to fill this job '
                'here are some experience and requirements '
                'here are some qualifications '
                'customer service consultant entry level').split()
    assert len(corpus) == 1
    assert corpus[0] == expected
|
from mmdet.apis import Inferencer
import mmcv
import cv2
from mmcv.runner import obj_from_dict
from mmdet import datasets
from mmdet.datasets.transforms import ImageTransform
# --- configuration ---------------------------------------------------------
config_file = 'configs/sipmask/sipmask_r50_caffe_fpn_gn_1x.py'
checkpoint_file = 'checkpoints/vis_sipmask_ms_1x_final.pth'
save_path = 'results/images'
device = 'cuda:0'
cfg = mmcv.Config.fromfile(config_file)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
    import torch  # fix: `torch` was referenced here without ever being imported
    torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
inferencer = Inferencer(cfg.data.test.img_scale,
                        cfg.data.test.img_norm_cfg,
                        size_divisor=cfg.data.test.size_divisor,
                        test_mode=True,
                        with_label=False,
                        with_mask=False)
# build the model from a config file and a checkpoint file
model = inferencer.init_detector(config_file, checkpoint_file, device=device)
# test a single image and show the results
# img = 'data/YouTubeVIS/valid/JPEGImages/fd5bf99712/00090.jpg'
# img = mmcv.imread(img)
# result = inference_detector(model, img)
# show_result(img, result, model.CLASSES)
# test a list of images and write the results to image files
# imgs = ['test1.jpg', 'test2.jpg']
# for i, result in enumerate(inference_detector(model, imgs)):
#     show_result(imgs[i], result, model.CLASSES, out_file='result_{}.jpg'.format(i))
# Run inference frame by frame over the video and save the visualizations.
video = mmcv.VideoReader('/SipMask-VIS/data/abc2_cuted.mp4')
for frame in video:
    # NOTE(review): presumably video.position has already advanced past the
    # current frame, hence the -1 — confirm against mmcv.VideoReader.
    (result, data) = inferencer.inference_detector(model, frame, video.position - 1)
    model.show_result(data, result, cfg.data.test.img_norm_cfg,
                      dataset=model.CLASSES,
                      save_vis=True,
                      save_path=save_path,
                      is_video=True)
|
import requests
import time
from math import sin,cos,atan2,sqrt,pi
# GLOBAL VARIABLES
TOKEN = 'tu_token'  # placeholder: your Ubidots API token
ETIQUETA = 'InternationalSpaceStation'
VARIABLE = "distancia"
URL_BASE = 'http://industrial.api.ubidots.com/api/v1.6/devices/'
# COORDINATES
# NOTE(review): `tu_latitud` / `tu_longitud` are unresolved placeholder names
# ("your latitude/longitude"); the script raises NameError until they are
# replaced with numeric coordinates.
LAT = tu_latitud
LONG = tu_longitud
def get_iss_position():
    """Fetch the ISS's current position from the Open Notify API.

    Returns a ``(lat, lng)`` tuple of floats.
    """
    response = requests.get('http://api.open-notify.org/iss-now.json')
    # Renamed the local: the original shadowed the builtin `dict`.
    payload = response.json()
    position = payload['iss_position']
    return float(position['latitude']), float(position['longitude'])
def grad2rad(grad):
    """Convert an angle from degrees to radians."""
    degrees_to_radians = pi / 180
    return grad * degrees_to_radians
def getDistance(lat_iss, lng_iss, lat, lng):
    """Haversine great-circle distance in km between two (lat, lng) points."""
    EARTH_RADIUS_KM = 6371
    delta_lat = grad2rad(lat - lat_iss)
    delta_lng = grad2rad(lng - lng_iss)
    half_chord = (sin(delta_lat / 2) * sin(delta_lat / 2)
                  + cos(grad2rad(lat_iss)) * cos(grad2rad(lat))
                  * sin(delta_lng / 2) * sin(delta_lng / 2))
    angular_distance = 2 * atan2(sqrt(half_chord), sqrt(1 - half_chord))
    return EARTH_RADIUS_KM * angular_distance
def build_payload(variable, value, lat_iss, lng_iss):
    """Build the Ubidots payload: the measured value plus a lat/lng position."""
    position = {"value": 1, "context": {"lat": lat_iss, "lng": lng_iss}}
    return {variable: value, "posicion": position}
def send_ubidots(etiqueta, payload):
    """POST *payload* to the Ubidots device *etiqueta*.

    Retries while the HTTP status is >= 400, up to 6 attempts; returns the
    JSON body of the last response.
    """
    url = "{0}{1}/?token={2}".format(URL_BASE, etiqueta, TOKEN)
    attempts = 0
    status = 400
    while status >= 400 and attempts <= 5:
        req = requests.post(url, json=payload)
        status = req.status_code
        attempts += 1
    return req.json()
def main(etiqueta, variable, lat, lng):
    """Compute the rounded ISS distance from (lat, lng) and publish it.

    Returns the Ubidots server's JSON response.
    """
    iss_lat, iss_lng = get_iss_position()
    km = round(getDistance(iss_lat, iss_lng, lat, lng), 1)
    payload = build_payload(variable, km, iss_lat, iss_lng)
    return send_ubidots(etiqueta, payload)
# Poll once per second, forever. Errors from one iteration are ignored so
# the loop keeps running (best effort), but — unlike the original bare
# `except:` — KeyboardInterrupt/SystemExit now propagate so Ctrl+C works.
if __name__ == '__main__':
    while True:
        try:
            response = main(ETIQUETA, VARIABLE, LAT, LONG)
            print("Respuesta JSON desde el servidor: \n{0}".format(response))
        except Exception:
            pass
        time.sleep(1)
|
#VERSION: 2.03
# AUTHORS: Christophe Dumez (chris@qbittorrent.org)
# Douman (custparasite@gmx.se)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from HTMLParser import HTMLParser
from re import compile as re_compile
class legittorrents(object):
    """qBittorrent search-engine plugin for legittorrents.info.

    Legacy Python 2 code (note the ``HTMLParser`` module import above).
    """
    url = 'http://www.legittorrents.info'
    name = 'Legit Torrents'
    supported_categories = {'all': '0', 'movies': '1', 'tv': '13',
                            'music': '2', 'games': '3', 'anime': '5', 'books': '6'}
    def download_torrent(self, info):
        # Plugin protocol: print the string produced by download_file(info).
        print(download_file(info))
    class MyHtmlParseWithBlackJack(HTMLParser):
        """ Parser class """
        def __init__(self, url):
            HTMLParser.__init__(self)
            self.url = url
            # Result row currently being assembled; None outside a <tr>.
            self.current_item = None
            # Key ('seeds' or 'leech') to store the next text chunk under.
            self.save_item_key = None
        def handle_starttag(self, tag, attrs):
            """ Parser's start tag handler """
            if self.current_item:
                params = dict(attrs)
                if tag == "a":
                    link = params["href"]
                    if link.startswith("index") and "title" in params:
                        # description link
                        self.current_item["name"] = params["title"][14:]
                        self.current_item["desc_link"] = "/".join((self.url, link))
                    elif link.startswith("download"):
                        self.current_item["link"] = "/".join((self.url, link))
                elif tag == "td":
                    # width="30" cells carry the counters: seeds first, then leech.
                    if ("width" in params and params["width"] == "30"
                            and "leech" not in self.current_item):
                        self.save_item_key = "leech" if "seeds" in self.current_item else "seeds"
            elif tag == "tr":
                # A new table row starts a fresh result item.
                self.current_item = {}
                self.current_item["size"] = ""
                self.current_item["engine_url"] = self.url
        def handle_endtag(self, tag):
            """ Parser's end tag handler """
            if self.current_item and tag == "tr":
                # Only rows that collected enough fields are real results.
                if len(self.current_item) > 4:
                    prettyPrinter(self.current_item)
                self.current_item = None
        def handle_data(self, data):
            """ Parser's data handler """
            if self.save_item_key:
                self.current_item[self.save_item_key] = data.strip()
                self.save_item_key = None
    def search(self, what, cat='all'):
        """ Performs search """
        query = "".join((self.url, "/index.php?page=torrents&search=", what, "&category=",
                         self.supported_categories.get(cat, '0'), "&active=1"))
        # NOTE(review): .search() returns None when the table regex does not
        # match, which would raise AttributeError below — confirm intended.
        get_table = re_compile(r'(?s)<table\sclass="lista".*>(.*)</table>')
        data = get_table.search(retrieve_url(query)).group(0)
        # extract first ten pages of next results
        next_pages = re_compile('(?m)<option value="(.*)">[0-9]+</option>')
        next_pages = ["".join((self.url, page)) for page in next_pages.findall(data)[:10]]
        parser = self.MyHtmlParseWithBlackJack(self.url)
        parser.feed(data)
        parser.close()
        # NOTE(review): feeding again after close() relies on the (Python 2)
        # HTMLParser tolerating reuse — confirm before refactoring.
        for page in next_pages:
            parser.feed(get_table.search(retrieve_url(page)).group(0))
            parser.close()
|
# Copyright 2020 William José Moreno Reyes
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contributors:
# - William José Moreno Reyes
"""
Define las tablas utilizadas por el módulo de Activo Fijo.
"""
from cacao_accounting.database import db
class FamiliaActivoFijo(db.Model):
    """Fixed-asset family catalog: assets sharing an asset account and
    useful-life settings. Supports a self-referencing hierarchy via `padre`."""
    __table_args__ = (db.UniqueConstraint("id", "nombre", name="moduloaf-familia"),)
    id = db.Column(db.Integer(), nullable=False, primary_key=True)
    activa = db.Column(db.Boolean())
    nombre = db.Column(db.String(50), unique=True)
    entidad = db.Column(db.String(10), db.ForeignKey("entidad.id"))
    grupo = db.Column(db.Boolean())
    # Parent family (by name) — grouping node in the hierarchy.
    padre = db.Column(db.String(50), db.ForeignKey("familia_activo_fijo.nombre"), nullable=True)
    cta_activo = db.Column(db.String(50), db.ForeignKey("cuentas.codigo"))
    vida_util = db.Column(db.Integer())
    # NOTE(review): presumably useful life for tax purposes — confirm units.
    vida_util_fiscal = db.Column(db.Integer())
class UbicacionActivoFijo(db.Model):
    """Fixed-asset location catalog, hierarchical via `padre`, with the
    depreciation account used for assets at this location."""
    __table_args__ = (db.UniqueConstraint("id", "nombre", name="moduloaf-ubicacion"),)
    id = db.Column(db.Integer(), nullable=False, primary_key=True)
    activa = db.Column(db.Boolean())
    nombre = db.Column(db.String(50), unique=True)
    entidad = db.Column(db.String(10), db.ForeignKey("entidad.id"))
    grupo = db.Column(db.Boolean())
    # Parent location (by name) — grouping node in the hierarchy.
    padre = db.Column(db.String(50), db.ForeignKey("ubicacion_activo_fijo.nombre"), nullable=True)
    cta_depreciacion = db.Column(db.String(50), db.ForeignKey("cuentas.codigo"))
class ActivoFijo(db.Model):
    """A fixed asset: identification data, physical details, its family and
    location, and its cost in primary/secondary currency."""
    __table_args__ = (db.UniqueConstraint("id", "nombre", name="moduloaf-activofijo"),)
    id = db.Column(db.Integer(), nullable=False, primary_key=True)
    # Date the asset was registered (entered service).
    alta = db.Column(db.Date())
    codigo = db.Column(db.String(150))
    agrupador = db.Column(db.Boolean())
    individual = db.Column(db.Boolean())
    # Parent asset (by name) when this asset belongs to a group.
    padre = db.Column(db.String(50), db.ForeignKey("activo_fijo.nombre"), nullable=True)
    fisico = db.Column(db.Boolean())
    amortizable = db.Column(db.Boolean())
    nombre = db.Column(db.String(150), unique=True)
    # Physical identification details (vehicle/equipment fields).
    marca = db.Column(db.String(150))
    modelo = db.Column(db.String(150))
    serie = db.Column(db.String(150))
    motor = db.Column(db.String(150))
    chasis = db.Column(db.String(150))
    placa = db.Column(db.String(150))
    registro = db.Column(db.String(150))
    familia = db.Column(db.String(50), db.ForeignKey("familia_activo_fijo.nombre"))
    ubicacion = db.Column(db.String(50), db.ForeignKey("ubicacion_activo_fijo.nombre"))
    moneda_principal = db.Column(db.String(5), db.ForeignKey("moneda.id"))
    costo = db.Column(db.Numeric())
    moneda_secundaria = db.Column(db.String(5), db.ForeignKey("moneda.id"))
|
import sys
import os
import importlib
import tensorflow as tf
import pathlib
from pprint import pprint
from multiprocessing import Pool
from functools import partial
from datetime import datetime, timedelta, timezone
# This is so that the following imports work
sys.path.append(os.path.realpath("."))
import src.utils.utils as utils
import src.utils.filter_utils as filter_utils
import src.constants as constants
import src.algorithms.lstm_classifier.lstm_classifier as lstm_classifier
from src.app_config.app_config import AppConfig, ReviewChannelTypes, CategorizationAlgorithms
from src.review.review import Review
from src.algorithms.text_match.text_match import text_match
from src.algorithms.sentiment import get_sentiment
def add_review_sentiment_score(review):
    """Attach a sentiment score to *review*'s derived insight; return the review."""
    review.derived_insight.sentiment = get_sentiment(review.message)
    return review
def text_match_categortization(review, app_config, topics):
    """Categorize *review* by keyword text-match; store category and scores.

    (The misspelled name is preserved for caller compatibility; `app_config`
    is accepted but not used here.)
    """
    scores, best_category = text_match(review.message, topics)
    review.derived_insight.category = best_category
    review.derived_insight.extra_properties[constants.CATEGORY_SCORES] = scores
    return review
def lstm_classification(reviews, model, article_tokenizer, label_tokenizer, cleaned_labels):
    """Predict an LSTM category per review and record it in extra_properties."""
    messages = [item.message for item in reviews]
    predicted = lstm_classifier.predict_labels(
        messages,
        model,
        article_tokenizer,
        label_tokenizer
    )
    for review, label_key in zip(reviews, predicted):
        review.derived_insight.extra_properties[constants.LSTM_CATEGORY] = cleaned_labels[label_key]
    return reviews
def bug_feature_classification(review, topics):
    """Tag *review* as bug vs. feature via keyword text-match; return the review."""
    _, verdict = text_match(review.message, topics)
    review.derived_insight.extra_properties[constants.BUG_FEATURE] = verdict
    return review
def run_algo():
    """Run the review-processing pipeline for every configured app.

    For each app config: load the parsed reviews, filter them by channel and
    recency, add sentiment scores, run the configured categorization
    algorithms (text-match, bug/feature, optional LSTM), and dump the
    processed reviews to disk.
    """
    import re  # fix: re.sub is used below but `re` was never imported in this file

    app_configs = utils.open_json(
        constants.APP_CONFIG_FILE.format(file_name=constants.APP_CONFIG_FILE_NAME)
    )
    for app_config_file in app_configs:
        app_config = AppConfig(utils.open_json(app_config_file))
        # Path where the user reviews were stored after parsing.
        parsed_user_reviews_file_path = constants.PARSED_USER_REVIEWS_FILE_PATH.format(
            base_folder=app_config.fawkes_internal_config.data.base_folder,
            dir_name=app_config.fawkes_internal_config.data.parsed_data_folder,
            app_name=app_config.app.name,
        )
        # Load and deserialize the reviews.
        reviews = [
            Review.from_review_json(review)
            for review in utils.open_json(parsed_user_reviews_file_path)
        ]
        # Keep only reviews from enabled channels inside the recency window.
        reviews = filter_utils.filter_reviews_by_time(
            filter_utils.filter_reviews_by_channel(
                reviews,
                filter_utils.filter_disabled_review_channels(app_config),
            ),
            datetime.now(timezone.utc)
            - timedelta(days=app_config.algorithm_config.algorithm_days_filter),
        )
        # Size the worker pool; CI containers over-report CPU count.
        num_processes = min(constants.PROCESS_NUMBER, os.cpu_count())
        if constants.CIRCLECI in os.environ:
            num_processes = 2
        # Sentiment scoring.
        with Pool(num_processes) as process:
            reviews = process.map(add_review_sentiment_score, reviews)
        if (app_config.algorithm_config.categorization_algorithm is not None
                and app_config.algorithm_config.category_keywords_weights_file is not None):
            topics = utils.open_json(
                app_config.algorithm_config.category_keywords_weights_file
            )
            # Keyword/text-match categorization.
            with Pool(num_processes) as process:
                reviews = process.map(
                    partial(
                        text_match_categortization,
                        app_config=app_config,
                        topics=topics
                    ),
                    reviews
                )
        if app_config.algorithm_config.bug_feature_keywords_weights_file is not None:
            topics = utils.open_json(
                app_config.algorithm_config.bug_feature_keywords_weights_file
            )
            # Bug vs. feature classification.
            with Pool(num_processes) as process:
                reviews = process.map(
                    partial(
                        bug_feature_classification,
                        topics=topics
                    ),
                    reviews
                )
        if app_config.algorithm_config.categorization_algorithm == CategorizationAlgorithms.LSTM_CLASSIFICATION:
            # Load the TensorFlow model.
            model = tf.keras.models.load_model(
                constants.LSTM_CATEGORY_MODEL_FILE_PATH.format(
                    base_folder=app_config.fawkes_internal_config.data.base_folder,
                    dir_name=app_config.fawkes_internal_config.data.models_folder,
                    app_name=app_config.app.name,
                )
            )
            # Load the article tokenizer.
            article_tokenizer = tf.keras.preprocessing.text.tokenizer_from_json(
                utils.open_json(
                    constants.LSTM_CATEGORY_ARTICLE_TOKENIZER_FILE_PATH.format(
                        base_folder=app_config.fawkes_internal_config.data.base_folder,
                        dir_name=app_config.fawkes_internal_config.data.models_folder,
                        app_name=app_config.app.name,
                    )
                )
            )
            # Load the label tokenizer.
            label_tokenizer = tf.keras.preprocessing.text.tokenizer_from_json(
                utils.open_json(
                    constants.LSTM_CATEGORY_LABEL_TOKENIZER_FILE_PATH.format(
                        base_folder=app_config.fawkes_internal_config.data.base_folder,
                        dir_name=app_config.fawkes_internal_config.data.models_folder,
                        app_name=app_config.app.name,
                    )
                )
            )
            # Map sanitized label keys back to the original category names.
            cleaned_labels = {
                re.sub(r'\W+', '', review.derived_insight.category).lower():
                    review.derived_insight.category
                for review in reviews
            }
            # LSTM categorization.
            reviews = lstm_classification(
                reviews,
                model,
                article_tokenizer,
                label_tokenizer,
                cleaned_labels
            )
        # Write the processed reviews, creating intermediate folders as needed.
        processed_user_reviews_file_path = constants.PROCESSED_USER_REVIEWS_FILE_PATH.format(
            base_folder=app_config.fawkes_internal_config.data.base_folder,
            dir_name=app_config.fawkes_internal_config.data.processed_data_folder,
            app_name=app_config.app.name,
        )
        pathlib.Path(os.path.dirname(processed_user_reviews_file_path)).mkdir(
            parents=True, exist_ok=True
        )
        utils.dump_json(
            [review.to_dict() for review in reviews],
            processed_user_reviews_file_path,
        )
# Script entry point: run the sentiment/categorization pipeline.
if __name__ == "__main__":
    run_algo()
|
import asyncio
import json
import logging.config
import os
import time
from types import SimpleNamespace
from aiohttp import web
from jinja2 import Environment, FileSystemLoader
import utils.orm as orm
from utils import coroweb
from utils.jinja_filter import datetime_filter
from utils.middleware import (
auth_factory,
data_factory,
logger_factory,
response_factory,
)
def init_logging(  # Initialize the logging configuration.
    default_path="conf/logging.json", default_level=logging.INFO
):
    """Configure logging from a JSON config file, or basicConfig as fallback."""
    if os.path.exists(default_path):
        with open(default_path, "r") as f:
            logging.config.dictConfig(json.load(f))
    else:
        logging.basicConfig(level=default_level)
def init_jinja2(app, **kw):  # Initialize the Jinja2 templating environment.
    """Build a Jinja2 Environment from **kw and attach it to app['__templating__']."""
    logging.info("init jinja2...")
    option_names = ("autoescape", "block_start_string", "block_end_string",
                    "variable_start_string", "variable_end_string", "auto_reload")
    defaults = (True, "{%", "%}", "{{", "}}", True)
    options = {name: kw.get(name, default)
               for name, default in zip(option_names, defaults)}
    template_dir = kw.get("path", None)
    if template_dir is None:
        # Default to the `templates` folder next to this file.
        template_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates")
    logging.info("set jinja2 template path: %s" % template_dir)
    env = Environment(loader=FileSystemLoader(template_dir), **options)
    for name, func in (kw.get("filters") or {}).items():
        env.filters[name] = func
    app["__templating__"] = env
async def init(loop):  # Initialize and start the web server.
    init_logging()
    # Load configuration as nested SimpleNamespace objects (attribute access).
    with open("conf/conf.json", "r") as f:
        configs = json.load(f, object_hook=lambda d: SimpleNamespace(**d))
    # Create the database connection pool from the `db` config section.
    await orm.create_pool(loop=loop, **configs.db.__dict__)
    app = web.Application(middlewares=[logger_factory, auth_factory, response_factory])
    init_jinja2(app, filters=dict(datetime=datetime_filter))
    # Register handlers, API routes and static files.
    coroweb.add_routes(app, "blog.handler")
    coroweb.add_routes(app, "blog.api")
    coroweb.add_static(app)
    # DeprecationWarning: Application.make_handler(...) is deprecated, use AppRunner API instead
    runner = web.AppRunner(app)
    await runner.setup()
    site = web.TCPSite(runner, "127.0.0.1", 9000)
    logging.info("server started at http://127.0.0.1:9000...")
    await site.start()
# Start the server on the default event loop and run forever.
# NOTE(review): asyncio.get_event_loop() for manual loop management is the
# legacy pattern; asyncio.run() is the modern entry point — confirm the
# target Python version before changing.
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever()
|
# On suppose que les adresses IPv4 & masques fournis sont sous leur répresentation classique, c-à-d xxx.xxx.xxx.xxx où xxx<=255
import re #expressions régulières
def conv_IPnum_vers_IPbin(ip):
    """Convert a dotted-decimal IPv4 address to a 32-bit binary string."""
    octets = ip.split('.')
    return "".join(dec2bin(int(octet)) for octet in octets)
def conv_IPbin_vers_IPnum (ip) : #convertit une adresse IP binaire en représentation numérique
return "".join ( [str(int( i , 2)) +'.' for i in [ ip[0:8] , ip[8:16] , ip[16:24] , ip[24:32] ] ] )[:-1]
def dec2bin(dec): #convertit decimal vers binaire, rajoute des '0' pour retourner 8 bits
return "0"* (8-len("{0:b}".format(dec) ) ) +"{0:b}".format(dec)
def masque_vers_CIDR(masque): #convertit la notation numérique du masque en notation CIDR
return len( re.search('(11*)(0*)' , conv_IPnum_vers_IPbin(masque) ) .group(1) )
def adresse_reseau_bin(ip, masque): #retourne l'adresse réseau à partir d'une IP et d'un masque donné, en binaire
n = masque_vers_CIDR(masque)
return conv_IPnum_vers_IPbin(ip)[0:n]+'0'*(32-n)
def adresse_reseau(ip, masque): #retourne l'adresse réseau à partir d'une IP et d'un masque donné, en numérique
return conv_IPbin_vers_IPnum ( adresse_reseau_bin(ip,masque) )
def adresse_broadcast_bin(ip, masque): #retourne l'adresse broadcast à partir d'une IP et d'un masque donné, en binaire
n = masque_vers_CIDR(masque)
return conv_IPnum_vers_IPbin(ip)[0:n]+'1'*(32-n)
def adresse_broadcast(ip,masque): #retourne l'adresse broadcast à partir d'une IP et d'un masque donné, en numérique
return conv_IPbin_vers_IPnum ( adresse_broadcast_bin(ip,masque) )
def rang_adresse_assignables(ip, masque): #retourne la première et la dernière adresse assignable du réseau de l'adresse & du masque donnés
n = masque_vers_CIDR(masque)
return [ conv_IPbin_vers_IPnum ( adresse_reseau_bin(ip,masque)[:31]+'1' ) , conv_IPbin_vers_IPnum ( adresse_broadcast_bin(ip,masque)[:31]+'0' ) ]
def masque_generique(ip_debut, ip_fin): #retourne le masque générique pour sélectionner toutes les adresses comprises entre celles données en paramètre, utile pour les ACL
return "".join(str(int(ip_fin.split('.')[i] ) - int(ip_debut.split('.')[i] ) ) + '.' for i in range(4) )[:-1]
|
#!env python
import re
def c_to_mx_typename(c_type, special_map):
    """Map a C type name to the corresponding mxClassID stem (upper-cased).

    'xxx_t' types lose their '_t' suffix (e.g. 'uint64_t' -> 'UINT64');
    entries in ``special_map`` override the result entirely
    (e.g. 'float' -> 'single' -> 'SINGLE').
    """
    m = re.search("([a-zA-Z0-9]+)_t", c_type)
    # PEP 8: compare with None using 'is'; fall back to the raw name when
    # there is no '_t' suffix.
    mx_type = c_type if m is None else m.group(1)
    if c_type in special_map:
        mx_type = special_map[c_type]
    return mx_type.upper()
# C scalar types to generate mx_traits specializations for.
c_type = ('void', 'bool', 'double', 'float', 'uint64_t', 'int64_t', 'uint32_t', 'int32_t', 'uint16_t', 'int16_t', 'uint8_t', 'int8_t')
# C names whose MATLAB class name differs from the '_t'-stripped stem.
special_map = {'float': 'single', 'bool': 'logical' }
# Primary template, specialized below for each concrete type.
empty_trait = "template <class T>\nstruct mx_traits { };\n\n"
header_guard = """#ifndef HAVE_MX_TRAITS_HPP
#define HAVE_MX_TRAITS_HPP
#include <mex.h>
"""
trait_template = """// %s
template<> struct mx_traits<%s> {
    static const mxClassID classId = mx%s_CLASS;
    static inline const char* name() {
        return "%s";
    }
};
"""
# Use a context manager so the header is flushed and closed even if a
# write raises (the original handle stayed open on error).
with open('include/mx_traits.hpp', 'wt') as mx_traits_header:
    mx_traits_header.write(header_guard)
    mx_traits_header.write(empty_trait)
    # Emit one specialization per (constness, type) combination.
    for type_curr in c_type:
        for constness in ("", "const ",):
            full_type = constness + type_curr
            mx_traits_header.write(trait_template % (full_type, full_type, c_to_mx_typename(type_curr, special_map), full_type))
    mx_traits_header.write("#endif // HAVE_MX_TRAITS_HPP\n")
|
# Training schedule constants (all counted in iterations unless noted).
EPOCHS = 3  # number of passes over the training set
LOG_ITER = 25  # log every N iterations
TEST_ITER = 50  # evaluate every N iterations
WRITE_PERIOD = 100  # summary/checkpoint write period
#1440 train examples
#360 train examples  (NOTE(review): likely means *test* examples -- confirm)
# Fuel-consumption calculator (reads two integers from stdin):
# distance = speed * time; fuel used = distance / 12 (km per litre).
tempoGasto = int(input())  # trip duration, in hours
velocidadeMedia = int(input())  # average speed, in km/h
consumoMedio = 12  # fuel efficiency, in km per litre
calculoDistancia = velocidadeMedia*tempoGasto
consumoTotal = calculoDistancia/consumoMedio
print("%.3f"%consumoTotal)  # litres used, three decimal places
class CompareGraphAttrs:
    """Constants naming the synthetic attributes attached to graph-diff output.

    Only the attribute *names* are defined here; the values stored under
    them are produced by the comparison code elsewhere.
    """
    CHANGE_COUNT = "_change_count"
    CHANGED_ATTRIBUTES = "_attr_diff"
    CHANGED_DEP = "_changed_dep"
    CHANGED_CHILD = "_changed_child"
    ONLY_IN = "_only_in"
|
#!/usr/bin/env python
from plasTeX import Command
class fancypagestyle(Command):
    """Stub for the fancyhdr \\fancypagestyle{style} macro."""
    args = 'style'
class fancyhead(Command):
    """Stub for \\fancyhead[pos]{text}: optional position, mandatory text."""
    args = '[ pos ] text'
class fancyfoot(fancyhead):
    """Stub for \\fancyfoot -- same argument shape as \\fancyhead."""
    pass
class fancyhf(Command):
    """Stub for \\fancyhf{text}, which sets header and footer at once."""
    args = 'text'
# The mark macros below are declared with no arguments so plasTeX simply
# swallows the command tokens.
# NOTE(review): \markboth and \markright take arguments in LaTeX; these
# stubs declare none -- confirm that dropping the braces is intended.
class rightmark(Command):
    pass
class leftmark(Command):
    pass
class chaptermark(Command):
    pass
class sectionmark(Command):
    pass
class markboth(Command):
    pass
class markright(Command):
    pass
|
import os
import getpass
import pathlib
# Credentials are cached in a '.env' file next to this module, one value
# per line: URL, username, password.
CREDENTIAL_FILE = os.path.join(str(pathlib.Path(__file__).parent.absolute()),
                               '.env')
def get_credentials(credential_file: str = CREDENTIAL_FILE):
    """Return [url, username, password], prompting and caching on first use.

    If ``credential_file`` exists, its three lines are returned (stripped).
    Otherwise the user is prompted (password via getpass, so it is not
    echoed) and the values are written back for subsequent calls.
    """
    if os.path.exists(credential_file):
        # 'with' guarantees the handle is closed (the original leaked it).
        with open(credential_file) as f:
            [url, username,
             password] = [a.strip() for a in f.readlines()]
    else:
        url = input('URL: ')
        username = input('Username: ')
        password = getpass.getpass('Password: ')
        with open(credential_file, 'w') as f:
            f.write(f"{url}\n{username}\n{password}")
    return [url, username, password]
|
#!/usr/bin/env python
#Code to generate a full shell of diffusion-weighted, eddy distorted images using FSL's possum, along with data that can be used to
#establish a ground truth.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Handle arguments (before imports so --help can be fast)
def str2bool(v):
    """Parse a permissive boolean CLI value ('yes'/'no', 't'/'f', '1'/'0', ...).

    Raises argparse.ArgumentTypeError for unrecognised input so argparse
    reports a clean usage error instead of a traceback.
    """
    truthy = ('yes', 'true', 't', 'y', '1')
    falsy = ('no', 'false', 'f', 'n', '0')
    lowered = v.lower()
    if lowered in truthy:
        return True
    if lowered in falsy:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
# Command-line interface; built before the heavy imports below so --help
# responds quickly.
import argparse
parser = argparse.ArgumentParser(description="Tidy up the simulations.")
parser.add_argument("simulation_dir",help="Path to the simulation directory (output_dir of generateFileStructure.py)")
parser.add_argument("num_images",help='Number of volumes.',type=int)
parser.add_argument("--simulate_artefact_free",help='Run simulation on datasets without eddy-current and motion artefacts. Default=True.', type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("--simulate_distorted",help='Run simulation datasets with eddy-current and motion artefacts. Default=False',type=str2bool, nargs='?',const=True,default=False)
# Typo fixes in user-facing help text: "seperated" -> "separated",
# "addded" -> "added".
parser.add_argument("--noise_levels",help="Set sigma for the noise level in the dataset. Can pass multiple values separated by spaces.",nargs="+",type=float)
parser.add_argument("--interleave_factor",help="Set this if the simulation slice order has been interleaved.",type=int,default=1)
parser.add_argument("--signal_dropout",help="Set this to simulate signal dropout.",type=str2bool,nargs='?',const=True,default=False)
parser.add_argument("--bvals",help="Supply bvals to prevent signal dropout being added to b=0 volumes.")
args=parser.parse_args()
#Imports
import os
from subprocess import call
import numpy as np
from Library import possumLib as pl
from dipy.io import read_bvals_bvecs
#Assign args
simDir = os.path.abspath(args.simulation_dir)
numImages = args.num_images
normalImages = args.simulate_artefact_free
motionAndEddyImages = args.simulate_distorted
if args.noise_levels == None:
noiseLevel = [0.0]
else:
noiseLevel = args.noise_levels
print(noiseLevel)
interleaveFactor = args.interleave_factor
if args.bvals != None:
bvals, _ = read_bvals_bvecs(
args.bvals,
None)
else:
#If no bval create artifical file with all b=1000, so signal dropout is applied to every volume.
bvals = (1000,)* numImages
def saveImage(simDir, saveImageDir, fileName):
    """Move the simulated magnitude image out of simDir into saveImageDir."""
    source = simDir + "/image_abs.nii.gz"
    call(["mv", source, os.path.join(saveImageDir, fileName)])
def saveNoiseyImage(simDir, saveImageDir, fileName):
    """Move the noise-added magnitude image out of simDir into saveImageDir."""
    source = simDir + "/imageNoise_abs.nii.gz"
    call(["mv", source, os.path.join(saveImageDir, fileName)])
def convertImageToFloat(imageDir, fileName):
    """Convert the image to float datatype in place using FSL's fslmaths."""
    pathToImage = os.path.join(imageDir, fileName)
    call(["fslmaths", pathToImage, pathToImage, "-odt", "float"])
def readSignal(signalPath):
    """Read a possum binary signal file into an (nrows, ncols) float64 array.

    File layout: four uint32 header words (magic number, dummy, nrows,
    ncols) followed by nrows*ncols float64 samples in column-major
    (Fortran) order.
    """
    # 'with' closes the handle even on error (the original never closed it).
    with open(signalPath, "rb") as fid:
        testval = np.fromfile(fid, dtype=np.uint32, count=1)  # magic, unchecked
        dummy = np.fromfile(fid, dtype=np.uint32, count=1)
        nrows = np.fromfile(fid, dtype=np.uint32, count=1)[0]
        ncols = np.fromfile(fid, dtype=np.uint32, count=1)[0]
        # cast to Python int to avoid uint32 overflow in the product
        signal = np.fromfile(fid, dtype=np.float64, count=int(nrows) * int(ncols))
    return np.reshape(signal, (nrows, ncols), order='F')
def writeSignal(fname, mat):
    """Write a 2-D array to a possum binary signal file (inverse of readSignal).

    Fixes vs. the original: the file is opened in binary mode ('wb', not
    text 'w'), the flatten uses nrows*ncols instead of the hard-coded
    ncols*2 (which silently assumed exactly two rows), and the handle is
    closed via a context manager even on error.
    """
    [nrows, ncols] = mat.shape
    magicnumber = 42
    dummy = 0
    header = np.array([magicnumber, dummy, nrows, ncols])
    # Column-major flatten mirrors the order='F' reshape in readSignal.
    flat = np.reshape(mat, [1, nrows * ncols], order='F')
    with open(fname, "wb") as fidin:
        header.astype(np.uint32).tofile(fidin)
        flat.astype(np.float64).tofile(fidin)
def unInterleaveSignal(signal, numSlices, interleaveFactor):
    """Reorder the columns of an interleaved possum signal to natural slice order.

    The acquisition writes slices 0, f, 2f, ..., then 1, 1+f, ... for an
    interleave factor f; this maps each acquired block of columns back to
    its spatial slice position.

    Bug fix: the original subtracted 1 from the *end* indices, but Python
    slices are already end-exclusive, so the last column of every slice
    block was dropped (left as zero).  The full block is now copied.
    """
    [nrows, ncols] = signal.shape
    signalUninterleaved = np.zeros((nrows, ncols))
    entriesPerSlice = int(ncols / numSlices)
    counter = 0
    for i in range(interleaveFactor):
        for j in range(i, numSlices, interleaveFactor):
            startIndexOld = counter * entriesPerSlice
            endIndexOld = (counter + 1) * entriesPerSlice
            startIndex = j * entriesPerSlice
            endIndex = (j + 1) * entriesPerSlice
            signalUninterleaved[:, startIndex:endIndex] = signal[:, startIndexOld:endIndexOld]
            counter += 1
    return signalUninterleaved
resultsDir = simDir+"/Results"
# Main per-volume loop: de-interleave the raw possum signal (optionally
# adding signal dropout), then add noise and reconstruct one image per
# requested noise level.
for direction in range(numImages):
    if motionAndEddyImages == True:
        simDirDirectionMotionAndEddy = simDir+"/DirectionMotionAndEddy"+str(direction)
        if interleaveFactor > 1 or args.signal_dropout == True:
            signal = readSignal(simDirDirectionMotionAndEddy+'/signal')
            # NOTE(review): 55 slices and a 72*86 matrix are hard-coded for
            # this acquisition protocol -- confirm against the pulse files.
            signalUninterleaved = unInterleaveSignal(signal,55,interleaveFactor)
            if args.signal_dropout == True:
                motion_level = pl.get_motion_level(simDirDirectionMotionAndEddy)
                # Only corrupt diffusion-weighted volumes (b > 50), never b=0.
                if int(bvals[direction]) > 50:
                    signalUninterleaved = pl.add_signal_dropout(signalUninterleaved,motion_level,55,72*86)
            writeSignal(simDirDirectionMotionAndEddy+'/signalUninterleaved',signalUninterleaved)
        else:
            # Nothing to undo: the "uninterleaved" signal is a plain copy.
            call(["cp",simDirDirectionMotionAndEddy+'/signal',simDirDirectionMotionAndEddy+'/signalUninterleaved'])
    if normalImages == True:
        simDirDirection = simDir+"/Direction"+str(direction)
        if interleaveFactor > 1:
            signal = readSignal(simDirDirection+'/signal')
            signalUninterleaved = unInterleaveSignal(signal,55,interleaveFactor)
            writeSignal(simDirDirection+'/signalUninterleaved',signalUninterleaved)
        else:
            call(["cp",simDirDirection+'/signal',simDirDirection+'/signalUninterleaved'])
    #Generate noise
    # systemnoise / signal2image are FSL possum command-line tools.
    for sigma in noiseLevel:
        if normalImages == True:
            call(["systemnoise","-s",str(sigma),"-i",simDirDirection+"/signalUninterleaved","-o",simDirDirection+"/signalNoise"])
            call(["signal2image","-i",simDirDirection+"/signalNoise","-p",simDirDirection+"/pulse","-o",simDirDirection+"/imageNoise","-a"])
        if motionAndEddyImages == True:
            call(["systemnoise","-s",str(sigma),"-i",simDirDirectionMotionAndEddy+"/signalUninterleaved","-o",simDirDirectionMotionAndEddy+"/signalNoise"])
            call(["signal2image","-i",simDirDirectionMotionAndEddy+"/signalNoise","-p",simDirDirectionMotionAndEddy+"/pulse","-o",simDirDirectionMotionAndEddy+"/imageNoise","-a"])
        #Save
        if motionAndEddyImages == True:
            saveNoiseyImage(simDirDirectionMotionAndEddy,resultsDir,"diff+eddy+motion_sigma{}_image{}.nii.gz".format(sigma,direction))
            convertImageToFloat(resultsDir,"diff+eddy+motion_sigma{}_image{}.nii.gz".format(sigma,direction))
        if normalImages == True:
            saveNoiseyImage(simDirDirection,resultsDir,"diff_sigma{}_image{}.nii.gz".format(sigma,direction))
            convertImageToFloat(resultsDir,"diff_sigma{}_image{}.nii.gz".format(sigma,direction))
#Merge
# Concatenate the per-volume images into one 4-D series per noise level,
# then delete the per-volume files.
if motionAndEddyImages == True:
    for sigma in noiseLevel:
        callMergeNoise = "fslmerge -t " + resultsDir + "/diff+eddy+motion_sigma{} ".format(sigma)
        callDelNoise = "rm "
        for i in range(numImages):
            callMergeNoise += resultsDir + "/diff+eddy+motion_sigma{}_image{}.nii.gz ".format(sigma,i)
            callDelNoise += resultsDir + "/diff+eddy+motion_sigma{}_image{}.nii.gz ".format(sigma,i)
        os.system(callMergeNoise)
        os.system(callDelNoise)
if normalImages == True:
    for sigma in noiseLevel:
        callMergeNoise = "fslmerge -t " + resultsDir + "/diff_sigma{} ".format(sigma)
        callDelNoise = "rm "
        for i in range(numImages):
            callMergeNoise += resultsDir + "/diff_sigma{}_image{}.nii.gz ".format(sigma,i)
            callDelNoise += resultsDir + "/diff_sigma{}_image{}.nii.gz ".format(sigma,i)
        os.system(callMergeNoise)
        os.system(callDelNoise)
|
def can_build(plat):
    """This Godot module can only be built for the Android platform."""
    return plat == 'android'
def configure(env):
    """Attach the Firebase Gradle/Java dependencies when building for Android."""
    if env['platform'] == 'android':
        env.android_add_dependency("compile 'com.google.firebase:firebase-core:9.8.0'")
        env.android_add_dependency("compile 'com.google.firebase:firebase-messaging:9.8.0'")
        env.android_add_dependency("compile 'com.google.firebase:firebase-crash:9.8.0'")
        env.android_add_java_dir("android")
        # Placeholder: replace with the real application id before building.
        env.android_add_default_config("applicationId '[your-game-package]'")
    # NOTE(review): disable_module() runs unconditionally here, so the C++
    # side is disabled on every platform (module appears to ship as Java
    # glue only) -- confirm this is intended.
    env.disable_module()
|
import os
import torch
import numpy as np
import scipy.misc as m
from torch.utils import data
from PIL import Image
import sys
sys.path.append('.')
from collections import OrderedDict
import os
import numpy as np
import glob
import random
# Scene folders included in this dataset split: one training scene and
# three held-out test scenes.
train_scenes = "scene_01"
test_scenes = ["scene_03","scene_07", "scene_10" ]
def ordered_glob(rootdir='.', instances='', split=''):
    """Collect sorted file paths for the requested object instances.

    Scans the immediate sub-folders of ``rootdir`` and keeps those whose
    name contains one of ``instances`` AND belongs to the training scene
    or one of the test scenes; the files inside each kept folder are
    appended in sorted order.  (``split`` is accepted for interface
    compatibility but unused.  The original docstring wrongly described
    this as a recursive suffix glob.)
    """
    filenames = []
    # Scenes whose folders are eligible for this split (replaces the
    # original hard-coded chain of 'or' tests).
    allowed_scenes = [train_scenes] + test_scenes
    folders = glob.glob(rootdir + "/*")
    for folder in folders:
        folder_id = os.path.split(folder)[1]
        for instance in instances:
            if folder_id.find(instance) >= 0:
                if any(folder.find(scene) >= 0 for scene in allowed_scenes):
                    filenames_folder = glob.glob(folder + "/*")
                    filenames_folder.sort()
                    filenames.extend(filenames_folder)
    return filenames
def recursive_glob(rootdir='.', suffix=''):
    """Walk ``rootdir`` recursively and return paths ending with ``suffix``."""
    matches = []
    for looproot, _, filenames in os.walk(rootdir):
        for filename in filenames:
            if filename.endswith(suffix):
                matches.append(os.path.join(looproot, filename))
    return matches
class cnn_core50(data.Dataset):
    """ core50 loader

    Loads CORe50 images from ``<root>/<split>`` via ``ordered_glob``, with
    labels parsed from file names.  Each image is resized (aspect ratio
    preserved) and centre-pasted onto a square black canvas of
    ``img_size``.
    """
    def __init__(self, root, split="train", is_transform=False,
                 img_size=(224, 224), augmentations=None, instances=None):
        """__init__

        :param root: dataset root containing one sub-folder per split
        :param split: sub-folder name to load from
        :param is_transform: if True, normalise/transpose in __getitem__
        :param img_size: output size; an int means a square (size, size)
        :param augmentations: stored but not applied inside this loader
        :param instances: instance name fragments passed to ordered_glob
        """
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.n_classes = 50
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
        self.files = {}
        self.images_base = os.path.join(self.root, self.split)
        self.files[split] = ordered_glob(rootdir=self.images_base, split=self.split, instances=instances)
        self.instances = instances
        # class ids treated as "novel" by downstream code (unused here)
        self.novel_classes = [0, 1, 9, 10, 12, 19, 21, 22, 27, 30, 32, 37, 38, 42, 43, 49]
        if not self.files[split]:
            raise Exception("No files for split=[%s] found in %s" % (split, self.images_base))
        print("Found %d %s images" % (len(self.files[split]), split))
    def __len__(self):
        """__len__"""
        return len(self.files[self.split])
    def __getitem__(self, index):
        """__getitem__
        :param index:
        :returns: (image, label, image_path)
        """
        img_path = self.files[self.split][index].rstrip()
        # class id is parsed from characters [-10:-8] of the file name and
        # shifted to a 0-based label
        lbl = np.array([int(img_path[-10:-8])-1])
        img = Image.open(img_path)
        # resize so the longest side matches img_size, preserving aspect ratio
        old_size = img.size
        ratio = float(self.img_size[0])/max(old_size)
        new_size = tuple([int(x*ratio) for x in old_size])
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; use
        # Image.LANCZOS (same filter) on newer Pillow.
        img = img.resize(new_size, Image.ANTIALIAS)
        # paste centred onto a black square canvas
        new_im = Image.new("RGB", (self.img_size[0], self.img_size[1]))
        new_im.paste(img, ((self.img_size[0]-new_size[0])//2,
                    (self.img_size[1]-new_size[1])//2))
        img = np.array(new_im, dtype=np.uint8)
        if self.is_transform:
            img, lbl = self.transform(img, lbl)
        return img, lbl, img_path
    def transform(self, img, lbl):
        """transform
        :param img:
        :param lbl:
        """
        img = img.astype(np.float64)
        # scale to [0, 1]
        img = img.astype(float) / 255.0
        # HWC -> CHW (the original comment's "NHWC -> NCWH" was inaccurate)
        img = img.transpose(2, 0, 1)
        classes = np.unique(lbl)  # NOTE(review): unused -- leftover?
        img = torch.from_numpy(img).float()
        lbl = torch.from_numpy(lbl).long()
        return img, lbl
if __name__ == '__main__':
    # Visual smoke test: show batches until the user types 'ex'.
    import torchvision
    import matplotlib.pyplot as plt
    local_path = 'path_to/core50_dataset'
    # Bug fixes vs. original: the class here is cnn_core50 (cnn_tless did
    # not exist), __getitem__ yields three items (img, lbl, path), and
    # raw_input() is Python 2 only -- use input().  The loop variable is
    # renamed so it no longer shadows the imported `data` module.
    dst = cnn_core50(local_path, is_transform=True, augmentations=None)
    bs = 4
    trainloader = data.DataLoader(dst, batch_size=bs, num_workers=0, shuffle=True)
    for i, batch in enumerate(trainloader):
        imgs, labels, paths = batch
        imgs = imgs.numpy()[:, ::-1, :, :]  # RGB -> BGR channel flip
        imgs = np.transpose(imgs, [0, 2, 3, 1])  # NCHW -> NHWC for imshow
        f, axarr = plt.subplots(bs, 1)
        for j in range(bs):
            axarr[j].imshow(imgs[j])
        print(labels)
        plt.show()
        a = input()
        if a == 'ex':
            break
        else:
            plt.close()
from django.conf.urls import url
from django.urls import path
from . import views
# Authentication/account routes.  Most entries use the legacy regex-based
# url(); the password-reset confirm route uses path() converters for the
# uidb64/token pair.
urlpatterns = [
    url(r'^login/$', views.login, name='login'),
    url(r'^logout/$', views.logout, name='logout'),
    url(r'^profile/$', views.user_profile_update, name='user_profile_update'),
    url(r'^password-change/$', views.password_change, name='password_change'),
    url(r'^password-change/done/$',
        views.password_change_done, name='password_change_done'),
    url(r'^signup/$',
        views.user_signup, name='signup'),
    # email-verification link; key may contain word chars, '-' and ':'
    url(r'^verify/(?P<verification_key>[-:\w]+)/$',
        views.user_verify, name='user_verify'),
    url(r'^verification-request/$',
        views.request_verification, name='request_verification'),
    url(r'^password-reset/$', views.password_reset, name='password_reset'),
    url(r'^password-reset/done/$',
        views.password_reset_done, name='password_reset_done'),
    path('password-reset/<uidb64>/<token>/',
         views.password_reset_confirm, name='password_reset_confirm'),
    url(r'^password-reset/complete/$',
        views.password_reset_complete, name='password_reset_complete'),
    url(r'^agreement/$',
        views.coc_agree, name='coc_agreement'),
]
|
from spaceone.identity.connector.auth_plugin_connector import AuthPluginConnector
|
"""
Copyright 2012 Daniel Lytkin.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from Tkinter import Frame, StringVar, Radiobutton, LabelFrame, Label
from spectrum.calculations import numeric
from spectrum.calculations.groups import ClassicalGroup, SporadicGroup, AlternatingGroup, ExceptionalGroup
from spectrum.calculations.numeric import Constraints
from spectrum.gui.gui_elements import NumberBox, OptionList
__author__ = 'Daniel Lytkin'
class GroupSelect(Frame):
    """This is a widget with ability to choose specific group for further
    actions.

    One radio button per group family (Alternating, Classical,
    Exceptional, Sporadic); only the parameter panel of the selected
    family is shown.  :attr:`selected_group` builds the group object from
    the current inputs.
    """
    def __init__(self, parent=None, default_type="Classical", **kw):
        # default_type selects which family's radio button starts active.
        Frame.__init__(self, parent, **kw)
        self._parent = parent
        self._init_components()
        self._type_radio_buttons[default_type].select()
    def _init_components(self):
        """Create the radio buttons and one parameter sub-frame per family."""
        # group type selection (alternating, classical, sporadic, exceptional)
        group_type_frame = LabelFrame(self, text="Group type", padx=10, pady=5)
        group_type_frame.pack(expand=True, fill='x')
        # group type radio buttons (Alternating, Classical etc.)
        self._group_type = StringVar()
        self._type_radio_buttons = dict()
        for group_type in ("Alternating", "Classical", "Exceptional", "Sporadic"):
            radiobutton = Radiobutton(group_type_frame, variable=self._group_type, value=group_type, text=group_type)
            radiobutton.pack(anchor='nw')
            self._type_radio_buttons[group_type] = radiobutton
        # set group type selection handler
        self._group_type.trace("w", lambda n, i, m: self._group_type_selection())
        # spacer
        Frame(self, height=10).pack()
        # parameters for each group (degree for alternating, field and dimension for classical etc.)
        # notice that we do not pack LabelFrame contents. We do that in _group_type_selection method instead.
        group_params_frame = LabelFrame(self, text="Parameters", padx=10, pady=5)
        group_params_frame.pack(expand=True, fill='x')
        # alternating
        self._alt_params = Frame(group_params_frame)
        self._alt_params.columnconfigure(1, weight=1)
        Label(self._alt_params, text="Degree").grid(sticky='w')
        # alternating groups require degree >= 5
        self._alt_degree = NumberBox(self._alt_params, constraints=Constraints(min=5))
        self._alt_degree.grid(row=0, column=1, sticky='we')
        # classical
        self._clas_params = Frame(group_params_frame)
        self._clas_params.columnconfigure(1, weight=1)
        Label(self._clas_params, text="Type").grid(row=0, sticky='w')
        self._clas_type = OptionList(self._clas_params, values=ClassicalGroup.types())
        # dimension/field constraints depend on the chosen classical type
        self._clas_type.variable.trace("w", lambda n, i, m: self._classical_group_type_selection())
        self._clas_type.grid(row=0, column=1, sticky='we')
        Label(self._clas_params, text="Dimension").grid(row=1, sticky='w')
        self._clas_dim = NumberBox(self._clas_params)
        self._clas_dim.grid(row=1, column=1, sticky='we')
        Label(self._clas_params, text="Field order").grid(row=2, sticky='w')
        self._clas_field = NumberBox(self._clas_params, constraints=Constraints(primality=numeric.PRIME_POWER))
        self._clas_field.grid(row=2, column=1, sticky='we')
        self._classical_group_type_selection()
        # exceptional
        self._ex_params = Frame(group_params_frame)
        self._ex_params.columnconfigure(1, weight=1)
        Label(self._ex_params, text="Type").grid(row=0, sticky='w')
        self._ex_type = OptionList(self._ex_params, values=ExceptionalGroup.types())
        self._ex_type.setvar(value=ExceptionalGroup.types()[0])
        self._ex_type.grid(row=0, column=1, sticky='we')
        Label(self._ex_params, text="Field order").grid(row=1, sticky='w')
        self._ex_field = NumberBox(self._ex_params, constraints=Constraints(primality=numeric.PRIME_POWER))
        self._ex_field.grid(row=1, column=1, sticky='we')
        # sporadic
        self._spor_params = Frame(group_params_frame)
        self._spor_params.columnconfigure(1, weight=1)
        Label(self._spor_params, text="Group").grid(row=0, sticky='w')
        self._sporadic_group = OptionList(self._spor_params, values=SporadicGroup.all_groups())
        self._sporadic_group.grid(row=0, column=1, sticky='we')
    @property
    def selected_group(self):
        """Returns currently selected group

        Re-validates the relevant number boxes (refresh_input) and builds
        a fresh group object from the visible inputs.
        """
        if self._group_type.get() == "Alternating":
            self._alt_degree.refresh_input()
            return AlternatingGroup(self._alt_degree.get_number())
        if self._group_type.get() == "Classical":
            self._clas_dim.refresh_input()
            self._clas_field.refresh_input()
            return ClassicalGroup(self._clas_type.variable.get(),
                self._clas_dim.get_number(), self._clas_field.get_number())
        if self._group_type.get() == "Sporadic":
            return SporadicGroup(self._sporadic_group.variable.get())
        if self._group_type.get() == "Exceptional":
            self._ex_field.refresh_input()
            return ExceptionalGroup(self._ex_type.variable.get(), self._ex_field.get_number())
    def _group_type_selection(self):
        """Process the change of selected group type
        """
        # show only the parameter panel that matches the selected family
        def set_visible(widget, visible):
            if visible:
                widget.pack(expand=True, fill='both')
            else:
                widget.forget()
        group_type = self._group_type.get()
        set_visible(self._alt_params, group_type == "Alternating")
        set_visible(self._clas_params, group_type == "Classical")
        set_visible(self._spor_params, group_type == "Sporadic")
        set_visible(self._ex_params, group_type == "Exceptional")
    def _classical_group_type_selection(self):
        """Apply the dimension/field constraints of the chosen classical type."""
        name = self._clas_type.variable.get()
        self._clas_dim.set_constraints(ClassicalGroup.dim_constraints(name))
        self._clas_field.set_constraints(ClassicalGroup.field_constraints(name))
|
from typing import List, Optional, Tuple, Any
from itertools import chain
class SubstituteGeneratorsCombiner:
    """Runs several substitute generators and concatenates their outputs."""
    def __init__(self, subst_generators: List):
        self.subst_generators = subst_generators
    def generate_substitutes(
        self,
        sentences: List[List[str]],
        target_ids: List[int],
        target_pos: Optional[List[str]] = None,
        return_probs: bool = False,
        target_lemmas: Optional[List[str]] = None
    ) -> Tuple[List[List[str]], Any]:
        """
        Generates substitutes for a given batch of instances by delegating
        to every wrapped generator and concatenating the per-instance
        substitute lists.
        Args:
            sentences: list of contexts
            target_ids: list of target indexes
            target_pos: list of target word pos tags
            return_probs: return substitute probabilities if True
            target_lemmas: list of target lemmas
        Returns:
            substitutes and None (probabilities are not combined)
        """
        # TODO: combine probs from different generators
        assert not return_probs
        # One list of per-instance substitute lists per generator.
        per_generator = [
            generator.generate_substitutes(
                sentences,
                target_ids,
                target_pos=target_pos,
                return_probs=False,
                target_lemmas=target_lemmas,
            )[0]
            for generator in self.subst_generators
        ]
        # Transpose generator-major output to instance-major and flatten.
        combined = [list(chain(*instance_substs))
                    for instance_substs in zip(*per_generator)]
        return combined, None
|
import asyncio
from pathlib import Path
import uuid
import pytest
from trinity.db.orm import (
SchemaVersion,
Base,
_setup_schema,
_check_is_empty,
_check_tables_exist,
_check_schema_version,
_get_session,
get_tracking_database,
)
from trinity.exceptions import BadDatabaseError
@pytest.fixture
def session():
    # Fresh in-memory SQLite session; all state vanishes with the session.
    path = Path(':memory:')
    return _get_session(path)
@pytest.fixture
def db_path(tmpdir):
    # On-disk database path, so tests can observe cross-session persistence.
    path = Path(tmpdir.join('nodedb.sqlite'))
    return path
#
# Schema initialization tests
#
def test_setup_schema(session):
    # _setup_schema creates the tables and records the current version.
    assert _check_schema_version(session) is False
    _setup_schema(session)
    assert _check_schema_version(session) is True
def test_check_schema_version_false_when_no_tables(session):
    # An empty database cannot have a valid schema version.
    assert _check_is_empty(session)
    assert _check_schema_version(session) is False
def test_check_schema_version_false_when_no_entry(session):
    # Tables exist, but the version row was removed -> check must fail.
    _setup_schema(session)
    assert _check_schema_version(session) is True
    # delete the entry
    schema_version = session.query(SchemaVersion).one()
    session.delete(schema_version)
    session.commit()
    assert _check_schema_version(session) is False
def test_check_schema_version_false_when_wrong_version(session):
    # A version string the code does not recognise must be rejected.
    _setup_schema(session)
    assert _check_schema_version(session) is True
    # change version to unknown value
    schema_version = session.query(SchemaVersion).one()
    schema_version.version = 'unknown'
    session.add(schema_version)
    session.commit()
    assert _check_schema_version(session) is False
def test_check_tables_exist(session):
    assert _check_tables_exist(session) is False
    _setup_schema(session)
    assert _check_tables_exist(session) is True
def test_check_tables_exist_missing_table(session):
    # Dropping a required table flips _check_tables_exist back to False.
    assert _check_tables_exist(session) is False
    _setup_schema(session)
    assert _check_tables_exist(session) is True
    engine = session.get_bind()
    assert engine.has_table('schema_version') is True
    table = Base.metadata.tables['schema_version']
    table.drop(engine)
    assert engine.has_table('schema_version') is False
    assert _check_tables_exist(session) is False
def test_check_schema_version_false_when_multiple_entries(session):
    # More than one version row is ambiguous and must be rejected.
    _setup_schema(session)
    assert _check_schema_version(session) is True
    session.add(SchemaVersion(version='unknown'))
    session.commit()
    assert _check_schema_version(session) is False
def test_get_tracking_db_from_empty():
    # The public entry point bootstraps the schema on a fresh database.
    session = get_tracking_database(Path(':memory:'))
    assert _check_schema_version(session) is True
def test_get_tracking_db_from_valid_existing(db_path):
    # Opening an already-initialised on-disk database must validate cleanly.
    session_a = get_tracking_database(db_path)
    assert _check_schema_version(session_a) is True
    del session_a
    # ensure the session was persisted to disk
    session_b = _get_session(db_path)
    assert _check_schema_version(session_b) is True
    del session_b
    # reopening through the public entry point must still validate
    session_c = get_tracking_database(db_path)
    assert _check_schema_version(session_c) is True
def test_get_tracking_db_errors_bad_schema_version(db_path):
    # A database with an unexpected version must be refused, not migrated.
    session_a = get_tracking_database(db_path)
    assert _check_schema_version(session_a) is True
    # change version to unknown value
    schema_version = session_a.query(SchemaVersion).one()
    schema_version.version = 'unknown'
    session_a.add(schema_version)
    session_a.commit()
    del session_a
    # ensure the session was persisted to disk
    session_b = _get_session(db_path)
    assert _check_schema_version(session_b) is False
    del session_b
    with pytest.raises(BadDatabaseError):
        get_tracking_database(db_path)
@pytest.mark.asyncio
async def test_db_can_have_different_concurrent_sessions(db_path):
    """Interleaved sessions writing the same sqlite file must not conflict."""
    _setup_schema(_get_session(db_path))
    async def read_and_write():
        for _ in range(10):
            session = _get_session(db_path)
            schema_version = session.query(SchemaVersion).one()
            await asyncio.sleep(0.01)
            # Bug fix: the original wrote str(uuid.uuid4) -- the *function*
            # object's repr, identical on every iteration.  Call uuid4() so
            # each write stores a fresh random value, as intended.
            schema_version.version = str(uuid.uuid4())
            session.add(schema_version)
            session.commit()
            await asyncio.sleep(0.01)
    await asyncio.gather(
        read_and_write(),
        read_and_write(),
        read_and_write(),
        read_and_write(),
    )
    schema_version = _get_session(db_path).query(SchemaVersion).one()
    print(schema_version.version)
|
# -*- coding: utf-8 -*-
from __future__ import annotations
import itertools
import collections
from abc import ABC, abstractmethod
from typing import (cast,
Any,
Callable,
Dict,
List,
Optional,
Sequence,
TypeVar,
Union,
TYPE_CHECKING)
from functools import wraps
from dataclasses import dataclass, field
import numpy as np
import pandas as pd # type: ignore
import xarray as xr
from scipy import interpolate # type: ignore
from .base import _TimeStepResolver
from .edges import _map_to_edges_geoframe
from ..cases import CaseStudy
from ..types import Num, StrOrPath
from .._docs import docstringtemplate
if TYPE_CHECKING: # pragma: no cover
import numpy.typing as npt
# Generic for decorators
F = TypeVar('F', bound=Callable[..., Any])
def _extract(func: F) -> F:
    """Decorator for extract methods.

    Resolves and (if necessary) loads the requested time step before
    delegating to ``func``, then optionally interpolates the resulting
    dataset onto the given (x, y) points.  ``x`` and ``y`` must be given
    together or not at all.
    """
    @wraps(func)
    def wrapper(self, t_step: int,
                value: Num,
                x: Optional[Sequence[Num]] = None,
                y: Optional[Sequence[Num]] = None) -> xr.Dataset:
        # 0 -> no interpolation, 2 -> interpolate, 1 -> inconsistent input.
        do_interp = sum((bool(x is not None),
                         bool(y is not None)))
        if do_interp == 1:
            raise RuntimeError("x and y must both be set")
        # normalise the user-supplied index via _TimeStepResolver
        t_step = self._resolve_t_step(t_step)
        # lazily load this time step's data on first access
        if t_step not in self._t_steps:
            self._load_t_step(t_step)
        ds = func(self, t_step, value, x, y)
        if not do_interp: return ds
        # sample the gridded result at the requested coordinates
        return ds.interp({"$x$": xr.DataArray(x),
                          "$y$": xr.DataArray(y)})
    return cast(F, wrapper)
@dataclass
class _FacesDataClassMixin(_TimeStepResolver):
    # Dataclass state shared by Faces: one public field plus two
    # non-init, non-repr caches populated lazily by the extract machinery.
    xmax: Num #: maximum range in x-direction, in metres
    # time steps already loaded, keyed by resolved index
    _t_steps: Dict[int, pd.Timestamp] = field(default_factory=dict,
                                              init=False,
                                              repr=False)
    # cached face data table; None until first load
    _frame: Optional[pd.DataFrame] = field(default=None,
                                           init=False,
                                           repr=False)
class Faces(ABC, _FacesDataClassMixin):
"""Class for extracting results on the faces of the simulation grid. Use in
conjunction with the :class:`.Result` class.
>>> from snl_d3d_cec_verify import Result
>>> data_dir = getfixture('data_dir')
>>> result = Result(data_dir)
>>> result.faces.extract_z(-1, -1) #doctest: +ELLIPSIS
<xarray.Dataset>
Dimensions: ($x$: 18, $y$: 4)
Coordinates:
* $x$ ($x$) float64 0.5 1.5 2.5 3.5 4.5 5.5 ... 13.5 14.5 15.5 16.5 17.5
* $y$ ($y$) float64 1.5 2.5 3.5 4.5
$z$ ... -1
time datetime64[ns] 2001-01-01T01:00:00
Data variables:
$\\sigma$ ($x$, $y$) float64 -0.4994 -0.4994 -0.4994 ... -0.5 -0.5 -0.5
$u$ ($x$, $y$) float64 0.781 0.781 0.781 ... 0.7763 0.7763 0.7763
$v$ ($x$, $y$) float64 -3.237e-18 1.423e-17 ... -8.598e-17 -4.824e-17
$w$ ($x$, $y$) float64 -0.01472 -0.01472 ... 0.001343 0.001343
$k$ ($x$, $y$) float64 0.004802 0.004765 ... 0.003674 0.0036...
:param nc_path: path to the ``.nc`` file containing results
:param n_steps: number of time steps in the simulation
:param xmax: maximum range in x-direction, in metres
"""
    @docstringtemplate
    def extract_turbine_centre(self, t_step: int,
                               case: CaseStudy,
                               offset_x: Num = 0,
                               offset_y: Num = 0,
                               offset_z: Num = 0) -> xr.Dataset:
        """Extract data at the turbine centre, as defined in the given
        :class:`.CaseStudy` object. Available data is:
        * :code:`sigma`: sigma layer
        * :code:`u`: velocity in the x-direction, in metres per second
        * :code:`v`: velocity in the x-direction, in metres per second
        * :code:`w`: velocity in the x-direction, in metres per second
        * :code:`k`: turbulent kinetic energy, in metres squared per second
        squared
        Results are returned as a :class:`xarray.Dataset`. For example:
        >>> from snl_d3d_cec_verify import MycekStudy, Result
        >>> data_dir = getfixture('data_dir')
        >>> result = Result(data_dir)
        >>> case = MycekStudy()
        >>> result.faces.extract_turbine_centre(-1, case) #doctest: +ELLIPSIS
        <xarray.Dataset>
        Dimensions: (dim_0: 1)
        Coordinates:
        $z$ ... -1
        time datetime64[ns] 2001-01-01T01:00:00
        $x$ (dim_0) ... 6
        $y$ (dim_0) ... 3
        Dimensions without coordinates: dim_0
        Data variables:
        $\\sigma$ (dim_0) float64 -0.4996
        $u$ (dim_0) float64 0.7748
        $v$ (dim_0) float64 -2.942e-17
        $w$ (dim_0) float64 0.0002786
        $k$ (dim_0) float64 0.004...
        The position extracted can also be shifted using the ``offset_x``,
        ``offset_y`` and ``offset_z`` parameters.
        :param t_step: Time step index
        :param case: Case study from which to get turbine position
        :param offset_x: Shift x-coordinate of extraction point, in metres.
            Defaults to {offset_x}
        :param offset_y: Shift y-coordinate of extraction point, in metres.
            Defaults to {offset_y}
        :param offset_z: Shift z-coordinate of extraction point, in metres.
            Defaults to {offset_z}
        :raises IndexError: if the time-step index (``t_step``) is out of
            range
        :raises ValueError: if the length of the :class:`.CaseStudy` object is
            greater than one
        :rtype: xarray.Dataset
        """
        # single-length case studies only (raises ValueError otherwise)
        _check_case_study(case)
        # Inform the type checker that we have Num for single value cases
        turb_pos_z = cast(Num, case.turb_pos_z)
        turb_pos_x = cast(Num, case.turb_pos_x)
        turb_pos_y = cast(Num, case.turb_pos_y)
        # delegate to the generic z-plane extractor at the (offset) centre
        return self.extract_z(t_step,
                              turb_pos_z + offset_z,
                              [turb_pos_x + offset_x],
                              [turb_pos_y + offset_y])
@docstringtemplate
def extract_turbine_centreline(self, t_step: int,
                               case: CaseStudy,
                               x_step: Num = 0.5,
                               offset_x: Num = 0,
                               offset_y: Num = 0,
                               offset_z: Num = 0) -> xr.Dataset:
    """Extract data along the turbine centreline, from the turbine
    position defined in the given :class:`.CaseStudy` object. Available
    data is:

    * :code:`sigma`: sigma layer
    * :code:`u`: velocity in the x-direction, in metres per second
    * :code:`v`: velocity in the y-direction, in metres per second
    * :code:`w`: velocity in the z-direction, in metres per second
    * :code:`k`: turbulent kinetic energy, in metres squared per second
      squared

    Results are returned as a :class:`xarray.Dataset`. Use the ``x_step``
    argument to control the frequency of samples. For example:

    >>> from snl_d3d_cec_verify import MycekStudy, Result
    >>> data_dir = getfixture('data_dir')
    >>> result = Result(data_dir)
    >>> case = MycekStudy()
    >>> result.faces.extract_turbine_centreline(-1, case, x_step=1) #doctest: +ELLIPSIS
    <xarray.Dataset>
    Dimensions:   (dim_0: 13)
    Coordinates:
        $z$       ... -1
        time      datetime64[ns] 2001-01-01T01:00:00
        $x$       (dim_0) float64 6.0 7.0 8.0 9.0 10.0 ... 14.0 15.0 16.0 17.0 18.0
        $y$       (dim_0) ... 3 3 3 3 3 3 3 3 3 3 3 3 3
    Dimensions without coordinates: dim_0
    Data variables:
        $\\sigma$  (dim_0) float64 -0.4996 -0.4996 -0.4996 ... -0.4999 -0.4999 nan
        $u$       (dim_0) float64 0.7748 0.7747 0.7745 0.7745 ... 0.7759 0.7762 nan
        $v$       (dim_0) float64 -2.942e-17 4.192e-17 9.126e-17 ... -8.523e-17 nan
        $w$       (dim_0) float64 0.0002786 -0.0004764 0.0003097 ... -7.294e-05 nan
        $k$       (dim_0) float64 0.004307 0.004229 0.004157 ... 0.003691 nan

    The position extracted can also be shifted using the ``offset_x``,
    ``offset_y`` and ``offset_z`` parameters.

    :param t_step: Time step index
    :param case: Case study from which to get turbine position
    :param x_step: Sample step, in metres. Defaults to {x_step}
    :param offset_x: Shift x-coordinate of extraction point, in metres.
        Defaults to {offset_x}
    :param offset_y: Shift y-coordinate of extraction point, in metres.
        Defaults to {offset_y}
    :param offset_z: Shift z-coordinate of extraction point, in metres.
        Defaults to {offset_z}

    :raises IndexError: if the time-step index (``t_step``) is out of
        range
    :raises ValueError: if the length of the :class:`.CaseStudy` object is
        greater than one

    :rtype: xarray.Dataset

    """

    _check_case_study(case)

    # Inform the type checker that we have Num for single value cases
    turb_pos_z = cast(Num, case.turb_pos_z)
    turb_pos_x = cast(Num, case.turb_pos_x)
    turb_pos_y = cast(Num, case.turb_pos_y)

    # Sample from the (offset) turbine position towards the end of the
    # domain; np.arange excludes the stop value, so append xmax itself
    # when the next step would land (almost) exactly on it.
    x = np.arange(turb_pos_x + offset_x, self.xmax, x_step)
    if np.isclose(x[-1] + x_step, self.xmax): x = np.append(x, self.xmax)
    y = [turb_pos_y + offset_y] * len(x)

    return self.extract_z(t_step, turb_pos_z + offset_z, list(x), y)
# NOTE(review): this docstring contains the {offset_z} placeholder but the
# method lacks the @docstringtemplate decorator used by its siblings —
# confirm whether the decorator was omitted intentionally.
def extract_turbine_z(self, t_step: int,
                      case: CaseStudy,
                      offset_z: Num = 0) -> xr.Dataset:
    """Extract data from the z-plane intersecting the turbine centre, as
    defined in the given :class:`.CaseStudy` object, at the face centres.
    Available data is:

    * :code:`sigma`: sigma layer
    * :code:`u`: velocity in the x-direction, in metres per second
    * :code:`v`: velocity in the y-direction, in metres per second
    * :code:`w`: velocity in the z-direction, in metres per second
    * :code:`k`: turbulent kinetic energy, in metres squared per second
      squared

    Results are returned as a :class:`xarray.Dataset`. For example:

    >>> from snl_d3d_cec_verify import MycekStudy, Result
    >>> data_dir = getfixture('data_dir')
    >>> result = Result(data_dir)
    >>> case = MycekStudy()
    >>> result.faces.extract_turbine_z(-1, case) #doctest: +ELLIPSIS
    <xarray.Dataset>
    Dimensions:   ($x$: 18, $y$: 4)
    Coordinates:
      * $x$       ($x$) float64 0.5 1.5 2.5 3.5 4.5 5.5 ... 13.5 14.5 15.5 16.5 17.5
      * $y$       ($y$) float64 1.5 2.5 3.5 4.5
        $z$       ... -1
        time      datetime64[ns] 2001-01-01T01:00:00
    Data variables:
        $\\sigma$  ($x$, $y$) float64 -0.4994 -0.4994 -0.4994 ... -0.5 -0.5 -0.5
        $u$       ($x$, $y$) float64 0.781 0.781 0.781 ... 0.7763 0.7763 0.7763
        $v$       ($x$, $y$) float64 -3.237e-18 1.423e-17 ... -8.598e-17 -4.824e-17
        $w$       ($x$, $y$) float64 -0.01472 -0.01472 ... 0.001343 0.001343
        $k$       ($x$, $y$) float64 0.004802 0.004765 ... 0.003674 0.0036...

    The z-plane can be shifted using the ``offset_z`` parameter.

    :param t_step: Time step index
    :param case: Case study from which to get turbine position
    :param offset_z: Shift z-coordinate of extraction point, in metres.
        Defaults to {offset_z}

    :raises IndexError: if the time-step index (``t_step``) is out of
        range
    :raises ValueError: if the length of the :class:`.CaseStudy` object is
        greater than one

    :rtype: xarray.Dataset

    """

    _check_case_study(case)

    # Inform the type checker that we have Num for single value cases
    turb_pos_z = cast(Num, case.turb_pos_z)

    return self.extract_z(t_step, turb_pos_z + offset_z)
@_extract
def extract_z(self, t_step: int,
              z: Num,
              x: Optional[Sequence[Num]] = None,
              y: Optional[Sequence[Num]] = None) -> xr.Dataset:
    """Extract data on the plane at the given z-level. Available data is:

    * :code:`sigma`: sigma value
    * :code:`u`: velocity in the x-direction, in metres per second
    * :code:`v`: velocity in the y-direction, in metres per second
    * :code:`w`: velocity in the z-direction, in metres per second
    * :code:`k`: turbulent kinetic energy, in metres squared per second
      squared

    Results are returned as a :class:`xarray.Dataset`. If the ``x`` and
    ``y`` parameters are defined, then the results are interpolated onto
    the given coordinates. For example:

    >>> from snl_d3d_cec_verify import Result
    >>> data_dir = getfixture('data_dir')
    >>> result = Result(data_dir)
    >>> x = [6, 7, 8, 9, 10]
    >>> y = [2, 2, 2, 2, 2]
    >>> result.faces.extract_z(-1, -1, x, y) #doctest: +ELLIPSIS
    <xarray.Dataset>
    Dimensions:   (dim_0: 5)
    Coordinates:
        $z$       ... -1
        time      datetime64[ns] 2001-01-01T01:00:00
        $x$       (dim_0) ... 6 7 8 9 10
        $y$       (dim_0) ... 2 2 2 2 2
    Dimensions without coordinates: dim_0
    Data variables:
        $\\sigma$  (dim_0) float64 -0.4996 -0.4996 -0.4996 -0.4997 -0.4997
        $u$       (dim_0) float64 0.7748 0.7747 0.7745 0.7745 0.7746
        $v$       (dim_0) float64 -3.877e-18 4.267e-17 5.452e-17 5.001e-17 8.011e-17
        $w$       (dim_0) float64 0.0002786 -0.0004764 ... -0.0002754 0.0003252
        $k$       (dim_0) float64 0.004317 0.0042... 0.00416... 0.00409... 0.00403...

    If ``x`` and ``y`` are not given, then the results are returned at the
    face centres.

    >>> result.faces.extract_z(-1, -1) #doctest: +ELLIPSIS
    <xarray.Dataset>
    Dimensions:   ($x$: 18, $y$: 4)
    Coordinates:
      * $x$       ($x$) float64 0.5 1.5 2.5 3.5 4.5 5.5 ... 13.5 14.5 15.5 16.5 17.5
      * $y$       ($y$) float64 1.5 2.5 3.5 4.5
        $z$       ... -1
        time      datetime64[ns] 2001-01-01T01:00:00
    Data variables:
        $\\sigma$  ($x$, $y$) float64 -0.4994 -0.4994 -0.4994 ... -0.5 -0.5 -0.5
        $u$       ($x$, $y$) float64 0.781 0.781 0.781 ... 0.7763 0.7763 0.7763
        $v$       ($x$, $y$) float64 -3.237e-18 1.423e-17 ... -8.598e-17 -4.824e-17
        $w$       ($x$, $y$) float64 -0.01472 -0.01472 ... 0.001343 0.001343
        $k$       ($x$, $y$) float64 0.004802 0.004765 ... 0.003674 0.0036...

    :param t_step: Time step index
    :param z: z-level at which to extract data
    :param x: x-coordinates on which to interpolate data
    :param y: y-coordinates on which to interpolate data

    :raises IndexError: if the time-step index (``t_step``) is out of
        range
    :raises RuntimeError: if only ``x`` or ``y`` is set

    :rtype: xarray.Dataset

    """

    # NOTE(review): x and y are not used in this body — presumably the
    # @_extract decorator (defined elsewhere) resolves t_step, loads the
    # frame, and interpolates onto the given coordinates. Confirm.
    return _faces_frame_to_slice(self._frame,
                                 self._t_steps[t_step],
                                 "z",
                                 z)
@_extract
def extract_sigma(self, t_step: int,
                  sigma: float,
                  x: Optional[Sequence[Num]] = None,
                  y: Optional[Sequence[Num]] = None) -> xr.Dataset:
    """Extract data on the plane at the given sigma-level. Available
    data is:

    * :code:`z`: the z-level, in metres
    * :code:`u`: velocity in the x-direction, in metres per second
    * :code:`v`: velocity in the y-direction, in metres per second
    * :code:`w`: velocity in the z-direction, in metres per second
    * :code:`k`: turbulent kinetic energy, in metres squared per second
      squared

    Results are returned as a :class:`xarray.Dataset`. If the ``x`` and
    ``y`` parameters are defined, then the results are interpolated onto
    the given coordinates. For example:

    >>> from snl_d3d_cec_verify import Result
    >>> data_dir = getfixture('data_dir')
    >>> result = Result(data_dir)
    >>> x = [6, 7, 8, 9, 10]
    >>> y = [2, 2, 2, 2, 2]
    >>> result.faces.extract_sigma(-1, -0.5, x, y) #doctest: +ELLIPSIS
    <xarray.Dataset>
    Dimensions:   (dim_0: 5)
    Coordinates:
        $\\sigma$  ... -0.5
        time      datetime64[ns] 2001-01-01T01:00:00
        $x$       (dim_0) ... 6 7 8 9 10
        $y$       (dim_0) ... 2 2 2 2 2
    Dimensions without coordinates: dim_0
    Data variables:
        $z$       (dim_0) float64 -1.001 -1.001 -1.001 -1.001 -1.001
        $u$       (dim_0) float64 0.7747 0.7746 0.7744 0.7745 0.7745
        $v$       (dim_0) float64 -3.88e-18 4.267e-17 5.452e-17 5.002e-17 8.013e-17
        $w$       (dim_0) float64 0.0002791 -0.0004769 ... -0.0002756 0.0003256
        $k$       (dim_0) float64 0.004... 0.0042... 0.0041... 0.004... 0.0040...

    If ``x`` and ``y`` are not given, then the results are returned at the
    face centres.

    >>> result.faces.extract_sigma(-1, -0.5) #doctest: +ELLIPSIS
    <xarray.Dataset>
    Dimensions:   ($x$: 18, $y$: 4)
    Coordinates:
      * $x$       ($x$) float64 0.5 1.5 2.5 3.5 4.5 5.5 ... 13.5 14.5 15.5 16.5 17.5
      * $y$       ($y$) float64 1.5 2.5 3.5 4.5
        $\\sigma$  ... -0.5
        time      datetime64[ns] 2001-01-01T01:00:00
    Data variables:
        $z$       ($x$, $y$) float64 -1.001 -1.001 -1.001 -1.001 ... -1.0 -1.0 -1.0
        $u$       ($x$, $y$) float64 0.7809 0.7809 0.7809 ... 0.7763 0.7763 0.7763
        $v$       ($x$, $y$) float64 -3.29e-18 1.419e-17 ... -8.598e-17 -4.824e-17
        $w$       ($x$, $y$) float64 -0.01473 -0.01473 ... 0.001343 0.001343
        $k$       ($x$, $y$) float64 0.004809 0.004772 ... 0.003674 0.0036...

    :param t_step: Time step index
    :param sigma: sigma-level at which to extract data
    :param x: x-coordinates on which to interpolate data
    :param y: y-coordinates on which to interpolate data

    :raises IndexError: if the time-step index (``t_step``) is out of
        range
    :raises RuntimeError: if only ``x`` or ``y`` is set

    :rtype: xarray.Dataset

    """

    # NOTE(review): x and y are not used in this body — presumably the
    # @_extract decorator (defined elsewhere) resolves t_step, loads the
    # frame, and interpolates onto the given coordinates. Confirm.
    return _faces_frame_to_slice(self._frame,
                                 self._t_steps[t_step],
                                 "sigma",
                                 sigma)
def extract_depth(self, t_step: int) -> xr.DataArray:
    """Extract the water depth, in metres, at each of the face centres.

    Results are returned as a :class:`xarray.DataArray` indexed by the
    ``$x$`` and ``$y$`` face-centre coordinates, with a ``time``
    coordinate attached.

    :param t_step: Time step index

    :raises IndexError: if the time-step index (``t_step``) is out of
        range

    :rtype: xarray.DataArray

    """

    resolved = self._resolve_t_step(t_step)

    # Lazily load the faces frame for this step on first access
    if resolved not in self._t_steps:
        self._load_t_step(resolved)

    return _faces_frame_to_depth(self._frame,
                                 self._t_steps[resolved])
def _load_t_step(self, t_step: int):
    """Load the faces frame for the given time step and cache its
    simulation time, skipping steps that were already loaded."""

    step = self._resolve_t_step(t_step)
    if step in self._t_steps:
        return

    new_frame = self._get_faces_frame(step)

    # Accumulate all loaded steps into a single long-format frame
    if self._frame is None:
        self._frame = new_frame
    else:
        self._frame = pd.concat([self._frame, new_frame],
                                ignore_index=True)

    sim_time = pd.Timestamp(new_frame["time"].unique().take(0))
    self._t_steps[step] = sim_time
@abstractmethod
def _get_faces_frame(self, t_step: int) -> pd.DataFrame:
    """Return the faces data frame for the given time step.

    Implemented by format-specific subclasses.
    """
    pass # pragma: no cover
def _check_case_study(case: CaseStudy):
    """Raise :class:`ValueError` unless the given case study contains
    exactly one case."""

    if len(case) == 1:
        return

    raise ValueError("case study must have length one")
def _faces_frame_to_slice(frame: pd.DataFrame,
                          sim_time: pd.Timestamp,
                          key: str,
                          value: Num) -> xr.Dataset:
    """Interpolate faces data onto a plane of constant z or sigma.

    For each (x, y) face centre the values are interpolated through the
    water column onto the requested level, and the complementary vertical
    coordinate (sigma when slicing by z, z when slicing by sigma) is
    recorded alongside the velocities (and tke, if present).

    :param frame: long-format faces data with columns x, y, time, z,
        sigma, u, v, w and (optionally) tke
    :param sim_time: time stamp of the required time step
    :param key: vertical coordinate to slice on, either ``"z"`` or
        ``"sigma"``
    :param value: level at which to slice

    :raises RuntimeError: if ``key`` is not ``"z"`` or ``"sigma"``

    :rtype: xarray.Dataset
    """

    valid_keys = ['z', 'sigma']

    if key not in valid_keys:
        keys_msg = ", ".join(valid_keys)
        err_msg = f"Given key is not valid. Choose from {keys_msg}"
        raise RuntimeError(err_msg)

    # The coordinate not being sliced on is interpolated and reported
    valid_keys.remove(key)
    other_key = valid_keys[0]

    frame = frame.set_index(['x', 'y', 'time'])
    frame = frame.xs(sim_time, level=2)
    data = collections.defaultdict(list)

    # Drop any column (layer sample) containing a NaN before interpolating
    remove_nans = lambda a: a[:, ~np.isnan(a).any(axis=0)]

    for (x, y), group in frame.groupby(level=[0, 1]):

        cols = ["z", "sigma", "u", "v", "w"]
        if "tke" in group: cols.append("tke")

        group = group.reset_index(drop=True)
        group_values = group[cols].to_numpy().T

        # Rows 0 and 1 are z and sigma: build a z <-> sigma conversion
        # for this water column
        zsig = group_values[:2, :]
        zsig = remove_nans(zsig)

        if key == "z":
            get_sigma = interpolate.interp1d(zsig[0, :],
                                             zsig[1, :],
                                             fill_value="extrapolate")
            sigma = float(get_sigma(value))
            other = sigma
        else:
            get_z = interpolate.interp1d(zsig[1, :],
                                         zsig[0, :],
                                         fill_value="extrapolate")
            other = float(get_z(value))
            sigma = value

        # Interpolate the velocity components over sigma
        sigvel = group_values[1:5, :]
        sigvel = remove_nans(sigvel)
        get_vel = interpolate.interp1d(sigvel[0, :],
                                       sigvel[1:, :],
                                       fill_value="extrapolate")
        vel = get_vel(sigma)

        if "tke" in group:
            # Interpolate turbulent kinetic energy over sigma
            sigtke = group_values[[1, 5], :]
            sigtke = remove_nans(sigtke)
            get_tke = interpolate.interp1d(sigtke[0, :],
                                           sigtke[1:, :],
                                           fill_value="extrapolate")
            tke = get_tke(sigma)

        data["x"].append(x)
        data["y"].append(y)
        data[other_key].append(other)
        data["u"].append(vel[0])
        data["v"].append(vel[1])
        data["w"].append(vel[2])

        if "tke" in group:
            data["tke"].append(tke[0])

    zframe = pd.DataFrame(data)
    zframe = zframe.set_index(['x', 'y'])
    ds = zframe.to_xarray()
    ds = ds.assign_coords({key: value})
    ds = ds.assign_coords({"time": sim_time})

    # Rename to the LaTeX-style labels used in the public datasets
    name_map = {"z": "$z$",
                "x": "$x$",
                "y": "$y$",
                "u": "$u$",
                "v": "$v$",
                "w": "$w$",
                "sigma": r"$\sigma$"}
    if "tke" in data: name_map["tke"] = "$k$"

    ds = ds.rename(name_map)

    return ds
def _faces_frame_to_depth(frame: pd.DataFrame,
                          sim_time: pd.Timestamp) -> xr.DataArray:
    """Return the water depth at each face centre for the given
    simulation time, as a :class:`xarray.DataArray`."""

    subset = frame[['x', 'y', 'sigma', 'time', 'depth']].dropna()

    # Depth does not vary over the water column, so any single sigma
    # level carries the full field; take the first one available.
    first_sigma = subset["sigma"].unique().take(0)

    indexed = subset.set_index(['x', 'y', 'sigma', 'time'])
    selected = indexed.xs((first_sigma, sim_time), level=(2, 3))

    result = (selected.to_xarray()
                      .assign_coords({"time": sim_time})
                      .rename({"x": "$x$",
                               "y": "$y$"}))

    return result.depth
class _FMFaces(Faces):
    """:class:`Faces` implementation backed by a flexible mesh (FM) map
    file, including turbulent kinetic energy mapped from the edges."""

    def _get_faces_frame(self, t_step: int) -> pd.DataFrame:
        return _map_to_faces_frame_with_tke(self.nc_path, t_step)
def _map_to_faces_frame_with_tke(map_path: StrOrPath,
                                 t_step: Optional[int] = None) -> pd.DataFrame:
    """Build the faces frame from an FM map file, adding turbulent
    kinetic energy (``turkin1``) interpolated from the mesh edges onto
    the face centres.

    :param map_path: path to the netCDF map file
    :param t_step: time step index to read, or ``None`` for all steps
    """

    faces = _map_to_faces_frame(map_path, t_step)
    edges = _map_to_edges_geoframe(map_path, t_step)

    times = faces["time"].unique()
    facesi = faces.set_index("time")
    edgesi = edges.set_index("time")
    faces_final = pd.DataFrame()

    for time in times:

        facest = facesi.loc[time]
        edgest = edgesi.loc[time]

        facest = facest.reset_index(drop=True)
        edgest = edgest.reset_index(drop=True)

        # Edge positions are taken as the centroid of the edge geometry
        edgest["x"] = edgest['geometry'].apply(
            lambda line: np.array(line.centroid.coords[0])[0])
        edgest["y"] = edgest['geometry'].apply(
            lambda line: np.array(line.centroid.coords[0])[1])

        edgesdf = pd.DataFrame(edgest[["x",
                                       "y",
                                       "sigma",
                                       "turkin1",
                                       "f0",
                                       "f1"]])

        facest = facest.set_index(["x", "y", "sigma"])
        facest = facest.sort_index()

        x = facest.index.get_level_values(0).unique().values
        y = facest.index.get_level_values(1).unique().values
        # NOTE(review): grid_x and grid_y are never used below — confirm
        # whether they are leftovers that can be removed.
        grid_x, grid_y = np.meshgrid(x, y)

        facest_new = facest.copy()

        for sigma, group in edgesdf.groupby(by="sigma"):

            # Fill missing values
            groupna = group[pd.isna(group["turkin1"])]
            group = group[~pd.isna(group["turkin1"])]

            if group.empty: continue

            # Nearest-neighbour fill of NaN edge values from valid edges
            points = np.array(list(zip(group.x, group.y)))
            values = group.turkin1.values

            group_x = sorted(groupna.x.unique())
            group_y = sorted(groupna.y.unique())
            group_grid_x, group_grid_y = np.meshgrid(group_x, group_y)
            group_grid_z = interpolate.griddata(points,
                                                values,
                                                (group_grid_x, group_grid_y),
                                                method='nearest')

            turkin1 = []

            for i, j in itertools.product(range(len(group_x)),
                                          range(len(group_y))):
                turkin1.append(group_grid_z[j, i])

            groupna = groupna.set_index(["x", "y"])
            groupna = groupna.sort_index()
            groupna["turkin1"] = turkin1
            group = pd.concat([group, groupna.reset_index()])

            # Interpolate onto faces grid
            data = collections.defaultdict(list)
            maxf = group[["f0", "f1"]].max().max()

            for i in range(maxf + 1):

                # Edges bordering face i (f0 / f1 hold adjacent face ids)
                quad_df = group[(group["f0"] == i) | (group["f1"] == i)]
                quad_df = quad_df.reset_index(drop=True)
                quad_df = quad_df.sort_values(by=['y'], ignore_index=True)

                coords = np.array([quad_df.x, quad_df.y]).T
                densities = quad_df.turkin1.values

                # Face-centre tke as the equally weighted edge average
                target = coords.mean(axis=0)
                target_tke = _get_quadrilateral_centre(densities)

                data["x"].append(target[0])
                data["y"].append(target[1])
                data["tke"].append(target_tke)

            kdf = pd.DataFrame(data)
            kdf["sigma"] = sigma
            kdf = kdf.set_index(["x", "y", "sigma"])

            facest_new = facest_new.combine_first(kdf)

        facest_new = facest_new.reset_index()
        facest_new["time"] = time
        faces_final = pd.concat([faces_final, facest_new])

    return faces_final[["x",
                        "y",
                        "z",
                        "sigma",
                        "time",
                        "depth",
                        "u",
                        "v",
                        "w",
                        "tke"]]
def _map_to_faces_frame(map_path: StrOrPath,
                        t_step: Optional[int] = None) -> pd.DataFrame:
    """Read face-centre values (position, z, sigma, depth and the three
    velocity components) from an FM map file into a long-format
    :class:`pandas.DataFrame`, one row per face per layer per time step.

    :param map_path: path to the netCDF map file
    :param t_step: time step index to read, or ``None`` for all steps
    """

    data = collections.defaultdict(list)

    with xr.open_dataset(map_path) as ds:

        if t_step is None:
            t_steps = tuple(range(len(ds.time)))
        else:
            t_steps = (t_step,)

        for i in t_steps:

            time = ds.time[i].values.take(0)

            # Materialise the netCDF variables once per time step
            x_values = ds.mesh2d_face_x.values
            y_values = ds.mesh2d_face_y.values
            depth_values = ds.mesh2d_waterdepth.values
            sigma_values = ds.mesh2d_layer_sigma.values
            u_values = ds.mesh2d_ucx.values
            v_values = ds.mesh2d_ucy.values
            w_values = ds.mesh2d_ucz.values

            for iface in ds.mesh2d_nFaces.values:

                x = x_values[iface]
                y = y_values[iface]
                depth = depth_values[i, iface]

                for ilayer in ds.mesh2d_nLayers.values:

                    sigma = sigma_values[ilayer]
                    # z-level follows from the sigma coordinate scaled by
                    # the local water depth
                    z = sigma * depth
                    u = u_values[i, iface, ilayer]
                    v = v_values[i, iface, ilayer]
                    w = w_values[i, iface, ilayer]

                    data["x"].append(x)
                    data["y"].append(y)
                    data["z"].append(z)
                    data["sigma"].append(sigma)
                    data["time"].append(time)
                    data["depth"].append(depth)
                    data["u"].append(u)
                    data["v"].append(v)
                    data["w"].append(w)

    return pd.DataFrame(data)
def _get_quadrilateral_centre(densities: npt.NDArray[np.float64]) -> float:
return np.sum(0.25 * densities)
class _StructuredFaces(Faces):
    """:class:`Faces` implementation backed by a structured grid (trim)
    output file."""

    def _get_faces_frame(self, t_step: int) -> pd.DataFrame:
        return _trim_to_faces_frame(self.nc_path, t_step)
def _trim_to_faces_frame(trim_path: StrOrPath,
                         t_step: Optional[int] = None) -> pd.DataFrame:
    """Read cell-centre values from a structured (trim) output file into
    a long-format :class:`pandas.DataFrame`, one row per cell per layer
    per time step.

    :param trim_path: path to the netCDF trim file
    :param t_step: time step index to read, or ``None`` for all steps
    """

    Content = Union[Num, pd.Timestamp]
    data: Dict[str, List[Content]] = collections.defaultdict(list)

    with xr.open_dataset(trim_path) as ds:

        if t_step is None:
            t_steps = tuple(range(len(ds.time)))
        else:
            t_steps = (t_step,)

        for i in t_steps:

            time = ds.time[i].values
            ds_step = ds.isel(time=i)

            x = ds_step.XZ.values
            y = ds_step.YZ.values
            dp0 = ds_step.DP0.values
            s1 = ds_step.S1.values
            sig_lyr = ds_step.SIG_LYR.values
            ik = ds_step.KMAXOUT_RESTR.values
            u1 = ds_step.U1.values
            v1 = ds_step.V1.values
            w = ds_step.W.values
            tke = ds_step.RTUR1.values

            n_layers = len(ik)

            # Trim the boundary cells and replicate x/y over the layers
            x = x[1:-1, 1:-1]
            x = np.repeat(x[np.newaxis, :, :], n_layers, axis=0)
            y = y[1:-1, 1:-1]
            y = np.repeat(y[np.newaxis, :, :], n_layers, axis=0)

            # Total depth from DP0 + S1 — presumably bed depth plus
            # water level; confirm against the trim file documentation
            depth = dp0 + s1
            z = depth[..., None] * sig_lyr
            z = np.rollaxis(z, 2)
            z = z[:, 1:-1, 1:-1]

            isig = sig_lyr.reshape(n_layers, 1, 1)
            sigma = np.ones(x.shape, dtype=int) * isig
            time = np.tile(time, x.shape)

            depth = depth[1:-1, 1:-1]
            depth = np.repeat(depth[np.newaxis, :, :], n_layers, axis=0)

            # Average adjacent values onto the cell centres.
            # NOTE(review): the index offsets assume the staggered layout
            # of the structured grid output — verify against the format
            # specification before changing.
            u1 = u1[:, :-1, 1:-1]
            u = np.nansum([u1[:, :-1, :], u1[:, 1:, :]], axis=0) / 2
            v1 = v1[:, 1:-1, :-1]
            v = np.nansum([v1[:, :, :-1], v1[:, :, 1:]], axis=0) / 2
            w = np.nansum([w[1:, :, :], w[:-1, :, :]], axis=0) / 2
            w = w[:, 1:-1, 1:-1]
            tke = np.nansum([tke[0, 1:, :, :], tke[0, :-1, :, :]], axis=0) / 2
            tke = tke[:, 1:-1, 1:-1]

            data["x"].extend(np.ravel(x))
            data["y"].extend(np.ravel(y))
            data["z"].extend(np.ravel(z))
            data["sigma"].extend(np.ravel(sigma))
            data["time"].extend(np.ravel(time))
            data["depth"].extend(np.ravel(depth))
            data["u"].extend(np.ravel(u))
            data["v"].extend(np.ravel(v))
            data["w"].extend(np.ravel(w))
            data["tke"].extend(np.ravel(tke))

    return pd.DataFrame(data)
|
# -*- coding: utf-8 -*-
"""
Tree IO compatibility utilities and classes.
"""
import base64
import io
import itertools
import json
import math
import os
import pathlib
import re
import struct
import sys
from typing import Callable, Optional, Tuple, Union
import cv2
import h5py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from perceptree.common.logger import Logger
class TreeFile(Logger):
    """
    Wrapper around a single file in .tree format.

    A .tree file holds an optional meta-data part (static csv-style data
    followed by dynamic JSON data wrapped in ``<DynamicData>`` tags),
    separated from the tree node data by :attr:`META_DATA_SPLITTER`.

    :param file_path: Path to file containing the tree data.
    :param file_content: Content of the tree file.
    :param load_static: Load static meta-data?
    :param load_dynamic: Load dynamic meta-data?
    :param load_node: Load tree node data?
    """

    META_DATA_SPLITTER = "#####"
    """ String splitting meta-data from tree nodes. """

    STATIC_CSV_SEPARATOR = ","
    """ Separator used in static meta-data csv. """

    DYNAMIC_START_TAG = "<DynamicData>"
    """ Tag used to start dynamic meta-data section. """

    DYNAMIC_END_TAG = "</DynamicData>"
    """ Tag used to end dynamic meta-data section. """

    def __init__(self, file_path: Optional[str] = None,
                 file_content: Optional[str] = None,
                 load_static: bool = True, load_dynamic: bool = True,
                 load_node: bool = True, calculate_stats: bool = True):
        """ Initialize from a file path or, failing that, raw content. """

        self._static_meta_data = {}
        self._dynamic_meta_data = {}
        self._node_data = {}

        # file_path takes precedence over file_content; if neither is
        # provided the instance stays empty.
        if file_path:
            self.load_file(file_path, load_static=load_static,
                           load_dynamic=load_dynamic, load_node=load_node,
                           calculate_stats=calculate_stats)
        elif file_content:
            self.load_content(file_content, load_static=load_static,
                              load_dynamic=load_dynamic, load_node=load_node,
                              calculate_stats=calculate_stats)

    def load_file(self, file_path: str,
                  load_static: bool = True, load_dynamic: bool = True,
                  load_node: bool = True, calculate_stats: bool = True):
        """ Load tree format from file. """

        with open(file_path, "r") as f:
            self.load_content(f.read(), load_static=load_static,
                              load_dynamic=load_dynamic, load_node=load_node,
                              calculate_stats=calculate_stats)

    def load_content(self, file_content: str,
                     load_static: bool = True, load_dynamic: bool = True,
                     load_node: bool = True, calculate_stats: bool = True):
        """ Load tree format from tree file content. """

        static_meta_data, dynamic_meta_data, node_data = self._split_content_sections(
            content=file_content
        )

        if load_static:
            self._static_meta_data = self._parse_static_meta_data(static_meta_data)
        if load_dynamic:
            self._dynamic_meta_data = self._parse_dynamic_meta_data(
                dynamic_meta_data, calculate_stats=calculate_stats)
        if load_node:
            self._node_data = self._parse_node_data(node_data)

    @property
    def static_meta_data(self) -> dict:
        """ Get dictionary containing static meta-data. """
        return self._static_meta_data

    @property
    def dynamic_meta_data(self) -> dict:
        """ Get dictionary containing dynamic meta-data. """
        return self._dynamic_meta_data

    @property
    def node_data(self) -> dict:
        """ Get dictionary containing tree node data. """
        return self._node_data

    def _split_content_sections(self, content: str) -> Tuple[str, str, str]:
        """
        Split file content into the three sections.

        :param content: Content to split.

        :return: Returns content sections:
            * Static meta-data
            * Dynamic meta-data
            * Tree node data
        """

        # Everything after the last splitter is node data; without a
        # splitter the whole content is treated as node data.
        meta_node_split = content.split(self.META_DATA_SPLITTER)
        if len(meta_node_split) > 1:
            all_meta_data = "".join(meta_node_split[:-1])
            node_data = meta_node_split[-1]
        else:
            all_meta_data = ""
            node_data = content

        # Text before the first dynamic tag is static meta-data; the
        # remainder (tags stripped) is dynamic meta-data.
        static_dynamic_meta_split = all_meta_data.split(self.DYNAMIC_START_TAG)
        if len(static_dynamic_meta_split) > 1:
            static_meta_data = static_dynamic_meta_split[0]
            dynamic_meta_data = "".join([
                d.replace(self.DYNAMIC_START_TAG, "").replace(self.DYNAMIC_END_TAG, "")
                for d in static_dynamic_meta_split[1:]
            ])
        else:
            dynamic_meta_data = ""
            static_meta_data = all_meta_data

        return static_meta_data, dynamic_meta_data, node_data

    def _parse_static_meta_data(self, static_content: str) -> dict:
        """ Parse given static meta-data string and return dictionary with values. """

        if len(static_content) == 0 or static_content == "null":
            return dict()

        # Static meta-data is csv; only the first data row is used.
        content_io = io.StringIO(static_content)
        csv_df = pd.read_csv(content_io, sep=self.STATIC_CSV_SEPARATOR)

        if len(csv_df) >= 1:
            return csv_df.iloc[0].to_dict()
        else:
            return dict()

    def _parse_dynamic_meta_data(self, dynamic_content: str, calculate_stats: bool) -> dict:
        """ Parse given dynamic meta-data string (JSON) and return dictionary with values. """

        if len(dynamic_content) == 0 or dynamic_content.strip() == "null":
            return dict()

        dynamic = json.loads(dynamic_content)

        if calculate_stats and "stats" in dynamic:
            # Search for statistics and create wrapper objects.
            def parse_dict(stat_dict):
                if "histogram" in stat_dict:
                    stat_dict["statistic"] = TreeStatistic(stat_dict)
                elif "image" in stat_dict:
                    stat_dict["image"] = TreeImage(stat_dict)

            stats = dynamic.get("stats", { })
            for stat_dict in stats.values(): parse_dict(stat_dict)
            visual = stats.get("visual", { })
            for stat_dict in visual.values(): parse_dict(stat_dict)

        return dynamic

    def _parse_node_single(self, content: str) -> Tuple[dict, str]:
        """ Parse a single node and return its data and the rest of the content.

        :raises RuntimeError: If no starting/ending bracket is found.
        """

        startPos = content.find("(")
        endPos = content.find(")")
        if startPos < 0 or endPos < 0:
            raise RuntimeError("No starting/ending bracket found for node specification!")

        node_data = content[startPos + 1:endPos]
        rest_content = content[endPos + 1:]

        # Values appear in fixed order; trailing values may be omitted.
        value_names = [ "x", "y", "z", "width" ]
        node_values = [ float(v) for v in node_data.split(",") ]
        node_dict = { value_names[idx]: node_values[idx] for idx in range(len(node_values)) }

        return node_dict, rest_content

    def _parse_node_data(self, node_content: str) -> dict:
        """ Parse given tree node data string and return dictionary with values.

        Nodes are "(x,y,z,width)" tuples; "[" and "]" open and close a
        chain of children under the most recent node.

        :raises RuntimeError: If an unexpected character is encountered.
        """

        if len(node_content) == 0 or node_content == "null":
            return dict()

        content = node_content.strip()
        parent_stack = [ ]
        node_data = { }
        current_parent = -1
        current_node_idx = 0

        while len(content) > 0:
            if content[0] == "(":
                # Node data is starting -> Parse it.
                node_dict, content = self._parse_node_single(content)
                # Add node meta-data.
                node_dict["parent"] = current_parent
                node_dict["children"] = [ ]
                node_dict["id"] = current_node_idx
                node_data[current_node_idx] = node_dict
                # Add child to the parent node.
                if current_parent >= 0:
                    node_data[current_parent]["children"].append(current_node_idx)
                # Update current index.
                current_parent = current_node_idx
                current_node_idx += 1
            elif content[0] == "[":
                # Chain is starting -> Add parent to the stack.
                parent_stack.append(current_parent)
                content = content[1:]
            elif content[0] == "]":
                # Chain is ending -> Recover parent from the stack.
                current_parent = parent_stack.pop()
                content = content[1:]
            else:
                # Unknown character found.
                raise RuntimeError(f"Failed to parse tree node data, invalid character found \"{content[0]}\"!")

        return node_data
class TreeImage(Logger):
    """
    Simple wrapper for images passed in the tree dynamic meta-data or
    view files.

    :param image_dict: Image dictionary from the dynamic meta-data,
        containing "image" key.
    :param image_path: Path to the image to be loaded.

    :raises RuntimeError: If neither input is provided.
    """

    TYPE_NAME_FLOAT = "Float"
    """ Name of value type for float. """

    TYPE_NAME_UINT = "UInt"
    """ Name of value type for unsigned int. """

    def __init__(self,
                 image_dict: Optional[dict] = None,
                 image_path: Optional[Union[str, pathlib.Path]] = None):
        # image_dict takes precedence over image_path.
        if image_dict is not None:
            self._data = self._parse_image_dict(image_dict)
        elif image_path is not None:
            self._data = self._load_image_file(image_path)
        else:
            raise RuntimeError("No input data provided to the TreeImage!")

    @staticmethod
    def is_image_dict(image_dict: dict) -> bool:
        """ Check if given dict is an image dict. """

        return "image" in image_dict

    @property
    def name(self) -> str:
        """ Get image name. """

        return self._data["name"]

    @property
    def description(self) -> str:
        """ Get image description. """

        return self._data["description"]

    @property
    def data(self) -> np.array:
        """ Get image data as a (height, width, channels) array. """

        return self._data["values"]

    def display(self):
        """ Display the image using matplotlib.pyplot.imshow. """

        plt.imshow(self.data.squeeze(), origin="lower")
        plt.show()

    def save_to(self, path: Union[str, pathlib.Path], transform: Optional[Callable] = None):
        """ Save the image to given path, which must include the extension.

        :param path: Target file path, including the image extension.
        :param transform: Optional callable applied to the pixel values
            before saving.
        """

        values = self._data["values"]
        if transform is not None:
            values = transform(values)

        cv2.imwrite(filename=str(path), img=values)

    def resize(self, resolution: Optional[int],
               interpolation: Optional[str]) -> "TreeImage":
        """
        Resize this image and return self.

        :param resolution: Requested resolution, use None for no
            resizing.
        :param interpolation: Interpolation used for resizing, use
            None for automatic.

        :return: Returns self.
        """

        interpolations = {
            "nearest": cv2.INTER_NEAREST,
            "linear": cv2.INTER_LINEAR,
            "cubic": cv2.INTER_CUBIC,
            "area": cv2.INTER_AREA,
            "lanczos": cv2.INTER_LANCZOS4
        }

        current_size = self.data.shape[:2]
        if resolution is None or (current_size[0] == resolution and
                                  current_size[1] == resolution):
            return self

        # Fix: decimation means shrinking, i.e. the current size exceeds
        # the requested resolution. OpenCV recommends INTER_AREA when
        # shrinking and INTER_LINEAR when enlarging; the previous
        # comparison was inverted and selected INTER_AREA for enlarging.
        decimation = (current_size[0] > resolution) and (current_size[1] > resolution)
        default_interpolation = interpolations["area"] if decimation else interpolations["linear"]
        # Fall back to the automatic choice for unknown or None names.
        interpolation = interpolations.get(interpolation, None) or default_interpolation

        self._data["values"] = cv2.resize(
            src=self._data["values"],
            dsize=(resolution, resolution),
            interpolation=interpolation
        )

        return self

    def _unpack_format_for(self, width: int, height: int,
                           channels: int, value_type: str) -> str:
        """ Get struct unpack format for given parameters.

        :raises RuntimeError: If ``value_type`` is not a known type name.
        """

        type_map = {
            self.TYPE_NAME_FLOAT: "f",
            self.TYPE_NAME_UINT: "I",
        }
        if value_type not in type_map:
            raise RuntimeError(f"Unknown value type for image: \"{value_type}\".")
        unpack_type = type_map[value_type]

        total_values = width * height * channels

        return f"{total_values}{unpack_type}"

    def _parse_image_dict(self, image_dict: dict) -> dict:
        """ Parse given image dict and return parsed data.

        :raises RuntimeError: If the dict has no "image" key.
        """

        if "image" not in image_dict:
            raise RuntimeError("Missing 'image' in meta-data image dict!")
        image = image_dict["image"]

        width = image["width"]
        height = image["height"]
        channels = image["channels"]
        value_type = image["valueType"]

        unpack_format = self._unpack_format_for(
            width=width, height=height,
            channels=channels, value_type=value_type
        )

        # Pixel data arrives as base64-encoded packed binary values.
        decoded = base64.b64decode(image["data"])
        values = struct.unpack(unpack_format, decoded)
        values = np.array(values).reshape((height, width, channels))

        name = image.get("name", "")
        description = image.get("description", "")

        return {
            "values": values,
            "name": name,
            "description": description
        }

    def _load_image_file(self, image_path: Union[str, pathlib.Path]) -> dict:
        """ Load given image file and return parsed data.

        :raises RuntimeError: If the image cannot be loaded.
        """

        values = cv2.imread(
            filename=str(image_path)
        )
        if values is None:
            raise RuntimeError(f"Failed to load image from \"{image_path}\"")

        # Use the file stem as the image name.
        name = pathlib.Path(image_path).with_suffix("").name

        return {
            "values": values,
            "name": name,
            "description": image_path
        }
class TreeStatistic(Logger):
    """
    Simple wrapper for statistics passed in the tree dynamic meta-data.

    :param stat_dict: Statistic dictionary from the dynamic meta-data,
        containing "histogram", "stochastic", "variable" and other keys.
    """

    VT_SIMPLE = "simple"
    """ Identifier used for simple values. """

    VT_PAIRED = "paired"
    """ Identifier used for paired values. """

    VAL_INF = 3.4e+38
    """ Value used to signify floating point infinity. """

    def __init__(self, stat_dict: dict):
        self._data = self._parse_stat_dict(stat_dict)

    @staticmethod
    def is_stat_dict(stat_dict: dict) -> bool:
        """ Check if given dict is a statistic dict. """
        return all(key in stat_dict
                   for key in ("histogram", "stochastic", "variable"))

    @property
    def data(self) -> dict:
        """ Get statistic data. """
        return self._data

    @property
    def values(self) -> np.array:
        """ Get finite values for this statistic as a 2D (N, C) array. """
        arr_values = np.array(self.data["variable"]["values"])
        if len(arr_values.shape) <= 1:
            arr_values = arr_values.reshape((-1, 1))
        # Drop rows containing NaN/inf or our sentinel "infinity" values.
        arr_values = arr_values[np.all(np.isfinite(arr_values) &
                                       np.less(arr_values, self.VAL_INF) &
                                       np.greater(arr_values, -self.VAL_INF),
                                       axis=-1)]
        return arr_values

    @property
    def bucket_values(self) -> np.array:
        """ Get values used for determining histogram buckets. """
        vals = self.values
        # FIX: reuse the already-computed `vals` (the original re-evaluated
        # self.values in the "simple" getter, doing the filtering twice).
        getters = {
            TreeStatistic.VT_SIMPLE: lambda: vals,
            TreeStatistic.VT_PAIRED: lambda: vals[:, 0]
        }
        return getters.get(self.data["variable"]["values_type"], None)() \
            if len(vals) else np.array([], dtype=vals.dtype)

    @property
    def count_values(self) -> np.array:
        """ Get values used for determining histogram counts. """
        vals = self.values
        getters = {
            TreeStatistic.VT_SIMPLE: lambda: np.ones(vals.shape),
            TreeStatistic.VT_PAIRED: lambda: vals[:, 1]
        }
        return getters.get(self.data["variable"]["values_type"], None)() \
            if len(vals) else np.array([], dtype=vals.dtype)

    def display_hist(self):
        """ Display the pre-bucketed histogram using matplotlib.pyplot.hist. """
        hist = self._data["histogram"]
        plt.hist(hist["buckets"][:-1], hist["buckets"], weights=hist["counts"])
        plt.show()

    def display_value_hist(self):
        """ Display a histogram of the raw values using the parsed buckets.

        NOTE(review): for "paired" statistics the raw values are 2D;
        plt.hist then draws one histogram per column — confirm intended.
        """
        values = self._data["variable"]["values"]
        hist = self._data["histogram"]
        plt.hist(values, hist["buckets"])
        plt.show()

    def _parse_histogram_dict(self, hist_dict: dict) -> dict:
        """ Parse histogram dictionary and return the result.

        Re-buckets the sparse per-bucket records in ``hist_dict["data"]``
        onto a regular ``np.linspace`` grid between "min" and "max".

        :param hist_dict: Dict with "data", "min", "max" and "buckets".
        :return: Dict with "buckets" (edges) and "counts" (len(edges) - 1).
        """
        data = hist_dict["data"] or []
        min_val = hist_dict["min"]
        max_val = hist_dict["max"]
        bucket_count = hist_dict["buckets"]
        if not isinstance(data, list) or len(data) == 0:
            if bucket_count >= 1:
                # NOTE(review): np.float128 is platform dependent (absent on
                # Windows builds of NumPy) — confirm target platforms.
                step = (np.float128(max_val) - np.float128(min_val)) / (bucket_count - 1)
            else:
                step = 1
                min_val = 0
                max_val = -1
        else:
            # Infer the step from the first record; refined below once the
            # actual bucket edges exist.
            step = data[0]["end"] - data[0]["start"]
        buckets = np.linspace(min_val, max_val, bucket_count)
        step = buckets[1] - buckets[0] if len(buckets) > 1 else step
        counts = np.zeros(max(0, len(buckets) - 1))
        for dat in data:
            # Locate the bucket this record belongs to by its start offset.
            idx = int(round((dat["start"] - min_val) / step))
            assert idx >= 0 and idx + 1 < len(buckets)
            assert abs(dat["start"] - buckets[idx]) < step and \
                   abs(dat["end"] - buckets[idx + 1]) < step
            counts[idx] += dat["count"]
        return {
            "buckets": buckets,
            "counts": counts
        }

    def _parse_stochastic_dict(self, stoch_dict: dict) -> dict:
        """ Parse stochastic dictionary and return a shallow copy of it. """
        return stoch_dict.copy()

    def _parse_variable_dict(self, var_dict: dict) -> dict:
        """ Parse variable dictionary and return the result.

        The "values" entry, when present, is a base64-encoded HDF5 file
        containing either a "data" dataset (simple values) or a pair of
        "data.first"/"data.second" datasets (paired values).
        """
        if "values" in var_dict:
            values_encoded = var_dict["values"]
            values_decoded = base64.b64decode(values_encoded)
            if len(values_decoded) == 0:
                # Empty payload: pass through the meta-data supplied bounds.
                if isinstance(var_dict["max"], list):
                    value_type = self.VT_PAIRED
                else:
                    value_type = self.VT_SIMPLE
                return {
                    # FIX: np.float was removed from NumPy (>= 1.20); use the
                    # builtin float as dtype instead.
                    "values": np.array([], dtype=float),
                    "values_type": value_type,
                    "count": var_dict["count"],
                    # FIX: "min" and "max" were swapped in the original
                    # (min was assigned var_dict["max"] and vice versa).
                    "min": var_dict["min"],
                    "max": var_dict["max"]
                }
            h5_data = io.BytesIO(values_decoded)
            h5_file = h5py.File(h5_data, "r")
            if "data" in h5_file:
                # Basic list of values.
                values = h5_file["data"][:]
                count_val = len(values)
                if count_val:
                    min_val = np.min(values)
                    max_val = np.max(values)
                else:
                    values = np.array([], dtype=values.dtype)
                    min_val = np.zeros(1, dtype=values.dtype)[0]
                    max_val = np.zeros(1, dtype=values.dtype)[0]
                values_type = self.VT_SIMPLE
            elif "data.first" in h5_file and "data.second" in h5_file:
                # Paired list with bucket value and delta.
                first_values = h5_file["data.first"][:]
                second_values = h5_file["data.second"][:]
                values = np.dstack([first_values, second_values])
                if len(first_values):
                    values = values.reshape(len(first_values), -1)
                    count_val = len(values)
                    # min/max row selected by the second (delta) component.
                    min_val = values[np.argmin(second_values)]
                    max_val = values[np.argmax(second_values)]
                else:
                    values = np.array([], dtype=values.dtype)
                    count_val = 0
                    min_val = np.zeros(1, dtype=values.dtype)[0]
                    max_val = np.zeros(1, dtype=values.dtype)[0]
                values_type = self.VT_PAIRED
                assert count_val == var_dict["count"]
        else:
            values = []
            count_val = var_dict["count"]
            min_val = var_dict["min"]
            max_val = var_dict["max"]
            # FIX: values_type was left undefined on this path, raising
            # NameError at the return below; infer it the same way as for
            # an empty payload.
            values_type = self.VT_PAIRED \
                if isinstance(var_dict["max"], list) else self.VT_SIMPLE
        return {
            "values": values,
            "values_type": values_type,
            "count": count_val,
            "min": min_val,
            "max": max_val
        }

    def _parse_stat_dict(self, stat_dict: dict) -> dict:
        """ Parse given statistic dictionary and produce parsed data.

        :raises RuntimeError: If any of the three mandatory sections
            ("histogram", "stochastic", "variable") is missing.
        """
        if "histogram" not in stat_dict:
            raise RuntimeError("Missing 'histogram' in meta-data statistic dict!")
        if "stochastic" not in stat_dict:
            raise RuntimeError("Missing 'stochastic' in meta-data statistic dict!")
        if "variable" not in stat_dict:
            raise RuntimeError("Missing 'variable' in meta-data statistic dict!")
        name = stat_dict.get("name", "")
        description = stat_dict.get("description", "")
        histogram = self._parse_histogram_dict(stat_dict["histogram"])
        stochastic = self._parse_stochastic_dict(stat_dict["stochastic"])
        variable = self._parse_variable_dict(stat_dict["variable"])
        return {
            "name": name,
            "description": description,
            "histogram": histogram,
            "stochastic": stochastic,
            "variable": variable
        }
|
"""add unique constraint to software table
Revision ID: 931e55599de8
Revises: f1e4fcb38055
Create Date: 2021-10-28 10:54:15.305576
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '931e55599de8'        # this migration's id
down_revision = 'f1e4fcb38055'   # previous migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Apply: enforce uniqueness of software rows by (name, version)."""
    columns = ['name', 'version']
    op.create_unique_constraint('uniq_software_01', 'software', columns)
def downgrade():
    """Revert: drop the (name, version) unique constraint again."""
    op.drop_constraint('uniq_software_01', 'software')
|
# temporary mapping algorithm of existing Canadian cases data to hospital occupancy rates
import json
import geocoder
import os
from dotenv import load_dotenv
import pandas as pd
from math import inf
from math import sin
from math import cos
from math import atan2
from math import sqrt
from math import radians
# firebase
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
# load environment variables
dotenv_path = '.env'
load_dotenv(dotenv_path)
# firebase setup
# NOTE(review): credentials.Certificate presumably expects the path to a
# service-account JSON file; assumes GOOGLE_APPLICATION_CREDENTIALS is set
# in the environment — confirm, None would fail here.
cred = credentials.Certificate(os.environ.get('GOOGLE_APPLICATION_CREDENTIALS'))
firebase_admin.initialize_app(cred)
db = firestore.client()
# google maps api key
maps_api_key = os.environ.get('GOOGLE_MAPS_API_KEY')
# flag to only activate a specific part of the script
# (0 disables all parts; set to 1..4 to run one stage)
part = 0
# ----- PART 1 -----
# create json file with regions and latlng and number of cases
if part == 1:
    # lookup table of already known lat-lngs for each region (and counter for
    # number of cases in that region) so I don't have to redo these geocoder
    # calls. FIX: removed a duplicate 'Estrie' entry (identical value) that
    # silently overwrote the first one in the dict literal.
    health_regions = {
        'Toronto' : {'cases': 0, 'hospitals': [], 'location': [43.653226, -79.3831843]},
        'Vancouver Coastal' : {'cases': 0, 'hospitals': [], 'location': [49.2827291, -123.1207375]},
        'Middlesex-London' : {'cases': 0, 'hospitals': [], 'location': [42.9849233, -81.2452768]},
        'Interior' : {'cases': 0, 'hospitals': [], 'location': [49.2835277, -123.1169394]},
        'Fraser' : {'cases': 0, 'hospitals': [], 'location': [59.71711500000001, -135.049219]},
        'Montréal' : {'cases': 0, 'hospitals': [], 'location': [45.5016889, -73.567256]},
        'York' : {'cases': 0, 'hospitals': [], 'location': [43.6956787, -79.4503544]},
        'Durham' : {'cases': 0, 'hospitals': [], 'location': [44.1763254, -80.8185006]},
        'Laurentides' : {'cases': 0, 'hospitals': [], 'location': [46.6181619, -75.01814929999999]},
        'Waterloo' : {'cases': 0, 'hospitals': [], 'location': [43.4642578, -80.5204096]},
        'Peel' : {'cases': 0, 'hospitals': [], 'location': [43.6766398, -79.7848422]},
        'Calgary' : {'cases': 0, 'hospitals': [], 'location': [51.04473309999999, -114.0718831]},
        'Montérégie' : {'cases': 0, 'hospitals': [], 'location': [45.3290251, -72.81482489999999]},
        'Edmonton' : {'cases': 0, 'hospitals': [], 'location': [53.5461245, -113.4938229]},
        'Sudbury' : {'cases': 0, 'hospitals': [], 'location': [46.4917317, -80.99302899999999]},
        'Ottawa' : {'cases': 0, 'hospitals': [], 'location': [45.4215296, -75.69719309999999]},
        'Halton' : {'cases': 0, 'hospitals': [], 'location': [43.53253720000001, -79.87448359999999]},
        'Mauricie' : {'cases': 0, 'hospitals': [], 'location': [46.6629657, -72.8512198]},
        'Island' : {'cases': 0, 'hospitals': [], 'location': [50.39297269999999, -125.1186076]},
        'Hamilton' : {'cases': 0, 'hospitals': [], 'location': [43.2557206, -79.8711024]},
        'Simcoe Muskoka' : {'cases': 0, 'hospitals': [], 'location': [44.4716525, -79.8296743]},
        'Saskatoon' : {'cases': 0, 'hospitals': [], 'location': [52.1332144, -106.6700458]},
        'Winnipeg' : {'cases': 0, 'hospitals': [], 'location': [49.895136, -97.13837439999999]},
        'Chaudière-Appalaches' : {'cases': 0, 'hospitals': [], 'location': [46.6981917, -71.2993195]},
        'Estrie' : {'cases': 0, 'hospitals': [], 'location': [45.7903568, -70.9565703]},
        'Zone 1 (Moncton area)' : {'cases': 0, 'hospitals': [], 'location': [46.0878165, -64.7782313]},
        'Niagara' : {'cases': 0, 'hospitals': [], 'location': [43.0581645, -79.29021329999999]},
        'Haliburton Kawartha Pineridge' : {'cases': 0, 'hospitals': [], 'location': [43.9683674, -78.2856082]},
        'Huron Perth' : {'cases': 0, 'hospitals': [], 'location': [43.6416566, -81.6911559]},
        'Northwestern' : {'cases': 0, 'hospitals': [], 'location': [64.8255441, -124.8457334]},
        'Eastern' : {'cases': 0, 'hospitals': [], 'location': [43.6668105, -79.6419657]},
        'Lanaudière' : {'cases': 0, 'hospitals': [], 'location': [46.1256124, -73.704151]},
        'Prince Edward Island' : {'cases': 0, 'hospitals': [], 'location': [46.510712, -63.41681359999999]}
    }
    data_xl = pd.read_excel(r'data/cases.xlsx')
    df = pd.DataFrame(data_xl, columns=['case_id', 'health_region', 'province'])
    for case_id, region, prov in zip(df['case_id'], df['health_region'], df['province']):
        try:
            # insert new region into lookup table
            if region not in health_regions.keys():
                g = geocoder.google(region + ' ' + prov + ' Canada', key=maps_api_key)
                health_regions[region] = {'cases': 0, 'location': g.latlng}
            # increment # cases for that region
            health_regions[region]['cases'] += 1
            print(case_id)
        # FIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt.
        except Exception:
            print('----- ERROR with case ID: {} -----'.format(case_id))
    # some error trapping
    if 'Not Reported' in health_regions:
        del health_regions['Not Reported']
    json_obj = json.dumps(health_regions, indent=4, sort_keys=True)
    with open('data/health_regions_data.json', 'w') as file:
        file.write(json_obj)
# ----- PART 2 -----
# add list of hospitals to regions
elif part == 2:
with open('data/health_regions_data.json', 'r') as file:
health_regions = json.load(file)
# get latlngs of each hospital
db_data = db.collection(u'hospitals').stream()
hospitals = {}
for hospital in db_data:
hospital = hospital.to_dict()
hospitals[hospital['name']] = [hospital['lat'], hospital['lng']]
print(hospital['name'])
# for each hospital, find closest region
for hospital, location in hospitals.items():
min_dist = inf
min_region = 'ERROR'
lat1 = location[0]
lng1 = location[1]
for region, val in health_regions.items():
lat2 = val['location'][0]
lng2 = val['location'][1]
# use Haversine Formula to caclulate great circle distance between two points
dlng = lng2 - lng1
dlat = lat2 - lat1
a = (sin(dlat/2))**2 + cos(lat1) * cos(lat2) * (sin(dlng/2))**2
c = 2 * atan2(sqrt(a), sqrt(1-a))
if c < min_dist:
min_dist, min_region = c, region
# add hospital to regions data
if 'hospitals' in health_regions[min_region]:
health_regions[min_region]['hospitals'].append(hospital)
else:
health_regions[min_region]['hospitals'] = [hospital]
json_obj = json.dumps(health_regions, indent=4, sort_keys=True)
with open('data/health_regions_data.json', 'w') as file:
file.write(json_obj)
# ----- PART 3 -----
# determine occupancy for each hospital
elif part == 3:
db_data = db.collection(u'hospitals').stream()
hospitals = {}
# create a table to keep track of hospital bed data
# IMPORTANT: it is assumed that num_beds is greater than zero
for hospital in db_data:
hospital = hospital.to_dict()
hospitals[hospital['name']] = {'beds_occupied': 0, 'total_beds': int(hospital['num_beds'])}
print(hospital['name'])
with open('data/health_regions_data.json', 'r') as file:
health_regions = json.load(file)
print('\nGoing through each region:\n')
# for each region, keep assigning cases to hospitals until no more cases or hospital is full
for region, val in health_regions.items():
try:
queue = val['hospitals']
cases = val['cases']
i = 0
while len(queue) > 0 and cases > 0:
hospital = queue[i]
# if hospital still has empty beds, assign it a case
if hospitals[hospital]['beds_occupied'] < hospitals[hospital]['total_beds']:
hospitals[hospital]['beds_occupied'] += 1
cases -= 1
# if hospital is full, remove it from the queue
if hospitals[hospital]['beds_occupied'] == hospitals[hospital]['total_beds']:
del queue[i]
else:
i += 1
if i >= len(queue):
i = 0
print(region)
except:
print('----- ERROR with ' + region + ' -----')
# for each hospital, set its occupancy to beds occupied / total beds
for val in hospitals.values():
val['occupancy'] = round(100 * val['beds_occupied'] / val['total_beds'])
# store in json
json_obj = json.dumps(hospitals, indent=4, sort_keys=True)
with open('data/hospital_occupancy_estimates.json', 'w') as file:
file.write(json_obj)
# ----- PART 4 -----
# update db data
elif part == 4:
with open('data/hospital_occupancy_estimates.json', 'r') as file:
estimates = json.load(file)
for name, val in estimates.items():
try:
db.collection(u'hospitals').document(name).update({u'percent_occupancy': val['occupancy']})
print(name)
except:
print('----- ERROR with ' + name + ' -----') |
# Copyright (c) 2015-2021 Agalmic Ventures LLC (www.agalmicventures.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def extractPath(data, path):
    """
    Indexes a JSON object with a period separated path.
    :param data: dict
    :param path: str
    :return: a valid JSON value
    """
    return extractSplitPath(data, path.split('.'))
def extractSplitPath(data, splitPath):
    """
    Indexes a JSON object with a list of string keys as a path.
    :param data: dict
    :param splitPath: [str] non-empty list of keys
    :return: a valid JSON value (None when any key along the path is absent)
    """
    head, tail = splitPath[0], splitPath[1:]
    value = data.get(head)
    # Stop on the last key or as soon as a lookup yields nothing.
    if not tail or value is None:
        return value
    return extractSplitPath(value, tail)
|
# -*- coding: utf-8 -*-
from chai import Chai
from arrow import api, arrow, factory
class ModuleTests(Chai):
    """Tests for the module-level convenience wrappers in ``arrow.api``.

    Each test stubs the shared ``api._factory`` with chai's ``expect()``
    and checks that the wrapper forwards arguments and returns the
    stubbed result unchanged.
    """

    def test_get(self):
        # api.get should delegate to the factory's get() with args intact.
        self.expect(api._factory.get).args(1, b=2).returns("result")
        self.assertEqual(api.get(1, b=2), "result")

    def test_utcnow(self):
        # api.utcnow takes no arguments and forwards to the factory.
        self.expect(api._factory.utcnow).returns("utcnow")
        self.assertEqual(api.utcnow(), "utcnow")

    def test_now(self):
        # api.now passes the timezone argument through.
        self.expect(api._factory.now).args("tz").returns("now")
        self.assertEqual(api.now("tz"), "now")

    def test_factory(self):
        # api.factory wraps a custom Arrow subclass in an ArrowFactory
        # whose utcnow() produces instances of that subclass.
        class MockCustomArrowClass(arrow.Arrow):
            pass
        result = api.factory(MockCustomArrowClass)
        self.assertIsInstance(result, factory.ArrowFactory)
        self.assertIsInstance(result.utcnow(), MockCustomArrowClass)
|
class SemanticException(Exception):
    """Distinct exception type for semantic errors; carries no extra state
    (semantics are defined by the raising call sites)."""
|
import sys
import auth
import testList
import testServer
def main(argv):
    """Smoke-test the server API: create a test server, then delete it."""
    # testList.novaList()
    # testList.glanceList()
    name = "will_test"
    testServer.createServer(name)
    testServer.deleteServer(name)
if __name__ == "__main__":
main(sys.argv[1:])
|
def unique(l):
    """Return the elements of *l* with duplicates removed, preserving
    first-seen order.

    Items must be hashable. Uses the ``dict.fromkeys`` idiom (dicts keep
    insertion order since Python 3.7) instead of the original trick of
    mutating a set inside the comprehension's condition.
    """
    return list(dict.fromkeys(l))
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 09:11:06 2020
@author: cgasca
"""
import csv
import numpy
import pandas as pd
def readCSV(filename):
    """Read a CSV file and return its rows as a list of lists of strings.

    :param filename: path to the CSV file
    :return: list of rows, each a list of unquoted string fields

    FIX: the original opened the file and never closed it (handle leak);
    a ``with`` block now guarantees the file is closed.
    """
    with open(filename, 'rt') as raw_data:
        reader = csv.reader(raw_data, delimiter=',', quoting=csv.QUOTE_NONE)
        return list(reader)
def readXLSX(filename):
    """Read the first two columns of an Excel sheet into parallel lists.

    :param filename: path to the .xlsx file
    :return: tuple (X, y) where X holds column 0 and y holds column 1

    FIX: dropped the ``encoding='utf-8'`` kwarg — ``read_excel`` never used
    it and it raises TypeError on pandas >= 1.2.
    """
    rows = pd.read_excel(filename).values.tolist()
    X = [row[0] for row in rows]
    y = [row[1] for row in rows]
    return X, y
def replaceNonAscii(string):
    """Return *string* with every non-ASCII character (ord >= 128) removed."""
    return string.encode('ascii', 'ignore').decode('ascii')
def writeXLSX(filename, new_row):
    """Append entries as "phishing" rows to sheet 'Hoja1' of a workbook.

    :param filename: path to an existing .xlsx file (overwritten in place)
    :param new_row: iterable of strings; entries shorter than 12 characters
        are treated as noise, printed, and skipped (original heuristic kept)
    """
    import openpyxl
    wb = openpyxl.load_workbook(filename=filename)
    # FIX: Workbook.get_sheet_by_name() is deprecated and removed in
    # openpyxl 3.x; use the mapping interface instead.
    ws = wb['Hoja1']
    row = ws.max_row + 1
    for entry in new_row:
        if len(entry) < 12:
            print("--------------")
            print(entry)
        else:
            ws.cell(row=row, column=1, value=entry)
            ws.cell(row=row, column=2, value="phishing")
            row += 1
    wb.save(filename)
def readXML(filename):
    """Parse an e-mail corpus XML file and return decoded message texts.

    Each child element's text is stripped of non-ASCII characters,
    quoted-printable decoded, then interpreted as latin-1.

    NOTE(review): assumes every child element has text — ``e.text`` being
    None would raise inside replaceNonAscii; confirm the input format.
    """
    import quopri
    from lxml import etree
    with open(filename, encoding='utf-8') as f:
        root = etree.parse(f)
    text = []
    # FIX: getchildren() is deprecated (removed from ElementTree and
    # deprecated in lxml); iterate the element directly.
    for e in root.getroot():
        aux = replaceNonAscii(e.text)
        aux = quopri.decodestring(aux)
        aux = aux.decode('latin-1')
        text.append(aux)
    return text
if __name__ == "__main__":
print("read")
#X,y = readXLSX('Libro1.xlsx')
text = readXML('spam.xml')
writeXLSX('corpus.xlsx',text)
|
# Spanish "hello world" scratch script exercising print and arithmetic.
print('hola mundo :D')
print(5-2)    # subtraction -> 3
print(10//4)  # floor division -> 2
print(2**4)   # exponentiation -> 16
print(4+3)    # addition -> 7
x = 10
# FIX: removed a stray trailing '|' (table-dump residue) that made this
# line a syntax error.
mi_string = "hola"
#!/usr/bin/python
################################################################################
# 2137f054-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
    """Compliance finding for the Windows driver-signing registry policy.

    check() reads the Policy value under
    HKLM:\\Software\\Microsoft\\Driver Signing and reports compliance when
    it is a single byte equal to 1; fix() writes that value.
    """

    def __init__(self):
        # Report lines produced by the most recent check().
        self.output = []
        # Result of the most recent check(); False until proven compliant.
        self.is_compliant = False
        self.uuid = "2137f054-5cc5-11e4-af55-00155d01fe08"

    def check(self, cli):
        """Return True when the registry value is exactly one byte == 1."""
        policy_bytes = cli.get_reg_binary(r'HKLM:\Software\Microsoft\Driver Signing', 'Policy')
        self.output = [r'HKLM:\Software\Microsoft\Driver Signing', ('Policy=')] + policy_bytes
        # base-0 int() accepts '0x..'-style strings as returned by the CLI.
        self.is_compliant = len(policy_bytes) == 1 and int(policy_bytes[0], 0) == 1
        return self.is_compliant

    def fix(self, cli):
        """Create the key path (idempotently) and set Policy to binary 1."""
        cli.powershell(r"New-Item -path 'HKLM:\Software'")
        cli.powershell(r"New-Item -path 'HKLM:\Software\Microsoft'")
        cli.powershell(r"New-Item -path 'HKLM:\Software\Microsoft\Driver Signing'")
        cli.powershell(r"Set-ItemProperty -path 'HKLM:\Software\Microsoft\Driver Signing' -name 'Policy' -Type Binary -value 1")
|
#!/usr/bin/env python
"""
Continuous backup and recoverable trash can for Git
The aim of ``git-blackhole`` is to connect any of your repositories to
a single repository ("blackhole" repository) to which you can push any
commits --- WIP commits, branches no longer needed, and useless
stashes.
There are three main features of ``git-blackhole``:
1. **Continuous backup**. You can use ``git-blackhole`` to
continuously backup commits in background to a remote repository
(or actually any repository) called *blackhole repository*.
Run ``git blackhole init <url>`` and then setup ``post-commit``
hook to run ``git blackhole push``. See the help of ``git
blackhole init`` and ``git blackhole push`` for the details.
Note that blackhole repository at ``<url>`` can be used for
arbitrary number of local repositories. You just need to setup a
single repository once.
By combining with git-wip_ command, you can backup/share
uncommitted changes as well.
2. **Sharing local repository state**. Since ``git-blackhole`` can
push commits and the location of HEAD to the blackhole repository,
the state of a repository in one machine is accessible from other
machines.
For example, if you forget to push a commit from your desktop (to
the usual remote) but want to resume the work from your laptop,
``git blackhole warp`` would be helpful.
3. **Recoverable trash can**. Use ``git blackhole trash-branch`` and
``git blackhole trash-stashes`` to remove branches and stashes from
the local repository after sending them to the remote blackhole
repository. They are stored remotely as ordinary branches so that
you can recover them easily.
.. _git-wip: https://github.com/bartman/git-wip
"""
from __future__ import print_function
import os
import sys
from subprocess import check_output, CalledProcessError
# Package metadata.
__version__ = '0.1.1.dev1'  # PEP 440 development release
__author__ = 'Takafumi Arakaki'
__license__ = 'BSD-2-Clause'  # SPDX short identifier
class BlackholeError(RuntimeError):
    """Error type raised for git-blackhole specific failures."""
def make_run(verbose, dry_run, check=True):
    """Build a subprocess runner honoring verbose/dry-run flags.

    The returned ``run(*command, **kwds)``:
    - prints the command first when `verbose` (with a `>`-redirect hint
      if a ``stdout`` kwarg is present);
    - captures and returns output when called with ``out=True``
      (note: this executes even under `dry_run`, as in the original);
    - otherwise executes via check_call (or plain call when not `check`),
      or does nothing under `dry_run`.
    """
    from subprocess import check_call, call

    def run(*command, **kwds):
        capture = kwds.pop('out', False)
        if verbose:
            shown = command
            if 'stdout' in kwds:
                shown = command + ('>', kwds['stdout'].name)
            print(' '.join(shown))
            sys.stdout.flush()
        if capture:
            return check_output(command, **kwds)
        if dry_run:
            return None
        runner = check_call if check else call
        return runner(command, **kwds)

    return run
def getprefix(type, info=None):
    """Return the remote ref prefix ``<type>/<host>/<repokey>``.

    Falls back to getrecinfo() when `info` is missing or empty.
    """
    meta = info or getrecinfo()
    return '{0}/{1[host]}/{1[repokey]}'.format(type, meta)
def getrepopath():
    """Return (absolute repo toplevel, its path relative to $HOME)."""
    toplevel = check_output(['git', 'rev-parse', '--show-toplevel']).decode().rstrip()
    return toplevel, os.path.relpath(toplevel, os.path.expanduser('~'))
def getrecinfo(remote='blackhole'):  # TODO: make `remote` mandatory
    """Collect host/repo identification used to namespace blackhole refs.

    The repo key defaults to the repo path relative to $HOME unless
    overridden by the ``blackhole.<remote>.repokey`` git config.
    """
    from socket import gethostname
    repo, relpath = getrepopath()
    repokey = getconfig('blackhole.{}.repokey'.format(remote)) or relpath
    return {
        'host': gethostname(),
        'repo': repo,
        'repokey': repokey,
        'git_blackhole': __version__,
    }
def getbranches():
    """Return (all local branch names, names checked out in a worktree).

    A branch line carries a worktree path as a second column exactly when
    it is checked out somewhere.
    """
    listing = check_output(
        ["git", "branch", "--list", "--format=%(refname:short) %(worktreepath)"]
    ).decode()
    branches = []
    checkedout = []
    for line in listing.splitlines():
        fields = line.split(maxsplit=1)
        branches.append(fields[0])
        if len(fields) == 2:
            checkedout.append(fields[0])
    return branches, checkedout
def getrefs():
    """Yield [sha1, refname] pairs for every ref reported by `git show-ref`."""
    for line in check_output(['git', 'show-ref']).decode().splitlines():
        yield line.rstrip().split(None, 1)
def getrefnames():
    """Return the names of all refs (second column of `git show-ref`)."""
    return [name for (_sha1, name) in getrefs()]
def getconfig(name, aslist=False):
    """Read a git config value, or None when it is unset.

    With `aslist`, returns every value of a multi-valued key. Uses
    NUL-terminated output so values may contain newlines.
    """
    getter = ['--get-all'] if aslist else ['--get']
    try:
        raw = check_output(['git', 'config', '--null'] + getter + [name]).decode()
    except CalledProcessError as err:
        # Exit code 1 means "key not found"; anything else is a real error.
        if err.returncode != 1:
            raise
        return None
    if aslist:
        return raw.split('\0')[:-1]
    return raw.rstrip('\0')
def check_communicate(cmd, input, **kwds):
    """
    Run ``Popen(cmd, **kwds).communicate(input)`` and bark on an error.

    >>> check_communicate(['cat'], 'hey') == b'hey'
    True
    """
    from subprocess import Popen, PIPE
    kwds.setdefault('stderr', PIPE)
    payload = input
    if payload is not None and not isinstance(payload, bytes):
        payload = payload.encode()
    proc = Popen(cmd, stdin=PIPE, stdout=PIPE, **kwds)
    stdout, stderr = proc.communicate(payload)
    if proc.returncode != 0:
        # Prefer stderr for the error payload when it was captured.
        raise CalledProcessError(proc.returncode, cmd,
                                 stdout if stderr is None else stderr)
    return stdout
def git_annot_commit(message, parent):
    """
    Make a commit with `message` on top of `parent` commit.

    This is an "annotation commit" in the sense that it adds no change
    to the `parent` version: it reuses the parent's tree and only adds a
    commit whose message can carry additional information (annotation)
    about the `parent` commit.

    See:
    * https://git-scm.com/book/en/v2/Git-Internals-Git-Objects
    * man git-rev-parse
    * man git-commit-tree
    """
    tree_ref = check_output(['git', 'rev-parse', '{0}^{{tree}}'.format(parent)])
    tree_ref = tree_ref.decode().rstrip('\n')
    new_rev = check_communicate(['git', 'commit-tree', tree_ref, '-p', parent],
                                message)
    return new_rev.decode().rstrip('\n')
def git_json_commit(heading, obj, parent):
    """Record `obj` as JSON inside an annotation commit on top of `parent`."""
    import json
    body = "GIT-BLACKHOLE: {}\n\nGIT-BLACKHOLE-JSON:\n{}" \
        .format(heading, json.dumps(obj))
    return git_annot_commit(body, parent)
def parse_json_message(message):
    """Inverse of git_json_commit's message format.

    :return: tuple (heading, parsed JSON object)
    """
    import json
    first, _blank, marker, payload = message.split('\n', 3)
    prefix = 'GIT-BLACKHOLE: '
    assert first.startswith(prefix)
    assert marker == 'GIT-BLACKHOLE-JSON:'
    return (first[len(prefix):], json.loads(payload))
def cmd_push(remote, force=False, verify=None):
    """Build a ``git push`` command line as a list of arguments.

    `verify` is tri-state: exactly True adds --verify, exactly False adds
    --no-verify, anything else (incl. None) adds neither.
    """
    parts = ['git', 'push']
    if force:
        parts.append('--force')
    if verify is True:
        parts.append('--verify')
    elif verify is False:
        parts.append('--no-verify')
    parts.append(remote)
    return parts
def trash_commitish(commitish, remote, info, headingtemp,
                    verbose, dry_run, **kwds):
    """
    Push `commitish` to `remote` trash.

    :param commitish: revision to trash
    :param remote: name of the blackhole remote
    :param info: extra metadata merged into the annotation JSON
    :param headingtemp: format template for the annotation heading
    :param kwds: forwarded to cmd_push (force/verify)
    :return: the refspec that was pushed
    :raises BlackholeError: when the remote URL is not configured
    """
    run = make_run(verbose, dry_run)
    prefix = getprefix('trash')
    # NOTE: this rev is overwritten below; the call still serves to
    # validate that `commitish` resolves (check_output raises otherwise).
    rev = check_output(['git', 'rev-parse', commitish]).strip()
    url = getconfig('remote.{0}.url'.format(remote))
    if url is None:
        raise BlackholeError(
            "Cannot find blackhole remote URL.\n"
            "Please run `git blackhole init` first.\n"
            "(Note: remote.{}.url is not configured.)"
            .format(remote))
    info = dict(info, **getrecinfo())
    heading = headingtemp.format(**info)
    # Wrap the commit in an annotation commit carrying the JSON metadata.
    rev = git_json_commit(heading, info, commitish)
    # Shard the trash branch name by the first two hex chars of the rev.
    refspec = '{0}:refs/heads/{1}/{2}/{3}'.format(rev, prefix,
                                                  rev[:2], rev[2:])
    run(*cmd_push(url, **kwds) + [refspec])
    return refspec
def trashinfo(rev):
    """Build an info dict for the trash annotation commit `rev`.

    Includes the JSON metadata plus the heading, the annotation rev
    itself ("rev_info") and the trashed commit ("rev" = parent).
    """
    message = check_output(['git', 'show', '--format=format:%B', rev]).decode()
    heading, obj = parse_json_message(message)
    original = check_output(['git', 'rev-parse', rev + '^']).decode().strip()
    return dict(obj, heading=heading, rev_info=rev, rev=original)
def gettrashes():
    """Return info dicts for all locally fetched trash refs."""
    listing = check_output(['git', 'rev-parse', '--glob=refs/bh/trash/*'])
    return [trashinfo(rev) for rev in listing.decode().splitlines()]
def show_trashes(trashes, verbose):
    """Print one line per trash entry; with `verbose`, also print the
    metadata keys beyond the standard heading/rev/version ones."""
    hidden = {'heading', 'rev', 'git_blackhole'}
    for entry in trashes:
        print(entry['rev'])
        if not verbose:
            continue
        for key in set(entry) - hidden:
            print(' ', key, ': ', entry[key], sep='')
def git_stash_list():
    """Return `git stash list` lines as "<reflog-selector> <commit-hash>"."""
    # man git-log > PRETTY FORMATS
    raw = check_output(["git", "stash", "list", "--format=%gD %H"])
    return raw.decode().splitlines()
def parse_stash(line):
    """
    Parse a line of `git_stash_list` output.

    >>> parse_stash('refs/stash@{0} 29453bf380ff2e3aabf932a08287a162bc12d218')
    (0, 'refs/stash@{0}', '29453bf380ff2e3aabf932a08287a162bc12d218')
    """
    selector, sha1 = line.split()
    # NOTE: lstrip/rstrip strip character *sets*, not prefixes; this works
    # because the stash index digits contain none of the stripped chars.
    index = int(selector.lstrip('refs/stash@{').rstrip('}'))
    return (index, selector, sha1)
def parse_range(stash_range):
    """
    Parse stash range syntax

    >>> in_range = parse_range('0, 3-5, 8-')
    >>> for i in range(11):
    ...     print('{0} {1}'.format(i, in_range(i)))
    0 True
    1 False
    2 False
    3 True
    4 True
    5 True
    6 False
    7 False
    8 True
    9 True
    10 True
    """
    if not stash_range:
        # No range given: everything matches.
        return lambda _: True

    def bounds(chunk):
        # "a-b" -> (a, b); "a-" -> (a, None); "-b" -> (0, b); "n" -> (n, n)
        if '-' not in chunk:
            return (int(chunk), int(chunk))
        low, high = (part.strip() for part in chunk.split('-'))
        return (int(low) if low else 0, int(high) if high else None)

    ranges = [bounds(chunk) for chunk in stash_range.split(',')]

    def in_range(num):
        return any(
            low <= num if high is None else low <= num <= high
            for (low, high) in ranges
        )

    return in_range
def refspecs_for_stashes(num, info=None):
    """
    Compile refspecs for stashes and and return as a list strings.

    >>> refspecs_for_stashes(3, info=dict(
    ...     host='myhost',
    ...     repokey='local/repo',
    ... ))                          # doctest: +NORMALIZE_WHITESPACE
    ['stash@{0}:refs/heads/stash/myhost/local/repo/0',
     'stash@{1}:refs/heads/stash/myhost/local/repo/1',
     'stash@{2}:refs/heads/stash/myhost/local/repo/2']
    """
    prefix = getprefix('stash', info=info)
    template = r'stash@{{{0}}}:refs/heads/' + prefix + '/{0}'
    return [template.format(i) for i in range(num)]
def refspecs_from_globs(globs, refs=None, info=None):
    """
    Compile refspecs for stashes and and return as a list strings.

    >>> refspecs_from_globs(
    ...     ['refs/wip/*'],
    ...     refs=[
    ...         'refs/heads/master',
    ...         'refs/remotes/wip/master',
    ...         'refs/wip/master',
    ...     ],
    ...     info=dict(
    ...         host='myhost',
    ...         repokey='local/repo',
    ...     ))                      # doctest: +NORMALIZE_WHITESPACE
    ['refs/wip/master:refs/wip/myhost/local/repo/master']
    """
    import fnmatch
    info = info or getrecinfo()
    known = refs or getrefnames()
    matched = []
    for pattern in globs:
        matched.extend(fnmatch.filter(known, pattern))
    # Keep only real refs, and never forward remote-tracking refs.
    matched = fnmatch.filter(matched, 'refs/*')
    matched = [ref for ref in matched if not ref.startswith('refs/remotes/')]
    specs = []
    for ref in matched:
        (_, reftype, rest) = ref.split('/', 2)
        prefix = getprefix(reftype, info=info)
        specs.append('{0}:refs/{1}/{2}'.format(ref, prefix, rest))
    return specs
def mangle_relpath(relpath):
    """
    Mangle a path `relpath` so that it can be used for git branch name.

    >>> mangle_relpath('spam/egg')
    'spam/egg'
    >>> mangle_relpath('spam/.egg.spam')
    'spam/_egg.spam'
    >>> mangle_relpath('.spam/egg')
    '_spam/egg'
    """
    # Replace a leading '.' of any path component with '_' (git rejects
    # branch components starting with '.').
    mangled = relpath.replace(os.path.sep + '.', os.path.sep + '_')
    if mangled.startswith('.'):
        mangled = '_' + mangled[1:]
    return mangled
def is_bad_branch_name(branch):
    """True when `branch` contains a path component starting with '.'
    (such names are rejected by git)."""
    return (os.path.sep + '.') in branch
def cli_init(name, url, verbose, dry_run, repokey=None, mangle='default',
             _prefix=None):
    """
    Add blackhole remote at `url` with `name`.

    This command runs ``git remote add <name> <url>`` and configure
    appropriate `remote.<name>.fetch` and `remote.<name>.push`
    properties so that remote blackhole repository at `url` acts
    as if it is a yet another remote repository.

    To be more precise, each local branch is related to the branch at
    the blackhole remote with the prefix ``heads/$HOST/$REPOKEY/``
    where ``$HOST`` is the name of local machine and ``$REPOKEY`` is
    the path of the repository relative to ``$HOME``.

    :param repokey: explicit repo key overriding the $HOME-relative path
    :param mangle: 'always'/'auto' mangle hidden-dir components, else off
    :param _prefix: internal override of the computed heads prefix
    :return: 1 on a rejected (hidden-directory) prefix, else None
    """
    run = make_run(verbose, dry_run)
    info = None
    # Mangle the repo key when forced, or when 'auto' detects that the
    # default prefix would contain a '.'-leading (invalid) component.
    if (not repokey) and (
            mangle == 'always' or
            (mangle == 'auto' and is_bad_branch_name(getprefix('heads')))):
        _, relpath = getrepopath()
        repokey = mangle_relpath(relpath)
    if repokey:
        info = getrecinfo()
        info['repokey'] = repokey
    prefix = _prefix or getprefix('heads', info=info)
    if is_bad_branch_name(prefix):
        # git rejects branch components starting with '.'; bail out.
        print('git blackhole cannot be configured for repositories',
              'under a hidden directory (starting with ".")')
        return 1
    run('git', 'remote', 'add', name, url)
    run('git', 'config', 'remote.{0}.fetch'.format(name),
        '+refs/heads/{0}/*:refs/remotes/{1}/*' .format(prefix, name))
    run('git', 'config', 'remote.{0}.push'.format(name),
        '+refs/heads/*:{0}/*'.format(prefix))
    if repokey:
        # Persist the custom key so getrecinfo() picks it up later.
        run('git', 'config', 'blackhole.{}.repokey'.format(name), repokey)
def cli_warp(host, repokey, name, remote, url, **kwds):
    """
    Peek into other repositories through the blackhole.

    Configures a new remote (default name ``bh_<host>``) whose prefix
    points at another host's (or repokey's) branches in the blackhole.

    :return: 2 when neither host nor repokey is given; 1 when no URL can
        be determined; otherwise cli_init's return value.
    """
    if not (host or repokey):
        print('need HOST or --repokey=REPOKEY')
        return 2
    if not url:
        url = getconfig('remote.{0}.url'.format(remote))
        if url is None:
            print('need --url in an uninitialized repository')
            return 1
    info = getrecinfo()
    info.update(
        host=host or info['host'],
        repokey=repokey or info['repokey'],
    )
    prefix = getprefix('heads', info)
    if not name:
        # FIX: use the resolved host. The original did `'bh_' + host`,
        # which raised TypeError when only --repokey was given (host None).
        name = 'bh_' + info['host']
    return cli_init(_prefix=prefix, name=name, url=url, **kwds)
def cli_push(verbose, dry_run, ref_globs, remote, skip_if_no_blackhole,
             **kwds):
    """
    Push branches and HEAD forcefully to blackhole `remote`.

    Note that local HEAD is pushed to the remote branch named
    ``heads/$HOST/$REPOKEY/HEAD`` (see help of ``git blackhole init``)
    instead of real remote HEAD. This way, if the blackhole remote is
    shared with other machine, you can recover the HEAD at ``$HOST``.

    It is useful to call this command from the ``post-commit`` hook::

        nohup git blackhole push --no-verify &> /dev/null &

    See also `githooks(5)`.

    To push revisions created by git-wip_ command, add option
    ``--ref-glob='refs/wip/*'``.

    :param skip_if_no_blackhole: silently return when no blackhole remote
        is configured (useful in hooks)
    :param kwds: forwarded to cmd_push (e.g. verify)
    """
    if getconfig('remote.{0}.url'.format(remote)) is None:
        if skip_if_no_blackhole:
            return
        else:
            print("git blackhole is not configured.")
            print("Run: git blackhole init URL")
            return 1
    # check=False: a partially failing push should not raise.
    run = make_run(verbose, dry_run, check=False)
    prefix = getprefix('heads')
    branches, _checkedout_branches = getbranches()
    # Build "git push" command options:
    cmd = cmd_push(remote=remote, force=True, **kwds)
    cmd.extend(branches)
    cmd.extend(refspecs_for_stashes(len(git_stash_list())))
    cmd.extend(refspecs_from_globs(ref_globs))
    # Explicitly specify destination (HEAD:HEAD didn't work):
    cmd.append('HEAD:refs/heads/{0}/HEAD'.format(prefix))
    return run(*cmd)
def cli_trash_branch(branches, verbose, dry_run, **kwds):
    """
    [EXPERIMENTAL] Save `branch` in blackhole `remote` before deletion.

    The `branch` is pushed to the branch of the blackhole `remote`
    named ``trash/$HOST/$REPOKEY/$SHA1[:2]/$SHA1[2:]`` where ``$HOST``
    is the name of local machine, ``$REPOKEY`` is the path of the
    repository relative to ``$HOME``, and ``$SHA1`` is the revision of
    the commit.  (To be more precise, ``$SHA`` is the revision of the
    commit recording the revision of `branch` and some meta
    information).

    Use ``git blackhole fetch-trash`` to retrieve all trashes from
    remote and store them locally.  Commands ``git blackhole
    ls-branch`` and ``git blackhole show-branch`` can be used to list
    and show trash commits.

    .. WARNING:: Commands to navigate through trashes (e.g., ``git
       blackhole show-branch``) are still preliminary.  Furthermore,
       how trash metadata is stored may change in the future.
       However, since trashes are ordinary git branches in remote,
       they can be dealt with standard git commands.
    """
    # FIX: the following used to be a second bare string literal right
    # after the docstring -- a no-op expression statement.  Kept as a
    # real comment instead:
    # - FIXME: Maybe I should remove ``$HOST/$REPOKEY`` part and use
    #   branch named ``trash/$REV[:2]/$REV[2:]``, since the JSON has all
    #   the info I need.
    run = make_run(verbose, dry_run)
    _branches, checkedout_branches = getbranches()
    # Trash each branch independently; remember the last failure code so
    # one bad branch does not stop the others from being processed.
    final_code = None
    for branch in branches:
        code = trash_branch(
            run, checkedout_branches, branch,
            verbose=verbose, dry_run=dry_run, **kwds
        )
        if code:
            final_code = code
    return final_code
def trash_branch(
    run, checkedout_branches, branch, verbose, dry_run, remote, remove_upstream, **kwds
):
    """
    Push `branch` to blackhole `remote` as a trash commit, then delete
    it locally (and, when `remove_upstream` is set, delete its upstream
    branch too).

    Returns 1 when `branch` is currently checked out (nothing is done);
    otherwise None.
    """
    if branch in checkedout_branches:
        print("Cannot trash the branch '{0}' which you are currently on."
              .format(branch))
        return 1
    if remove_upstream:
        # Look the upstream up *before* the local branch is deleted,
        # since deletion may drop the branch.<name>.* config section.
        upstream_repo = getconfig('branch.{0}.remote'.format(branch))
        upstream_branch = getconfig('branch.{0}.merge'.format(branch))
    trash_commitish(
        branch, remote, dict(command='trash-branch', branch=branch),
        'Trash branch "{branch}" at {host}:{repo}',
        verbose, dry_run, **kwds)
    run('git', 'branch', '--delete', '--force', branch)
    if remove_upstream:
        # BUG FIX: also require upstream_branch.  branch.<name>.remote
        # can be set while branch.<name>.merge is missing, which
        # previously crashed on ':' + None.
        if upstream_repo is None or upstream_branch is None:
            print('Not removing upstream branch as upstream is'
                  ' not configured.')
        else:
            # Pushing an empty source to ":branch" deletes it upstream.
            run('git', 'push', upstream_repo, ':' + upstream_branch)
def cli_trash_stash(remote, stash_range, keep_stashes,
                    verbose, dry_run, **kwds):
    """
    [EXPERIMENTAL] Save stashes in blackhole `remote` before deletion.

    It works as (almost) the same way as ``git blackhole trash-branch``.

    Several stashes can be specified in `stash_range`.  It takes
    single numbers (e.g., 3) and ranges (e.g., 3-5 or 8-) separated by
    commas.  Each range is in the form ``x-y`` which selects stashes
    ``x, x+1, x+2, ..., y``.  The upper limit ``y`` can be omitted,
    meaning "until the last stash".  For example, when you have
    stashes 0 to 10, ``git blackhole trash-stash 0,3-5,8-`` removes
    stashes 0, 3, 4, 5, 8, 9, and 10.
    """
    run = make_run(verbose, dry_run)
    in_range = parse_range(stash_range)
    # Keep only the stashes whose index falls in the requested range.
    stashes = [s for s in map(parse_stash, git_stash_list())
               if in_range(s[0])]
    if not stashes:
        print('No stash is found.')
        return
    # Using "git stash drop stash@{SHA1}" is unreliable because
    # sometime git confuses SHA1 with date (e.g., SHA1 could starts
    # with "1d").  So "stash@{N}" must be used.  However, "N" would
    # change if newer stashes are popped.  Hence `reversed`.
    for (num, raw, sha1) in reversed(stashes):
        # NOTE(review): this in_range() re-check is redundant -- the
        # list comprehension above already filtered on it.
        if in_range(num):
            stash = 'stash@{{{0}}}'.format(num)
            trash_commitish(
                sha1, remote, dict(command='trash-stash'),
                'Trash a stash at {host}:{repo}',
                verbose, dry_run, **kwds)
            if not keep_stashes:
                run('git', 'stash', 'drop', stash)
def cli_fetch_trash(remote, verbose, dry_run):
    """
    Fetch trashes from remote to ``refs/bh/trash/``.
    """
    run = make_run(verbose, dry_run)
    # Match trash branches from *any* host recorded for this repokey.
    info = dict(getrecinfo(), host='*')
    prefix = getprefix('trash', info)
    # BUG FIX: honor the --remote option; the remote name 'blackhole'
    # was hard-coded here while every other command respects --remote.
    out = run('git', 'ls-remote', remote,
              'refs/heads/' + prefix + '/*', out=True)
    refs = [l.split(None, 1)[1] for l in out.decode().splitlines()]
    cmd = ['git', 'fetch']
    if verbose:
        cmd.append('--verbose')
    cmd.append(remote)
    cmd.append('--')
    # Map each remote trash ref .../$SHA[:2]/$SHA[2:] onto the local
    # ref refs/bh/trash/$SHA[:2]/$SHA[2:].
    cmd.extend(
        '{0}:refs/bh/trash/{1[0]}/{1[1]}'.format(r, r.rsplit('/', 2)[-2:])
        for r in refs
    )
    run(*cmd)
def cli_ls_trash(verbose, dry_run):
    """
    List trashes fetched by ``git blackhole fetch-trash``.
    """
    # dry_run is accepted for CLI uniformity but ignored: listing has
    # no side effects.
    show_trashes(gettrashes(), verbose)
def cli_show_trash(verbose, dry_run):
    """
    Run ``git show`` on trashes fetched by ``git blackhole fetch-trash``.
    """
    # Collect the revisions of every locally fetched trash commit and
    # show them all in a single "git show" invocation.
    revs = [t['rev'] for t in gettrashes()]
    run = make_run(verbose, dry_run)
    run('git', 'show', *revs)
def cli_rm_local_trash(verbose, dry_run, refs, all):
    """
    Remove trashes fetched by ``git blackhole fetch-trash``.
    """
    run = make_run(verbose, dry_run)
    # NOTE: `all` shadows the builtin, but the name is fixed by the
    # argparse destination of the --all option.
    if all:
        # Expand to every local trash ref under refs/bh/trash/.
        out = check_output(['git', 'rev-parse', '--symbolic',
                            '--glob=refs/bh/trash/*'])
        refs = out.decode().splitlines()
    for r in refs:
        run('git', 'update-ref', '-d', r)
def make_parser(doc=__doc__):
    """
    Build the ``git blackhole`` argument parser.

    Each sub-command is registered through ``subp()`` and wired to its
    ``cli_*`` function via ``set_defaults(func=...)``; ``main()`` later
    dispatches on that ``func`` attribute.
    """
    import argparse

    class FormatterClass(argparse.RawDescriptionHelpFormatter,
                         argparse.ArgumentDefaultsHelpFormatter):
        # Show raw (un-rewrapped) description text *and* argument
        # defaults in --help output.
        pass

    parser = argparse.ArgumentParser(
        formatter_class=FormatterClass,
        description=doc)
    parser.add_argument(
        '--version', action='version',
        version='%(prog)s {} from {}'.format(__version__, __file__))
    parser.add_argument('--debug', default=False, action='store_true')
    subparsers = parser.add_subparsers()

    def subp(command, func):
        # Register one sub-command; the first non-empty line of the
        # function's docstring becomes the short help text.
        doc = func.__doc__
        title = None
        for title in filter(None, map(str.strip, (doc or '').splitlines())):
            break
        p = subparsers.add_parser(
            command,
            formatter_class=FormatterClass,
            help=title,
            description=doc)
        p.set_defaults(func=func)
        # Options common to every sub-command:
        p.add_argument(
            '--verbose', '-v', default=False, action='store_true',
            help='print git commands to run')
        p.add_argument(
            '--dry-run', '-n', default=False, action='store_true',
            help='do nothing when given. Use it with --verbose to see '
            'what is going to happen.')
        return p

    def push_common(p):
        # Options shared by the sub-commands that end up running
        # "git push" (push, trash-branch, trash-stash).
        p.add_argument('--verify', default=None, action='store_true',
                       help='passed to git-push')
        p.add_argument('--no-verify', dest='verify', action='store_false',
                       help='passed to git-push')

    p = subp('init', cli_init)
    p.add_argument('--name', default='blackhole',
                   help='name of the remote blackhole repository')
    g = p.add_mutually_exclusive_group()
    g.add_argument('--mangle', nargs='?', choices=['never', 'always', 'auto'],
                   const='auto', default='never',
                   help='Replace a dot right after the path separator'
                   ' (hidden directories) to underscore "_" and use it as'
                   ' REPOKEY.'
                   ' --mangle[=auto] means to do it only when necessary.'
                   ' --mangle=always means to always set REPOKEY.'
                   ' --mangle=never means no replacement and fail with an'
                   ' error for hidden directories.')
    g.add_argument('--repokey',
                   help='Set arbitrary REPOKEY for the location of this'
                   ' repository in the blackhole repository.')
    p.add_argument('url',
                   help='URL of the remote blackhole repository')

    p = subp('warp', cli_warp)
    p.add_argument('--name', default='',
                   help='Name of the repository at <HOST>:<REPOKEY>, '
                   ' accessed through the blackhole.'
                   ' Set to "bh_<HOST>" if empty.')
    p.add_argument('--url',
                   help='URL of the remote blackhole repository'
                   ' Use remote.<REMOTE>.url if not given.')
    p.add_argument('--remote', default='blackhole',
                   help='name of the remote blackhole repository')
    p.add_argument('--repokey',
                   help='The repository relative to the $HOME at <HOST>.'
                   ' Use current repository root if empty.')
    p.add_argument('host', default='', metavar='HOST', nargs='?',
                   help='The host name of the repository.'
                   ' Use current host name if empty.')

    p = subp('push', cli_push)
    push_common(p)
    p.add_argument('--remote', default='blackhole',
                   help='name of the remote blackhole repository')
    # FIXME: Stop hard-coding remote name.  Use git config system to
    # set default.
    p.add_argument('--ref-glob', action='append', default=[],
                   dest='ref_globs',
                   help='add glob patterns to be pushed, e.g., wip/*')
    p.add_argument('--ignore-error', action='store_true',
                   help='quick with code 0 on error')
    p.add_argument('--skip-if-no-blackhole', action='store_true',
                   help='do nothing if git blackhole is not configured')

    p = subp('trash-branch', cli_trash_branch)
    push_common(p)
    p.add_argument('branches', metavar='branch', nargs='+',
                   help='branch to be removed')
    p.add_argument('--remote', default='blackhole',  # FIXME: see above
                   help='name of the remote blackhole repository')
    p.add_argument('--remove-upstream', '-u', action='store_true',
                   help='remove branch in upstream repository.'
                   ' i.e., remove branch.<branch>.merge'
                   ' at branch.<branch>.remote. ignored if no remote'
                   ' is set.')

    p = subp('trash-stash', cli_trash_stash)
    push_common(p)
    p.add_argument('--remote', default='blackhole',  # FIXME: see above
                   help='name of the remote blackhole repository')
    p.add_argument(
        'stash_range',
        help='stashes to trash. It is comma-separated low-high range'
        ' (inclusive). e.g.: 0,3-5,8-')
    p.add_argument(
        '--keep-stashes', '-k', default=False, action='store_true',
        help='when this option is given, do not remove local stashes.')

    p = subp('fetch-trash', cli_fetch_trash)
    p.add_argument('--remote', default='blackhole',  # FIXME: see above
                   help='name of the remote blackhole repository')

    p = subp('ls-trash', cli_ls_trash)
    p = subp('show-trash', cli_show_trash)

    p = subp('rm-local-trash', cli_rm_local_trash)
    p.add_argument('--all', '-a', action='store_true',
                   help='remove all local copy of trashes')
    p.add_argument('refs', metavar='ref', nargs='*',
                   help='trash refs to be removed.')

    return parser
def main(args=None):
    """
    Entry point: parse arguments, dispatch to the selected ``cli_*``
    function and translate its return value / exceptions into an exit
    code.
    """
    parser = make_parser()
    ns = parser.parse_args(args)
    debug = ns.__dict__.pop('debug')
    ignore_error = ns.__dict__.pop('ignore_error', False)
    # BUG FIX: in Python 3 sub-commands are optional by default, so
    # running "git blackhole" without one used to crash with a
    # TypeError (missing 'func').  Print help and exit instead.
    if not hasattr(ns, 'func'):
        parser.print_help()
        sys.exit(2)
    try:
        # FIXME: stop returning error code from cli_* functions
        code = (lambda func, **kwds: func(**kwds))(**vars(ns))
        if ignore_error:
            # NOTE(review): printed even when the command succeeded;
            # --ignore-error forces exit code 0 regardless -- confirm
            # whether the message should be conditional on `code`.
            print("ignoring the error")
            return
        sys.exit(code)
    except BlackholeError as err:
        if debug:
            raise
        print(err)
        sys.exit(1)
    except CalledProcessError as err:
        if debug:
            raise
        if ignore_error:
            print("ignoring the error")
            return
        # Offset subprocess exit codes so they are distinguishable from
        # our own 1/2 error codes.
        sys.exit(err.returncode + 122)


if __name__ == '__main__':
    main()
|
# Multiplication-table prompt loop: keep asking for a number and print
# its 1..10 table; a negative number says goodbye and stops.
SEPARADOR = '-=' * 30
while (valor := int(input('Quer ver a tabuada de qual valor? '))) >= 0:
    print(SEPARADOR)
    for fator in range(1, 11):
        print(f'{valor} x {fator} = {fator*valor}')
    print(SEPARADOR)
print('Adeus')
|
"""
Module to create topo for this example.
Piecewise linear canonical beach and solitary wave.
"""
from pyclaw.geotools import topotools
import numpy as np
# Beach geometry: z = -slope*x for x < x0 (shoreline at x=0), then a
# flat bottom at z = -1 offshore -- see topo() below.
x0 = 19.85          # x where the sloping beach meets the flat bottom
slope = 1./x0       # beach slope, chosen so depth is exactly 1 at x0
xs = 22.            # NOTE(review): unused here -- presumably the
                    # solitary wave's initial position; confirm.
H = 0.0185          # NOTE(review): unused here -- presumably the
                    # solitary wave height; confirm.
nxpoints = 91       # number of topo grid points in x
nypoints = 4        # number of topo grid points in y
xlower = -20.e0     # x-extent of the topo grid
xupper = 60.e0
dx = (xupper-xlower)/(nxpoints-1)   # grid spacing in x
ylower = 0.e0
yupper = (nypoints-1)*dx            # square cells: y-extent follows dx
def maketopo():
    """Write topography for the entire domain to ``beach.topotype2``."""
    topotools.topo2writer(
        "beach.topotype2", topo,
        xlower, xupper, ylower, yupper,
        nxpoints, nypoints,
    )
def topo(x, y):
    """Piecewise linear beach profile.

    Returns ``-slope*x`` on the beach (``x < x0``) and a constant
    ``-1`` offshore; the profile is independent of ``y``.
    """
    beach = -slope * x
    return np.where(x < x0, beach, -1.0)
if __name__=='__main__':
    # Generate the topography file when run as a script.
    maketopo()
|
import random
import re
import sys
import bs4
import discord
import requests
from discord.ext import commands # Bot Commands Frameworkのインポート
# from datetime import datetime
sys.path.append('../')
class Nirezi(commands.Cog, name="nirezi"):
    """Cog with moderation/utility message handlers for one guild.

    All handlers are keyed off raw message content in on_message();
    channel/role/guild IDs are hard-coded for the target server.
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_message(self, message):
        """Dispatch on message content: MCID registration, lookups,
        invite-link policing, DM relay and admin purge commands."""
        client = self.bot
        # local = self.bot.local
        mcs = message.channel.send          # shortcut: send to the source channel
        mention = message.author.mention
        # now = datetime.now()
        # time = now.strftime("%Y/%m/%d %H:%M:%S")
        if message.author.bot:  # ignore messages from bots
            return
        if isinstance(message.channel, discord.DMChannel):
            return
        if message.content.startswith("!"):
            return
        if message.guild.id == 621326525521723414:  # the "2-reji" guild
            # unnei_ca = [621334345579364372, 621326525521723418, 649193418492215306]  # announcements, getting-started, logs
            # unnei_ch = [621330963938410496, 625591106989588480, 643040530921553940]  # staff, staff commands, todo list
            # testyou = [627867724541853716, 632944517934481409]
            # log_ch = client.get_channel(640587255332732938)  # log channel
            if message.channel.id == 663636102145507330:  # skip the MC chat bridge channel
                return
            if message.channel.id == 621328380620701736:  # MCID application channel
                mcid_a = f'{message.content}'.replace('\\', '')
                # Valid MCIDs: ASCII letters, digits and underscores only.
                p = re.compile(r'^[a-zA-Z0-9_]+$')
                if p.fullmatch(message.content):
                    mcid = mcid_a.lower()
                    # Look the player up on the seichi ranking site.
                    url = f"https://ranking-gigantic.seichi.click/player/{mcid}"
                    try:
                        res = requests.get(url)
                        res.raise_for_status()
                        soup = bs4.BeautifulSoup(res.text, "html.parser")
                        td = soup.td
                        if f'{mcid}' in f'{td}':
                            # Reject duplicates already present in the log channel.
                            mcid_log_ch = client.get_channel(660809650027102209)
                            async for msg in mcid_log_ch.history():
                                mcid_log = msg.content[19:]
                                if mcid_log == mcid_a:
                                    faild = discord.Embed(description=f"{message.author}さん\n{mcid_a}はすでに報告されています、もしこれがバグなら2レジまで報告してください",
                                                          color=0xff0000)
                                    await mcs(embed=faild)
                                    return
                            # Grant member + temporary roles and acknowledge.
                            role = discord.utils.get(message.guild.roles, id=672006791474708490)
                            rinnzi = discord.utils.get(message.guild.roles, id=660825080602820618)
                            emoji = ['👍', '🙆']
                            await message.author.add_roles(role)
                            await message.author.add_roles(rinnzi)
                            await message.add_reaction(random.choice(emoji))
                            color = [0x3efd73, 0xfb407c, 0xf3f915, 0xc60000, 0xed8f10, 0xeacf13, 0x9d9d9d, 0xebb652, 0x4259fb, 0x1e90ff]
                            embed_mcid = discord.Embed(description=f'{message.author.display_name}のMCIDの報告を確認したよ!',
                                                       color=random.choice(color))
                            embed_mcid.add_field(name="MCID", value=mcid_a)
                            embed_mcid.set_author(name=message.author, icon_url=message.author.avatar_url, )  # set username+ID and avatar
                            channel = client.get_channel(646691005030203410)
                            await channel.send(embed=embed_mcid)
                            # Record "<author id> <mcid>" in the log channel.
                            await mcid_log_ch.send(f"{message.author.id} {mcid_a}")
                        else:
                            embed = discord.Embed(
                                description=f'{message.author} さん。\n入力されたMCIDは実在しないか、又はまだ一度も整地鯖にログインしていません。\n続けて間違った入力を行うと規定によりBANの対象になることがあります。',
                                color=0xff0000)
                            await message.channel.send(embed=embed)
                    except requests.exceptions.HTTPError:
                        await message.channel.send('requests.exceptions.HTTPError')
                else:
                    embed = discord.Embed(
                        description="MCIDに使用できない文字が含まれています'\n続けて間違った入力を行うと規定によりBANの対象になることがあります。",
                        color=0xff0000)
                    await message.channel.send(embed=embed)
            if message.content.startswith("/mcid"):
                # Look up every MCID reported by the given user ID.
                id = int(message.content[5:])
                user = client.get_user(id)
                mcid_reported = f"{user}さんのmcid\n"
                kazu = 0
                mcid_log_ch = client.get_channel(660809650027102209)
                if user is not None:
                    flag = False
                    async for msg in mcid_log_ch.history():
                        mcid_log = await mcid_log_ch.fetch_message(msg.id)
                        if mcid_log.content.startswith(str(id)):
                            # Log lines are "<author id> <mcid>"; slice off the ID prefix.
                            mcid_log2 = mcid_log.content[19:]
                            mcid_reported += f"{mcid_log2}\n"
                            kazu += 1
                            flag = True
                    if not flag:
                        await mcs(f"{user}さんはまだmcidを報告していません")
                    else:
                        await mcs(f"{mcid_reported}以上{kazu}個のmcidが報告されています")
                else:
                    await mcs("その方はこのサーバーにいません")
            if message.content == "/join":
                if message.channel.id == 672010326077734922:
                    role = discord.utils.get(message.guild.roles, id=621329653763932160)
                    await message.author.add_roles(role)
                    await mcs(f"{mention}役職を付与しました")
            if "discord.gg" in message.content:
                # Invite links are only allowed in one channel / a few categories.
                category_list = [621326525521723418, 621334345579364372, 621330415348613160, 621330763089969152, 649193418492215306]
                if message.channel.id == 621328600972525578 or message.channel.category_id in category_list:
                    pass
                else:
                    await message.delete()
                    await mcs(f"{mention}\n指定されたチャンネル以外で招待リンクを貼る行為は禁止されています")
                    await mcs("削除しました")
            if message.channel.id == 658686103276093440:  # DM relay channel
                # NOTE(review): assumes "<18-char user id><text>" -- Discord
                # snowflakes are not always 18 digits; confirm.
                user_id = message.content[:18]
                user = client.get_user(int(user_id))
                content = message.content[18:]
                await user.send(content)
            if message.content == "/delmsg":  # purge the whole channel
                if discord.utils.get(message.author.roles, id=621326896554311700):  # reject anyone but 2rz
                    await message.channel.purge()
                else:
                    await mcs("何様のつもり?")
            if message.content.startswith("/delmsg"):
                # NOTE(review): len() of the argument text, not int() of it --
                # "/delmsg 100" purges 4 messages; looks like a bug, confirm.
                kazu = len(message.content[7:])
                if discord.utils.get(message.author.roles, id=621326896554311700):
                    if kazu >= 1:
                        await message.channel.purge(limit=kazu)
            if message.content == "/test":
                await mcs("say hello!")

                def check(m):
                    # Accept only "hello" from the same channel.
                    return m.content == "hello" and m.channel == message.channel

                msg = await client.wait_for("message", check=check)
                await mcs("hello!")
def setup(bot):
    # Entry point used by discord.py's extension loader.
    bot.add_cog(Nirezi(bot))
|
#!/usr/bin/env python3
#from .logging import *
from BruteLoops.logging import *
from . import logging
from .brute_time import BruteTime
from . import sql
from .config import Config
from .db_manager import *
from sqlalchemy.orm.session import close_all_sessions
from pathlib import Path
from uuid import uuid4
from collections import namedtuple
from copy import deepcopy
from time import sleep,time
from types import FunctionType, MethodType
import traceback
import re
import signal
import logging
from time import time
# Error template: a prioritized username from the configuration is
# missing from the database.
UNKNOWN_PRIORITIZED_USERNAME_MSG = \
    'Prioritized username value supplied ' \
    'during configuration that does not a' \
    'ppear in the database. Insert this v' \
    'alue or remove it from the configura' \
    'tion: {username}'

# Error template: a prioritized password from the configuration is
# missing from the database.
UNKNOWN_PRIORITIZED_PASSWORD_MSG = \
    'Prioritized password value supplied ' \
    'during configuration that does not a' \
    'ppear in the database. Insert this v' \
    'alue or remove it from the configura' \
    'tion: {password}'

# Module-level logger placeholder (configured at runtime).
logger = None
class BruteForcer:
    '''Base object from which all other brute forcers will inherit.
    Provides all basic functionality, less brute force logic.
    '''

    def __init__(self, config, use_billiard=False):
        '''Initialize the BruteForcer object, including processes.

        - config - A BruteLoops.config.Config object providing all
          configuration parameters to proceed with the attack.
        - use_billiard - when True, use the billiard fork of
          multiprocessing for the worker pool (e.g. under Celery).
        '''
        if not config.validated: config.validate()

        # DB SESSION FOR MAIN PROCESS
        self.main_db_sess = config.session_maker.new()
        # Separate session for handle_outputs() so result handling does
        # not interfere with queries on the main session.
        self.handler_db_sess = config.session_maker.new()

        # ==============================
        # BASIC CONFIGURATION PARAMETERS
        # ==============================

        self.config = config
        self.presults = []      # pending AsyncResult objects from the pool
        self.pool = None        # worker process pool, created below
        self.attack = None      # sql.Attack row for this run, created below
        self.logger = getLogger('BruteLoops.BruteForcer',
            config.log_level, config.log_valid, config.log_invalid,
            config.log_general, config.log_file, config.log_stdout,
            config.log_stderr)
        self.logger.log(
            GENERAL_EVENTS,
            f'Initializing {config.process_count} process(es)'
        )

        # ===================================
        # LOG ATTACK CONFIGURATION PARAMETERS
        # ===================================

        self.logger.log(GENERAL_EVENTS,
            'Logging attack configuration parameters')

        config_attrs = [
            'authentication_jitter',
            'max_auth_jitter',
            'max_auth_tries',
            'stop_on_valid',
            'db_file',
            'log_file',
            'log_valid',
            'log_invalid',
            'log_general',
            'log_stdout',
            'log_stderr'
        ]

        for attr in config_attrs:
            self.logger.log(GENERAL_EVENTS,
                f'Config Parameter -- {attr}: '+str(getattr(self.config,attr)))

        if hasattr(self.config.authentication_callback, 'callback_name'):
            self.logger.log(GENERAL_EVENTS,
                f'Config Parameter -- callback_name: '+ \
                getattr(self.config.authentication_callback,
                    'callback_name'))

        # =============================================================
        # REASSIGN DEFAULT SIGNAL HANDLER AND INITIALIZE A PROCESS POOL
        # =============================================================

        # Ignore SIGINT while the pool forks so that worker processes do
        # not inherit the default handler; restored/replaced below.
        original_sigint_handler = signal.signal(signal.SIGINT,signal.SIG_IGN)

        if use_billiard:
            import billiard
            self.pool = billiard.Pool(processes=config.process_count)
        else:
            from multiprocessing.pool import Pool
            self.pool = Pool(processes=config.process_count)

        if not KeyboardInterrupt in self.config.exception_handlers:
            # Default SIGINT handler: shut the attack down cleanly and
            # exit with the signal number.
            def handler(sig,exception):
                print('SIGINT Captured -- Shutting down ' \
                      'attack\n')
                self.shutdown()
                print('Exiting')
                exit(sig)
            self.config.exception_handlers[KeyboardInterrupt] = handler

        if KeyboardInterrupt in self.config.exception_handlers:
            sigint_handler = config.exception_handlers[KeyboardInterrupt]
            sigint_class = sigint_handler.__class__
            if sigint_class != MethodType and sigint_class != FunctionType:
                # Non-function handlers must at least be callable objects.
                assert '__call__' in sigint_handler.__dict__, (
                    'Exception handler must implement __call__'
                )
                call_class = sigint_handler.__getattribute__('__call__').__class__
                assert call_class == FunctionType or call_class == MethodType, (
                    '__call__ must be of type FunctionType or MethodType'
                )
            signal.signal(signal.SIGINT, sigint_handler)
        # NOTE(review): dead branch -- the block above guarantees a
        # KeyboardInterrupt handler exists by this point.
        else: signal.signal(signal.SIGINT, original_sigint_handler)

        # =================
        # HANDLE THE ATTACK
        # =================

        current_time = BruteTime.current_time(format=str)
        self.logger.log(GENERAL_EVENTS,
            f'Beginning attack: {current_time}')

        # CREATE A NEW ATTACK
        self.attack = sql.Attack(start_time=BruteTime.current_time())
        self.main_db_sess.add(self.attack)
        self.main_db_sess.commit()

        # NOTE(review): redundant -- self.config was already assigned
        # above; harmless duplicate.
        self.config = config

        # Realign future jitter times with the current configuration
        self.realign_future_time()

    def handle_outputs(self, outputs):
        '''Handle outputs from the authentication callback. It expects a list of
        tuples/lists conforming to the following format:

        ```
        output_list = [
            (<OUTCOME>,<USERNAME>,<PASSWORD>),
            (<OUTCOME>,<USERNAME>,<PASSWORD>)
        ]
        ```

        In the structure below:

        - `OUTCOME` - is an integer value indicating if authentication was
          successful (1 for true, 0 for false)
        - `USERNAME` - string value of the username
        - `PASSWORD` - string value of the password

        Returns True when at least one valid credential was recovered.
        '''

        # ==================================================
        # DETERMINE AND HANDLE VALID_CREDENTIALS CREDENTIALS
        # ==================================================

        recovered = False
        for output in outputs:

            # ===============================
            # QUERY FOR THE TARGET CREDENTIAL
            # ===============================

            credential = self.handler_db_sess \
                .query(sql.Credential) \
                .join(sql.Username) \
                .join(sql.Password) \
                .filter(
                    sql.Username.value == output[1],
                    sql.Password.value == output[2],
                    sql.Username.recovered == False) \
                .first()

            # Skip results whose username was already recovered or that
            # no longer match a known credential pair.
            if not credential: continue

            credential.guessed=True

            # ======================
            # HANDLE THE CREDENTIALS
            # ======================

            cred = f'{output[1]}:{output[2]}'

            # Handle valid credentials
            if output[0]:
                recovered = True
                self.logger.log(VALID_CREDENTIALS,cred)

                # Update username to "recovered"
                credential.username.recovered=True

                # Update the credential to valid
                credential.valid=True

            # Credentials are no good
            else:
                # Update the credential to invalid
                credential.valid=False
                self.logger.log(CREDENTIAL_EVENTS,cred)

        # Commit the changes
        self.handler_db_sess.commit()

        return recovered

    def realign_future_time(self):
        '''Iterate over each imported username value and rejitter
        the future time based on the current max_authentication_jitter
        '''
        # Get all relevant username values: not yet recovered and
        # already targeted at least once (last_time > -1.0).
        usernames = self.main_db_sess.query(sql.Username) \
            .filter(
                sql.Username.recovered == False,
                sql.Username.last_time > -1.0,
            )
        # Iterate over each username
        for username in usernames:
            # If there's a max_auth_jitter configuration
            if self.config.max_auth_jitter:
                # Generate a new jitter value
                username.future_time = \
                    self.config.max_auth_jitter.get_jitter_future(
                        current_time=username.last_time
                    )
            # Otherwise, set it to the default value of -1.0
            else: username.future_time = -1.0
        # Commit the changes to the database
        self.main_db_sess.commit()

    def monitor_processes(self,ready_all=False):
        '''Iterate over each process in ```self.presults``` and wait
        for a process to complete execution. ```ready_all```
        indciates that monitoring will continue looping until all
        processes complete execution, otherwise a list of outputs
        will be returned after a single process is finished.

        Returns a list of output objects from the
        ```self.authentication_callback``` function and the first
        three elements should follow the pattern below:

        ```
        output = [
            0,        # indicator of successful authentication; 0=failure, 1=success
            username, # string representing the username used during authentication
            password  # string representing the password used during authentication
        ]
        ```
        '''
        outputs = []
        while True:
            # iterate over each result
            for result in self.presults:
                # act on results that are ready
                if result.ready():
                    # append outputs from the result
                    outputs.append(
                        result.get()
                    )
                    # remove the finished result
                    del(
                        self.presults[
                            self.presults.index(result)
                        ]
                    )
            # keep iterating should all results be cleared
            # and some still remain
            if (ready_all and self.presults) or (
                    len(self.presults) == self.config.process_count):
                sleep(.1)
                continue
            else:
                return outputs

    def do_authentication_callback(self, username, password, stop_on_valid=False,
            *args, **kwargs):
        '''
        Call the authentication callback from a distinct process. Will monitor
        processes for completion if all are currently occupied with a previous
        callback request.
        '''
        '''
        When the maximum number of processes have been engaged
        to make authentication requests, call monitor_processes
        to watch each process until authentication finishes.

        Once completeds, the outputs are passed to handle_outputs,
        which is responsible for logging the outcome of the authentication
        request and updating the proper SQL record with the outcome.
        '''
        recovered = False
        if len(self.presults) == self.config.process_count:
            # monitor result objects
            outputs = self.monitor_processes()
            recovered = self.handle_outputs(outputs)

        # Short-circuit before queuing another guess when a valid
        # credential was just recovered and the caller asked to stop.
        if recovered and stop_on_valid:
            return recovered

        # initiate a brute in a process within the pool
        self.presults.append(
            self.pool.apply_async(
                self.config.authentication_callback,
                (
                    (username,password,)
                )
            )
        )

        return recovered

    def shutdown(self):
        '''Close & join the process pool, followed by closing input/output files.
        '''

        # =====================
        # LOG ATTACK COMPLETION
        # =====================

        self.logger.log(GENERAL_EVENTS,'Shutting attack down')

        # Mark the attack row finished before tearing the pool down.
        self.attack.complete = True
        self.attack.end_time = BruteTime.current_time()
        self.main_db_sess.commit()

        self.logger.log(GENERAL_EVENTS,'Closing/joining Processes')

        if self.pool:
            self.pool.close()
            self.pool.join()

        close_all_sessions()

    def launch(self):
        """Launch a horitontal brute force attack.

        The argument to `usernames` and `passwords` are expected to
        be either a string, tuple, or list object. Should a string be
        provided, it should represent a path to a file containing
        newline delimited values of the corresponding input. Should
        a tuple or list be provided, each element should be a value
        corrsponding to the appropriate input.
        """

        if self.config.max_auth_tries:
            # Handle manually configured lockout threshold
            limit = self.config.max_auth_tries
        else:
            # Set a sane default otherwise
            limit = 1

        sleeping = False  # determine if the brute attack is sleeping
        recovered = False # track if a valid credentials has been recovered

        # =============================================
        # ENSURE PRIORITIZED VALUES ARE IN THE DATABASE
        # =============================================

        '''Logic iterates through each prioritized username
        and password value and determines if it resides in
        the database. A ValueError is raised if it doesn't
        exist in the database.

        Note that the password value is checked for both normal
        passwords and credentials. No error is raised so long
        as the value resides in one of the two tables.
        '''

        # ========================
        # BEGIN BRUTE FORCE ATTACK
        # ========================

        while True:
            try:
                # =======================
                # GET GUESSABLE USERNAMES
                # =======================

                '''Get a list of guessable usernames. Prioritize by:

                1. priority specifications
                2. Whether or not strict credentials have been set for
                   the user
                '''

                # Get a list of usernames to target
                # must not have already been recovered during an earlier attack
                # future_time must be less than current time
                # for that user have been completed
                usernames = self.main_db_sess.query(sql.Username) \
                    .join(sql.Credential) \
                    .filter(
                        sql.Username.recovered == False,
                        sql.Username.future_time <= time(),
                        sql.Credential.guessed == False) \
                    .order_by(sql.Username.priority.desc()) \
                    .order_by(sql.Credential.strict.desc()) \
                    .all()

                # Logging sleep events
                if not usernames and not sleeping:
                    # Find the username that will become guessable last,
                    # so the sleep message reports the wake-up time.
                    u = self.main_db_sess.query(sql.Username) \
                        .filter(sql.Username.recovered == 0) \
                        .order_by(sql.Username.future_time.desc()) \
                        .first()
                    sleeping = True
                    if u and u.future_time > 60+time():
                        self.logger.log(
                            GENERAL_EVENTS,
                            f'Sleeping until {BruteTime.float_to_str(u.future_time)}'
                        )
                elif usernames and sleeping:
                    sleeping = False

                # =========================
                # BRUTE FORCE EACH USERNAME
                # =========================

                # Current limit will be used to calculate the limit of the current query
                # used to assure that the limit remains lesser than the greatest password
                # id
                for username in usernames:

                    # ================================
                    # GET CREDENTIALS FOR THE USERNAME
                    # ================================

                    '''Get credentials to guess for a given user. Order by:

                    1. Strict credentials
                    2. Then priority
                    '''

                    credentials = self.main_db_sess.query(sql.Credential) \
                        .join(sql.Password) \
                        .filter(
                            sql.Credential.guessed == False,
                            sql.Credential.username == username) \
                        .order_by(sql.Credential.strict.desc()) \
                        .order_by(sql.Password.priority.desc()) \
                        .limit(limit) \
                        .all()

                    # Avoid race condition
                    if username.recovered: continue

                    for credential in credentials:

                        # =======================================
                        # DO THE AUTHENTICATION FOR EACH PASSWORD
                        # =======================================

                        # Current time of authentication attempt
                        ctime = BruteTime.current_time()

                        # Get the future time when this user can be targeted later
                        if self.config.max_auth_jitter:
                            # Derive from the password jitter
                            ftime = self.config.max_auth_jitter.get_jitter_future()
                        else:
                            # Default effectively asserting that no jitter will occur.
                            ftime = -1.0

                        # Avoid race condition
                        # also prevents checking of additional passwords if a valid
                        # password has been recovered in the distinct process
                        if username.recovered: break

                        # Update the Username/Credential object with relevant
                        # attributes and commit
                        credential.guess_time=ctime
                        credential.username.last_time=ctime
                        credential.username.future_time=ftime
                        self.main_db_sess.commit()

                        # Do the authentication callback
                        recovered = self.do_authentication_callback(
                            credential.username.value,
                            credential.password.value
                        )

                        if recovered and self.config.stop_on_valid:
                            break

                    if recovered and self.config.stop_on_valid:
                        break

                # ============================================
                # STOP ATTACK DUE TO STOP_ON_VALID_CREDENTIALS
                # ============================================
                if recovered and self.config.stop_on_valid:
                    self.logger.log(
                        GENERAL_EVENTS,
                        'Valid credentials recovered. Exiting per ' \
                        'stop_on_valid configuration.',
                    )
                    self.shutdown()
                    break

                # ===============================================
                # CONTINUE LOOPING UNTIL ALL GUESSES ARE FINISHED
                # ===============================================

                # Check if a normal credentials remains
                sample_remaining = self.main_db_sess \
                    .query(sql.Username) \
                    .join(sql.Credential) \
                    .filter(sql.Username.recovered == False,
                        sql.Credential.guessed == False) \
                    .first()

                if sample_remaining:
                    # Drain any finished worker results before sleeping
                    # briefly and looping again.
                    if len(self.presults):
                        outputs = self.monitor_processes()
                        self.handle_outputs(outputs)
                    sleep(.2)
                    continue

                # =======================================
                # GUESSES FINISHED; CLEAN REMINING OUTPUT
                # =======================================

                outputs = self.monitor_processes(ready_all=True)
                self.handle_outputs(outputs)
                self.logger.log(GENERAL_EVENTS,'Attack finished')

                # ========
                # SHUTDOWN
                # ========

                self.shutdown()
                break

            # ==================
            # EXCEPTION HANDLING
            # ==================
            except Exception as e:

                # =========================
                # DEFAULT EXCEPTION HANDLER
                # =========================
                #
                # - check if an exception handler has been provided for
                #   a given exception class
                # - if not, then shut down the brute forcer and raise
                #   the exception for the caller to handle

                # Allow registered handlers to trigger
                # NOTE(review): `e` is an exception *instance*, while
                # exception_handlers is keyed by exception *classes*
                # (KeyboardInterrupt is used as a key in __init__), so
                # this membership test appears to never match -- looks
                # like it should be `type(e) in ...`; confirm intent.
                if e in self.config.exception_handlers:
                    self.config.exception_handlers[e](self)
                # Raise to caller
                else:
                    self.logger.log(
                        GENERAL_EVENTS,
                        'Unhandled exception occurred. Shutting down attack '\
                        'and returning control to the caller.'
                    )
                    self.shutdown()
                    raise e
|
"""
Use these variable to easily compare the values of the payload
"""
NULL: int = -1
NOT_IMPLEMENTED_YET: Exception = NotImplemented
ROUND_WIN_T_BOMB: str = "t_win_bomb"
ROUND_WIN_T_ELIMINATIONS: str = "t_win_elimination"
ROUND_WIN_CT_DEFUSE: str = "ct_win_defuse"
ROUND_WIN_CT_ELIMINATIONS: str = "ct_win_elimination"
ROUND_PHASE_FREEZETIME: str = "freezetime"
ROUND_PHASE_LIVE: str = "live"
ROUND_PHASE_OVER: str = "over"
BOMB_CARRIED: str = "carried"
BOMB_PLANTING: str = "planting"
BOMB_PLANTED: str = "planted"
BOMB_DEFUSED: str = "defused"
BOMB_EXPLODED: str = "exploded"
WIN_TEAM_T: str = "T"
WIN_TEAM_CT: str = "CT"
TEAM_T: str = WIN_TEAM_T
TEAM_CT: str = WIN_TEAM_CT
NO_TEAM: str = ""
PLAYER_NO_CLAN: str = " No clan" # Space useful: no clan can have a space in their name (I guess)
NAME_UNCONNECTED: int = -2
PLAYER_ACTIVITY_PLAYING: str = "playing"
PLAYER_ACTIVITY_MENU: str = "menu"
PLAYER_ACTIVITY_TEXTINPUT: str = "textinput"
PLAYER_ACTIVITY_UNKNOWN: str = "unknown"
WEAPON_HOLSTERED: str = "holstered"
WEAPON_INACTIVE: str = WEAPON_HOLSTERED
WEAPON_ACTIVE: str = "active"
|
import numpy as np
import pandas as pd
import sys
import math
from tabulate import tabulate
def isValidPrior(data):
    """
    Return True when `data` (a single-row DataFrame of probabilities)
    is a valid prior distribution: every value is non-negative and the
    row sums to 1.

    BUG FIX: the sum is now compared with math.isclose() instead of
    exact float equality, so priors such as [0.1] * 10 (whose float sum
    is 0.9999999999999999) are no longer rejected.
    """
    if (data.values < 0).any():
        return False
    return math.isclose(data.sum(axis=1)[0], 1.0, rel_tol=1e-9)
def getN(filename):
    """Read the first ';'-separated line of `filename` (the N header
    row) as a one-row DataFrame, with missing cells replaced by 0.0."""
    header_row = pd.read_csv(filename, sep=";", header=None, nrows=1)
    return header_row.fillna(0.0)
def getPrior(filename):
    """Read the prior distribution (rows after the header) from
    `filename`; return it as a DataFrame, or None when invalid."""
    prior = pd.read_csv(
        filename, sep=";", header=None, skiprows=[0], nrows=2
    ).fillna(0.0)
    return prior if isValidPrior(prior) else None
def calculateProbNTries(n, prior):
    """
    Probability of success within the best N guesses: the sum of the
    N largest prior probabilities.

    `n` is a one-cell DataFrame holding N; `prior` is a one-row
    DataFrame of probabilities.  Returns a pandas Series.
    """
    tries = n.iloc[0, 0]
    # Transpose to a single column, sort ascending, keep the top `tries`.
    column = prior.T.sort_values(by=[0])
    top = column.iloc[len(prior.columns) - tries:]
    return top.sum()
def main():
    """CLI entry point: read the file named on the command line and
    print the probability of success within N tries."""
    if len(sys.argv) == 2:
        print('--------- Output ----------')
        prior = getPrior(sys.argv[1])
        # BUG FIX: getPrior() returns None for an invalid prior; report
        # it instead of crashing inside calculateProbNTries().
        if prior is None:
            print("Invalid prior distribution")
            print('---------------------------')
            return
        n = getN(sys.argv[1])
        entropy = calculateProbNTries(n, prior)
        print("PROBABILITE in N TRIES")
        print(entropy.to_markdown())
        print('---------------------------')
    else:
        # NOTE(review): reached on a wrong argument count, not a missing
        # file; message kept for output compatibility.
        print("File not found")


# FIX: guard the module-level call so importing this file no longer
# runs the CLI as a side effect.
if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.