hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acea8a6854cd08dfc1aa72fc98790e4da20534e2 | 4,948 | py | Python | shot_scraper/cli.py | joshuadavidthomas/shot-scraper | f238c5b01347e0549acb0b18095a8298dd91a8fc | [
"Apache-2.0"
] | null | null | null | shot_scraper/cli.py | joshuadavidthomas/shot-scraper | f238c5b01347e0549acb0b18095a8298dd91a8fc | [
"Apache-2.0"
] | null | null | null | shot_scraper/cli.py | joshuadavidthomas/shot-scraper | f238c5b01347e0549acb0b18095a8298dd91a8fc | [
"Apache-2.0"
] | null | null | null | import click
from click_default_group import DefaultGroup
from playwright.sync_api import sync_playwright
from runpy import run_module
import sys
import time
import yaml
@click.group(
    cls=DefaultGroup,
    default="shot",
    default_if_no_args=True,
    context_settings={"help_option_names": ["-h", "--help"]},
)
@click.version_option()
def cli():
    """Tools for taking automated screenshots"""
@cli.command()
@click.argument("url")  # TODO: validate with custom type
@click.option(
    "-w",
    "--width",
    type=int,
    help="Width of browser window, defaults to 1280",
    default=1280,
)
@click.option(
    "-h",
    "--height",
    type=int,
    help="Height of browser window and shot - defaults to the full height of the page",
)
@click.option(
    "-o",
    "--output",
    type=click.Path(file_okay=True, writable=True, dir_okay=False, allow_dash=True),
    default="-",
)
@click.option(
    "-s", "--selector", help="Take shot of first element matching this CSS selector"
)
@click.option(
    "-j", "--javascript", help="Execute this JavaScript prior to taking the shot"
)
@click.option("--quality", type=int, help="Save as JPEG with this quality, e.g. 80")
@click.option(
    "--wait", type=int, help="Wait this many milliseconds before taking screenshot"
)
def shot(url, output, width, height, selector, javascript, quality, wait):
    """
    Take a single screenshot of a page or portion of a page.
    Usage:
        shot-scraper http://www.example.com/ -o example.png
    Use -s to take a screenshot of one area of the page, identified using a CSS selector:
        shot-scraper https://simonwillison.net -o bighead.png -s '#bighead'
    """
    # Collect the CLI options into the configuration dict that take_shot()
    # understands.
    shot_config = dict(
        url=url,
        selector=selector,
        javascript=javascript,
        width=width,
        height=height,
        quality=quality,
        wait=wait,
    )
    with sync_playwright() as playwright:
        browser = playwright.chromium.launch()
        if output == "-":
            # "-" means write the raw image bytes to stdout.
            sys.stdout.buffer.write(take_shot(browser, shot_config, return_bytes=True))
        else:
            shot_config["output"] = str(output)
            take_shot(browser, shot_config)
        browser.close()
@cli.command()
@click.argument("config", type=click.File(mode="r"))
def multi(config):
    """
    Take multiple screenshots, defined by a YAML file
    Usage:
        shot-scraper config.yml
    Where config.yml contains configuration like this:
    \b
    - output: example.png
      url: http://www.example.com/
    """
    # Each list entry in the YAML file is one shot configuration dict.
    shot_configs = yaml.safe_load(config)
    with sync_playwright() as playwright:
        browser = playwright.chromium.launch()
        for shot_config in shot_configs:
            take_shot(browser, shot_config)
        browser.close()
@cli.command()
def install():
    """
    Install Playwright browser needed by this tool.
    Usage:
        shot-scraper install
    """
    # Delegate to Playwright's own CLI, exactly as if the user had run
    # "playwright install chromium" themselves.
    playwright_argv = ["playwright", "install", "chromium"]
    sys.argv = playwright_argv
    run_module("playwright", run_name="__main__")
def take_shot(browser, shot, return_bytes=False):
    """
    Take a single screenshot described by the ``shot`` configuration dict.

    :param browser: Playwright browser instance used to open a new page.
    :param shot: Dict of options - "url" is required; "output", "selector",
        "javascript", "width", "height", "quality" and "wait" are optional.
    :param return_bytes: If True, return the image bytes instead of writing
        them to the "output" path.
    :return: The screenshot bytes when return_bytes is True, otherwise None.
    :raises click.ClickException: if "url" is missing/invalid, or "output"
        is missing when one is required.
    """
    url = shot.get("url") or ""
    if not (url.startswith("http://") or url.startswith("https://")):
        raise click.ClickException(
            "'url' must start http:// or https:// - got: \n{}".format(url)
        )
    # "output" may be absent or None (e.g. a blank "output:" value in YAML),
    # so normalise before calling .strip().
    output = (shot.get("output") or "").strip()
    if not output and not return_bytes:
        # Fixed typo in the error message: previously said "messing".
        raise click.ClickException(
            "'output' filename is required, missing for url:\n {}".format(url)
        )
    quality = shot.get("quality")
    wait = shot.get("wait")
    page = browser.new_page()
    # Capture the full page height unless an explicit height was requested.
    full_page = True
    if shot.get("width") or shot.get("height"):
        viewport = {
            "width": shot.get("width") or 1280,
            "height": shot.get("height") or 720,
        }
        page.set_viewport_size(viewport)
        if shot.get("height"):
            full_page = False
    page.goto(url)
    if wait:
        # "wait" is in milliseconds; time.sleep() takes seconds.
        time.sleep(wait / 1000)
    selector = shot.get("selector")
    javascript = shot.get("javascript")
    if javascript:
        page.evaluate(javascript)
    screenshot_args = {}
    if quality:
        # Playwright only honours "quality" for JPEG output.
        screenshot_args.update({"quality": quality, "type": "jpeg"})
    if not return_bytes:
        screenshot_args["path"] = output
    if not selector:
        screenshot_args["full_page"] = full_page
    if selector:
        if return_bytes:
            return page.locator(selector).screenshot(**screenshot_args)
        page.locator(selector).screenshot(**screenshot_args)
        message = "Screenshot of '{}' on '{}' written to '{}'".format(
            selector, url, output
        )
    else:
        # Whole page
        if return_bytes:
            return page.screenshot(**screenshot_args)
        page.screenshot(**screenshot_args)
        message = "Screenshot of '{}' written to '{}'".format(url, output)
    click.echo(message, err=True)
| 27.186813 | 89 | 0.605295 |
acea8ac0b57f25e21c4d706307ddabbc3ce88d46 | 7,981 | py | Python | lib/selenium/selenium/webdriver/common/action_chains.py | ceremetrix/X | f59beb2c202986657617bf82e658de19b251b85a | [
"MIT"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | lib/selenium/selenium/webdriver/common/action_chains.py | ceremetrix/X | f59beb2c202986657617bf82e658de19b251b85a | [
"MIT"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | lib/selenium/selenium/webdriver/common/action_chains.py | ceremetrix/X | f59beb2c202986657617bf82e658de19b251b85a | [
"MIT"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # Copyright 2011 WebDriver committers
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The ActionChains implementation
"""
from selenium.webdriver.remote.command import Command
from selenium.webdriver.common.keys import Keys
class ActionChains(object):
    """
    Generate user actions.
    All actions are stored in the ActionChains object. Call perform() to fire
    stored actions.
    """

    def __init__(self, driver):
        """
        Creates a new ActionChains.
        :Args:
         - driver: The WebDriver instance which performs user actions.
        """
        self._driver = driver
        # Queue of zero-argument callables, executed in order by perform().
        self._actions = []

    def perform(self):
        """
        Performs all stored actions.
        """
        for action in self._actions:
            action()

    @staticmethod
    def _keys_to_typing(value):
        """
        Flatten a mixed sequence of Keys constants, integers and strings
        into a flat list of individual key values.
        This logic was previously duplicated in key_down() and key_up().
        """
        typing = []
        for val in value:
            if isinstance(val, Keys):
                typing.append(val)
            elif isinstance(val, int):
                # Integers are typed out digit by digit.
                for char in str(val):
                    typing.append(char)
            else:
                # Strings (or other sequences) contribute each character.
                for char in val:
                    typing.append(char)
        return typing

    def click(self, on_element=None):
        """
        Clicks an element.
        :Args:
         - on_element: The element to click.
           If None, clicks on current mouse position.
        """
        if on_element:
            self.move_to_element(on_element)
        self._actions.append(lambda:
            self._driver.execute(Command.CLICK, {'button': 0}))
        return self

    def click_and_hold(self, on_element=None):
        """
        Holds down the left mouse button on an element.
        :Args:
         - on_element: The element to mouse down.
           If None, clicks on current mouse position.
        """
        if on_element:
            self.move_to_element(on_element)
        self._actions.append(lambda:
            self._driver.execute(Command.MOUSE_DOWN, {}))
        return self

    def context_click(self, on_element=None):
        """
        Performs a context-click (right click) on an element.
        :Args:
         - on_element: The element to context-click.
           If None, clicks on current mouse position.
        """
        if on_element:
            self.move_to_element(on_element)
        self._actions.append(lambda:
            self._driver.execute(Command.CLICK, {'button': 2}))
        return self

    def double_click(self, on_element=None):
        """
        Double-clicks an element.
        :Args:
         - on_element: The element to double-click.
           If None, clicks on current mouse position.
        """
        if on_element:
            self.move_to_element(on_element)
        self._actions.append(lambda:
            self._driver.execute(Command.DOUBLE_CLICK, {}))
        return self

    def drag_and_drop(self, source, target):
        """Holds down the left mouse button on the source element,
        then moves to the target element and releases the mouse button.
        :Args:
         - source: The element to mouse down.
         - target: The element to mouse up.
        """
        self.click_and_hold(source)
        self.release(target)
        return self

    def drag_and_drop_by_offset(self, source, xoffset, yoffset):
        """
        Holds down the left mouse button on the source element,
        then moves by the given offset and releases the mouse button.
        :Args:
         - source: The element to mouse down.
         - xoffset: X offset to move to.
         - yoffset: Y offset to move to.
        """
        self.click_and_hold(source)
        self.move_by_offset(xoffset, yoffset)
        self.release(source)
        return self

    def key_down(self, value, element=None):
        """Sends a key press only, without releasing it.
        Should only be used with modifier keys (Control, Alt and Shift).
        :Args:
         - value: The modifier key to send. Values are defined in Keys class.
         - element: The element to send keys to.
           If None, sends a key to the currently focused element.
        """
        typing = self._keys_to_typing(value)
        if element:
            self.click(element)
        self._actions.append(lambda:
            self._driver.execute(Command.SEND_KEYS_TO_ACTIVE_ELEMENT, {
                "value": typing}))
        return self

    def key_up(self, value, element=None):
        """
        Releases a modifier key.
        :Args:
         - value: The modifier key to send. Values are defined in Keys class.
         - element: The element to send keys to.
           If None, sends a key to the currently focused element.
        """
        typing = self._keys_to_typing(value)
        if element:
            self.click(element)
        self._actions.append(lambda:
            self._driver.execute(Command.SEND_KEYS_TO_ACTIVE_ELEMENT, {
                "value": typing}))
        return self

    def move_by_offset(self, xoffset, yoffset):
        """
        Moving the mouse to an offset from current mouse position.
        :Args:
         - xoffset: X offset to move to.
         - yoffset: Y offset to move to.
        """
        self._actions.append(lambda:
            self._driver.execute(Command.MOVE_TO, {
                'xoffset': xoffset,
                'yoffset': yoffset}))
        return self

    def move_to_element(self, to_element):
        """
        Moving the mouse to the middle of an element.
        :Args:
         - to_element: The element to move to.
        """
        self._actions.append(lambda:
            self._driver.execute(Command.MOVE_TO, {'element': to_element.id}))
        return self

    def move_to_element_with_offset(self, to_element, xoffset, yoffset):
        """
        Move the mouse by an offset of the specified element.
        Offsets are relative to the top-left corner of the element.
        :Args:
         - to_element: The element to move to.
         - xoffset: X offset to move to.
         - yoffset: Y offset to move to.
        """
        self._actions.append(lambda:
            self._driver.execute(Command.MOVE_TO, {
                'element': to_element.id,
                'xoffset': xoffset,
                'yoffset': yoffset}))
        return self

    def release(self, on_element=None):
        """
        Releasing a held mouse button.
        :Args:
         - on_element: The element to mouse up.
        """
        if on_element:
            self.move_to_element(on_element)
        self._actions.append(lambda:
            self._driver.execute(Command.MOUSE_UP, {}))
        return self

    def send_keys(self, *keys_to_send):
        """Sends keys to current focused element.
        :Args:
         - keys_to_send: The keys to send.
        """
        self._actions.append(lambda:
            self._driver.switch_to_active_element().send_keys(*keys_to_send))
        return self

    def send_keys_to_element(self, element, *keys_to_send):
        """
        Sends keys to an element.
        :Args:
         - element: The element to send keys.
         - keys_to_send: The keys to send.
        """
        self._actions.append(lambda:
            element.send_keys(*keys_to_send))
        return self
| 31.42126 | 78 | 0.583761 |
acea8aef7223bff7cd6fde310dee48cadd08dfe2 | 1,322 | py | Python | accelbyte_py_sdk/api/dsmc/operations/deployment_config/__init__.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/dsmc/operations/deployment_config/__init__.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | 1 | 2021-10-13T03:46:58.000Z | 2021-10-13T03:46:58.000Z | accelbyte_py_sdk/api/dsmc/operations/deployment_config/__init__.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
"""Auto-generated package that contains models used by the justice-dsm-controller-service."""
__version__ = "3.2.1"
__author__ = "AccelByte"
__email__ = "dev@accelbyte.net"
# pylint: disable=line-too-long
from .create_deployment import CreateDeployment
from .create_deployment_override import CreateDeploymentOverride
from .create_override_region__89178f import CreateOverrideRegionOverride
from .create_root_region_override import CreateRootRegionOverride
from .delete_deployment import DeleteDeployment
from .delete_deployment_override import DeleteDeploymentOverride
from .delete_override_region__2e7e2d import DeleteOverrideRegionOverride
from .delete_root_region_override import DeleteRootRegionOverride
from .get_all_deployment import GetAllDeployment
from .get_deployment import GetDeployment
from .update_deployment import UpdateDeployment
from .update_deployment_override import UpdateDeploymentOverride
from .update_override_region__fb90bf import UpdateOverrideRegionOverride
from .update_root_region_override import UpdateRootRegionOverride
| 42.645161 | 93 | 0.863086 |
acea8afdfde5e880430499cdf51179e9eb3780ce | 6,678 | py | Python | nova/virt/disk/vfs/guestfs.py | bopopescu/zknova | 8dd09199f5678697be228ffceeaf2c16f6d7319d | [
"Apache-2.0"
] | null | null | null | nova/virt/disk/vfs/guestfs.py | bopopescu/zknova | 8dd09199f5678697be228ffceeaf2c16f6d7319d | [
"Apache-2.0"
] | null | null | null | nova/virt/disk/vfs/guestfs.py | bopopescu/zknova | 8dd09199f5678697be228ffceeaf2c16f6d7319d | [
"Apache-2.0"
] | 1 | 2020-07-24T07:27:49.000Z | 2020-07-24T07:27:49.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import guestfs
from nova import exception
from nova.openstack.common import log as logging
from nova.virt.disk.vfs import api as vfs
LOG = logging.getLogger(__name__)
guestfs = None
class VFSGuestFS(vfs.VFS):
    """
    This class implements a VFS module that uses the libguestfs APIs
    to access the disk image. The disk image is never mapped into
    the host filesystem, thus avoiding any potential for symlink
    attacks from the guest filesystem.
    """
    def __init__(self, imgfile, imgfmt='raw', partition=None):
        # imgfile: path to the disk image; imgfmt: image format string;
        # partition: partition number to mount, -1 to auto-inspect the OS,
        # or None for a partition-less image.
        super(VFSGuestFS, self).__init__(imgfile, imgfmt, partition)
        # Import guestfs lazily so the dependency is only needed when this
        # VFS implementation is actually used.
        global guestfs
        if guestfs is None:
            guestfs = __import__('guestfs')
        # libguestfs appliance handle; created in setup(), cleared on error
        # and in teardown().
        self.handle = None
    def setup_os(self):
        # partition == -1 means "use libguestfs OS inspection to decide
        # what to mount"; anything else mounts a fixed device/partition.
        if self.partition == -1:
            self.setup_os_inspect()
        else:
            self.setup_os_static()
    def setup_os_static(self):
        # Mount a specific partition (or the whole device when partition is
        # None/0) at the appliance root.
        LOG.debug(_("Mount guest OS image %(imgfile)s partition %(part)s"),
                  {'imgfile': self.imgfile, 'part': str(self.partition)})
        if self.partition:
            self.handle.mount_options("", "/dev/sda%d" % self.partition, "/")
        else:
            self.handle.mount_options("", "/dev/sda", "/")
    def setup_os_inspect(self):
        # Run libguestfs OS inspection; exactly one root filesystem is
        # required, anything else is an error.
        LOG.debug(_("Inspecting guest OS image %s"), self.imgfile)
        roots = self.handle.inspect_os()
        if len(roots) == 0:
            # NOTE(review): the format string and its argument are passed as
            # two positional args instead of _("...") % self.imgfile -
            # confirm NovaException renders this message as intended.
            raise exception.NovaException(_("No operating system found in %s"),
                                          self.imgfile)
        if len(roots) != 1:
            LOG.debug(_("Multi-boot OS %(roots)s") % {'roots': str(roots)})
            # NOTE(review): same positional-argument pattern as above.
            raise exception.NovaException(
                _("Multi-boot operating system found in %s"),
                self.imgfile)
        self.setup_os_root(roots[0])
    def setup_os_root(self, root):
        # Mount every filesystem reported for the inspected root.
        LOG.debug(_("Inspecting guest OS root filesystem %s"), root)
        mounts = self.handle.inspect_get_mountpoints(root)
        if len(mounts) == 0:
            raise exception.NovaException(
                _("No mount points found in %(root)s of %(imgfile)s") %
                {'root': root, 'imgfile': self.imgfile})
        # Each entry is a (mount point, device) pair; sort by device so
        # mounting happens in a deterministic order.
        mounts.sort(key=lambda mount: mount[1])
        for mount in mounts:
            LOG.debug(_("Mounting %(dev)s at %(dir)s") %
                      {'dev': mount[1], 'dir': mount[0]})
            self.handle.mount_options("", mount[1], mount[0])
    def setup(self):
        # Launch the libguestfs appliance, attach the image, mount the guest
        # filesystems and initialise Augeas for config-file access.
        LOG.debug(_("Setting up appliance for %(imgfile)s %(imgfmt)s") %
                  {'imgfile': self.imgfile, 'imgfmt': self.imgfmt})
        self.handle = guestfs.GuestFS()
        try:
            self.handle.add_drive_opts(self.imgfile, format=self.imgfmt)
            self.handle.launch()
            self.setup_os()
            # Augeas is used later by set_ownership() to read the guest's
            # passwd/group files.
            self.handle.aug_init("/", 0)
        except RuntimeError, e:
            # dereference object and implicitly close()
            self.handle = None
            raise exception.NovaException(
                _("Error mounting %(imgfile)s with libguestfs (%(e)s)") %
                {'imgfile': self.imgfile, 'e': e})
        except Exception:
            self.handle = None
            raise
    def teardown(self):
        # Best-effort shutdown: each step logs a warning on failure rather
        # than raising, and the handle is always dereferenced at the end.
        LOG.debug(_("Tearing down appliance"))
        try:
            try:
                self.handle.aug_close()
            except RuntimeError, e:
                LOG.warn(_("Failed to close augeas %s"), e)
            try:
                self.handle.shutdown()
            except AttributeError:
                # Older libguestfs versions haven't an explicit shutdown
                pass
            except RuntimeError, e:
                LOG.warn(_("Failed to shutdown appliance %s"), e)
            try:
                self.handle.close()
            except AttributeError:
                # Older libguestfs versions haven't an explicit close
                pass
            except RuntimeError, e:
                LOG.warn(_("Failed to close guest handle %s"), e)
        finally:
            # dereference object and implicitly close()
            self.handle = None
    @staticmethod
    def _canonicalize_path(path):
        # libguestfs requires absolute paths; prepend "/" when missing.
        if path[0] != '/':
            return '/' + path
        return path
    def make_path(self, path):
        # Create the directory and any missing parents (mkdir -p).
        LOG.debug(_("Make directory path=%(path)s") % locals())
        path = self._canonicalize_path(path)
        self.handle.mkdir_p(path)
    def append_file(self, path, content):
        # Append content to an existing (or new) file in the guest.
        LOG.debug(_("Append file path=%(path)s") % locals())
        path = self._canonicalize_path(path)
        self.handle.write_append(path, content)
    def replace_file(self, path, content):
        # Overwrite the file in the guest with the given content.
        LOG.debug(_("Replace file path=%(path)s") % locals())
        path = self._canonicalize_path(path)
        self.handle.write(path, content)
    def read_file(self, path):
        # Return the contents of a file in the guest.
        LOG.debug(_("Read file path=%(path)s") % locals())
        path = self._canonicalize_path(path)
        return self.handle.read_file(path)
    def has_file(self, path):
        # True if the path exists in the guest (stat succeeds).
        LOG.debug(_("Has file path=%(path)s") % locals())
        path = self._canonicalize_path(path)
        try:
            self.handle.stat(path)
            return True
        except RuntimeError:
            return False
    def set_permissions(self, path, mode):
        # chmod the guest path to the given numeric mode.
        LOG.debug(_("Set permissions path=%(path)s mode=%(mode)s") % locals())
        path = self._canonicalize_path(path)
        self.handle.chmod(mode, path)
    def set_ownership(self, path, user, group):
        LOG.debug(_("Set ownership path=%(path)s "
                    "user=%(user)s group=%(group)s") % locals())
        path = self._canonicalize_path(path)
        uid = -1
        gid = -1
        # Resolve names to numeric ids by reading the guest's own
        # /etc/passwd and /etc/group through Augeas; -1 leaves that id
        # unchanged in the chown call.
        if user is not None:
            uid = int(self.handle.aug_get(
                "/files/etc/passwd/" + user + "/uid"))
        if group is not None:
            gid = int(self.handle.aug_get(
                "/files/etc/group/" + group + "/gid"))
        LOG.debug(_("chown uid=%(uid)d gid=%(gid)s") % locals())
        self.handle.chown(uid, gid, path)
| 33.898477 | 79 | 0.579665 |
acea8bd437558379a909e90e0da49c428dd131e9 | 9,239 | py | Python | src/xia2/cli/compare_merging_stats.py | xia2/xia2 | 18554e9b4d442e7c23a0c4ce93f51b491f77d4b7 | [
"BSD-3-Clause"
] | 10 | 2015-10-30T06:36:55.000Z | 2021-12-10T20:06:22.000Z | src/xia2/cli/compare_merging_stats.py | xia2/xia2 | 18554e9b4d442e7c23a0c4ce93f51b491f77d4b7 | [
"BSD-3-Clause"
] | 528 | 2015-11-24T08:20:12.000Z | 2022-03-21T21:47:29.000Z | src/xia2/cli/compare_merging_stats.py | xia2/xia2 | 18554e9b4d442e7c23a0c4ce93f51b491f77d4b7 | [
"BSD-3-Clause"
] | 14 | 2016-03-15T22:07:03.000Z | 2020-12-14T07:13:35.000Z | import concurrent.futures
import functools
import math
import os
import sys
import iotbx.merging_statistics
import iotbx.phil
import libtbx
from libtbx.introspection import number_of_processors
from cctbx import uctbx
from cycler import cycler
from dials.util.options import OptionParser
help_message = """
"""
phil_scope = iotbx.phil.parse(
"""
nproc = Auto
.type = int(value_min=1)
n_bins = 20
.type = int(value_min=1)
anomalous = False
.type = bool
use_internal_variance = False
.type = bool
eliminate_sys_absent = False
.type = bool
plot_labels = None
.type = strings
data_labels = None
.type = str
size_inches = None
.type = floats(size=2, value_min=0)
image_dir = None
.type = path
format = *png pdf
.type = choice
style = *ggplot
.type = choice
space_group = None
.type = space_group
d_min = None
.type = float
d_max = None
.type = float
small_multiples = False
.type = bool
alpha = 0.3
.type = float(value_min=0, value_max=1)
.help = "The alpha value for the background line plots in conjunction with"
"small_multiples=True."
""",
process_includes=True,
)
def run(args=None):
    """
    Compare merging statistics for one or more scaled unmerged MTZ files.

    Parses command-line options, computes merging statistics for each input
    file in parallel, writes comparison plots, and returns the per-file
    statistics.

    :param args: Command-line arguments; defaults to ``sys.argv[1:]``
        evaluated at call time.
    :returns: List of merging-statistics results, one per input MTZ file.
    """
    if args is None:
        # Bind sys.argv at call time, not at import time (the old default of
        # args=sys.argv[1:] was captured once when the module loaded).
        args = sys.argv[1:]
    usage = "xia2.compare_merging_stats [options] unmerged1.mtz unmerged2.mtz (..)"
    parser = OptionParser(
        usage=usage, phil=phil_scope, check_format=False, epilog=help_message
    )
    params, options, args = parser.parse_args(
        args, show_diff_phil=True, return_unhandled=True
    )
    if params.nproc is libtbx.Auto:
        params.nproc = number_of_processors()
    # Any unhandled argument that is an existing file is treated as an
    # input MTZ.
    mtz_files = []
    for arg in args:
        if os.path.isfile(arg):
            print(arg)
            mtz_files.append(arg)
    get_merging_stats_partial = functools.partial(
        get_merging_stats,
        anomalous=params.anomalous,
        n_bins=params.n_bins,
        use_internal_variance=params.use_internal_variance,
        eliminate_sys_absent=params.eliminate_sys_absent,
        data_labels=params.data_labels,
        space_group_info=params.space_group,
        d_min=params.d_min,
        d_max=params.d_max,
    )
    with concurrent.futures.ProcessPoolExecutor(max_workers=params.nproc) as pool:
        # Materialise the results here: Executor.map returns a lazy
        # iterator, and the previous code returned it only after the
        # plotting call had already exhausted it.
        results = list(pool.map(get_merging_stats_partial, mtz_files))
    plot_merging_stats(
        results,
        labels=params.plot_labels,
        size_inches=params.size_inches,
        image_dir=params.image_dir,
        format=params.format,
        style=params.style,
        small_multiples=params.small_multiples,
        alpha=params.alpha,
    )
    return results
def get_merging_stats(
    scaled_unmerged_mtz,
    anomalous=False,
    n_bins=20,
    use_internal_variance=False,
    eliminate_sys_absent=False,
    data_labels=None,
    space_group_info=None,
    d_min=None,
    d_max=None,
):
    """
    Compute merging statistics for a single scaled unmerged MTZ file.

    :param scaled_unmerged_mtz: Path to the unmerged MTZ file.
    :param anomalous: Whether to report anomalous statistics.
    :param n_bins: Number of resolution bins.
    :param use_internal_variance: Passed through to dataset_statistics.
    :param eliminate_sys_absent: Remove systematically absent reflections.
    :param data_labels: Optional column labels to select from the file.
    :param space_group_info: Optional space group override.
    :param d_min: Optional high-resolution limit.
    :param d_max: Optional low-resolution limit.
    :returns: An iotbx.merging_statistics.dataset_statistics object.
    """
    intensities = iotbx.merging_statistics.select_data(
        scaled_unmerged_mtz, data_labels=data_labels
    )
    # Treat the data as non-anomalous here; anomalous handling is delegated
    # to dataset_statistics via the ``anomalous`` flag below.
    intensities = intensities.customized_copy(
        anomalous_flag=False, info=intensities.info()
    )
    if space_group_info is not None:
        intensities = intensities.customized_copy(
            space_group_info=space_group_info, info=intensities.info()
        )
    return iotbx.merging_statistics.dataset_statistics(
        i_obs=intensities,
        n_bins=n_bins,
        anomalous=anomalous,
        use_internal_variance=use_internal_variance,
        eliminate_sys_absent=eliminate_sys_absent,
        d_min=d_min,
        d_max=d_max,
    )
def plot_merging_stats(
    results,
    labels=None,
    plots=None,
    size_inches=None,
    image_dir=None,
    format="png",
    style="ggplot",
    small_multiples=False,
    global_labels=None,
    alpha=0.3,
):
    """
    Write one image per statistic comparing all results.

    :param results: List of merging-statistics results (or lists thereof).
    :param labels: Optional per-result labels (one per entry of results).
    :param plots: Optional subset of statistic names to plot; defaults to
        all known statistics.
    :param size_inches: Optional (width, height) figure size.
    :param image_dir: Output directory (created if missing); defaults to
        the current directory.
    :param format: Output image format ("png" or "pdf").
    :param style: Matplotlib style name applied before plotting.
    :param small_multiples: If True, draw each result in its own subplot
        on a shared grid.
    :param global_labels: Optional labels shared across subplots.
    :param alpha: Alpha for the grey background traces in grid mode.
    """
    import matplotlib

    # Select a non-interactive backend; must happen before pyplot import.
    matplotlib.use("Agg")
    from matplotlib import pyplot as plt

    if style is not None:
        plt.style.use(style)
    colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
    plt.rcParams["axes.titlesize"] = "medium"
    # Repeat the colour cycle once per linestyle so many results stay
    # distinguishable. NOTE: this loop rebinds the ``style`` parameter.
    linestyles = []
    for style in ("-", "--", ":", "-."):
        linestyles.extend([style] * len(colors))
    colors = colors * len(set(linestyles))
    plt.rc("axes", prop_cycle=(cycler("c", colors) + cycler("ls", linestyles)))
    # Mapping of statistic attribute name -> y-axis label (LaTeX).
    plots_ = {
        "r_merge": "$R_{merge}$",
        "r_meas": "$R_{meas}$",
        "r_pim": "$R_{pim}$",
        "cc_one_half": r"$CC_{\frac{1}{2}}$",
        "cc_one_half_sigma_tau": r"$CC_{\frac{1}{2}}$",
        "cc_anom": "$CC_{anom}$",
        "i_over_sigma_mean": r"$< I / \sigma(I) >$",
        "completeness": "Completeness",
        "mean_redundancy": "Multiplicity",
    }
    if plots is None:
        plots = plots_
    else:
        plots = {k: plots_[k] for k in plots}
    if labels is not None:
        assert len(results) == len(labels)
    if image_dir is None:
        image_dir = "."
    elif not os.path.exists(image_dir):
        os.makedirs(image_dir)
    # Choose a near-square subplot grid when drawing small multiples.
    n_rows = 1
    n_cols = 1
    if small_multiples:
        n_rows = int(math.floor(math.sqrt(len(results))))
        n_cols = n_rows
        while n_cols * n_rows < len(results):
            n_cols += 1
        assert n_cols * n_rows >= len(results), (n_cols, n_rows, len(results))
    for k in plots:
        plot_data(
            results,
            k,
            plots.get(k, k),
            labels,
            n_rows=n_rows,
            n_cols=n_cols,
            global_labels=global_labels,
            alpha=alpha,
        )
        if size_inches is not None:
            fig = plt.gcf()
            fig.set_size_inches(size_inches)
        # Legend placement chosen per statistic where space is usually free.
        if n_cols == 1 and labels is not None:
            if k.startswith("cc"):
                plt.legend(loc="lower left")
            elif k.startswith("r_"):
                plt.legend(loc="upper left")
            elif k.startswith("i_"):
                plt.legend(loc="upper right")
            else:
                plt.legend(loc="best")
        if global_labels is not None:
            ax = plt.gca()
            handles, lab = ax.get_legend_handles_labels()
            plt.figlegend(handles, lab, loc="lower right")
        plt.tight_layout()
        plt.savefig(os.path.join(image_dir, k + ".%s" % format))
        plt.clf()
def plot_data(
    results,
    k,
    ylabel,
    labels,
    linestyle=None,
    n_rows=None,
    n_cols=None,
    global_labels=None,
    alpha=0.3,
):
    """
    Plot one statistic (attribute ``k`` of each resolution bin) against
    resolution, either on a single shared axis (n_cols == 1) or on a grid
    of subplots, one per result.

    :param results: List of results; each entry may itself be a result or
        a list/tuple of results.
    :param k: Attribute name of the per-bin statistic to plot.
    :param ylabel: Y-axis label.
    :param labels: Optional per-result labels.
    :param linestyle: Optional explicit matplotlib linestyle.
    :param n_rows: Number of subplot rows (grid mode).
    :param n_cols: Number of subplot columns; 1 means single shared axis.
    :param global_labels: Optional labels shared across subplots.
    :param alpha: Alpha for the grey background traces in grid mode.
    """
    from matplotlib import pyplot as plt

    colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
    ref_ax = None  # first subplot created; later subplots share its axes
    for i, result in enumerate(results):
        if not isinstance(result, (list, tuple)):
            result = (result,)
        if labels is not None:
            # Un-escape "\$" so LaTeX markup can be passed via the CLI.
            label = labels[i].replace("\\$", "$")
        else:
            label = None
        if n_cols > 1:
            ax = plt.subplot(n_rows, n_cols, i + 1, sharex=ref_ax, sharey=ref_ax)
            if label:
                ax.set_title(label, loc="left")
            if ref_ax is None:
                ref_ax = ax
            # Draw every result as a faint grey background trace so each
            # subplot shows its result in context.
            for other in results:
                if isinstance(other, iotbx.merging_statistics.dataset_statistics):
                    other = (other,)
                for res in other:
                    if res is not None:
                        # x is the midpoint of each resolution bin in d*^2.
                        x = [
                            0.5
                            * (
                                uctbx.d_as_d_star_sq(b.d_max)
                                + uctbx.d_as_d_star_sq(b.d_min)
                            )
                            for b in res.bins
                        ]
                        y = [getattr(b, k) for b in res.bins]
                        ax.plot(
                            x,
                            y,
                            linestyle="-",
                            color="grey",
                            linewidth=1,
                            alpha=alpha,
                        )
        else:
            ax = plt.gca()
        # Foreground trace(s) for this result.
        for i_res, res in enumerate(result):
            if res is not None:
                if global_labels is not None:
                    l = global_labels[i_res]
                else:
                    l = label
                x = [
                    0.5
                    * (uctbx.d_as_d_star_sq(b.d_max) + uctbx.d_as_d_star_sq(b.d_min))
                    for b in res.bins
                ]
                y = [getattr(b, k) for b in res.bins]
                color = colors[i_res] if n_cols > 1 else colors[i]
                ax.plot(x, y, label=l, linestyle=linestyle, color=color)
        if n_cols > 1 or i == len(results) - 1:
            ax.set_xlabel(r"Resolution ($\AA$)")
            ax.set_ylabel(ylabel)
        ax.label_outer()
        # Clamp y-limits to sensible ranges per statistic.
        if k in ("cc_one_half", "cc_one_half_sigma_tau", "completeness"):
            ax.set_ylim(0, 1.05)
        elif k in ("cc_anom",):
            ax.set_ylim(min(0, ax.get_ylim()[0]), 1.05)
        elif k in ("r_merge",):
            ax.set_ylim(0, min(4, ax.get_ylim()[1]))
        elif k in ("r_meas", "r_pim"):
            ax.set_ylim(0, min(2, ax.get_ylim()[1]))
        else:
            ax.set_ylim(0)
        # Relabel x ticks from d*^2 back to resolution in Angstroms.
        xticks = ax.get_xticks()
        xticks_d = [
            "%.2f" % uctbx.d_star_sq_as_d(ds2) if ds2 > 0 else 0 for ds2 in xticks
        ]
        ax.set_xticklabels(xticks_d)
acea8bdb52e9939961631829a0d35f42e85a1353 | 8,533 | py | Python | discord_slash/utils/manage_commands.py | MrSpinne/discord-py-slash-command | efdc9a97b0539d1898e2ef83e12fa659a64edc70 | [
"MIT"
] | null | null | null | discord_slash/utils/manage_commands.py | MrSpinne/discord-py-slash-command | efdc9a97b0539d1898e2ef83e12fa659a64edc70 | [
"MIT"
] | null | null | null | discord_slash/utils/manage_commands.py | MrSpinne/discord-py-slash-command | efdc9a97b0539d1898e2ef83e12fa659a64edc70 | [
"MIT"
] | null | null | null | import typing
import inspect
import asyncio
import aiohttp
from ..error import RequestFailure
from ..model import SlashCommandOptionType
from collections.abc import Callable
async def add_slash_command(bot_id,
                            bot_token: str,
                            guild_id,
                            cmd_name: str,
                            description: str,
                            options: list = None):
    """
    A coroutine that registers a new slash command with the Discord API.

    :param bot_id: User ID of the bot.
    :param bot_token: Token of the bot.
    :param guild_id: ID of the guild to add command. Pass `None` to add global command.
    :param cmd_name: Name of the command. Must be 3 or longer and 32 or shorter.
    :param description: Description of the command.
    :param options: List of the function.
    :return: JSON Response of the request.
    :raises: :class:`.error.RequestFailure` - Requesting to Discord API has failed.
    """
    base_url = f"https://discord.com/api/v8/applications/{bot_id}"
    url = base_url + (f"/guilds/{guild_id}/commands" if guild_id else "/commands")
    payload = {
        "name": cmd_name,
        "description": description,
        "options": options or []
    }
    headers = {"Authorization": f"Bot {bot_token}"}
    async with aiohttp.ClientSession() as session:
        async with session.post(url, headers=headers, json=payload) as resp:
            if resp.status == 429:
                # Rate limited: honour Discord's advised delay, then retry.
                retry_info = await resp.json()
                await asyncio.sleep(retry_info["retry_after"])
                return await add_slash_command(bot_id, bot_token, guild_id, cmd_name, description, options)
            if not 200 <= resp.status < 300:
                raise RequestFailure(resp.status, await resp.text())
            return await resp.json()
async def remove_slash_command(bot_id,
                               bot_token,
                               guild_id,
                               cmd_id):
    """
    A coroutine that deletes a single slash command via the Discord API.

    :param bot_id: User ID of the bot.
    :param bot_token: Token of the bot.
    :param guild_id: ID of the guild to remove command. Pass `None` to remove global command.
    :param cmd_id: ID of the command.
    :return: Response code of the request.
    :raises: :class:`.error.RequestFailure` - Requesting to Discord API has failed.
    """
    base_url = f"https://discord.com/api/v8/applications/{bot_id}"
    scope = f"/guilds/{guild_id}/commands" if guild_id else "/commands"
    url = f"{base_url}{scope}/{cmd_id}"
    headers = {"Authorization": f"Bot {bot_token}"}
    async with aiohttp.ClientSession() as session:
        async with session.delete(url, headers=headers) as resp:
            if resp.status == 429:
                # Rate limited: honour Discord's advised delay, then retry.
                retry_info = await resp.json()
                await asyncio.sleep(retry_info["retry_after"])
                return await remove_slash_command(bot_id, bot_token, guild_id, cmd_id)
            if not 200 <= resp.status < 300:
                raise RequestFailure(resp.status, await resp.text())
            return resp.status
async def get_all_commands(bot_id,
                           bot_token,
                           guild_id=None):
    """
    A coroutine that fetches every registered slash command in one scope.

    :param bot_id: User ID of the bot.
    :param bot_token: Token of the bot.
    :param guild_id: ID of the guild to get commands. Pass `None` to get all global commands.
    :return: JSON Response of the request.
    :raises: :class:`.error.RequestFailure` - Requesting to Discord API has failed.
    """
    base_url = f"https://discord.com/api/v8/applications/{bot_id}"
    url = base_url + (f"/guilds/{guild_id}/commands" if guild_id else "/commands")
    headers = {"Authorization": f"Bot {bot_token}"}
    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers=headers) as resp:
            if resp.status == 429:
                # Rate limited: honour Discord's advised delay, then retry.
                retry_info = await resp.json()
                await asyncio.sleep(retry_info["retry_after"])
                return await get_all_commands(bot_id, bot_token, guild_id)
            if not 200 <= resp.status < 300:
                raise RequestFailure(resp.status, await resp.text())
            return await resp.json()
async def remove_all_commands(bot_id,
                              bot_token,
                              guild_ids: typing.List[int] = None):
    """
    Remove all slash commands.

    :param bot_id: User ID of the bot.
    :param bot_token: Token of the bot.
    :param guild_ids: List of the guild ID to remove commands. Pass ``None`` to remove only the global commands.
    """
    # Global commands are always cleared first.
    await remove_all_commands_in(bot_id, bot_token, None)
    for guild in guild_ids or []:
        try:
            await remove_all_commands_in(bot_id, bot_token, guild)
        except RequestFailure:
            # Best effort per guild: a failing guild must not stop the rest.
            pass
async def remove_all_commands_in(bot_id,
                                 bot_token,
                                 guild_id=None):
    """
    Remove all slash commands in area.

    :param bot_id: User ID of the bot.
    :param bot_token: Token of the bot.
    :param guild_id: ID of the guild to remove commands. Pass `None` to remove all global commands.
    """
    # Fetch the current command list, then delete each entry one by one.
    for command in await get_all_commands(bot_id, bot_token, guild_id):
        await remove_slash_command(bot_id, bot_token, guild_id, command['id'])
def create_option(name: str,
                  description: str,
                  option_type: int,
                  required: bool,
                  choices: list = None) -> dict:
    """
    Creates option used for creating slash command.

    :param name: Name of the option.
    :param description: Description of the option.
    :param option_type: Type of the option.
    :param required: Whether this option is required.
    :param choices: Choices of the option. Can be empty.
    :return: dict
    """
    option = {
        "name": name,
        "description": description,
        "type": option_type,
        "required": required,
    }
    # A falsy `choices` (None or empty) is normalised to an empty list.
    option["choices"] = choices if choices else []
    return option
def generate_options(function: Callable, description: str = "No description.") -> list:
    """
    Generates a list of options from the type hints of a command.
    You currently can type hint: str, int, bool, discord.User, discord.Channel, discord.Role

    .. warning::
        This is automatically used if you do not pass any options directly. It is not recommended to use this.

    :param function: The function callable of the command.
    :param description: The default argument description.
    """
    options = []
    params = iter(inspect.signature(function).parameters.values())
    # Fix: guard with a default so a zero-parameter callable returns [] instead
    # of leaking StopIteration out of next().
    first = next(params, None)
    if first is not None and first.name in ("self", "cls"):
        # Skip 1. (+ 2.) parameter, self/cls and ctx
        next(params, None)
    for param in params:
        required = True
        if isinstance(param.annotation, str):
            # if from __future__ import annotations, then annotations are strings and
            # should be converted back to types (evaluated in the function's globals)
            param = param.replace(annotation=eval(param.annotation, function.__globals__))
        if getattr(param.annotation, "__origin__", None) is typing.Union:
            # Make a command argument optional with typing.Optional[type] or typing.Union[type, None]
            args = getattr(param.annotation, "__args__", None)
            if args:
                param = param.replace(annotation=args[0])
                # Fix idiom: `x is not y` instead of `not x is y` (same semantics).
                required = args[-1] is not type(None)
        option_type = SlashCommandOptionType.from_type(param.annotation) or SlashCommandOptionType.STRING
        options.append(create_option(param.name, description, option_type, required))
    return options
def generate_auto_convert(options: list) -> dict:
    """
    Generate an auto_convert dict from command options.

    .. note::
        This is automatically used if you pass options.

    :param options: The list of options.
    """
    # Subcommand(-group) options carry no convertible value, so they are rejected.
    forbidden = (SlashCommandOptionType.SUB_COMMAND, SlashCommandOptionType.SUB_COMMAND_GROUP)
    auto_convert = {}
    for opt in options:
        if opt["type"] in forbidden:
            raise Exception("You can't use subcommand or subcommand_group type!")
        auto_convert[opt["name"]] = opt["type"]
    return auto_convert
def create_choice(value: str, name: str):
    """
    Creates choices used for creating command option.

    :param value: Value of the choice.
    :param name: Name of the choice.
    :return: dict
    """
    return dict(value=value, name=name)
| 36.310638 | 119 | 0.614438 |
acea8c8fe17fdc1735b62db70f47cd5d21d0554a | 2,542 | py | Python | lib/taniumpy/object_types/plugin.py | netsec/pytan | 29a3484d21cb90d8896275febd1c535e4f3cdc7e | [
"MIT"
] | null | null | null | lib/taniumpy/object_types/plugin.py | netsec/pytan | 29a3484d21cb90d8896275febd1c535e4f3cdc7e | [
"MIT"
] | 1 | 2021-12-08T08:29:26.000Z | 2021-12-08T08:29:26.000Z | pytanlib/taniumpy/object_types/plugin.py | splunk-soar-connectors/tanium | e6f38fd014ea125e11a584ac9932ad4e7e855ac7 | [
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2015 Tanium Inc
#
# Generated from console.wsdl version 0.0.1
#
#
from .base import BaseType
class Plugin(BaseType):
    """SOAP object for a Tanium console plugin (generated from console.wsdl)."""

    # XML tag used when (de)serialising this object over SOAP.
    _soap_tag = 'plugin'

    def __init__(self):
        # Register this object's schema with BaseType: scalar fields,
        # nested complex objects, and (none here) repeated list fields.
        BaseType.__init__(
            self,
            simple_properties={'name': str,
                               'bundle': str,
                               'plugin_server': str,
                               'input': str,
                               'script_response': str,
                               'exit_code': int,
                               'type': str,
                               'path': str,
                               'filename': str,
                               'plugin_url': str,
                               'run_detached_flag': int,
                               'execution_id': int,
                               'timeout_seconds': int,
                               'cache_row_id': int,
                               'local_admin_flag': int,
                               'allow_rest': int,
                               'raw_http_response': int,
                               'raw_http_request': int,
                               'use_json_flag': int,
                               'status': str,
                               'status_file_content': str},
            complex_properties={'arguments': PluginArgumentList,
                                'sql_response': PluginSql,
                                'metadata': MetadataList,
                                'commands': PluginCommandList,
                                'permissions': PermissionList},
            list_properties={},
        )
        # All properties start unset; they are populated from SOAP responses.
        self.name = None
        self.bundle = None
        self.plugin_server = None
        self.input = None
        self.script_response = None
        self.exit_code = None
        self.type = None
        self.path = None
        self.filename = None
        self.plugin_url = None
        self.run_detached_flag = None
        self.execution_id = None
        self.timeout_seconds = None
        self.cache_row_id = None
        self.local_admin_flag = None
        self.allow_rest = None
        self.raw_http_response = None
        self.raw_http_request = None
        self.use_json_flag = None
        self.status = None
        self.status_file_content = None
        self.arguments = None
        self.sql_response = None
        self.metadata = None
        self.commands = None
        self.permissions = None
from plugin_argument_list import PluginArgumentList
from plugin_sql import PluginSql
from metadata_list import MetadataList
from plugin_command_list import PluginCommandList
from permission_list import PermissionList
| 31.775 | 64 | 0.509048 |
acea8f6ee86540206f5829a1d2d36ac620589a80 | 14,403 | py | Python | aliyun/log/etl_core/trans_comp/trans_json.py | crhan/aliyun-log-python-sdk | aee03f056170e67a8ff68fad717f6d291671c9e3 | [
"MIT"
] | null | null | null | aliyun/log/etl_core/trans_comp/trans_json.py | crhan/aliyun-log-python-sdk | aee03f056170e67a8ff68fad717f6d291671c9e3 | [
"MIT"
] | null | null | null | aliyun/log/etl_core/trans_comp/trans_json.py | crhan/aliyun-log-python-sdk | aee03f056170e67a8ff68fad717f6d291671c9e3 | [
"MIT"
] | null | null | null | import inspect
import json
import logging
from collections import Iterable
import jmespath
import re
import six
from jmespath.exceptions import ParseError
from .trans_base import trans_comp_base
from ..etl_util import get_re_full_match
from ..exceptions import SettingError
__all__ = ['trans_comp_json']
logger = logging.getLogger(__name__)
def trans_comp_json(*args, **kwargs):
    # Dual-mode entry point: called with an event dict (positionally or via
    # the `event` keyword) it transforms immediately with default settings;
    # otherwise the arguments are configuration for a transformer instance.
    candidates = []
    if args:
        candidates.append(args[0])
    if 'event' in kwargs:
        candidates.append(kwargs['event'])
    if any(isinstance(c, dict) for c in candidates):
        trans = json_transformer()
        return trans(*args, **kwargs)
    return json_transformer(*args, **kwargs)
class json_transformer(trans_comp_base):
    """Expand/flatten JSON content of event fields, optionally after
    selecting a sub-tree with a JMESPath expression."""

    DEFAULT_SEP = u'.'
    DEFAULT_FMT = "simple"
    DEFAULT_DEPTH = 100
    DEFAULT_INCLUDE_NODE = u''
    DEFAULT_EXCLUDE_NODE = u''
    DEFAULT_INCLUDE_PATH = u''
    DEFAULT_EXCLUDE_PATH = u''
    DEFAULT_FMT_ARRAY = u"{parent_rlist[0]}_{index}"  # could also be custom formatting string using up to five placehodler: parent_list, parent_list, current, sep, prefix, suffix

    # Named strategies for building the output key during expansion.
    FMT_MAP = {
        "simple": lambda prefix, current, suffix, *args, **kwargs: u"{prefix}{current}{suffix}".format(prefix=prefix,
                                                                                                      current=current,
                                                                                                      suffix=suffix),
        "full": lambda parent_list, sep, prefix, current, suffix, *args,
                       **kwargs: u"{parent_list_str}{sep}{prefix}{current}{suffix}".format(
            parent_list_str=sep.join(parent_list), current=current, sep=sep, prefix=prefix, suffix=suffix),
        "parent": lambda parent_list, sep, prefix, current, suffix, *args,
                         **kwargs: u"{parent}{sep}{prefix}{current}{suffix}".format(parent=parent_list[-1],
                                                                                    current=current, sep=sep,
                                                                                    prefix=prefix, suffix=suffix),
        "root": lambda parent_list, sep, prefix, current, suffix, *args,
                       **kwargs: u"{parent_list[0]}{sep}{prefix}{current}{suffix}".format(parent_list=parent_list,
                                                                                          current=current, sep=sep,
                                                                                          prefix=prefix, suffix=suffix)
        # could also be custom formatting string using up to five placehodler: parent_list, parent_list, current, sep, prefix, suffix
        # could also be formatting function accepting the 3 parameters: parrent_list, current key, current value
        # Note: the functoin must result k, v tuple, if returning None, meaning skip this k-v
    }

    def __init__(self, jmes=None, jmes_ignore_none=None, output=None,
                 expand=None, depth=None, include_node=None, exclude_node=None,
                 include_path=None, exclude_path=None,
                 fmt=None, sep=None, prefix=None, suffix=None,
                 expand_array=None, fmt_array=None,
                 ):
        """
        :param jmes: jmes filter to select or generate new field
        :param jmes_ignore_none: if jmes filter is null, ignore it (default). Or else consider it as "". default is
        :param output: put the value parsed from jmes filter to this field
        :param expand: If jmes filter is configure, expand the result or not (Default is False in this case), If jmes is not configured, directly expand the field passed or not (Default is True in this case).
        :param depth: depth to scan, 1 means first layer, default is 100.
        :param include_node: keys to expand and include. regex string. using '|' for multiple ones. default is all.
        :param exclude_node: keys to skip, regex string. using '|' for multiple ones. default is nothing.
        :param include_path: path to expand and include. regex string to match from begin. using '|' for multiple ones. default is all. e.g. r"data\.k1", all sub-keys in data.k1 will be included.
        :param exclude_path: path to skip, regex string to match from begin. using '|' for multiple ones. default is nothing. . e.g. r"data\.k2", all sub-keys in data.k2 will be excluded.
        :param fmt: during expansion, how to format the key name, there're several types or customized as described in FMT_MAP
        :param sep: sep used in formatting during expansion
        :param prefix: prefix used in formatting during expansion
        :param suffix: suffix used in formatting during expansion
        :param expand_array: if expand array or just render it. default is True. item in array will be with name index, e.g. [1,2,3] will be considered as {"0": 1, "1": 2, "2": 3}
        :param fmt_array: format string for key name of each array element, default is "{parent_rlist[0]}_{index}", can be custom formatting string using placehodler: parent_list, parent_list, current
        """
        self.expand = expand
        if expand is None:
            # when jmes is not configured or configure but no output configured
            self.expand = not jmes or not output

        # self.level = level or 1
        self.jmes = self._u(jmes or "")
        self.prefix = self._u("" if prefix is None else prefix)
        self.suffix = self._u("" if suffix is None else suffix)
        self.sep = self._u(self.DEFAULT_SEP if sep is None else sep)
        self.output = self._u(output or "")

        self.jmes_filter = None
        self.jmes_ignore_none = True if jmes_ignore_none is None else jmes_ignore_none
        if jmes:
            try:
                self.jmes_filter = jmespath.compile(jmes)
            except jmespath.exceptions.ParseError as ex:
                raise SettingError(ex=ex, msg="Invalid JMES filter setting", settings=jmes)
        elif self.output:
            logger.warning(u"json_transformer: parameter output '{0}' will be ignored as there's no filter is selected."
                           .format(output))

        self.depth = min((depth or self.DEFAULT_DEPTH), self.DEFAULT_DEPTH)
        self.include_node = self._u(include_node or self.DEFAULT_INCLUDE_NODE)
        self.exclude_node = self._u(exclude_node or self.DEFAULT_EXCLUDE_NODE)
        self.include_path = self._u(include_path or self.DEFAULT_INCLUDE_PATH)
        self.exclude_path = self._u(exclude_path or self.DEFAULT_EXCLUDE_PATH)
        self.fmt = self._u(fmt or self.DEFAULT_FMT)

        # Pre-compile the node/path filters once; bad regex fails fast here.
        try:
            self.include_node_match = get_re_full_match(self.include_node)
            self.exclude_node_match = get_re_full_match(self.exclude_node)
            self.include_path_match = re.compile(self.include_path).match  # use match instead of full match
            self.exclude_path_match = re.compile(self.exclude_path).match  # use match instead of full match
        except Exception as ex:
            raise SettingError(ex=ex, msg="Invalid regex string for include/exclude")

        self.expand_array = True if expand_array is None else expand_array
        self.format_array = self._u(fmt_array or self.DEFAULT_FMT_ARRAY)

    def _skip_keys(self, key, parent_list):
        # True when the key (or its dotted path) is rejected by the
        # include/exclude node and path settings.
        if (self.include_node and not self.include_node_match(key)) or (
                self.exclude_node and self.exclude_node_match(key)):
            logger.info(u"json_transformer: 'key' {0} is not in include keys '{1}' or in exclude keys '{2}', skip it."
                        .format(key, self.include_node, self.exclude_node))
            return True

        if self.include_path or self.exclude_path:
            path = '.'.join(parent_list) + '.' + key
            if (self.include_path and not self.include_path_match(path)) or (
                    self.exclude_path and self.exclude_path_match(path)):
                logger.info(
                    u"json_transformer: path '{0}' is not in include path '{1}' or in exclude path '{2}', skip it."
                    .format(path, self.include_path, self.exclude_path))
                return True

        return False

    def format_add_kv(self, event, fmt, current, value, parent_list, parent_rlist, sep, prefix, suffix):
        # Format one key/value pair with `fmt` (named strategy, custom format
        # string, or callable) and store the result into `event`.
        if self._skip_keys(current, parent_list):
            logger.info(u"json_transformer: 'key' {0} is not in include keys '{1}' or in exclude keys '{2}', skip it."
                        .format(current, self.include_node, self.exclude_node))
            return

        ret = None
        if isinstance(fmt, (six.text_type, six.binary_type)):
            # A known name resolves to its strategy; otherwise treat `fmt`
            # itself as a format string.
            fmt = json_transformer.FMT_MAP.get(fmt.strip().lower(), fmt)
            try:
                if isinstance(fmt, (six.text_type, six.binary_type)):
                    ret = fmt.format(parent_list=parent_list, parent_rlist=parent_rlist, current=current, sep=sep,
                                     prefix=prefix, suffix=suffix), \
                          json_transformer._n(value)
                else:
                    # callable formatting function
                    ret = fmt(parent_list=parent_list, parent_rlist=parent_rlist, current=current, sep=sep,
                              prefix=prefix, suffix=suffix), \
                          json_transformer._n(value)
            except Exception as ex:
                logger.info(u"json_transformer: fail to format with settings: '{0}'".format((fmt, current, value,
                                                                                             parent_list, sep, prefix,
                                                                                             suffix)))
        elif inspect.isfunction(fmt):
            try:
                ret = fmt(parent_list, current, value)
            except Exception as ex:
                logger.info(u"json_transformer: fail to call formatting string: {0} wuth parameters: {1}"
                            .format(fmt, (parent_list, current, value)))

        if ret and len(ret) == 2:
            k, v = ret
            event[json_transformer._n(k)] = json_transformer._n(v)
        else:
            logger.info(u"json_transformer: unexpected format result: {0}, fmt: '{1}', k: '{2}', v: '{3}', skip it"
                        .format(ret, fmt, current, value))

    def _expand_json(self, event, key, value, parent_list, parent_rlist, depth, sep, prefix, suffix):
        # Recursively walk `value`, writing flattened key/value pairs into
        # `event`. `parent_list`/`parent_rlist` track the path top-down and
        # bottom-up respectively.
        # check if need to format it directly
        if len(parent_list) > depth \
                or (not isinstance(value, (list, tuple, dict))) \
                or (isinstance(value, (list, tuple)) and not self.expand_array):
            # 1. depth hit, 2. basic type, 3. array but not expand
            logger.info(u"json_transformer: hit stop parsing, key: '{0}', value: '{1}', parent: '{2}', depth: '{3}'"
                        .format(key, value, parent_list, depth))
            self.format_add_kv(event, self.fmt, self._n(key), self._n(value), parent_list, parent_rlist, sep, prefix,
                               suffix)
            return None

        # convert array to dict
        if isinstance(value, (list, tuple)):
            value = dict(
                (self.format_array.format(parent_list=parent_list, parent_rlist=parent_rlist, index=i), v) for i, v in
                enumerate(value))

        if isinstance(value, dict):
            for k, v in six.iteritems(value):
                if isinstance(v, (dict, tuple, list)):
                    # recursively parse it
                    self._expand_json(event, k, v, parent_list + (k,), (k,) + parent_rlist, depth, sep, prefix, suffix)
                else:
                    self.format_add_kv(event, self.fmt, self._n(k), self._n(v), parent_list, parent_rlist, sep, prefix,
                                       suffix)
        else:
            logger.info(u"json_transformer: skip unsupported message '{0}' of type '{1}' when expanding"
                        .format(value, type(value)))

    def _process_message(self, key, value):
        # Parse one field's value (JSON string or already-parsed object),
        # apply the optional JMES filter/output, then expand if configured.
        # Returns a dict of new fields, or None to skip this field.
        new_event = {}
        if isinstance(value, (six.binary_type, six.text_type)):
            try:
                value = json.loads(value)
            except Exception as ex:
                logger.info(u"json_transformer: fail to load event into json object: {0}, error: {1}".format(value, ex))
                return None

        if self.jmes_filter:
            try:
                value = self.jmes_filter.search(value)
                if value is None and self.jmes_ignore_none:
                    logger.info(u"split_event_transformer: value {0} get null from jmes settings {1}, skip it".
                                format(value, self.jmes))
                    return None
            except Exception as ex:
                logger.info(u"split_event_transformer: value {0} with invalid jmes settings {1}, skip it".
                            format(value, self.jmes))
                return None

            if self.output:
                new_event[self.output] = self._n(value)

        # if need to expand
        if self.expand:
            self._expand_json(new_event, key, value, (key,), (key,), self.depth, self.sep, self.prefix, self.suffix)

        return new_event

    def __call__(self, event, inpt):
        # Transform entry point: `inpt` is a field name or list of field
        # names; extracted fields are merged back into `event`.
        inpt = self._u(inpt)

        # simple dict mode
        if isinstance(inpt, (six.binary_type, six.text_type)):
            inpt = [inpt]

        if isinstance(inpt, Iterable):
            for i in inpt:
                if not isinstance(i, (six.binary_type, six.text_type)):
                    logger.error(u'trans_comp_lookup: type of input field "{0}" is unknown'.format(i))
                    continue

                if i not in event:
                    logger.info(u'trans_comp_lookup: event "{0}" does not contain field "{1}"'.format(event, i))
                    continue

                # get input value
                new_event = self._process_message(i, event[i])
                if new_event and isinstance(new_event, dict):
                    event.update(new_event)
                else:
                    logger.info(
                        u'trans_comp_lookup: event "{0}" does not extract value from field "{1}"'.format(event, i))
        else:
            logger.error(u"trans_comp_lookup: unknown type of input field {0}".format(inpt))
        return event
| 54.146617 | 208 | 0.582032 |
acea8f844ba000204ebf30210ce165e90cc77748 | 2,724 | py | Python | tests/python/shape/test_rot.py | WSID/crank-system | 7c94f829050815392259c227e38b34f97886f7fc | [
"MIT"
] | null | null | null | tests/python/shape/test_rot.py | WSID/crank-system | 7c94f829050815392259c227e38b34f97886f7fc | [
"MIT"
] | null | null | null | tests/python/shape/test_rot.py | WSID/crank-system | 7c94f829050815392259c227e38b34f97886f7fc | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Copyright (C) 2015, WSID
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import unittest
import math;
from gi.repository import GObject
from gi.repository import CrankBase
from gi.repository import CrankShape
class TestRot(unittest.TestCase):
def assertFloat (self, a, b, delta=0.0001):
"""A simple custom assert that given values are same.
It takes into delta values into account, so that test can endure little
errors.
"""
try: #if they are both of list type.
if (len(a) != len(b)):
raise AssertionError ("array length: %d != %d" % (len(a), len(b)))
for i in range (0, len(a)):
if ((a[i] < b[i] - delta) or (b[i] + delta < a[i])):
raise AssertionError ("%g != %g (diff=%g)" % (a[i], b[i], b[i]-a[i]))
except TypeError: #then they are numeric type.
if ((a < b - delta) or (b + delta < a)):
raise AssertionError ("%g != %g (diff=%g)" % (a, b, b-a))
def test_vec2_rot (self):
a = CrankBase.VecFloat2.init (0.9950, 0.0998)
b = CrankShape.rot_vec2_rot (a, 0.5)
self.assertFloat (b.x, 0.8253)
self.assertFloat (b.y, 0.5646)
def test_vec2_left (self):
a = CrankBase.VecFloat2.init (0.9950, 0.0998)
b = CrankShape.rot_vec2_left (a)
self.assertFloat (b.x, -0.0998)
self.assertFloat (b.y, 0.9950)
def test_vec2_right (self):
a = CrankBase.VecFloat2.init (0.9950, 0.0998)
b = CrankShape.rot_vec2_right (a)
self.assertFloat (b.x, 0.0998)
self.assertFloat (b.y, -0.9950)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main ()
| 38.914286 | 93 | 0.649413 |
acea90666a0d1a6d720c7ddf0952c3d3e931aa6e | 808 | py | Python | bayesian_cut/tests/load_data.py | DTUComputeCognitiveSystems/bayesian_cut | 89dc3e5a2f4b9d4ab7bd7aadbc7d0f7314ffc680 | [
"BSD-3-Clause"
] | 7 | 2019-03-01T13:58:59.000Z | 2021-12-16T00:41:26.000Z | bayesian_cut/tests/load_data.py | DTUComputeCognitiveSystems/bayesian_cut | 89dc3e5a2f4b9d4ab7bd7aadbc7d0f7314ffc680 | [
"BSD-3-Clause"
] | 3 | 2019-02-28T17:38:18.000Z | 2019-03-01T13:55:30.000Z | bayesian_cut/tests/load_data.py | DTUComputeCognitiveSystems/bayesian_cut | 89dc3e5a2f4b9d4ab7bd7aadbc7d0f7314ffc680 | [
"BSD-3-Clause"
] | 1 | 2019-02-28T17:55:24.000Z | 2019-02-28T17:55:24.000Z | """
Script to check the availability of test files
"""
#!/usr/bin/env python3
#
# -*- coding: utf-8 -*-
#
# Author: Laurent Vermue <lauve@dtu.dk>
#
#
# License: 3-clause BSD
import os
from ..data.load import file_checker
# Pickled fixture files required by the test suite.
TEST_FILES = ['test_data.pkl']
# Remote directory from which missing test files can be downloaded.
GITHUB_TESTDIR = 'https://github.com/DTUComputeCognitiveSystems/bayesian_cut/raw/master/bayesian_cut/tests'
def data_path():
    """Return the absolute directory containing this module (and its test files)."""
    return os.path.dirname(os.path.abspath(__file__))
def check_test_files():
    """Ensure all test fixtures are present locally, downloading missing ones.

    Returns 0 when every file is available, 1 on the first failure.
    """
    base_dir = data_path()
    for name in TEST_FILES:
        target = os.path.join(base_dir, name)
        if file_checker(name, target, GITHUB_TESTDIR) != 0:
            print('File {0} was not available and could not be downloaded'.format(name))
            return 1
    return 0
| 23.085714 | 107 | 0.669554 |
acea90e57f7fc6a31df3f4fe910af57463bd9866 | 1,847 | py | Python | youtube_notifier/message.py | thequietestoctopus/youtube-notifier | d59c621cbf0c98bc1cb957a22deaf6d6747c3e65 | [
"MIT"
] | null | null | null | youtube_notifier/message.py | thequietestoctopus/youtube-notifier | d59c621cbf0c98bc1cb957a22deaf6d6747c3e65 | [
"MIT"
] | null | null | null | youtube_notifier/message.py | thequietestoctopus/youtube-notifier | d59c621cbf0c98bc1cb957a22deaf6d6747c3e65 | [
"MIT"
# Translation table built once at import time: each special character maps to
# its backslash-escaped form, except '&' which becomes the word 'and'.
# (A single str.translate pass replaces 16 chained str.replace scans.)
_MARKDOWN_ESCAPES = str.maketrans({
    '|': '\\|', '-': '\\-', '!': '\\!', "'": "\\'", '_': '\\_',
    '*': '\\*', '&': 'and', '.': '\\.', '#': '\\#', '+': '\\+',
    '(': '\\(', '[': '\\[', '{': '\\{',
    # below might not be needed
    ')': '\\)', ']': '\\]', '}': '\\}',
})


def markdown(string):
    """Return *string* with markdown special characters backslash-escaped
    (and '&' replaced by 'and').

    :param string: text to escape
    :return: escaped text
    """
    return string.translate(_MARKDOWN_ESCAPES)
def channel_header(name):
    """Build the section header announcing recent uploads for a channel."""
    escaped = markdown(name)
    return '\n\nRecent uploads by *{}*:'.format(escaped)
def channel_entry(videos, message_type):
    """Render a message fragment for a channel's videos.

    :param videos: list of dict entries with 'title', 'id' and 'published' keys
    :param message_type: truthy -> list every video; falsy -> a short
        "NEW CATEGORY" summary sampling up to three entries.
    """

    def video_line(vid):
        # One bullet line linking to the video with its publish date.
        title = markdown(vid['title'])
        watch_link = 'https://www.youtube.com/watch?v=' + vid['id']
        pub = vid['published']
        return '\n+\\* [{}]({}) _posted {}_'.format(title, watch_link, pub)

    def grab_sample(vids):
        # Summarise with first / middle / last entries depending on size.
        num = len(vids)
        if num == 0:
            # Fix: previously this raised IndexError on an empty list.
            return '0 entries found'
        s_line = str(num) + ' entries found, including:'
        s_line += video_line(vids[0])
        if num > 2:
            s_line += video_line(vids[num // 2])
        if num > 1:
            s_line += video_line(vids[-1])
        return s_line

    if message_type:
        return ''.join(video_line(video) for video in videos)
    return '\nNEW CATEGORY \\- ' + grab_sample(videos)
def s_concatenate(list_):
    """Concatenate an iterable of strings into one string.

    Uses str.join (linear) instead of the previous repeated ``+=``
    loop, which is quadratic in the worst case.
    """
    return ''.join(list_)
acea9113ee858ae88fb70925f4cc99f96353a94d | 110,855 | py | Python | python/pyspark/sql/tests.py | Velumanir/Spark-Example | 4816c2ef5e04eb2dd70bed8b99882aa0b7fe7fd7 | [
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | 1 | 2021-03-10T04:07:07.000Z | 2021-03-10T04:07:07.000Z | python/pyspark/sql/tests.py | Velumanir/Spark-Example | 4816c2ef5e04eb2dd70bed8b99882aa0b7fe7fd7 | [
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | null | null | null | python/pyspark/sql/tests.py | Velumanir/Spark-Example | 4816c2ef5e04eb2dd70bed8b99882aa0b7fe7fd7 | [
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | null | null | null | # -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for pyspark.sql; additional tests are implemented as doctests in
individual modules.
"""
import os
import sys
import subprocess
import pydoc
import shutil
import tempfile
import pickle
import functools
import time
import datetime
import py4j
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark import SparkContext
from pyspark.sql import SparkSession, SQLContext, HiveContext, Column, Row
from pyspark.sql.types import *
from pyspark.sql.types import UserDefinedType, _infer_type
from pyspark.tests import ReusedPySparkTestCase, SparkSubmitTests
from pyspark.sql.functions import UserDefinedFunction, sha2, lit
from pyspark.sql.window import Window
from pyspark.sql.utils import AnalysisException, ParseException, IllegalArgumentException
class UTCOffsetTimezone(datetime.tzinfo):
    """
    Specifies timezone in UTC offset
    """

    def __init__(self, offset=0):
        # Fixed offset from UTC, in whole hours.
        self.ZERO = datetime.timedelta(hours=offset)

    def utcoffset(self, dt):
        return self.ZERO

    def dst(self, dt):
        # Same fixed delta is reported for DST, matching the original tests.
        return self.ZERO
class ExamplePointUDT(UserDefinedType):
    """
    User-defined type (UDT) for ExamplePoint.
    """

    @classmethod
    def sqlType(self):
        # Stored as a fixed pair of doubles.
        return ArrayType(DoubleType(), False)

    @classmethod
    def module(cls):
        return 'pyspark.sql.tests'

    @classmethod
    def scalaUDT(cls):
        return 'org.apache.spark.sql.test.ExamplePointUDT'

    def serialize(self, obj):
        return [obj.x, obj.y]

    def deserialize(self, datum):
        x, y = datum[0], datum[1]
        return ExamplePoint(x, y)
class ExamplePoint:
    """
    An example class to demonstrate UDT in Scala, Java, and Python.
    """

    __UDT__ = ExamplePointUDT()

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        return "ExamplePoint({},{})".format(self.x, self.y)

    def __str__(self):
        return "({},{})".format(self.x, self.y)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return (self.x, self.y) == (other.x, other.y)
class PythonOnlyUDT(UserDefinedType):
    """
    User-defined type (UDT) for ExamplePoint.
    """

    @classmethod
    def sqlType(self):
        # Stored as a fixed pair of doubles.
        return ArrayType(DoubleType(), False)

    @classmethod
    def module(cls):
        return '__main__'

    def serialize(self, obj):
        return [obj.x, obj.y]

    def deserialize(self, datum):
        x, y = datum[0], datum[1]
        return PythonOnlyPoint(x, y)

    @staticmethod
    def foo():
        pass

    @property
    def props(self):
        return {}
class PythonOnlyPoint(ExamplePoint):
    """
    An example class to demonstrate UDT in only Python
    """
    # Same point shape as ExamplePoint, but backed by the Python-only UDT.
    __UDT__ = PythonOnlyUDT()
class MyObject(object):
    """Minimal key/value holder used as fixture data in tests."""

    def __init__(self, key, value):
        self.key, self.value = key, value
class DataTypeTests(unittest.TestCase):
    """Regression tests for the SQL data-type classes."""

    # regression test for SPARK-6055
    def test_data_type_eq(self):
        original = LongType()
        restored = pickle.loads(pickle.dumps(LongType()))
        self.assertEqual(original, restored)

    # regression test for SPARK-7978
    def test_decimal_type(self):
        default_decimal = DecimalType()
        sized_decimal = DecimalType(10, 2)
        self.assertTrue(sized_decimal is not default_decimal)
        self.assertNotEqual(default_decimal, sized_decimal)
        precision_only = DecimalType(8)
        self.assertNotEqual(sized_decimal, precision_only)

    # regression test for SPARK-10392
    def test_datetype_equal_zero(self):
        date_type = DateType()
        self.assertEqual(date_type.fromInternal(0), datetime.date(1970, 1, 1))

    # regression test for SPARK-17035
    def test_timestamp_microsecond(self):
        ts_type = TimestampType()
        self.assertEqual(ts_type.toInternal(datetime.datetime.max) % 1000000, 999999)

    def test_empty_row(self):
        empty = Row()
        self.assertEqual(len(empty), 0)
class SQLTests(ReusedPySparkTestCase):
    @classmethod
    def setUpClass(cls):
        # One shared SparkSession plus a 100-row (key, value) DataFrame
        # reused by all tests in this class.
        ReusedPySparkTestCase.setUpClass()
        cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
        # Only the temp *name* is needed; remove the file so tests can
        # create their own paths under it.
        os.unlink(cls.tempdir.name)
        cls.spark = SparkSession(cls.sc)
        cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
        cls.df = cls.spark.createDataFrame(cls.testData)
    @classmethod
    def tearDownClass(cls):
        ReusedPySparkTestCase.tearDownClass()
        cls.spark.stop()
        # ignore_errors: the directory may never have been created.
        shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_sqlcontext_reuses_sparksession(self):
sqlContext1 = SQLContext(self.sc)
sqlContext2 = SQLContext(self.sc)
self.assertTrue(sqlContext1.sparkSession is sqlContext2.sparkSession)
    def tearDown(self):
        super(SQLTests, self).tearDown()

        # tear down test_bucketed_write state
        self.spark.sql("DROP TABLE IF EXISTS pyspark_bucket")
def test_row_should_be_read_only(self):
row = Row(a=1, b=2)
self.assertEqual(1, row.a)
def foo():
row.a = 3
self.assertRaises(Exception, foo)
row2 = self.spark.range(10).first()
self.assertEqual(0, row2.id)
def foo2():
row2.id = 2
self.assertRaises(Exception, foo2)
    def test_range(self):
        # range() edge cases: empty range, negative step, a step larger than
        # 32 bits, negative end, and the single-argument form.
        self.assertEqual(self.spark.range(1, 1).count(), 0)
        self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
        self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
        self.assertEqual(self.spark.range(-2).count(), 0)
        self.assertEqual(self.spark.range(3).count(), 3)
    def test_duplicated_column_names(self):
        # With duplicate column names, positional access and str(row) work...
        df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
        row = df.select('*').first()
        self.assertEqual(1, row[0])
        self.assertEqual(2, row[1])
        self.assertEqual("Row(c=1, c=2)", str(row))
        # Cannot access columns
        self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
        self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
        self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
    def test_column_name_encoding(self):
        """Ensure that created columns has `str` type consistently."""
        # One name given as str and one as a unicode literal: both must come
        # back as plain `str`.
        columns = self.spark.createDataFrame([('Alice', 1)], ['name', u'age']).columns
        self.assertEqual(columns, ['name', 'age'])
        self.assertTrue(isinstance(columns[0], str))
        self.assertTrue(isinstance(columns[1], str))
    def test_explode(self):
        from pyspark.sql.functions import explode
        d = [Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"})]
        rdd = self.sc.parallelize(d)
        data = self.spark.createDataFrame(rdd)

        # explode() on an array column yields one row per element.
        result = data.select(explode(data.intlist).alias("a")).select("a").collect()
        self.assertEqual(result[0][0], 1)
        self.assertEqual(result[1][0], 2)
        self.assertEqual(result[2][0], 3)

        # explode() on a map column yields (key, value) via a two-name alias.
        result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect()
        self.assertEqual(result[0][0], "a")
        self.assertEqual(result[0][1], "b")
    def test_and_in_expression(self):
        # Column conditions must be combined with &, | and ~; using Python's
        # own `and`/`or`/`not` keywords raises ValueError.
        self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= "2")).count())
        self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= "2"))
        self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < "2")).count())
        self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < "2")
        self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count())
        self.assertRaises(ValueError, lambda: not self.df.key == 1)
def test_udf_with_callable(self):
    """Any callable object (here a class with __call__) can back a UDF."""
    d = [Row(number=i, squared=i**2) for i in range(10)]
    rdd = self.sc.parallelize(d)
    data = self.spark.createDataFrame(rdd)

    class PlusFour:
        def __call__(self, col):
            # Skip nulls; returning None keeps the result null.
            if col is not None:
                return col + 4

    call = PlusFour()
    pudf = UserDefinedFunction(call, LongType())
    res = data.select(pudf(data['number']).alias('plus_four'))
    # sum(0..9) + 4*10 = 85
    self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf_with_partial_function(self):
    """A functools.partial can back a UDF just like a plain function."""
    d = [Row(number=i, squared=i**2) for i in range(10)]
    rdd = self.sc.parallelize(d)
    data = self.spark.createDataFrame(rdd)

    def some_func(col, param):
        if col is not None:
            return col + param

    pfunc = functools.partial(some_func, param=4)
    pudf = UserDefinedFunction(pfunc, LongType())
    res = data.select(pudf(data['number']).alias('plus_four'))
    # sum(0..9) + 4*10 = 85
    self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf(self):
    """A registered two-argument Python UDF is callable from SQL."""
    add_len = lambda x, y: len(x) + y
    self.spark.catalog.registerFunction("twoArgs", add_len, IntegerType())
    [row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
    self.assertEqual(5, row[0])
def test_udf2(self):
    """A registered UDF works both in SELECT and in a WHERE clause."""
    self.spark.catalog.registerFunction("strlen", lambda string: len(string), IntegerType())
    self.spark.createDataFrame(self.sc.parallelize([Row(a="test")]))\
        .createOrReplaceTempView("test")
    [res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect()
    self.assertEqual(4, res[0])
def test_chained_udf(self):
    """A registered UDF can be nested and mixed with plain expressions."""
    self.spark.catalog.registerFunction("double", lambda x: x + x, IntegerType())
    cases = [
        ("SELECT double(1)", 2),
        ("SELECT double(double(1))", 4),
        ("SELECT double(double(1) + 1)", 6),
    ]
    for query, expected in cases:
        [row] = self.spark.sql(query).collect()
        self.assertEqual(expected, row[0])
def test_single_udf_with_repeated_argument(self):
    """Calling a UDF with the same literal in both slots works (SPARK-20685)."""
    plus = lambda x, y: x + y
    self.spark.catalog.registerFunction("add", plus, IntegerType())
    first_row = self.spark.sql("SELECT add(1, 1)").first()
    self.assertEqual((2, ), tuple(first_row))
def test_multiple_udfs(self):
    """Several UDF invocations — repeated, nested, and mixed with a second
    UDF — can appear in one SELECT."""
    self.spark.catalog.registerFunction("double", lambda x: x * 2, IntegerType())
    [row] = self.spark.sql("SELECT double(1), double(2)").collect()
    self.assertEqual(tuple(row), (2, 4))
    [row] = self.spark.sql("SELECT double(double(1)), double(double(2) + 2)").collect()
    self.assertEqual(tuple(row), (4, 12))
    self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
    [row] = self.spark.sql("SELECT double(add(1, 2)), add(double(2), 1)").collect()
    self.assertEqual(tuple(row), (6, 5))
def test_udf_in_filter_on_top_of_outer_join(self):
    """Filtering on a UDF-produced column above a left outer join works."""
    from pyspark.sql.functions import udf
    left = self.spark.createDataFrame([Row(a=1)])
    right = self.spark.createDataFrame([Row(a=1)])
    df = left.join(right, on='a', how='left_outer')
    df = df.withColumn('b', udf(lambda x: 'x')(df.a))
    self.assertEqual(df.filter('b = "x"').collect(), [Row(a=1, b='x')])
def test_udf_in_filter_on_top_of_join(self):
    """A boolean UDF can be the filter condition over a cross join
    (regression test for SPARK-18589)."""
    from pyspark.sql.functions import udf
    left = self.spark.createDataFrame([Row(a=1)])
    right = self.spark.createDataFrame([Row(b=1)])
    f = udf(lambda a, b: a == b, BooleanType())
    df = left.crossJoin(right).filter(f("a", "b"))
    self.assertEqual(df.collect(), [Row(a=1, b=1)])
def test_udf_without_arguments(self):
    """A zero-argument UDF can be registered and invoked from SQL."""
    self.spark.catalog.registerFunction("foo", lambda: "bar")
    rows = self.spark.sql("SELECT foo()").collect()
    self.assertEqual(1, len(rows))
    self.assertEqual("bar", rows[0][0])
def test_udf_with_array_type(self):
    """UDFs can return an ArrayType and take a map-typed column as input."""
    d = [Row(l=list(range(3)), d={"key": list(range(5))})]
    rdd = self.sc.parallelize(d)
    self.spark.createDataFrame(rdd).createOrReplaceTempView("test")
    self.spark.catalog.registerFunction("copylist", lambda l: list(l), ArrayType(IntegerType()))
    self.spark.catalog.registerFunction("maplen", lambda d: len(d), IntegerType())
    [(l1, l2)] = self.spark.sql("select copylist(l), maplen(d) from test").collect()
    self.assertEqual(list(range(3)), l1)
    self.assertEqual(1, l2)
def test_broadcast_in_udf(self):
    """A UDF body may read a broadcast variable's .value on executors."""
    bar = {"a": "aa", "b": "bb", "c": "abc"}
    foo = self.sc.broadcast(bar)
    self.spark.catalog.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
    [res] = self.spark.sql("SELECT MYUDF('c')").collect()
    self.assertEqual("abc", res[0])
    # Empty-string input takes the falsy branch and returns ''.
    [res] = self.spark.sql("SELECT MYUDF('')").collect()
    self.assertEqual("", res[0])
def test_udf_with_filter_function(self):
    """A boolean UDF can be AND-ed with a plain column predicate in filter()."""
    df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
    from pyspark.sql.functions import udf, col
    from pyspark.sql.types import BooleanType

    my_filter = udf(lambda a: a < 2, BooleanType())
    sel = df.select(col("key"), col("value")).filter((my_filter(col("key"))) & (df.value < "2"))
    self.assertEqual(sel.collect(), [Row(key=1, value='1')])
def test_udf_with_aggregate_function(self):
    """UDFs compose with distinct/filter and with groupBy/agg pipelines."""
    df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
    from pyspark.sql.functions import udf, col, sum
    from pyspark.sql.types import BooleanType

    my_filter = udf(lambda a: a == 1, BooleanType())
    sel = df.select(col("key")).distinct().filter(my_filter(col("key")))
    self.assertEqual(sel.collect(), [Row(key=1)])

    my_copy = udf(lambda x: x, IntegerType())
    my_add = udf(lambda a, b: int(a + b), IntegerType())
    my_strlen = udf(lambda x: len(x), IntegerType())
    # Group on a UDF column, aggregate a UDF column, then combine with a UDF.
    sel = df.groupBy(my_copy(col("key")).alias("k"))\
        .agg(sum(my_strlen(col("value"))).alias("s"))\
        .select(my_add(col("k"), col("s")).alias("t"))
    self.assertEqual(sel.collect(), [Row(t=4), Row(t=3)])
def test_udf_in_generate(self):
    """explode() accepts an array produced by a UDF (a generator input)."""
    from pyspark.sql.functions import udf, explode
    df = self.spark.range(5)
    f = udf(lambda x: list(range(x)), ArrayType(LongType()))
    row = df.select(explode(f(*df))).groupBy().sum().first()
    # sum over exploded range(0)+range(1)+...+range(4) = 10
    self.assertEqual(row[0], 10)

    df = self.spark.range(3)
    res = df.select("id", explode(f(df.id))).collect()
    self.assertEqual(res[0][0], 1)
    self.assertEqual(res[0][1], 0)
    self.assertEqual(res[1][0], 2)
    self.assertEqual(res[1][1], 0)
    self.assertEqual(res[2][0], 2)
    self.assertEqual(res[2][1], 1)

    range_udf = udf(lambda value: list(range(value - 1, value + 1)), ArrayType(IntegerType()))
    res = df.select("id", explode(range_udf(df.id))).collect()
    self.assertEqual(res[0][0], 0)
    self.assertEqual(res[0][1], -1)
    self.assertEqual(res[1][0], 0)
    self.assertEqual(res[1][1], 0)
    self.assertEqual(res[2][0], 1)
    self.assertEqual(res[2][1], 0)
    self.assertEqual(res[3][0], 1)
    self.assertEqual(res[3][1], 1)
def test_udf_with_order_by_and_limit(self):
    """A UDF column survives orderBy + limit planning."""
    from pyspark.sql.functions import udf
    my_copy = udf(lambda x: x, IntegerType())
    df = self.spark.range(10).orderBy("id")
    res = df.select(df.id, my_copy(df.id).alias("copy")).limit(1)
    res.explain(True)
    self.assertEqual(res.collect(), [Row(id=0, copy=0)])
def test_udf_registration_returns_udf(self):
    """spark.udf.register returns a UDF usable in the DataFrame API too."""
    df = self.spark.range(10)
    add_three = self.spark.udf.register("add_three", lambda x: x + 3, IntegerType())
    self.assertListEqual(
        df.selectExpr("add_three(id) AS plus_three").collect(),
        df.select(add_three("id").alias("plus_three")).collect()
    )
def test_wholefile_json(self):
    """wholeFile=True reads a multi-line JSON array as the same rows as
    the line-delimited equivalent."""
    people1 = self.spark.read.json("python/test_support/sql/people.json")
    people_array = self.spark.read.json("python/test_support/sql/people_array.json",
                                        wholeFile=True)
    self.assertEqual(people1.collect(), people_array.collect())
def test_wholefile_csv(self):
    """wholeFile=True preserves embedded newlines inside quoted CSV fields."""
    ages_newlines = self.spark.read.csv(
        "python/test_support/sql/ages_newlines.csv", wholeFile=True)
    expected = [Row(_c0=u'Joe', _c1=u'20', _c2=u'Hi,\nI am Jeo'),
                Row(_c0=u'Tom', _c1=u'30', _c2=u'My name is Tom'),
                Row(_c0=u'Hyukjin', _c1=u'25', _c2=u'I am Hyukjin\n\nI love Spark!')]
    self.assertEqual(ages_newlines.collect(), expected)
def test_ignorewhitespace_csv(self):
    """With both ignore*WhiteSpace options off, the CSV writer keeps
    leading/trailing spaces verbatim."""
    tmpPath = tempfile.mkdtemp()
    # The writer requires the target directory not to exist.
    shutil.rmtree(tmpPath)
    self.spark.createDataFrame([[" a", "b  ", " c "]]).write.csv(
        tmpPath,
        ignoreLeadingWhiteSpace=False,
        ignoreTrailingWhiteSpace=False)

    expected = [Row(value=u' a,b  , c ')]
    # Read back as plain text to inspect the raw bytes written.
    readback = self.spark.read.text(tmpPath)
    self.assertEqual(readback.collect(), expected)
    shutil.rmtree(tmpPath)
def test_read_multiple_orc_file(self):
    """orc() accepts a list of paths and unions their rows."""
    paths = ["python/test_support/sql/orc_partitioned/b=0/c=0",
             "python/test_support/sql/orc_partitioned/b=1/c=1"]
    df = self.spark.read.orc(paths)
    self.assertEqual(2, df.count())
def test_udf_with_input_file_name(self):
    """input_file_name() can be passed through a UDF for file-based reads."""
    from pyspark.sql.functions import udf, input_file_name
    from pyspark.sql.types import StringType

    sourceFile = udf(lambda path: path, StringType())
    filePath = "python/test_support/sql/people1.json"
    row = self.spark.read.json(filePath).select(sourceFile(input_file_name())).first()
    self.assertTrue(row[0].find("people1.json") != -1)
def test_udf_with_input_file_name_for_hadooprdd(self):
    """input_file_name() is populated for RDD-backed reads, both textFile
    and newAPIHadoopFile sources."""
    from pyspark.sql.functions import udf, input_file_name
    from pyspark.sql.types import StringType

    def filename(path):
        return path

    sameText = udf(filename, StringType())

    rdd = self.sc.textFile('python/test_support/sql/people.json')
    df = self.spark.read.json(rdd).select(input_file_name().alias('file'))
    row = df.select(sameText(df['file'])).first()
    self.assertTrue(row[0].find("people.json") != -1)

    rdd2 = self.sc.newAPIHadoopFile(
        'python/test_support/sql/people.json',
        'org.apache.hadoop.mapreduce.lib.input.TextInputFormat',
        'org.apache.hadoop.io.LongWritable',
        'org.apache.hadoop.io.Text')
    df2 = self.spark.read.json(rdd2).select(input_file_name().alias('file'))
    row2 = df2.select(sameText(df2['file'])).first()
    self.assertTrue(row2[0].find("people.json") != -1)
def test_udf_defers_judf_initalization(self):
    """The JVM-side UDF handle is created lazily, on first call."""
    # This is separate of UDFInitializationTests
    # to avoid context initialization
    # when udf is called
    from pyspark.sql.functions import UserDefinedFunction

    f = UserDefinedFunction(lambda x: x, StringType())

    self.assertIsNone(
        f._judf_placeholder,
        "judf should not be initialized before the first call."
    )

    self.assertIsInstance(f("foo"), Column, "UDF call should return a Column.")

    self.assertIsNotNone(
        f._judf_placeholder,
        "judf should be initialized after UDF has been called."
    )
def test_udf_with_string_return_type(self):
    """Return types may be given as DDL strings ('integer', 'struct<...>',
    'array<double>') instead of DataType objects."""
    from pyspark.sql.functions import UserDefinedFunction

    add_one = UserDefinedFunction(lambda x: x + 1, "integer")
    make_pair = UserDefinedFunction(lambda x: (-x, x), "struct<x:integer,y:integer>")
    make_array = UserDefinedFunction(
        lambda x: [float(x) for x in range(x, x + 3)], "array<double>")

    expected = (2, Row(x=-1, y=1), [1.0, 2.0, 3.0])
    actual = (self.spark.range(1, 2).toDF("x")
              .select(add_one("x"), make_pair("x"), make_array("x"))
              .first())

    self.assertTupleEqual(expected, actual)
def test_udf_shouldnt_accept_noncallable_object(self):
    """Constructing a UDF from a non-callable object raises TypeError."""
    from pyspark.sql.functions import UserDefinedFunction
    from pyspark.sql.types import StringType

    non_callable = None
    with self.assertRaises(TypeError):
        UserDefinedFunction(non_callable, StringType())
def test_udf_with_decorator(self):
    """All decorator spellings work: @udf(type), @udf(returnType=...),
    bare @udf, @udf(), @udf("ddl string"), and multi-argument UDFs."""
    from pyspark.sql.functions import lit, udf
    from pyspark.sql.types import IntegerType, DoubleType

    @udf(IntegerType())
    def add_one(x):
        if x is not None:
            return x + 1

    @udf(returnType=DoubleType())
    def add_two(x):
        if x is not None:
            return float(x + 2)

    @udf
    def to_upper(x):
        if x is not None:
            return x.upper()

    @udf()
    def to_lower(x):
        if x is not None:
            return x.lower()

    @udf
    def substr(x, start, end):
        if x is not None:
            return x[start:end]

    @udf("long")
    def trunc(x):
        return int(x)

    @udf(returnType="double")
    def as_double(x):
        return float(x)

    df = (
        self.spark
            .createDataFrame(
                [(1, "Foo", "foobar", 3.0)], ("one", "Foo", "foobar", "float"))
            .select(
                add_one("one"), add_two("one"),
                to_upper("Foo"), to_lower("Foo"),
                substr("foobar", lit(0), lit(3)),
                trunc("float"), as_double("one")))

    # Undeclared return types default to string.
    self.assertListEqual(
        [tpe for _, tpe in df.dtypes],
        ["int", "double", "string", "string", "string", "bigint", "double"]
    )

    self.assertListEqual(
        list(df.first()),
        [2, 3.0, "FOO", "foo", "foo", 3, 1.0]
    )
def test_udf_wrapper(self):
    """udf() preserves the wrapped function, its docstring, and exposes
    the declared return type."""
    from pyspark.sql.functions import udf
    from pyspark.sql.types import IntegerType

    def f(x):
        """Identity"""
        return x

    return_type = IntegerType()
    f_ = udf(f, return_type)

    self.assertTrue(f.__doc__ in f_.__doc__)
    self.assertEqual(f, f_.func)
    self.assertEqual(return_type, f_.returnType)
def test_basic_functions(self):
    """Smoke test of core DataFrame operations: count/collect/schema,
    persist/unpersist/cache, and temp-view SQL."""
    rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
    df = self.spark.read.json(rdd)
    df.count()
    df.collect()
    df.schema

    # cache and checkpoint
    self.assertFalse(df.is_cached)
    df.persist()
    df.unpersist(True)
    df.cache()
    self.assertTrue(df.is_cached)
    self.assertEqual(2, df.count())

    df.createOrReplaceTempView("temp")
    df = self.spark.sql("select foo from temp")
    df.count()
    df.collect()
def test_apply_schema_to_row(self):
    """An existing DataFrame's schema can be re-applied to an RDD of Rows."""
    df = self.spark.read.json(self.sc.parallelize(["""{"a":2}"""]))
    df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema)
    self.assertEqual(df.collect(), df2.collect())

    rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
    df3 = self.spark.createDataFrame(rdd, df.schema)
    self.assertEqual(10, df3.count())
def test_infer_schema_to_local(self):
    """Schema inference gives the same result for a local list of dicts
    and for the equivalent RDD (with full sampling)."""
    input = [{"a": 1}, {"b": "coffee"}]
    rdd = self.sc.parallelize(input)
    df = self.spark.createDataFrame(input)
    df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
    self.assertEqual(df.schema, df2.schema)

    rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
    df3 = self.spark.createDataFrame(rdd, df.schema)
    self.assertEqual(10, df3.count())
def test_apply_schema_to_dict_and_rows(self):
    """An explicit schema can be applied to dicts, RDDs and Rows, with
    verification both enabled and disabled."""
    schema = StructType().add("b", StringType()).add("a", IntegerType())
    input = [{"a": 1}, {"b": "coffee"}]
    rdd = self.sc.parallelize(input)
    for verify in [False, True]:
        df = self.spark.createDataFrame(input, schema, verifySchema=verify)
        df2 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
        self.assertEqual(df.schema, df2.schema)

        rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
        df3 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
        self.assertEqual(10, df3.count())

        input = [Row(a=x, b=str(x)) for x in range(10)]
        df4 = self.spark.createDataFrame(input, schema, verifySchema=verify)
        self.assertEqual(10, df4.count())
def test_create_dataframe_schema_mismatch(self):
    """createDataFrame with a schema wider than the data only fails when
    an action forces evaluation."""
    # NOTE: removed an unused local (`input = [Row(a=1)]`) that also
    # shadowed the `input` builtin.
    rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i))
    schema = StructType([StructField("a", IntegerType()), StructField("b", StringType())])
    df = self.spark.createDataFrame(rdd, schema)
    # The mismatch surfaces lazily, at action time.
    self.assertRaises(Exception, lambda: df.show())
def test_serialize_nested_array_and_map(self):
    """Rows nested inside arrays and map values round-trip through
    DataFrame serialization."""
    d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
    rdd = self.sc.parallelize(d)
    df = self.spark.createDataFrame(rdd)
    row = df.head()
    self.assertEqual(1, len(row.l))
    self.assertEqual(1, row.l[0].a)
    self.assertEqual("2", row.d["key"].d)

    l = df.rdd.map(lambda x: x.l).first()
    self.assertEqual(1, len(l))
    self.assertEqual('s', l[0].b)

    d = df.rdd.map(lambda x: x.d).first()
    self.assertEqual(1, len(d))
    self.assertEqual(1.0, d["key"].c)

    row = df.rdd.map(lambda x: x.d["key"]).first()
    self.assertEqual(1.0, row.c)
    self.assertEqual("2", row.d)
def test_infer_schema(self):
    """Schema inference handles empty containers/None in the first row by
    looking at later rows; sampled inference matches."""
    d = [Row(l=[], d={}, s=None),
         Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")]
    rdd = self.sc.parallelize(d)
    df = self.spark.createDataFrame(rdd)
    self.assertEqual([], df.rdd.map(lambda r: r.l).first())
    self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())

    df.createOrReplaceTempView("test")
    result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'")
    self.assertEqual(1, result.head()[0])

    # Full-sampling inference should produce the identical schema.
    df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
    self.assertEqual(df.schema, df2.schema)
    self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
    self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())

    df2.createOrReplaceTempView("test2")
    result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
    self.assertEqual(1, result.head()[0])
def test_infer_nested_schema(self):
    """Schema inference works for nested structures built from positional
    Rows, nested lists, and namedtuples."""
    NestedRow = Row("f1", "f2")
    nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}),
                                      NestedRow([2, 3], {"row2": 2.0})])
    df = self.spark.createDataFrame(nestedRdd1)
    self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])

    nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),
                                      NestedRow([[2, 3], [3, 4]], [2, 3])])
    df = self.spark.createDataFrame(nestedRdd2)
    self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])

    from collections import namedtuple
    CustomRow = namedtuple('CustomRow', 'field1 field2')
    rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"),
                               CustomRow(field1=2, field2="row2"),
                               CustomRow(field1=3, field2="row3")])
    df = self.spark.createDataFrame(rdd)
    self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
def test_create_dataframe_from_objects(self):
    """Plain Python objects (MyObject) can seed a DataFrame with inferred
    column names and types."""
    data = [MyObject(1, "1"), MyObject(2, "2")]
    df = self.spark.createDataFrame(data)
    self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")])
    self.assertEqual(df.first(), Row(key=1, value="1"))
def test_select_null_literal(self):
    """A SQL NULL literal maps to Python None in the result Row."""
    first_row = self.spark.sql("select null as col").first()
    self.assertEqual(Row(col=None), first_row)
def test_apply_schema(self):
    """An explicit StructType covering every primitive/nested type can be
    applied to a tuple RDD, queried via SQL, and reconstructed from an
    abstract schema string (internal _parse_schema_abstract API)."""
    from datetime import date, datetime
    rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
                                date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
                                {"a": 1}, (2,), [1, 2, 3], None)])
    schema = StructType([
        StructField("byte1", ByteType(), False),
        StructField("byte2", ByteType(), False),
        StructField("short1", ShortType(), False),
        StructField("short2", ShortType(), False),
        StructField("int1", IntegerType(), False),
        StructField("float1", FloatType(), False),
        StructField("date1", DateType(), False),
        StructField("time1", TimestampType(), False),
        StructField("map1", MapType(StringType(), IntegerType(), False), False),
        StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
        StructField("list1", ArrayType(ByteType(), False), False),
        StructField("null1", DoubleType(), True)])
    df = self.spark.createDataFrame(rdd, schema)
    results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,
                                    x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
    r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
         datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
    self.assertEqual(r, results.first())

    # Arithmetic on the typed columns via SQL keeps the declared types.
    df.createOrReplaceTempView("table2")
    r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
                       "short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
                       "float1 + 1.5 as float1 FROM table2").first()
    self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))

    # Internal helpers: build a schema from an abstract string and infer
    # leaf types from a sample row.
    from pyspark.sql.types import _parse_schema_abstract, _infer_schema_type
    rdd = self.sc.parallelize([(127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1),
                                {"a": 1}, (2,), [1, 2, 3])])
    abstract = "byte1 short1 float1 time1 map1{} struct1(b) list1[]"
    schema = _parse_schema_abstract(abstract)
    typedSchema = _infer_schema_type(rdd.first(), schema)
    df = self.spark.createDataFrame(rdd, typedSchema)
    r = (127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1), {"a": 1}, Row(b=2), [1, 2, 3])
    self.assertEqual(r, tuple(df.first()))
def test_struct_in_map(self):
    """Row objects work as both map keys and map values."""
    df = self.sc.parallelize([Row(m={Row(i=1): Row(s="")})]).toDF()
    [(key, value)] = df.head().m.items()
    self.assertEqual(1, key.i)
    self.assertEqual("", value.s)
def test_convert_row_to_dict(self):
    """Row.asDict() exposes nested Rows inside arrays and map values,
    both locally and after a SQL round trip."""
    row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
    self.assertEqual(1, row.asDict()['l'][0].a)
    df = self.sc.parallelize([row]).toDF()
    df.createOrReplaceTempView("test")
    row = self.spark.sql("select l, d from test").head()
    self.assertEqual(1, row.asDict()["l"][0].a)
    self.assertEqual(1.0, row.asDict()['d']['key'].c)
def test_udt(self):
    """UDT round-trips: pickling, Python<->JVM JSON schema parsing, type
    inference, and value verification for both Scala-backed and
    Python-only UDTs."""
    from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _verify_type
    from pyspark.sql.tests import ExamplePointUDT, ExamplePoint

    def check_datatype(datatype):
        # Must survive pickling and a JVM JSON round trip unchanged.
        pickled = pickle.loads(pickle.dumps(datatype))
        assert datatype == pickled
        scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json())
        python_datatype = _parse_datatype_json_string(scala_datatype.json())
        assert datatype == python_datatype

    check_datatype(ExamplePointUDT())
    structtype_with_udt = StructType([StructField("label", DoubleType(), False),
                                      StructField("point", ExamplePointUDT(), False)])
    check_datatype(structtype_with_udt)
    p = ExamplePoint(1.0, 2.0)
    self.assertEqual(_infer_type(p), ExamplePointUDT())
    _verify_type(ExamplePoint(1.0, 2.0), ExamplePointUDT())
    # A plain list is not an acceptable value for the UDT.
    self.assertRaises(ValueError, lambda: _verify_type([1.0, 2.0], ExamplePointUDT()))

    check_datatype(PythonOnlyUDT())
    structtype_with_udt = StructType([StructField("label", DoubleType(), False),
                                      StructField("point", PythonOnlyUDT(), False)])
    check_datatype(structtype_with_udt)
    p = PythonOnlyPoint(1.0, 2.0)
    self.assertEqual(_infer_type(p), PythonOnlyUDT())
    _verify_type(PythonOnlyPoint(1.0, 2.0), PythonOnlyUDT())
    self.assertRaises(ValueError, lambda: _verify_type([1.0, 2.0], PythonOnlyUDT()))
def test_simple_udt_in_df(self):
    """A DataFrame whose schema contains a Python-only UDT can be shown."""
    rows = [(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)]
    schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
    self.spark.createDataFrame(rows, schema=schema).show()
def test_nested_udt_in_df(self):
    """UDT values nested inside ArrayType and MapType columns collect
    without error."""
    schema = StructType().add("key", LongType()).add("val", ArrayType(PythonOnlyUDT()))
    df = self.spark.createDataFrame(
        [(i % 3, [PythonOnlyPoint(float(i), float(i))]) for i in range(10)],
        schema=schema)
    df.collect()

    schema = StructType().add("key", LongType()).add("val",
                                                     MapType(LongType(), PythonOnlyUDT()))
    df = self.spark.createDataFrame(
        [(i % 3, {i % 3: PythonOnlyPoint(float(i + 1), float(i + 1))}) for i in range(10)],
        schema=schema)
    df.collect()
def test_complex_nested_udt_in_df(self):
    """UDT values survive groupBy/collect_list and can be consumed by a
    UDF that returns an array of structs."""
    from pyspark.sql.functions import udf

    schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
    df = self.spark.createDataFrame(
        [(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
        schema=schema)
    df.collect()

    gd = df.groupby("key").agg({"val": "collect_list"})
    gd.collect()
    # FIX: the original rebound the name `udf` to the created column
    # function, shadowing the imported helper; use a distinct name.
    pair_udf = udf(lambda k, v: [(k, v[0])], ArrayType(df.schema))
    gd.select(pair_udf(*gd)).collect()
def test_udt_with_none(self):
    """A UDF with a UDT return type may return None for some inputs."""
    df = self.spark.range(0, 10, 1, 1)

    def myudf(x):
        # Implicitly returns None for x == 0.
        if x > 0:
            return PythonOnlyPoint(float(x), float(x))

    self.spark.catalog.registerFunction("udf", myudf, PythonOnlyUDT())
    rows = [r[0] for r in df.selectExpr("udf(id)").take(2)]
    self.assertEqual(rows, [None, PythonOnlyPoint(1, 1)])
def test_infer_schema_with_udt(self):
    """Schema inference recognizes UDT values (both Scala-backed and
    Python-only) and they round-trip through a SQL temp view."""
    from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
    row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
    df = self.spark.createDataFrame([row])
    schema = df.schema
    field = [f for f in schema.fields if f.name == "point"][0]
    self.assertEqual(type(field.dataType), ExamplePointUDT)
    df.createOrReplaceTempView("labeled_point")
    point = self.spark.sql("SELECT point FROM labeled_point").head().point
    self.assertEqual(point, ExamplePoint(1.0, 2.0))

    row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
    df = self.spark.createDataFrame([row])
    schema = df.schema
    field = [f for f in schema.fields if f.name == "point"][0]
    self.assertEqual(type(field.dataType), PythonOnlyUDT)
    df.createOrReplaceTempView("labeled_point")
    point = self.spark.sql("SELECT point FROM labeled_point").head().point
    self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_apply_schema_with_udt(self):
    """An explicit schema containing a UDT field converts plain tuples
    into UDT values."""
    from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
    row = (1.0, ExamplePoint(1.0, 2.0))
    schema = StructType([StructField("label", DoubleType(), False),
                         StructField("point", ExamplePointUDT(), False)])
    df = self.spark.createDataFrame([row], schema)
    point = df.head().point
    self.assertEqual(point, ExamplePoint(1.0, 2.0))

    row = (1.0, PythonOnlyPoint(1.0, 2.0))
    schema = StructType([StructField("label", DoubleType(), False),
                         StructField("point", PythonOnlyUDT(), False)])
    df = self.spark.createDataFrame([row], schema)
    point = df.head().point
    self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_udf_with_udt(self):
    """UDFs can both consume a UDT column and return a UDT value."""
    from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
    row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
    df = self.spark.createDataFrame([row])
    self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
    udf = UserDefinedFunction(lambda p: p.y, DoubleType())
    self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
    udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT())
    self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])

    row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
    df = self.spark.createDataFrame([row])
    self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
    udf = UserDefinedFunction(lambda p: p.y, DoubleType())
    self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
    udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT())
    self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
def test_parquet_with_udt(self):
    """UDT values round-trip through a Parquet write/read cycle."""
    from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
    row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
    df0 = self.spark.createDataFrame([row])
    output_dir = os.path.join(self.tempdir.name, "labeled_point")
    df0.write.parquet(output_dir)
    df1 = self.spark.read.parquet(output_dir)
    point = df1.head().point
    self.assertEqual(point, ExamplePoint(1.0, 2.0))

    row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
    df0 = self.spark.createDataFrame([row])
    # Reuse the directory; overwrite replaces the earlier data.
    df0.write.parquet(output_dir, mode='overwrite')
    df1 = self.spark.read.parquet(output_dir)
    point = df1.head().point
    self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_union_with_udt(self):
    """union() works across DataFrames whose schema contains a UDT."""
    from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
    row1 = (1.0, ExamplePoint(1.0, 2.0))
    row2 = (2.0, ExamplePoint(3.0, 4.0))
    schema = StructType([StructField("label", DoubleType(), False),
                         StructField("point", ExamplePointUDT(), False)])
    df1 = self.spark.createDataFrame([row1], schema)
    df2 = self.spark.createDataFrame([row2], schema)

    result = df1.union(df2).orderBy("label").collect()
    self.assertEqual(
        result,
        [
            Row(label=1.0, point=ExamplePoint(1.0, 2.0)),
            Row(label=2.0, point=ExamplePoint(3.0, 4.0))
        ]
    )
def test_column_operators(self):
    """Arithmetic, comparison, boolean, and string operators on Column all
    yield Column objects; `in` against a Column raises ValueError."""
    ci = self.df.key
    cs = self.df.value
    c = ci == cs
    self.assertTrue(isinstance((- ci - 1 - 2) % 3 * 2.5 / 3.5, Column))
    # Reflected (r-) operators with a literal on the left.
    rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci), (1 ** ci), (ci ** 1)
    self.assertTrue(all(isinstance(c, Column) for c in rcc))
    cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]
    self.assertTrue(all(isinstance(c, Column) for c in cb))
    cbool = (ci & ci), (ci | ci), (~ci)
    self.assertTrue(all(isinstance(c, Column) for c in cbool))
    css = cs.contains('a'), cs.like('a'), cs.rlike('a'), cs.asc(), cs.desc(),\
        cs.startswith('a'), cs.endswith('a'), ci.eqNullSafe(cs)
    self.assertTrue(all(isinstance(c, Column) for c in css))
    self.assertTrue(isinstance(ci.cast(LongType()), Column))
    self.assertRaisesRegexp(ValueError,
                            "Cannot apply 'in' operator against a column",
                            lambda: 1 in cs)
def test_column_getitem(self):
    """Column supports slice, index and key access; stepped slices fail."""
    from pyspark.sql.functions import col
    foo = col("foo")
    for expr in (foo[1:3], foo[0], foo["bar"]):
        self.assertIsInstance(expr, Column)
    # A slice with a step is not supported.
    self.assertRaises(ValueError, lambda: col("foo")[0:10:2])
def test_column_select(self):
    """select accepts '*', Column objects, and composes with where()."""
    df = self.df
    self.assertEqual(self.testData, df.select("*").collect())
    self.assertEqual(self.testData, df.select(df.key, df.value).collect())
    filtered = df.where(df.key == 1).select(df.value)
    self.assertEqual([Row(value='1')], filtered.collect())
def test_freqItems(self):
    """freqItems reports values occurring at least the requested fraction."""
    rows = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0)
            for i in range(100)]
    df = self.sc.parallelize(rows).toDF()
    # Half the rows are (1, -2.0), well above the 0.4 support threshold.
    freq = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
    self.assertIn(1, freq[0])
    self.assertIn(-2.0, freq[1])
def test_aggregator(self):
    """Ungrouped aggregation: dict-style agg, mean(), and functions-based
    first/last/approxCountDistinct/countDistinct."""
    df = self.df
    g = df.groupBy()
    self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
    self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())

    from pyspark.sql import functions
    self.assertEqual((0, u'99'),
                     tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
    # approxCountDistinct is an estimate; only bound it from below.
    self.assertTrue(95 < g.agg(functions.approxCountDistinct(df.key)).first()[0])
    self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
def test_first_last_ignorenulls(self):
    """first()/last() honor the ignorenulls flag (second argument)."""
    from pyspark.sql import functions
    df = self.spark.range(0, 100)
    # Every multiple of 3 becomes null.
    df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias("id"))
    df3 = df2.select(functions.first(df2.id, False).alias('a'),
                     functions.first(df2.id, True).alias('b'),
                     functions.last(df2.id, False).alias('c'),
                     functions.last(df2.id, True).alias('d'))
    self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect())
def test_approxQuantile(self):
    """approxQuantile accepts a single column name, a list of names, or a
    tuple of names, and rejects non-string column arguments."""
    df = self.sc.parallelize([Row(a=i, b=i+10) for i in range(10)]).toDF()

    def check_quantiles(qs):
        # One result per requested probability, each a float.
        self.assertTrue(isinstance(qs, list))
        self.assertEqual(len(qs), 3)
        self.assertTrue(all(isinstance(q, float) for q in qs))

    single = df.stat.approxQuantile("a", [0.1, 0.5, 0.9], 0.1)
    check_quantiles(single)

    # Both list and tuple column arguments return one list per column.
    for cols in (["a", "b"], ("a", "b")):
        multi = df.stat.approxQuantile(cols, [0.1, 0.5, 0.9], 0.1)
        self.assertTrue(isinstance(multi, list))
        self.assertEqual(len(multi), 2)
        for per_column in multi:
            check_quantiles(per_column)

    # Non-string column specifications are rejected.
    self.assertRaises(ValueError, lambda: df.stat.approxQuantile(123, [0.1, 0.9], 0.1))
    self.assertRaises(ValueError, lambda: df.stat.approxQuantile(("a", 123), [0.1, 0.9], 0.1))
    self.assertRaises(ValueError, lambda: df.stat.approxQuantile(["a", 123], [0.1, 0.9], 0.1))
def test_corr(self):
    """Pearson correlation of i and sqrt(i) matches the known constant."""
    import math
    rows = [Row(a=i, b=math.sqrt(i)) for i in range(10)]
    df = self.sc.parallelize(rows).toDF()
    pearson = df.stat.corr("a", "b")
    self.assertTrue(abs(pearson - 0.95734012) < 1e-6)
def test_cov(self):
    """Sample covariance of i and 2*i over range(10) equals 55/3."""
    rows = [Row(a=i, b=2 * i) for i in range(10)]
    df = self.sc.parallelize(rows).toDF()
    sample_cov = df.stat.cov("a", "b")
    self.assertTrue(abs(sample_cov - 55.0 / 3) < 1e-6)
def test_crosstab(self):
    """crosstab yields one row per value of `a`, counting each `b` once.

    For i in 1..6, (i % 3, i % 2) pairs every a-value with b=0 exactly
    once and b=1 exactly once.
    """
    df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
    ct = df.stat.crosstab("a", "b").collect()
    ct = sorted(ct, key=lambda x: x[0])
    for i, row in enumerate(ct):
        self.assertEqual(row[0], str(i))
        # BUG FIX: assertTrue(row[1], 1) treated `1` as the failure
        # message and never compared the counts; use assertEqual.
        self.assertEqual(row[1], 1)
        self.assertEqual(row[2], 1)
def test_math_functions(self):
    """DataFrame math functions agree with the stdlib `math` equivalents,
    for both Column and string-name arguments."""
    df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
    from pyspark.sql import functions
    import math

    def get_values(l):
        return [j[0] for j in l]

    def assert_close(a, b):
        c = get_values(b)
        diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]
        # BUG FIX: the original *returned* this boolean and every caller
        # discarded it, so mismatches never failed the test. Assert it.
        self.assertEqual(sum(diff), len(a), "%s != %s" % (a, c))

    assert_close([math.cos(i) for i in range(10)],
                 df.select(functions.cos(df.a)).collect())
    assert_close([math.cos(i) for i in range(10)],
                 df.select(functions.cos("a")).collect())
    assert_close([math.sin(i) for i in range(10)],
                 df.select(functions.sin(df.a)).collect())
    assert_close([math.sin(i) for i in range(10)],
                 df.select(functions.sin(df['a'])).collect())
    assert_close([math.pow(i, 2 * i) for i in range(10)],
                 df.select(functions.pow(df.a, df.b)).collect())
    assert_close([math.pow(i, 2) for i in range(10)],
                 df.select(functions.pow(df.a, 2)).collect())
    assert_close([math.pow(i, 2) for i in range(10)],
                 df.select(functions.pow(df.a, 2.0)).collect())
    assert_close([math.hypot(i, 2 * i) for i in range(10)],
                 df.select(functions.hypot(df.a, df.b)).collect())
def test_rand_functions(self):
    """rand()/randn() values fall in the expected range, and an explicit
    seed (including 0) makes them reproducible."""
    df = self.df
    from pyspark.sql import functions
    rnd = df.select('key', functions.rand()).collect()
    for row in rnd:
        assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
    rndn = df.select('key', functions.randn(5)).collect()
    for row in rndn:
        assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]

    # If the specified seed is 0, we should use it.
    # https://issues.apache.org/jira/browse/SPARK-9691
    rnd1 = df.select('key', functions.rand(0)).collect()
    rnd2 = df.select('key', functions.rand(0)).collect()
    self.assertEqual(sorted(rnd1), sorted(rnd2))

    rndn1 = df.select('key', functions.randn(0)).collect()
    rndn2 = df.select('key', functions.randn(0)).collect()
    self.assertEqual(sorted(rndn1), sorted(rndn2))
def test_array_contains_function(self):
    """array_contains implicitly casts the needle to the array element type."""
    from pyspark.sql.functions import array_contains
    frame = self.spark.createDataFrame([(["1", "2", "3"],), ([],)], ['data'])
    # The value argument can be implicitly castable to the element's type of the array.
    rows = frame.select(array_contains(frame.data, 1).alias('b')).collect()
    self.assertEqual([Row(b=True), Row(b=False)], rows)
def test_between_function(self):
    """Column.between is inclusive on both the lower and upper bound."""
    source = [Row(a=1, b=2, c=3),
              Row(a=2, b=1, c=3),
              Row(a=4, b=1, c=4)]
    frame = self.sc.parallelize(source).toDF()
    kept = frame.filter(frame.a.between(frame.b, frame.c)).collect()
    self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)], kept)
def test_struct_type(self):
    """StructType: add() chaining, equality, iteration, len and indexing."""
    from pyspark.sql.types import StructType, StringType, StructField
    # add() with name/type/nullable args is equivalent to a StructField list.
    struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
    struct2 = StructType([StructField("f1", StringType(), True),
                          StructField("f2", StringType(), True, None)])
    self.assertEqual(struct1, struct2)
    struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
    struct2 = StructType([StructField("f1", StringType(), True)])
    self.assertNotEqual(struct1, struct2)
    # add() also accepts a ready-made StructField.
    struct1 = (StructType().add(StructField("f1", StringType(), True))
               .add(StructField("f2", StringType(), True, None)))
    struct2 = StructType([StructField("f1", StringType(), True),
                          StructField("f2", StringType(), True, None)])
    self.assertEqual(struct1, struct2)
    struct1 = (StructType().add(StructField("f1", StringType(), True))
               .add(StructField("f2", StringType(), True, None)))
    struct2 = StructType([StructField("f1", StringType(), True)])
    self.assertNotEqual(struct1, struct2)
    # Catch exception raised during improper construction
    with self.assertRaises(ValueError):
        struct1 = StructType().add("name")
    # Iterating a StructType yields its StructFields.
    struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
    for field in struct1:
        self.assertIsInstance(field, StructField)
    struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
    self.assertEqual(len(struct1), 2)
    # Fields are accessible by name, position and slice; bad keys raise.
    struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
    self.assertIs(struct1["f1"], struct1.fields[0])
    self.assertIs(struct1[0], struct1.fields[0])
    self.assertEqual(struct1[0:1], StructType(struct1.fields[0:1]))
    with self.assertRaises(KeyError):
        not_a_field = struct1["f9"]
    with self.assertRaises(IndexError):
        not_a_field = struct1[9]
    with self.assertRaises(TypeError):
        not_a_field = struct1[9.9]
def test_metadata_null(self):
    """StructField metadata of None (or a dict containing None) must not crash."""
    from pyspark.sql.types import StructType, StringType, StructField
    fields = [StructField("f1", StringType(), True, None),
              StructField("f2", StringType(), True, {'a': None})]
    rdd = self.sc.parallelize([["a", "b"], ["c", "d"]])
    self.spark.createDataFrame(rdd, StructType(fields))
def test_save_and_load(self):
    """Round-trip a DataFrame through the json and csv writers/readers.

    Bug fix: the csv target was created under a second ``tempfile.mkdtemp()``
    whose root directory was never removed, leaking a temp dir on every run.
    Cleanup (and restoring ``spark.sql.sources.default``) now happens in
    ``finally`` blocks so failures cannot leak state either.
    """
    df = self.df
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)  # the json writer requires a non-existent path
    csvRoot = tempfile.mkdtemp()  # kept so it can be removed below
    try:
        df.write.json(tmpPath)
        actual = self.spark.read.json(tmpPath)
        self.assertEqual(sorted(df.collect()), sorted(actual.collect()))

        schema = StructType([StructField("value", StringType(), True)])
        actual = self.spark.read.json(tmpPath, schema)
        self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))

        df.write.json(tmpPath, "overwrite")
        actual = self.spark.read.json(tmpPath)
        self.assertEqual(sorted(df.collect()), sorted(actual.collect()))

        # Unknown options must be ignored by save()/load().
        df.write.save(format="json", mode="overwrite", path=tmpPath,
                      noUse="this options will not be used in save.")
        actual = self.spark.read.load(format="json", path=tmpPath,
                                      noUse="this options will not be used in load.")
        self.assertEqual(sorted(df.collect()), sorted(actual.collect()))

        # load() without a format falls back to spark.sql.sources.default.
        defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
                                                    "org.apache.spark.sql.parquet")
        self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
        try:
            actual = self.spark.read.load(path=tmpPath)
            self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
        finally:
            # Restore the default data source even if the assertion fails.
            self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)

        csvpath = os.path.join(csvRoot, 'data')
        df.write.option('quote', None).format('csv').save(csvpath)
    finally:
        # Remove both temp roots; the csv one used to leak.
        shutil.rmtree(tmpPath, ignore_errors=True)
        shutil.rmtree(csvRoot, ignore_errors=True)
def test_save_and_load_builder(self):
    """Same json round-trips as test_save_and_load, via the builder-style API."""
    df = self.df
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)  # the json writer requires a non-existent path
    df.write.json(tmpPath)
    actual = self.spark.read.json(tmpPath)
    self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
    schema = StructType([StructField("value", StringType(), True)])
    actual = self.spark.read.json(tmpPath, schema)
    self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
    df.write.mode("overwrite").json(tmpPath)
    actual = self.spark.read.json(tmpPath)
    self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
    # Unknown options set via options()/option() must be ignored.
    df.write.mode("overwrite").options(noUse="this options will not be used in save.")\
        .option("noUse", "this option will not be used in save.")\
        .format("json").save(path=tmpPath)
    actual =\
        self.spark.read.format("json")\
        .load(path=tmpPath, noUse="this options will not be used in load.")
    self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
    # load() without a format falls back to spark.sql.sources.default.
    defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
                                                "org.apache.spark.sql.parquet")
    self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
    actual = self.spark.read.load(path=tmpPath)
    self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
    self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
    shutil.rmtree(tmpPath)
def test_stream_trigger(self):
    """trigger() argument validation: exactly one keyword argument is required.

    Bug fix: the first two cases used bare ``try/except ... pass`` with no
    ``self.fail``, so they passed even when trigger() raised nothing.  Using
    ``assertRaises`` context managers makes a missing exception a failure.
    """
    df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
    # Should take at least one arg
    with self.assertRaises(ValueError):
        df.writeStream.trigger()
    # Should not take multiple args
    with self.assertRaises(ValueError):
        df.writeStream.trigger(once=True, processingTime='5 seconds')
    # Should take only keyword args
    with self.assertRaises(TypeError):
        df.writeStream.trigger('5 seconds')
def test_stream_read_options(self):
    """The streaming reader honors format/option/schema set via the builder."""
    schema = StructType([StructField("data", StringType(), False)])
    reader = (self.spark.readStream
              .format('text')
              .option('path', 'python/test_support/sql/streaming')
              .schema(schema))
    df = reader.load()
    self.assertTrue(df.isStreaming)
    self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_read_options_overwrite(self):
    """Arguments passed to load() override earlier builder settings."""
    bad_schema = StructType([StructField("test", IntegerType(), False)])
    good_schema = StructType([StructField("data", StringType(), False)])
    df = (self.spark.readStream
          .format('csv')
          .option('path', 'python/test_support/sql/fake')
          .schema(bad_schema)
          .load(path='python/test_support/sql/streaming',
                schema=good_schema, format='text'))
    self.assertTrue(df.isStreaming)
    self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_save_options(self):
    """Start a parquet-sink streaming query with builder options; check output."""
    df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') \
        .withColumn('id', lit(1))
    # Stop any query leaked by a previous test.
    for q in self.spark._wrapped.streams.active:
        q.stop()
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)  # only the path name is needed; the sink creates it
    self.assertTrue(df.isStreaming)
    out = os.path.join(tmpPath, 'out')
    chk = os.path.join(tmpPath, 'chk')
    q = df.writeStream.option('checkpointLocation', chk).queryName('this_query') \
        .format('parquet').partitionBy('id').outputMode('append').option('path', out).start()
    try:
        self.assertEqual(q.name, 'this_query')
        self.assertTrue(q.isActive)
        # Block until all available source data has been processed.
        q.processAllAvailable()
        output_files = []
        for _, _, files in os.walk(out):
            output_files.extend([f for f in files if not f.startswith('.')])
        # Both the sink output and the checkpoint directory must be non-empty.
        self.assertTrue(len(output_files) > 0)
        self.assertTrue(len(os.listdir(chk)) > 0)
    finally:
        q.stop()
        shutil.rmtree(tmpPath)
def test_stream_save_options_overwrite(self):
    """Arguments passed to start() override earlier writeStream builder settings."""
    df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
    # Stop any query leaked by a previous test.
    for q in self.spark._wrapped.streams.active:
        q.stop()
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)  # only the path name is needed; the sink creates it
    self.assertTrue(df.isStreaming)
    out = os.path.join(tmpPath, 'out')
    chk = os.path.join(tmpPath, 'chk')
    fake1 = os.path.join(tmpPath, 'fake1')
    fake2 = os.path.join(tmpPath, 'fake2')
    # Builder sets bogus values; start() kwargs must win over all of them.
    q = df.writeStream.option('checkpointLocation', fake1)\
        .format('memory').option('path', fake2) \
        .queryName('fake_query').outputMode('append') \
        .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
    try:
        self.assertEqual(q.name, 'this_query')
        self.assertTrue(q.isActive)
        # Block until all available source data has been processed.
        q.processAllAvailable()
        output_files = []
        for _, _, files in os.walk(out):
            output_files.extend([f for f in files if not f.startswith('.')])
        self.assertTrue(len(output_files) > 0)
        self.assertTrue(len(os.listdir(chk)) > 0)
        self.assertFalse(os.path.isdir(fake1))  # should not have been created
        self.assertFalse(os.path.isdir(fake2))  # should not have been created
    finally:
        q.stop()
        shutil.rmtree(tmpPath)
def test_stream_status_and_progress(self):
    """StreamingQuery status/lastProgress/recentProgress expose expected fields."""
    df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
    # Stop any query leaked by a previous test.
    for q in self.spark._wrapped.streams.active:
        q.stop()
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)  # only the path name is needed; the sink creates it
    self.assertTrue(df.isStreaming)
    out = os.path.join(tmpPath, 'out')
    chk = os.path.join(tmpPath, 'chk')

    def func(x):
        # Slow each row down so the query is unlikely to have progressed
        # before the first lastProgress call below.
        time.sleep(1)
        return x

    from pyspark.sql.functions import col, udf
    sleep_udf = udf(func)
    # Use "sleep_udf" to delay the progress update so that we can test `lastProgress` when there
    # were no updates.
    q = df.select(sleep_udf(col("value")).alias('value')).writeStream \
        .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
    try:
        # "lastProgress" will return None in most cases. However, as it may be flaky when
        # Jenkins is very slow, we don't assert it. If there is something wrong, "lastProgress"
        # may throw error with a high chance and make this test flaky, so we should still be
        # able to detect broken codes.
        q.lastProgress

        q.processAllAvailable()
        lastProgress = q.lastProgress
        recentProgress = q.recentProgress
        status = q.status
        self.assertEqual(lastProgress['name'], q.name)
        self.assertEqual(lastProgress['id'], q.id)
        # The latest progress must appear in the recent-progress history.
        self.assertTrue(any(p == lastProgress for p in recentProgress))
        self.assertTrue(
            "message" in status and
            "isDataAvailable" in status and
            "isTriggerActive" in status)
    finally:
        q.stop()
        shutil.rmtree(tmpPath)
def test_stream_await_termination(self):
    """awaitTermination: rejects non-numeric timeouts, times out, returns False."""
    df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
    # Stop any query leaked by a previous test.
    for q in self.spark._wrapped.streams.active:
        q.stop()
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)  # only the path name is needed; the sink creates it
    self.assertTrue(df.isStreaming)
    out = os.path.join(tmpPath, 'out')
    chk = os.path.join(tmpPath, 'chk')
    q = df.writeStream\
        .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
    try:
        self.assertTrue(q.isActive)
        try:
            # A non-numeric timeout must be rejected.
            q.awaitTermination("hello")
            self.fail("Expected a value exception")
        except ValueError:
            pass
        now = time.time()
        # test should take at least 2 seconds
        res = q.awaitTermination(2.6)
        duration = time.time() - now
        self.assertTrue(duration >= 2)
        # The query is still running, so the timed wait returns False.
        self.assertFalse(res)
    finally:
        q.stop()
        shutil.rmtree(tmpPath)
def test_stream_exception(self):
    """exception() is None for a healthy query and a StreamingQueryException after failure."""
    sdf = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
    sq = sdf.writeStream.format('memory').queryName('query_explain').start()
    try:
        sq.processAllAvailable()
        # A healthy query reports no exception.
        self.assertEqual(sq.exception(), None)
    finally:
        sq.stop()

    from pyspark.sql.functions import col, udf
    from pyspark.sql.utils import StreamingQueryException
    # A UDF that divides by zero to force the query to fail.
    bad_udf = udf(lambda x: 1 / 0)
    sq = sdf.select(bad_udf(col("value")))\
        .writeStream\
        .format('memory')\
        .queryName('this_query')\
        .start()
    try:
        # Process some data to fail the query
        sq.processAllAvailable()
        self.fail("bad udf should fail the query")
    except StreamingQueryException as e:
        # This is expected
        self.assertTrue("ZeroDivisionError" in e.desc)
    finally:
        sq.stop()
    # After failure, exception() returns the same StreamingQueryException.
    self.assertTrue(type(sq.exception()) is StreamingQueryException)
    self.assertTrue("ZeroDivisionError" in sq.exception().desc)
def test_query_manager_await_termination(self):
    """StreamingQueryManager.awaitAnyTermination mirrors query-level awaitTermination."""
    df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
    # Stop any query leaked by a previous test.
    for q in self.spark._wrapped.streams.active:
        q.stop()
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)  # only the path name is needed; the sink creates it
    self.assertTrue(df.isStreaming)
    out = os.path.join(tmpPath, 'out')
    chk = os.path.join(tmpPath, 'chk')
    q = df.writeStream\
        .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
    try:
        self.assertTrue(q.isActive)
        try:
            # A non-numeric timeout must be rejected.
            self.spark._wrapped.streams.awaitAnyTermination("hello")
            self.fail("Expected a value exception")
        except ValueError:
            pass
        now = time.time()
        # test should take at least 2 seconds
        res = self.spark._wrapped.streams.awaitAnyTermination(2.6)
        duration = time.time() - now
        self.assertTrue(duration >= 2)
        # No query terminated within the timeout, so the wait returns False.
        self.assertFalse(res)
    finally:
        q.stop()
        shutil.rmtree(tmpPath)
def test_help_command(self):
    """Regression test for SPARK-5464: help() must not raise on DataFrame objects."""
    rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
    df = self.spark.read.json(rdd)
    # render_doc() reproduces the help() exception without printing output
    for target in (df, df.foo, df.take(1)):
        pydoc.render_doc(target)
def test_access_column(self):
    """Columns resolve by attribute, by name and by position; bad keys raise."""
    df = self.df
    for accessor in (df.key, df['key'], df[0]):
        self.assertTrue(isinstance(accessor, Column))
    self.assertRaises(IndexError, lambda: df[2])
    self.assertRaises(AnalysisException, lambda: df["bad_key"])
    self.assertRaises(TypeError, lambda: df[{}])
def test_column_name_with_non_ascii(self):
    """Non-ASCII column names work in schema, str(), dtypes and select."""
    if sys.version >= '3':
        columnName = "数量"
        self.assertTrue(isinstance(columnName, str))
    else:
        # Python 2: build a unicode object from the UTF-8 encoded literal.
        columnName = unicode("数量", "utf-8")
        self.assertTrue(isinstance(columnName, unicode))
    schema = StructType([StructField(columnName, LongType(), True)])
    df = self.spark.createDataFrame([(1,)], schema)
    self.assertEqual(schema, df.schema)
    self.assertEqual("DataFrame[数量: bigint]", str(df))
    self.assertEqual([("数量", 'bigint')], df.dtypes)
    # The non-ASCII name resolves both as a string and as a column key.
    self.assertEqual(1, df.select("数量").first()[0])
    self.assertEqual(1, df.select(df["数量"]).first()[0])
def test_access_nested_types(self):
    """Array, struct and map columns support element/field access."""
    df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()

    def first_value(col):
        # Evaluate a single-column projection and return the scalar.
        return df.select(col).first()[0]

    self.assertEqual(1, first_value(df.l[0]))
    self.assertEqual(1, first_value(df.l.getItem(0)))
    self.assertEqual(1, first_value(df.r.a))
    self.assertEqual("b", first_value(df.r.getField("b")))
    self.assertEqual("v", first_value(df.d["k"]))
    self.assertEqual("v", first_value(df.d.getItem("k")))
def test_field_accessor(self):
    """Struct fields resolve via [] on the column and via dotted column names."""
    df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()

    def first_value(col):
        # Evaluate a single-column projection and return the scalar.
        return df.select(col).first()[0]

    self.assertEqual(1, first_value(df.l[0]))
    self.assertEqual(1, first_value(df.r["a"]))
    self.assertEqual(1, first_value(df["r.a"]))
    self.assertEqual("b", first_value(df.r["b"]))
    self.assertEqual("b", first_value(df["r.b"]))
    self.assertEqual("v", first_value(df.d["k"]))
def test_infer_long_type(self):
    """Python ints of any magnitude are inferred as LongType and survive parquet."""
    longrow = [Row(f1='a', f2=100000000000000)]
    df = self.sc.parallelize(longrow).toDF()
    self.assertEqual(df.schema.fields[1].dataType, LongType())

    # this saving as Parquet caused issues as well.
    output_dir = os.path.join(self.tempdir.name, "infer_long_type")
    df.write.parquet(output_dir)
    df1 = self.spark.read.parquet(output_dir)
    self.assertEqual('a', df1.first().f1)
    self.assertEqual(100000000000000, df1.first().f2)

    for value in (1, 2 ** 10, 2 ** 20, 2 ** 31 - 1, 2 ** 31, 2 ** 61, 2 ** 71):
        self.assertEqual(_infer_type(value), LongType())
def test_filter_with_datetime(self):
    """date and datetime literals compare correctly in filter expressions."""
    ts = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
    d = ts.date()
    df = self.spark.createDataFrame([Row(date=d, time=ts)])
    # Equality matches the stored values; strict inequality matches nothing.
    self.assertEqual(1, df.filter(df.date == d).count())
    self.assertEqual(1, df.filter(df.time == ts).count())
    self.assertEqual(0, df.filter(df.date > d).count())
    self.assertEqual(0, df.filter(df.time > ts).count())
def test_filter_with_datetime_timezone(self):
    """Timezone offsets are honored when comparing aware datetimes in filters."""
    dt_utc = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))
    dt_plus1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))
    df = self.spark.createDataFrame([Row(date=dt_utc)])
    # Same wall-clock time at +1 denotes an earlier instant, so the stored
    # UTC value compares strictly greater.
    self.assertEqual(0, df.filter(df.date == dt_plus1).count())
    self.assertEqual(1, df.filter(df.date > dt_plus1).count())
    self.assertEqual(0, df.filter(df.date < dt_plus1).count())
def test_time_with_timezone(self):
    """Aware datetimes round-trip: collect() yields the equivalent local time."""
    day = datetime.date.today()
    now = datetime.datetime.now()
    ts = time.mktime(now.timetuple())
    # class in __main__ is not serializable
    from pyspark.sql.tests import UTCOffsetTimezone
    utc = UTCOffsetTimezone()
    utcnow = datetime.datetime.utcfromtimestamp(ts)  # without microseconds
    # add microseconds to utcnow (keeping year,month,day,hour,minute,second)
    utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))
    df = self.spark.createDataFrame([(day, now, utcnow)])
    day1, now1, utcnow1 = df.first()
    self.assertEqual(day1, day)
    self.assertEqual(now, now1)
    # The UTC-aware datetime represents the same instant as the naive local
    # `now`, so after the round-trip both compare equal.
    self.assertEqual(now, utcnow1)
# regression test for SPARK-19561
def test_datetime_at_epoch(self):
    """The Unix epoch datetime survives a createDataFrame/lit round-trip (SPARK-19561)."""
    epoch = datetime.datetime.fromtimestamp(0)
    frame = self.spark.createDataFrame([Row(date=epoch)])
    row = frame.select('date', lit(epoch).alias('lit_date')).first()
    for field in ('date', 'lit_date'):
        self.assertEqual(row[field], epoch)
def test_decimal(self):
    """DecimalType values survive arithmetic and a parquet round-trip.

    Bug fix: the temp directory created for the parquet round-trip was never
    removed, leaking a directory per run.  Cleanup now happens in ``finally``.
    """
    from decimal import Decimal
    schema = StructType([StructField("decimal", DecimalType(10, 5))])
    df = self.spark.createDataFrame([(Decimal("3.14159"),)], schema)
    row = df.select(df.decimal + 1).first()
    self.assertEqual(row[0], Decimal("4.14159"))
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)  # the parquet writer requires a non-existent path
    try:
        df.write.parquet(tmpPath)
        df2 = self.spark.read.parquet(tmpPath)
        row = df2.first()
        self.assertEqual(row[0], Decimal("3.14159"))
    finally:
        shutil.rmtree(tmpPath, ignore_errors=True)
def test_dropna(self):
    """DataFrame.dropna: how ('any'/'all'), thresh and subset semantics."""
    schema = StructType([
        StructField("name", StringType(), True),
        StructField("age", IntegerType(), True),
        StructField("height", DoubleType(), True)])

    # shouldn't drop a non-null row
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', 50, 80.1)], schema).dropna().count(),
        1)

    # dropping rows with a single null value
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', None, 80.1)], schema).dropna().count(),
        0)
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
        0)

    # if how = 'all', only drop rows if all values are null
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
        1)
    self.assertEqual(self.spark.createDataFrame(
        [(None, None, None)], schema).dropna(how='all').count(),
        0)

    # how and subset
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
        1)
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
        0)

    # threshold
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
        1)
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', None, None)], schema).dropna(thresh=2).count(),
        0)

    # threshold and subset
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
        1)
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
        0)

    # thresh should take precedence over how
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', 50, None)], schema).dropna(
            how='any', thresh=2, subset=['name', 'age']).count(),
        1)
def test_fillna(self):
    """DataFrame.fillna: int/double/string/bool fills and subset handling."""
    schema = StructType([
        StructField("name", StringType(), True),
        StructField("age", IntegerType(), True),
        StructField("height", DoubleType(), True)])

    # fillna shouldn't change non-null values
    row = self.spark.createDataFrame([(u'Alice', 10, 80.1)], schema).fillna(50).first()
    self.assertEqual(row.age, 10)

    # fillna with int
    row = self.spark.createDataFrame([(u'Alice', None, None)], schema).fillna(50).first()
    self.assertEqual(row.age, 50)
    self.assertEqual(row.height, 50.0)

    # fillna with double
    row = self.spark.createDataFrame([(u'Alice', None, None)], schema).fillna(50.1).first()
    self.assertEqual(row.age, 50)
    self.assertEqual(row.height, 50.1)

    # fillna with string
    row = self.spark.createDataFrame([(None, None, None)], schema).fillna("hello").first()
    self.assertEqual(row.name, u"hello")
    self.assertEqual(row.age, None)

    # fillna with subset specified for numeric cols
    row = self.spark.createDataFrame(
        [(None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
    self.assertEqual(row.name, None)
    self.assertEqual(row.age, 50)
    self.assertEqual(row.height, None)

    # fillna with subset specified for numeric cols
    row = self.spark.createDataFrame(
        [(None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
    self.assertEqual(row.name, "haha")
    self.assertEqual(row.age, None)
    self.assertEqual(row.height, None)

    # fillna with dictionary for boolean types
    row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
    self.assertEqual(row.a, True)
def test_bitwise_operations(self):
    """Bitwise AND/OR/XOR/NOT on integer columns match Python's operators."""
    from pyspark.sql import functions
    df = self.spark.createDataFrame([Row(a=170, b=75)])
    cases = [
        (df.a.bitwiseAND(df.b), '(a & b)', 170 & 75),
        (df.a.bitwiseOR(df.b), '(a | b)', 170 | 75),
        (df.a.bitwiseXOR(df.b), '(a ^ b)', 170 ^ 75),
        (functions.bitwiseNOT(df.b), '~b', ~75),
    ]
    for expr, out_name, expected in cases:
        collected = df.select(expr).collect()[0].asDict()
        self.assertEqual(expected, collected[out_name])
def test_expr(self):
    """functions.expr parses a SQL expression string into a usable Column."""
    from pyspark.sql import functions
    frame = self.spark.createDataFrame([Row(a="length string", b=75)])
    collected = frame.select(functions.expr("length(a)")).collect()
    self.assertEqual(13, collected[0].asDict()["length(a)"])
def test_replace(self):
    """DataFrame.replace: scalar/list/tuple/dict forms, subsets, and rejection
    of mismatched lengths, mixed replacement types and unsupported types."""
    schema = StructType([
        StructField("name", StringType(), True),
        StructField("age", IntegerType(), True),
        StructField("height", DoubleType(), True)])

    # replace with int
    row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
    self.assertEqual(row.age, 20)
    self.assertEqual(row.height, 20.0)

    # replace with double
    row = self.spark.createDataFrame(
        [(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
    self.assertEqual(row.age, 82)
    self.assertEqual(row.height, 82.1)

    # replace with string
    row = self.spark.createDataFrame(
        [(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
    self.assertEqual(row.name, u"Ann")
    self.assertEqual(row.age, 10)

    # replace with subset specified by a string of a column name w/ actual change
    row = self.spark.createDataFrame(
        [(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
    self.assertEqual(row.age, 20)

    # replace with subset specified by a string of a column name w/o actual change
    row = self.spark.createDataFrame(
        [(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
    self.assertEqual(row.age, 10)

    # replace with subset specified with one column replaced, another column not in subset
    # stays unchanged.
    row = self.spark.createDataFrame(
        [(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
    self.assertEqual(row.name, u'Alice')
    self.assertEqual(row.age, 20)
    self.assertEqual(row.height, 10.0)

    # replace with subset specified but no column will be replaced
    row = self.spark.createDataFrame(
        [(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
    self.assertEqual(row.name, u'Alice')
    self.assertEqual(row.age, 10)
    self.assertEqual(row.height, None)

    # replace with lists
    row = self.spark.createDataFrame(
        [(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
    self.assertTupleEqual(row, (u'Ann', 10, 80.1))

    # replace with dict
    row = self.spark.createDataFrame(
        [(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
    self.assertTupleEqual(row, (u'Alice', 11, 80.1))

    # test backward compatibility with dummy value
    dummy_value = 1
    row = self.spark.createDataFrame(
        [(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
    self.assertTupleEqual(row, (u'Bob', 10, 80.1))

    # test dict with mixed numerics
    row = self.spark.createDataFrame(
        [(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
    self.assertTupleEqual(row, (u'Alice', -10, 90.5))

    # replace with tuples
    row = self.spark.createDataFrame(
        [(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
    self.assertTupleEqual(row, (u'Bob', 10, 80.1))

    # replace multiple columns
    row = self.spark.createDataFrame(
        [(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
    self.assertTupleEqual(row, (u'Alice', 20, 90.0))

    # test for mixed numerics
    row = self.spark.createDataFrame(
        [(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
    self.assertTupleEqual(row, (u'Alice', 20, 90.5))
    row = self.spark.createDataFrame(
        [(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
    self.assertTupleEqual(row, (u'Alice', 20, 90.5))

    # replace with boolean
    row = (self
           .spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
           .selectExpr("name = 'Bob'", 'age <= 15')
           .replace(False, True).first())
    self.assertTupleEqual(row, (True, True))

    # should fail if subset is not list, tuple or None
    with self.assertRaises(ValueError):
        self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()

    # should fail if to_replace and value have different length
    with self.assertRaises(ValueError):
        self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()

    # should fail if when received unexpected type
    with self.assertRaises(ValueError):
        from datetime import datetime
        self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()

    # should fail if provided mixed type replacements
    with self.assertRaises(ValueError):
        self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
    with self.assertRaises(ValueError):
        self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
def test_capture_analysis_exception(self):
    """Invalid references surface as AnalysisException on the Python side."""
    with self.assertRaises(AnalysisException):
        self.spark.sql("select abc")
    with self.assertRaises(AnalysisException):
        self.df.selectExpr("a + b")
def test_capture_parse_exception(self):
    """Unparsable SQL surfaces as ParseException on the Python side."""
    with self.assertRaises(ParseException):
        self.spark.sql("abc")
def test_capture_illegalargument_exception(self):
    """JVM IllegalArgumentException carries its message and JVM stack trace."""
    self.assertRaisesRegexp(IllegalArgumentException, "Setting negative mapred.reduce.tasks",
                            lambda: self.spark.sql("SET mapred.reduce.tasks=-1"))
    df = self.spark.createDataFrame([(1, 2)], ["a", "b"])
    # sha2 only accepts bit lengths 0/224/256/384/512; 1024 must be rejected.
    self.assertRaisesRegexp(IllegalArgumentException, "1024 is not in the permitted values",
                            lambda: df.select(sha2(df.a, 1024)).collect())
    try:
        df.select(sha2(df.a, 1024)).collect()
    except IllegalArgumentException as e:
        # Both the JVM message and its stack trace are exposed on the
        # Python-side exception object.
        self.assertRegexpMatches(e.desc, "1024 is not in the permitted values")
        self.assertRegexpMatches(e.stackTrace,
                                 "org.apache.spark.sql.functions")
def test_with_column_with_existing_name(self):
    """withColumn with an existing name replaces the column rather than duplicating it."""
    replaced = self.df.withColumn("key", self.df.key).select("key").collect()
    self.assertEqual(list(range(100)), [r.key for r in replaced])
# regression test for SPARK-10417
def test_column_iterator(self):
    """Regression test for SPARK-10417: iterating a Column raises TypeError."""
    def iterate_column():
        for _ in self.df.key:
            break
    self.assertRaises(TypeError, iterate_column)
# add test for SPARK-10577 (test broadcast join hint)
def test_functions_broadcast(self):
    """SPARK-10577: the broadcast() hint shapes the physical join plan."""
    from pyspark.sql.functions import broadcast
    df1 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
    df2 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))

    # equijoin - should be converted into broadcast join
    plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
    self.assertEqual(1, plan1.toString().count("BroadcastHashJoin"))

    # no join key -- should not be a broadcast join
    plan2 = df1.crossJoin(broadcast(df2))._jdf.queryExecution().executedPlan()
    self.assertEqual(0, plan2.toString().count("BroadcastHashJoin"))

    # planner should not crash without a join
    broadcast(df1)._jdf.queryExecution().executedPlan()
def test_generic_hints(self):
    """DataFrame.hint accepts string/list parameters and influences the plan."""
    from pyspark.sql import DataFrame
    df1 = self.spark.range(10e10).toDF("id")
    df2 = self.spark.range(10e10).toDF("id")
    hinted_frames = [
        df1.hint("broadcast"),
        df1.hint("broadcast", []),
        # Dummy rules
        df1.hint("broadcast", "foo", "bar"),
        df1.hint("broadcast", ["foo", "bar"]),
    ]
    for frame in hinted_frames:
        self.assertIsInstance(frame, DataFrame)
    plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
    self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
def test_toDF_with_schema_string(self):
    """RDD.toDF accepts a DDL schema string, a type string, or a DataType."""
    data = [Row(key=i, value=str(i)) for i in range(100)]
    rdd = self.sc.parallelize(data, 5)

    df = rdd.toDF("key: int, value: string")
    self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
    self.assertEqual(df.collect(), data)

    # different but compatible field types can be used.
    df = rdd.toDF("key: string, value: string")
    self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
    self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])

    # field names can differ.
    df = rdd.toDF(" a: int, b: string ")
    self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
    self.assertEqual(df.collect(), data)

    # number of fields must match.
    self.assertRaisesRegexp(Exception, "Length of object",
                            lambda: rdd.toDF("key: int").collect())

    # field types mismatch will cause exception at runtime.
    self.assertRaisesRegexp(Exception, "FloatType can not accept",
                            lambda: rdd.toDF("key: float, value: string").collect())

    # flat schema values will be wrapped into row.
    df = rdd.map(lambda row: row.key).toDF("int")
    self.assertEqual(df.schema.simpleString(), "struct<value:int>")
    self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])

    # users can use DataType directly instead of data type string.
    df = rdd.map(lambda row: row.key).toDF(IntegerType())
    self.assertEqual(df.schema.simpleString(), "struct<value:int>")
    self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# Regression test for invalid join methods when on is None, Spark-14761
def test_invalid_join_method(self):
    """Regression test for SPARK-14761: an unknown `how` raises cleanly."""
    left = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
    right = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
    self.assertRaises(IllegalArgumentException,
                      lambda: left.join(right, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
    """Cartesian products require the explicit crossJoin API."""
    from pyspark.sql.functions import broadcast
    left = self.spark.createDataFrame([(1, "1")], ("key", "value"))
    right = self.spark.createDataFrame([(1, "1")], ("key", "value"))
    # A join with no condition is rejected unless cross-join syntax is used.
    with self.assertRaises(AnalysisException):
        left.join(right).collect()
    # The explicit crossJoin API works.
    self.assertEqual(1, left.crossJoin(right).count())
def test_conf(self):
    """RuntimeConfig set/get/unset and default-value handling."""
    conf = self.spark.conf
    conf.set("bogo", "sipeo")
    self.assertEqual("sipeo", conf.get("bogo"))
    conf.set("bogo", "ta")
    self.assertEqual("ta", conf.get("bogo"))
    # The default is ignored when the key is set...
    self.assertEqual("ta", conf.get("bogo", "not.read"))
    # ...and returned when it is not.
    self.assertEqual("ta", conf.get("not.set", "ta"))
    self.assertRaisesRegexp(Exception, "not.set", lambda: conf.get("not.set"))
    conf.unset("bogo")
    self.assertEqual("colombia", conf.get("bogo", "colombia"))
def test_current_database(self):
    """Catalog current-database get/set, including a nonexistent database.

    Consistency fix: uses ``assertEqual`` instead of the deprecated
    ``assertEquals`` alias used only by these catalog tests.
    """
    spark = self.spark
    spark.catalog._reset()
    self.assertEqual(spark.catalog.currentDatabase(), "default")
    spark.sql("CREATE DATABASE some_db")
    spark.catalog.setCurrentDatabase("some_db")
    self.assertEqual(spark.catalog.currentDatabase(), "some_db")
    # Switching to a missing database raises AnalysisException.
    self.assertRaisesRegexp(
        AnalysisException,
        "does_not_exist",
        lambda: spark.catalog.setCurrentDatabase("does_not_exist"))
def test_list_databases(self):
    """Catalog.listDatabases reflects CREATE DATABASE.

    Consistency fix: uses ``assertEqual`` instead of the deprecated
    ``assertEquals`` alias used only by these catalog tests.
    """
    spark = self.spark
    spark.catalog._reset()
    databases = [db.name for db in spark.catalog.listDatabases()]
    self.assertEqual(databases, ["default"])
    spark.sql("CREATE DATABASE some_db")
    databases = [db.name for db in spark.catalog.listDatabases()]
    self.assertEqual(sorted(databases), ["default", "some_db"])
def test_list_tables(self):
    """listTables() returns managed tables per-database plus global temp views."""
    from pyspark.sql.catalog import Table
    spark = self.spark
    spark.catalog._reset()
    spark.sql("CREATE DATABASE some_db")
    self.assertEquals(spark.catalog.listTables(), [])
    self.assertEquals(spark.catalog.listTables("some_db"), [])
    spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab")
    spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
    spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT) USING parquet")
    tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)
    tablesDefault = sorted(spark.catalog.listTables("default"), key=lambda t: t.name)
    tablesSomeDb = sorted(spark.catalog.listTables("some_db"), key=lambda t: t.name)
    # No database argument is equivalent to the current ("default") database.
    self.assertEquals(tables, tablesDefault)
    self.assertEquals(len(tables), 2)
    self.assertEquals(len(tablesSomeDb), 2)
    self.assertEquals(tables[0], Table(
        name="tab1",
        database="default",
        description=None,
        tableType="MANAGED",
        isTemporary=False))
    # The temp view shows up in every database listing, with no database.
    self.assertEquals(tables[1], Table(
        name="temp_tab",
        database=None,
        description=None,
        tableType="TEMPORARY",
        isTemporary=True))
    self.assertEquals(tablesSomeDb[0], Table(
        name="tab2",
        database="some_db",
        description=None,
        tableType="MANAGED",
        isTemporary=False))
    self.assertEquals(tablesSomeDb[1], Table(
        name="temp_tab",
        database=None,
        description=None,
        tableType="TEMPORARY",
        isTemporary=True))
    self.assertRaisesRegexp(
        AnalysisException,
        "does_not_exist",
        lambda: spark.catalog.listTables("does_not_exist"))
def test_list_functions(self):
    """listFunctions() includes builtins, registered temp functions and DB functions."""
    from pyspark.sql.catalog import Function
    spark = self.spark
    spark.catalog._reset()
    spark.sql("CREATE DATABASE some_db")
    functions = dict((f.name, f) for f in spark.catalog.listFunctions())
    functionsDefault = dict((f.name, f) for f in spark.catalog.listFunctions("default"))
    # A fresh session already exposes the large set of built-in functions.
    self.assertTrue(len(functions) > 200)
    self.assertTrue("+" in functions)
    self.assertTrue("like" in functions)
    self.assertTrue("month" in functions)
    self.assertTrue("to_date" in functions)
    self.assertTrue("to_timestamp" in functions)
    self.assertTrue("to_unix_timestamp" in functions)
    self.assertTrue("current_database" in functions)
    self.assertEquals(functions["+"], Function(
        name="+",
        description=None,
        className="org.apache.spark.sql.catalyst.expressions.Add",
        isTemporary=True))
    self.assertEquals(functions, functionsDefault)
    # Temp functions are visible everywhere; DB functions only in their DB.
    spark.catalog.registerFunction("temp_func", lambda x: str(x))
    spark.sql("CREATE FUNCTION func1 AS 'org.apache.spark.data.bricks'")
    spark.sql("CREATE FUNCTION some_db.func2 AS 'org.apache.spark.data.bricks'")
    newFunctions = dict((f.name, f) for f in spark.catalog.listFunctions())
    newFunctionsSomeDb = dict((f.name, f) for f in spark.catalog.listFunctions("some_db"))
    self.assertTrue(set(functions).issubset(set(newFunctions)))
    self.assertTrue(set(functions).issubset(set(newFunctionsSomeDb)))
    self.assertTrue("temp_func" in newFunctions)
    self.assertTrue("func1" in newFunctions)
    self.assertTrue("func2" not in newFunctions)
    self.assertTrue("temp_func" in newFunctionsSomeDb)
    self.assertTrue("func1" not in newFunctionsSomeDb)
    self.assertTrue("func2" in newFunctionsSomeDb)
    self.assertRaisesRegexp(
        AnalysisException,
        "does_not_exist",
        lambda: spark.catalog.listFunctions("does_not_exist"))
def test_list_columns(self):
    """listColumns() resolves a table in the given (or current) database."""
    from pyspark.sql.catalog import Column
    spark = self.spark
    spark.catalog._reset()
    spark.sql("CREATE DATABASE some_db")
    spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
    spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT) USING parquet")
    columns = sorted(spark.catalog.listColumns("tab1"), key=lambda c: c.name)
    columnsDefault = sorted(spark.catalog.listColumns("tab1", "default"), key=lambda c: c.name)
    # Omitting the database is the same as naming the current one.
    self.assertEquals(columns, columnsDefault)
    self.assertEquals(len(columns), 2)
    self.assertEquals(columns[0], Column(
        name="age",
        description=None,
        dataType="int",
        nullable=True,
        isPartition=False,
        isBucket=False))
    self.assertEquals(columns[1], Column(
        name="name",
        description=None,
        dataType="string",
        nullable=True,
        isPartition=False,
        isBucket=False))
    columns2 = sorted(spark.catalog.listColumns("tab2", "some_db"), key=lambda c: c.name)
    self.assertEquals(len(columns2), 2)
    self.assertEquals(columns2[0], Column(
        name="nickname",
        description=None,
        dataType="string",
        nullable=True,
        isPartition=False,
        isBucket=False))
    self.assertEquals(columns2[1], Column(
        name="tolerance",
        description=None,
        dataType="float",
        nullable=True,
        isPartition=False,
        isBucket=False))
    # tab2 only exists in some_db, so an unqualified lookup must fail.
    self.assertRaisesRegexp(
        AnalysisException,
        "tab2",
        lambda: spark.catalog.listColumns("tab2"))
    self.assertRaisesRegexp(
        AnalysisException,
        "does_not_exist",
        lambda: spark.catalog.listColumns("does_not_exist"))
def test_cache(self):
    """cacheTable/uncacheTable/clearCache drive isCached(); unknown tables raise."""
    spark = self.spark
    spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
    spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
    self.assertFalse(spark.catalog.isCached("tab1"))
    self.assertFalse(spark.catalog.isCached("tab2"))
    spark.catalog.cacheTable("tab1")
    # Caching is per-table: tab2 remains uncached.
    self.assertTrue(spark.catalog.isCached("tab1"))
    self.assertFalse(spark.catalog.isCached("tab2"))
    spark.catalog.cacheTable("tab2")
    spark.catalog.uncacheTable("tab1")
    self.assertFalse(spark.catalog.isCached("tab1"))
    self.assertTrue(spark.catalog.isCached("tab2"))
    # clearCache drops everything at once.
    spark.catalog.clearCache()
    self.assertFalse(spark.catalog.isCached("tab1"))
    self.assertFalse(spark.catalog.isCached("tab2"))
    self.assertRaisesRegexp(
        AnalysisException,
        "does_not_exist",
        lambda: spark.catalog.isCached("does_not_exist"))
    self.assertRaisesRegexp(
        AnalysisException,
        "does_not_exist",
        lambda: spark.catalog.cacheTable("does_not_exist"))
    self.assertRaisesRegexp(
        AnalysisException,
        "does_not_exist",
        lambda: spark.catalog.uncacheTable("does_not_exist"))
def test_read_text_file_list(self):
    """read.text() accepts a list of paths; rows from all files are unioned."""
    # The same 2-line file is listed twice, so the union has 4 rows.
    df = self.spark.read.text(['python/test_support/sql/text-test.txt',
                               'python/test_support/sql/text-test.txt'])
    count = df.count()
    self.assertEquals(count, 4)
def test_BinaryType_serialization(self):
    """BinaryType columns can be serialised and collected without error."""
    # Pyrolite version <= 4.9 could not serialize BinaryType with Python3 SPARK-17808
    schema = StructType([StructField('mybytes', BinaryType())])
    data = [[bytearray(b'here is my data')],
            [bytearray(b'and here is some more')]]
    df = self.spark.createDataFrame(data, schema=schema)
    df.collect()
def test_bucketed_write(self):
    """bucketBy/sortBy variants all record the bucketing columns in the catalog."""
    data = [
        (1, "foo", 3.0), (2, "foo", 5.0),
        (3, "bar", -1.0), (4, "bar", 6.0),
    ]
    df = self.spark.createDataFrame(data, ["x", "y", "z"])

    def count_bucketed_cols(names, table="pyspark_bucket"):
        """Given a sequence of column names and a table name
        query the catalog and return the number of columns which are
        used for bucketing
        """
        cols = self.spark.catalog.listColumns(table)
        num = len([c for c in cols if c.name in names and c.isBucket])
        return num

    # Test write with one bucketing column
    df.write.bucketBy(3, "x").mode("overwrite").saveAsTable("pyspark_bucket")
    self.assertEqual(count_bucketed_cols(["x"]), 1)
    self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
    # Test write two bucketing columns
    df.write.bucketBy(3, "x", "y").mode("overwrite").saveAsTable("pyspark_bucket")
    self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
    self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
    # Test write with bucket and sort
    df.write.bucketBy(2, "x").sortBy("z").mode("overwrite").saveAsTable("pyspark_bucket")
    self.assertEqual(count_bucketed_cols(["x"]), 1)
    self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
    # Test write with a list of columns
    df.write.bucketBy(3, ["x", "y"]).mode("overwrite").saveAsTable("pyspark_bucket")
    self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
    self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
    # Test write with bucket and sort with a list of columns
    (df.write.bucketBy(2, "x")
        .sortBy(["y", "z"])
        .mode("overwrite").saveAsTable("pyspark_bucket"))
    self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
    # Test write with bucket and sort with multiple columns
    (df.write.bucketBy(2, "x")
        .sortBy("y", "z")
        .mode("overwrite").saveAsTable("pyspark_bucket"))
    self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
class HiveSparkSubmitTests(SparkSubmitTests):
    """spark-submit tests that require a Hive metastore."""

    def test_hivecontext(self):
        # This test checks that HiveContext is using Hive metastore (SPARK-16224).
        # It sets a metastore url and checks if there is a derby dir created by
        # Hive metastore. If this derby dir exists, HiveContext is using
        # Hive metastore.
        metastore_path = os.path.join(tempfile.mkdtemp(), "spark16224_metastore_db")
        metastore_URL = "jdbc:derby:;databaseName=" + metastore_path + ";create=true"
        hive_site_dir = os.path.join(self.programDir, "conf")
        # NOTE(review): the '|' margin is stripped by createTempFile — confirm.
        hive_site_file = self.createTempFile("hive-site.xml", ("""
            |<configuration>
            | <property>
            | <name>javax.jdo.option.ConnectionURL</name>
            | <value>%s</value>
            | </property>
            |</configuration>
            """ % metastore_URL).lstrip(), "conf")
        script = self.createTempFile("test.py", """
            |import os
            |
            |from pyspark.conf import SparkConf
            |from pyspark.context import SparkContext
            |from pyspark.sql import HiveContext
            |
            |conf = SparkConf()
            |sc = SparkContext(conf=conf)
            |hive_context = HiveContext(sc)
            |print(hive_context.sql("show databases").collect())
            """)
        proc = subprocess.Popen(
            [self.sparkSubmit, "--master", "local-cluster[1,1,1024]",
             "--driver-class-path", hive_site_dir, script],
            stdout=subprocess.PIPE)
        out, err = proc.communicate()
        self.assertEqual(0, proc.returncode)
        self.assertIn("default", out.decode('utf-8'))
        # A derby directory at the configured path proves Hive metastore was used.
        self.assertTrue(os.path.exists(metastore_path))
class SQLTests2(ReusedPySparkTestCase):
    """SQL tests that need a private SparkContext they can stop."""

    @classmethod
    def setUpClass(cls):
        ReusedPySparkTestCase.setUpClass()
        cls.spark = SparkSession(cls.sc)

    @classmethod
    def tearDownClass(cls):
        ReusedPySparkTestCase.tearDownClass()
        cls.spark.stop()

    # We can't include this test into SQLTests because we will stop class's SparkContext and cause
    # other tests failed.
    def test_sparksession_with_stopped_sparkcontext(self):
        """A SparkSession built after a context restart must be usable."""
        self.sc.stop()
        # NOTE(review): `sc` is never used directly — starting it makes the
        # subsequent builder pick up a live context. Confirm intent.
        sc = SparkContext('local[4]', self.sc.appName)
        spark = SparkSession.builder.getOrCreate()
        df = spark.createDataFrame([(1, 2)], ["c", "c"])
        df.collect()
class UDFInitializationTests(unittest.TestCase):
    """Creating a UserDefinedFunction must not eagerly start Spark."""

    def tearDown(self):
        # Stop any session/context a test accidentally created, so later
        # test classes start from a clean slate.
        if SparkSession._instantiatedSession is not None:
            SparkSession._instantiatedSession.stop()

        if SparkContext._active_spark_context is not None:
            # Bug fix: this previously read `_active_spark_contex` (missing
            # the final 't'), which raised AttributeError instead of
            # stopping the leaked context.
            SparkContext._active_spark_context.stop()

    def test_udf_init_shouldnt_initalize_context(self):
        from pyspark.sql.functions import UserDefinedFunction

        # Constructing the UDF wrapper alone must not spin up Spark.
        UserDefinedFunction(lambda x: x, StringType())

        self.assertIsNone(
            SparkContext._active_spark_context,
            "SparkContext shouldn't be initialized when UserDefinedFunction is created."
        )
        self.assertIsNone(
            SparkSession._instantiatedSession,
            "SparkSession shouldn't be initialized when UserDefinedFunction is created."
        )
class HiveContextSQLTests(ReusedPySparkTestCase):
    """SQL tests that need a Hive-backed context (tables, window functions)."""

    @classmethod
    def setUpClass(cls):
        ReusedPySparkTestCase.setUpClass()
        cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
        try:
            # Probe for Hive on the JVM classpath; skip the whole class if absent.
            cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
        except py4j.protocol.Py4JError:
            cls.tearDownClass()
            raise unittest.SkipTest("Hive is not available")
        except TypeError:
            cls.tearDownClass()
            raise unittest.SkipTest("Hive is not available")
        os.unlink(cls.tempdir.name)
        cls.spark = HiveContext._createForTesting(cls.sc)
        cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
        cls.df = cls.sc.parallelize(cls.testData).toDF()

    @classmethod
    def tearDownClass(cls):
        ReusedPySparkTestCase.tearDownClass()
        shutil.rmtree(cls.tempdir.name, ignore_errors=True)

    def test_save_and_load_table(self):
        """saveAsTable/createExternalTable round-trips, honouring source/mode/defaults."""
        df = self.df
        tmpPath = tempfile.mkdtemp()
        shutil.rmtree(tmpPath)
        df.write.saveAsTable("savedJsonTable", "json", "append", path=tmpPath)
        actual = self.spark.createExternalTable("externalJsonTable", tmpPath, "json")
        self.assertEqual(sorted(df.collect()),
                         sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
        self.assertEqual(sorted(df.collect()),
                         sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
        self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
        self.spark.sql("DROP TABLE externalJsonTable")

        df.write.saveAsTable("savedJsonTable", "json", "overwrite", path=tmpPath)
        schema = StructType([StructField("value", StringType(), True)])
        # Unknown keyword options must be ignored harmlessly.
        actual = self.spark.createExternalTable("externalJsonTable", source="json",
                                                schema=schema, path=tmpPath,
                                                noUse="this options will not be used")
        self.assertEqual(sorted(df.collect()),
                         sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
        self.assertEqual(sorted(df.select("value").collect()),
                         sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
        self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
        self.spark.sql("DROP TABLE savedJsonTable")
        self.spark.sql("DROP TABLE externalJsonTable")

        # Temporarily switch the default data source, then restore it.
        defaultDataSourceName = self.spark.getConf("spark.sql.sources.default",
                                                   "org.apache.spark.sql.parquet")
        self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
        df.write.saveAsTable("savedJsonTable", path=tmpPath, mode="overwrite")
        actual = self.spark.createExternalTable("externalJsonTable", path=tmpPath)
        self.assertEqual(sorted(df.collect()),
                         sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
        self.assertEqual(sorted(df.collect()),
                         sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
        self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
        self.spark.sql("DROP TABLE savedJsonTable")
        self.spark.sql("DROP TABLE externalJsonTable")
        self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)

        shutil.rmtree(tmpPath)

    def test_window_functions(self):
        """Partitioned window: max/min/count/row_number/rank/dense_rank/ntile."""
        df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
        w = Window.partitionBy("value").orderBy("key")
        from pyspark.sql import functions as F
        sel = df.select(df.value, df.key,
                        F.max("key").over(w.rowsBetween(0, 1)),
                        F.min("key").over(w.rowsBetween(0, 1)),
                        F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
                        F.row_number().over(w),
                        F.rank().over(w),
                        F.dense_rank().over(w),
                        F.ntile(2).over(w))
        rs = sorted(sel.collect())
        expected = [
            ("1", 1, 1, 1, 1, 1, 1, 1, 1),
            ("2", 1, 1, 1, 3, 1, 1, 1, 1),
            ("2", 1, 2, 1, 3, 2, 1, 1, 1),
            ("2", 2, 2, 2, 3, 3, 3, 2, 2)
        ]
        for r, ex in zip(rs, expected):
            self.assertEqual(tuple(r), ex[:len(r)])

    def test_window_functions_without_partitionBy(self):
        """Same window functions over a single (unpartitioned) window."""
        df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
        w = Window.orderBy("key", df.value)
        from pyspark.sql import functions as F
        sel = df.select(df.value, df.key,
                        F.max("key").over(w.rowsBetween(0, 1)),
                        F.min("key").over(w.rowsBetween(0, 1)),
                        F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
                        F.row_number().over(w),
                        F.rank().over(w),
                        F.dense_rank().over(w),
                        F.ntile(2).over(w))
        rs = sorted(sel.collect())
        expected = [
            ("1", 1, 1, 1, 4, 1, 1, 1, 1),
            ("2", 1, 1, 1, 4, 2, 2, 2, 1),
            ("2", 1, 2, 1, 4, 3, 2, 2, 2),
            ("2", 2, 2, 2, 4, 4, 4, 3, 2)
        ]
        for r, ex in zip(rs, expected):
            self.assertEqual(tuple(r), ex[:len(r)])

    def test_window_functions_cumulative_sum(self):
        """Cumulative sums, including frame bounds beyond JVM Long range."""
        df = self.spark.createDataFrame([("one", 1), ("two", 2)], ["key", "value"])
        from pyspark.sql import functions as F

        # Test cumulative sum
        sel = df.select(
            df.key,
            F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding, 0)))
        rs = sorted(sel.collect())
        expected = [("one", 1), ("two", 3)]
        for r, ex in zip(rs, expected):
            self.assertEqual(tuple(r), ex[:len(r)])

        # Test boundary values less than JVM's Long.MinValue and make sure we don't overflow
        sel = df.select(
            df.key,
            F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding - 1, 0)))
        rs = sorted(sel.collect())
        expected = [("one", 1), ("two", 3)]
        for r, ex in zip(rs, expected):
            self.assertEqual(tuple(r), ex[:len(r)])

        # Test boundary values greater than JVM's Long.MaxValue and make sure we don't overflow
        frame_end = Window.unboundedFollowing + 1
        sel = df.select(
            df.key,
            F.sum(df.value).over(Window.rowsBetween(Window.currentRow, frame_end)))
        rs = sorted(sel.collect())
        expected = [("one", 3), ("two", 2)]
        for r, ex in zip(rs, expected):
            self.assertEqual(tuple(r), ex[:len(r)])

    def test_collect_functions(self):
        """collect_set drops duplicates, collect_list keeps them."""
        df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
        from pyspark.sql import functions

        self.assertEqual(
            sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r),
            [1, 2])
        self.assertEqual(
            sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r),
            [1, 1, 1, 2])
        self.assertEqual(
            sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r),
            ["1", "2"])
        self.assertEqual(
            sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r),
            ["1", "2", "2", "2"])

    def test_limit_and_take(self):
        """take() and limit().collect() must each run a single 1-task job."""
        df = self.spark.range(1, 1000, numPartitions=10)

        def assert_runs_only_one_job_stage_and_task(job_group_name, f):
            # Uses the status tracker to count jobs/stages/tasks run by f().
            tracker = self.sc.statusTracker()
            self.sc.setJobGroup(job_group_name, description="")
            f()
            jobs = tracker.getJobIdsForGroup(job_group_name)
            self.assertEqual(1, len(jobs))
            stages = tracker.getJobInfo(jobs[0]).stageIds
            self.assertEqual(1, len(stages))
            self.assertEqual(1, tracker.getStageInfo(stages[0]).numTasks)

        # Regression test for SPARK-10731: take should delegate to Scala implementation
        assert_runs_only_one_job_stage_and_task("take", lambda: df.take(1))
        # Regression test for SPARK-17514: limit(n).collect() should the perform same as take(n)
        assert_runs_only_one_job_stage_and_task("collect_limit", lambda: df.limit(1).collect())

    def test_datetime_functions(self):
        """to_date parses an ISO date string into a datetime.date."""
        from pyspark.sql import functions
        from datetime import date, datetime
        df = self.spark.range(1).selectExpr("'2017-01-22' as dateCol")
        parse_result = df.select(functions.to_date(functions.col("dateCol"))).first()
        self.assertEquals(date(2017, 1, 22), parse_result['to_date(`dateCol`)'])

    @unittest.skipIf(sys.version_info < (3, 3), "Unittest < 3.3 doesn't support mocking")
    def test_unbounded_frames(self):
        """-sys.maxsize/sys.maxsize frame bounds mean UNBOUNDED for any word size."""
        from unittest.mock import patch
        from pyspark.sql import functions as F
        from pyspark.sql import window
        import importlib

        df = self.spark.range(0, 3)

        def rows_frame_match():
            return "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
                F.count("*").over(window.Window.rowsBetween(-sys.maxsize, sys.maxsize))
            ).columns[0]

        def range_frame_match():
            return "RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
                F.count("*").over(window.Window.rangeBetween(-sys.maxsize, sys.maxsize))
            ).columns[0]

        # Reload the module under each patched word size so its constants rebuild.
        with patch("sys.maxsize", 2 ** 31 - 1):
            importlib.reload(window)
            self.assertTrue(rows_frame_match())
            self.assertTrue(range_frame_match())

        with patch("sys.maxsize", 2 ** 63 - 1):
            importlib.reload(window)
            self.assertTrue(rows_frame_match())
            self.assertTrue(range_frame_match())

        with patch("sys.maxsize", 2 ** 127 - 1):
            importlib.reload(window)
            self.assertTrue(rows_frame_match())
            self.assertTrue(range_frame_match())

        importlib.reload(window)
if __name__ == "__main__":
    # Re-import so unittest discovers the test classes by module path.
    from pyspark.sql.tests import *
    if xmlrunner:
        # Emit JUnit-style XML reports when xmlrunner is installed.
        unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
    else:
        unittest.main()
| 43.117464 | 100 | 0.59825 |
acea915456b8d809aecb5532d99d3f4f27734ee9 | 354 | py | Python | rest/taskrouter/twiml/example1/example/example.5.x.py | azaddeveloper/api-snippets | f88b153cd7186fa70b33733b205886502db0d1f2 | [
"MIT"
] | 2 | 2017-11-23T11:31:20.000Z | 2018-01-22T04:14:02.000Z | rest/taskrouter/twiml/example1/example/example.5.x.py | azaddeveloper/api-snippets | f88b153cd7186fa70b33733b205886502db0d1f2 | [
"MIT"
] | null | null | null | rest/taskrouter/twiml/example1/example/example.5.x.py | azaddeveloper/api-snippets | f88b153cd7186fa70b33733b205886502db0d1f2 | [
"MIT"
] | 2 | 2020-05-22T23:31:21.000Z | 2021-06-10T18:33:45.000Z | # Download the Python helper library from twilio.com/docs/python/install
from flask import Flask
import twilio.twiml
app = Flask(__name__)
@app.route("/enqueue_call", methods=['GET', 'POST'])
def enqueue_call():
resp = twilio.twiml.Response()
with resp.enqueue(None, workflowSid="WW0123456789abcdef0123456789abcdef"):
return str(resp)
| 27.230769 | 78 | 0.740113 |
acea9199dc051eb4d5648fcbd84e45888e441aed | 2,129 | py | Python | python/GafferScene/__init__.py | sebaDesmet/gaffer | 47b2d093c40452bd77947e3b5bd0722a366c8d59 | [
"BSD-3-Clause"
] | 1 | 2019-08-02T16:49:59.000Z | 2019-08-02T16:49:59.000Z | python/GafferScene/__init__.py | rkoschmitzky/gaffer | ec6262ae1292767bdeb9520d1447d65a4a511884 | [
"BSD-3-Clause"
] | null | null | null | python/GafferScene/__init__.py | rkoschmitzky/gaffer | ec6262ae1292767bdeb9520d1447d65a4a511884 | [
"BSD-3-Clause"
] | 1 | 2020-12-21T12:33:49.000Z | 2020-12-21T12:33:49.000Z | ##########################################################################
#
# Copyright (c) 2012-2014, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
# Force-load dependencies before pulling in the compiled module.
__import__( "IECoreScene" )
__import__( "Gaffer" )
__import__( "GafferDispatch" )
__import__( "GafferImage" )

from _GafferScene import *
from ShaderBall import ShaderBall

# Run any GafferScene startup scripts found on the configured paths.
__import__( "IECore" ).loadConfig( "GAFFER_STARTUP_PATHS", subdirectory = "GafferScene" )
| 44.354167 | 89 | 0.693283 |
acea92acb6ff98706fdc32fffc10c9595167e740 | 19,263 | py | Python | eodatasets3/validate.py | csiro-easi/eo-datasets | 7805c569763f828cb0ace84c93932bddb882a6a3 | [
"Apache-2.0"
] | null | null | null | eodatasets3/validate.py | csiro-easi/eo-datasets | 7805c569763f828cb0ace84c93932bddb882a6a3 | [
"Apache-2.0"
] | null | null | null | eodatasets3/validate.py | csiro-easi/eo-datasets | 7805c569763f828cb0ace84c93932bddb882a6a3 | [
"Apache-2.0"
] | null | null | null | """
Validate ODC dataset documents
"""
import collections
import enum
import math
import sys
from datetime import datetime
from pathlib import Path
from typing import List, Counter, Dict, Generator, Optional, Union, Tuple, Sequence
import attr
import click
import numpy as np
import rasterio
from boltons.iterutils import get_path
from click import style, echo, secho
from eodatasets3 import serialise, model
from eodatasets3.model import DatasetDoc
from eodatasets3.ui import PathPath, is_absolute, uri_resolve, bool_style
from eodatasets3.utils import default_utc
from rasterio import DatasetReader
from rasterio.crs import CRS
from rasterio.errors import CRSError
from shapely.validation import explain_validity
class Level(enum.Enum):
    """Severity of a validation message, least to most severe."""
    info = 1
    warning = 2
    error = 3
@attr.s(auto_attribs=True, frozen=True)
class ValidationMessage:
    """One immutable finding produced by the validators."""

    # Severity of the finding.
    level: Level
    # Short machine-readable code (eg. 'missing_measurement').
    code: str
    # Human-readable explanation.
    reason: str
    # Optional suggestion on how to fix it.
    hint: str = None

    def __str__(self) -> str:
        """Render as 'code: reason (Hint: ...)'."""
        hint = ""
        if self.hint:
            hint = f" (Hint: {self.hint})"
        return f"{self.code}: {self.reason}{hint}"
def _info(code: str, reason: str, hint: str = None):
    """Shorthand: build an info-level ValidationMessage."""
    return ValidationMessage(Level.info, code, reason, hint=hint)


def _warning(code: str, reason: str, hint: str = None):
    """Shorthand: build a warning-level ValidationMessage."""
    return ValidationMessage(Level.warning, code, reason, hint=hint)


def _error(code: str, reason: str, hint: str = None):
    """Shorthand: build an error-level ValidationMessage."""
    return ValidationMessage(Level.error, code, reason, hint=hint)


# Return type of every validate_* generator in this module.
ValidationMessages = Generator[ValidationMessage, None, None]
def validate_dataset(
    doc: Dict,
    # Optionally check that the dataset matches this product definition.
    product_definition: Optional[Dict] = None,
    # A thorough validation will try to open the data itself.
    thorough: bool = False,
    # Dataset location to use, if not the metadata path.
    readable_location: Union[str, Path] = None,
) -> ValidationMessages:
    """
    Validate a dataset document, optionally against the given product.

    By default this will only look at the metadata, run with thorough=True to
    open the data files too.

    Yields ValidationMessage objects; yielding nothing means the dataset passed.
    """
    # Without a recognised $schema the rest of the document can't be
    # interpreted, so bail out immediately rather than report noise.
    schema = doc.get("$schema")
    if schema is None:
        yield _error(
            "no_schema",
            f"No $schema field. "
            f"You probably want an ODC dataset schema {model.ODC_DATASET_SCHEMA_URL!r}",
        )
        return
    if schema != model.ODC_DATASET_SCHEMA_URL:
        yield _error(
            "unknown_doc_type",
            f"Unknown doc schema {schema!r}. Only ODC datasets are supported ({model.ODC_DATASET_SCHEMA_URL!r})",
        )
        return

    # Structural (json-schema) validation; deserialisation below isn't safe
    # if this fails, so report all schema errors and stop.
    has_doc_errors = False
    for error in serialise.DATASET_SCHEMA.iter_errors(doc):
        has_doc_errors = True
        displayable_path = ".".join(error.absolute_path)

        hint = None
        if displayable_path == "crs" and "not of type" in error.message:
            hint = "epsg codes should be prefixed with 'epsg:1234'"

        context = f"({displayable_path}) " if displayable_path else ""
        yield _error("structure", f"{context}{error.message} ", hint=hint)

    if has_doc_errors:
        return

    dataset = serialise.from_doc(doc, skip_validation=True)

    if not dataset.product.href:
        # Bug fix: this message was previously constructed but never yielded,
        # so it was silently discarded by the generator.
        yield _info("product_href", "A url (href) is recommended for products")

    yield from _validate_geo(dataset)

    # Note that a dataset may have no measurements (eg. telemetry data).
    # (TODO: a stricter mode for when we know we should have geo and measurement info)
    if dataset.measurements:
        for name, measurement in dataset.measurements.items():
            grid_name = measurement.grid
            if grid_name != "default" or dataset.grids:
                if grid_name not in dataset.grids:
                    yield _error(
                        "invalid_grid_ref",
                        f"Measurement {name!r} refers to unknown grid {grid_name!r}",
                    )

            if is_absolute(measurement.path):
                yield _warning(
                    "absolute_path",
                    f"measurement {name!r} has an absolute path: {measurement.path!r}",
                )

    yield from _validate_stac_properties(dataset)

    # Measurements the product says every dataset must have (empty when no
    # product definition was given).
    required_measurements: Dict[str, ExpectedMeasurement] = {}
    if product_definition is not None:
        required_measurements.update(
            {
                m.name: m
                for m in map(
                    ExpectedMeasurement.from_definition,
                    product_definition.get("measurements") or (),
                )
            }
        )

        product_name = product_definition.get("name")
        if product_name != dataset.product.name:
            # This is only informational as it's possible products may be indexed with finer-grained
            # categories than the original datasets: eg. a separate "nrt" product, or test product.
            yield _info(
                "product_mismatch",
                f"Dataset product name {dataset.product.name!r} "
                f"does not match the given product ({product_name!r}",
            )

        for name in required_measurements:
            if name not in dataset.measurements.keys():
                yield _error(
                    "missing_measurement",
                    f"Product {product_name} expects a measurement {name!r})",
                )

    # Thorough mode: open each measurement file and cross-check the band,
    # dtype and nodata against the product definition.
    if thorough:
        for name, measurement in dataset.measurements.items():
            full_path = uri_resolve(readable_location, measurement.path)
            expected_measurement = required_measurements.get(name)

            band = measurement.band or 1
            with rasterio.open(full_path) as ds:
                ds: DatasetReader

                if band not in ds.indexes:
                    yield _error(
                        "incorrect_band",
                        f"Measurement {name!r} file contains no rio index {band!r}.",
                        hint=f"contains indexes {ds.indexes!r}",
                    )
                    continue

                if not expected_measurement:
                    # The measurement is not in the product definition
                    #
                    # This is only informational because a product doesn't have to define all
                    # measurements that the datasets contain.
                    #
                    # This is historically because dataset documents reflect the measurements that
                    # are stored on disk, which can differ. But products define the set of measurments
                    # that are mandatory in every dataset.
                    #
                    # (datasets differ when, for example, sensors go offline, or when there's on-disk
                    # measurements like panchromatic that GA doesn't want in their product definitions)
                    if required_measurements:
                        yield _info(
                            "unspecified_measurement",
                            f"Measurement {name} is not in the product",
                        )
                else:
                    expected_dtype = expected_measurement.dtype
                    band_dtype = ds.dtypes[band - 1]
                    # TODO: NaN handling
                    if expected_dtype != band_dtype:
                        yield _error(
                            "different_dtype",
                            f"{name} dtype: "
                            f"product {expected_dtype!r} != dataset {band_dtype!r}",
                        )

                    # TODO: the nodata can also be a fill value, as mentioned by Kirill.
                    expected_nodata = expected_measurement.nodata
                    ds_nodata = ds.nodatavals[band - 1]
                    # NaN != NaN, so treat two NaNs (in any representation) as equal.
                    if expected_nodata != ds_nodata and not (
                        _is_nan(expected_nodata) and _is_nan(ds_nodata)
                    ):
                        yield _info(
                            "different_nodata",
                            f"{name} nodata: "
                            f"product {expected_nodata !r} != dataset {ds_nodata !r}",
                        )
def validate_product(doc: Dict) -> ValidationMessages:
    """
    Check for common product mistakes

    # TODO: validate against a schema. ODC core has one and does this already, but we don't currently depend on it.
    """
    # We'll focus on the parts ODC doesn't yet do.
    measurements = doc.get("measurements")
    if measurements is None:
        # Products don't have to have measurements. (eg. provenance-only products)
        ...
    elif not isinstance(measurements, Sequence):
        yield _error(
            "measurements_list",
            f"Product measurements should be a list/sequence "
            f"(Found a {type(measurements).__name__!r}).",
        )
    else:
        for measurement in measurements:
            name = measurement.get("name")
            dtype = measurement.get("dtype")
            nodata = measurement.get("nodata")
            # The nodata sentinel must be representable in the declared dtype
            # (eg. a negative nodata can't live in a uint8 band).
            if not numpy_value_fits_dtype(nodata, dtype):
                yield _error(
                    "unsuitable_nodata",
                    f"Measurement {name!r} nodata {nodata!r} does not fit a {dtype!r}",
                )
def numpy_value_fits_dtype(value, dtype):
    """
    Can the value be exactly represented by the given numpy dtype?

    >>> numpy_value_fits_dtype(3, 'uint8')
    True
    >>> numpy_value_fits_dtype(3, np.dtype('uint8'))
    True
    >>> numpy_value_fits_dtype(-3, 'uint8')
    False
    >>> numpy_value_fits_dtype(3.5, 'float32')
    True
    >>> numpy_value_fits_dtype(3.5, 'int16')
    False
    >>> numpy_value_fits_dtype(float('NaN'), 'float32')
    True
    >>> numpy_value_fits_dtype(float('NaN'), 'int32')
    False
    """
    dtype = np.dtype(dtype)

    # An absent value is treated as zero, which fits every dtype.
    value = 0 if value is None else value

    # NaN may arrive as the string 'NaN' after a JSON round-trip; either
    # representation only fits a floating-point dtype.
    nan_valued = (
        value == "NaN" if isinstance(value, str)
        else isinstance(value, float) and math.isnan(value)
    )
    if nan_valued:
        return np.issubdtype(dtype, np.floating)

    # Round-trip through the dtype: the value fits iff nothing was lost.
    return np.all(np.array([value], dtype=dtype) == [value])
@attr.s(auto_attribs=True)
class ExpectedMeasurement:
    """A measurement (band) as declared by a product definition."""

    # Band name, eg. 'red'.
    name: str
    # Numpy-style dtype string, eg. 'uint8' (None if not declared).
    dtype: str
    # Declared nodata sentinel (None if not declared).
    nodata: int

    @classmethod
    def from_definition(cls, doc: Dict):
        """Build from one entry of a product's 'measurements' list."""
        return ExpectedMeasurement(doc["name"], doc.get("dtype"), doc.get("nodata"))
def validate_paths(
    paths: List[Path], thorough: bool = False
) -> Generator[Tuple[Path, List[ValidationMessage]], None, None]:
    """Validate the list of paths. Product documents can be specified before their datasets."""
    # Products seen so far, by name; later datasets are matched against them.
    products: Dict[str, Dict] = {}

    for path in paths:
        # Load yaml. If product, add to products.
        # Otherwise validate.
        doc = serialise.load_yaml(path)
        messages = []
        if is_product(doc):
            messages.extend(validate_product(doc))
            products[doc["name"]] = doc
            yield path, messages
            continue

        # TODO: follow ODC's match rules?
        product = None
        product_name = get_path(doc, ("product", "name"), default=None)
        if products:
            # With a single known product assume it; otherwise match by name.
            if len(products) == 1:
                [product] = products.values()
            elif product_name is not None:
                product = products.get(product_name)

            if product is None:
                messages.append(
                    _warning(
                        "unknown_product",
                        "Cannot match dataset to product",
                        hint=f"Nothing matches {product_name!r}"
                        if product_name
                        else "No product name in dataset (TODO: field matching)",
                    )
                )
        else:
            # A missing product is an error only in thorough mode, where the
            # product is needed to cross-check the data files.
            messages.append(
                ValidationMessage(
                    Level.error if thorough else Level.info,
                    "no_product",
                    "No product provided: validating dataset information alone",
                )
            )

        messages.extend(
            validate_dataset(
                doc,
                product_definition=product,
                readable_location=path,
                thorough=thorough,
            )
        )
        yield path, messages
def is_product(doc: Dict) -> bool:
    """Guess whether a loaded document is an ODC product definition.

    Product definitions declare a ``metadata_type``; dataset documents do not.
    """
    has_metadata_type = "metadata_type" in doc
    return has_metadata_type
def _validate_stac_properties(dataset: DatasetDoc):
    """Yield validation messages for the dataset's STAC/EO3 properties.

    Known properties are run through their registered normaliser: a type
    mismatch or formatting difference yields a warning, an invalid value
    yields an error. A couple of ODC-specific conventions are also checked.
    """
    for name, value in dataset.properties.items():
        if name not in dataset.properties.KNOWN_STAC_PROPERTIES:
            yield _warning("unknown_property", f"Unknown stac property {name!r}")
        else:
            normaliser = dataset.properties.KNOWN_STAC_PROPERTIES.get(name)
            if normaliser and value is not None:
                try:
                    normalised_value = normaliser(value)
                    # Special case for dates, as "no timezone" and "utc timezone" are treated identical.
                    if isinstance(value, datetime):
                        value = default_utc(value)
                    if not isinstance(value, type(normalised_value)):
                        yield _warning(
                            "property_type",
                            f"Value {value} expected to be "
                            f"{type(normalised_value).__name__!r} (got {type(value).__name__!r})",
                        )
                    elif normalised_value != value:
                        if _is_nan(normalised_value) and _is_nan(value):
                            # Both are NaNs, ignore.
                            pass
                        else:
                            yield _warning(
                                "property_formatting",
                                f"Property {value!r} expected to be {normalised_value!r}",
                            )
                except ValueError as e:
                    yield _error("invalid_property", e.args[0])
    if "odc:producer" in dataset.properties:
        producer = dataset.properties["odc:producer"]
        # We use domain name to avoid arguing about naming conventions ('ga' vs 'geoscience-australia' vs ...)
        if "." not in producer:
            yield _warning(
                "producer_domain",
                "Property 'odc:producer' should be the organisation's domain name. Eg. 'ga.gov.au'",
            )
    # This field is a little odd, but is expected by the current version of ODC.
    # (from discussion with Kirill)
    if not dataset.properties.get("odc:file_format"):
        yield _warning(
            "global_file_format",
            "Property 'odc:file_format' is empty",
            hint="Usually 'GeoTIFF'",
        )
def _is_nan(v):
# Due to JSON serialisation, nan can also be represented as a string 'NaN'
if isinstance(v, str):
return v == "NaN"
return isinstance(v, float) and math.isnan(v)
def _validate_geo(dataset: DatasetDoc):
    """Yield validation messages for the dataset's spatial fields.

    A dataset with no geo information at all is merely noted (info-level);
    one with only partial geo information (missing geometry, grids or crs)
    is an error. The CRS must be either an epsg code or a WKT string.
    """
    has_some_geo = _has_some_geo(dataset)
    if not has_some_geo:
        yield _info("non_geo", "No geo information in dataset")
        return
    if dataset.geometry is None:
        yield _error("incomplete_geo", "Dataset has some geo fields but no geometry")
    elif not dataset.geometry.is_valid:
        yield _error(
            "invalid_geometry",
            f"Geometry is not a valid shape: {explain_validity(dataset.geometry)!r}",
        )
    # TODO: maybe we'll allow no grids: backwards compat with old metadata.
    if not dataset.grids:
        yield _error("incomplete_grids", "Dataset has some geo fields but no grids")
    if not dataset.crs:
        yield _error("incomplete_crs", "Dataset has some geo fields but no crs")
    else:
        # We only officially support epsg code (recommended) or wkt.
        if dataset.crs.lower().startswith("epsg:"):
            try:
                CRS.from_string(dataset.crs)
            except CRSError as e:
                yield _error("invalid_crs_epsg", e.args[0])
            if dataset.crs.lower() != dataset.crs:
                yield _warning("mixed_crs_case", "Recommend lowercase 'epsg:' prefix")
        else:
            wkt_crs = None
            try:
                wkt_crs = CRS.from_wkt(dataset.crs)
            except CRSError as e:
                yield _error(
                    "invalid_crs",
                    f"Expect either an epsg code or a WKT string: {e.args[0]}",
                )
            # A WKT that is really an EPSG code should be written as one.
            if wkt_crs and wkt_crs.is_epsg_code:
                yield _warning(
                    "non_epsg",
                    f"Prefer an EPSG code to a WKT when possible. (Can change CRS to 'epsg:{wkt_crs.to_epsg()}')",
                )
def _has_some_geo(dataset):
return dataset.geometry is not None or dataset.grids or dataset.crs
@click.command(
    help=__doc__
    + """
Paths can be both product and dataset
documents, but each product must come before
its datasets to be matched against it.
"""
)
@click.argument("paths", nargs=-1, type=PathPath(exists=True, readable=True))
@click.option(
    "--warnings-as-errors",
    "-W",
    "strict_warnings",
    is_flag=True,
    help="Fail if any warnings are produced",
)
@click.option(
    "--thorough",
    is_flag=True,
    help="Attempt to read the data/measurements, and check their properties match",
)
@click.option(
    "-q",
    "--quiet",
    is_flag=True,
    default=False,
    help="Only print problems, one per line",
)
def run(paths: List[Path], strict_warnings, quiet, thorough: bool):
    """CLI entry point: validate the given paths and print a coloured report.

    The process exit code is the number of invalid documents (0 == success).
    NOTE(review): exit codes above 255 wrap on most platforms; any non-zero
    count still signals failure, so this is acceptable -- confirm intended.
    """
    validation_counts: Counter[Level] = collections.Counter()
    invalid_paths = 0
    # Terminal styling kwargs per message severity.
    s = {
        Level.info: dict(),
        Level.warning: dict(fg="yellow"),
        Level.error: dict(fg="red"),
    }
    for path, messages in validate_paths(paths, thorough=thorough):
        levels = collections.Counter(m.level for m in messages)
        is_invalid = levels[Level.error] > 0
        if strict_warnings:
            is_invalid |= levels[Level.warning] > 0
        if quiet:
            # Errors/Warnings only. Remove info-level.
            messages = [m for m in messages if m.level != Level.info]
        if messages or not quiet:
            secho(f"{bool_style(not is_invalid)} {path.stem}")
        if not messages:
            continue
        if is_invalid:
            invalid_paths += 1
        for message in messages:
            validation_counts[message.level] += 1
            displayable_code = style(f"{message.code}", **s[message.level], bold=True)
            echo(
                f"\t{message.level.name[0].upper()} {displayable_code} {message.reason}"
            )
            if message.hint:
                echo(f' ({style("Hint", fg="green")}: {message.hint})')
    # Summary line (stderr), suppressed in quiet mode.
    if not quiet:
        result = (
            style("failure", fg="red", bold=True)
            if invalid_paths > 0
            else style("valid", fg="green", bold=True)
        )
        secho(f"\n{result}: ", nl=False, err=True)
        if validation_counts:
            echo(
                ", ".join(
                    f"{v} {k.name}{'s' if v > 1 else ''}"
                    for k, v in validation_counts.items()
                ),
                err=True,
            )
        else:
            secho(f"{len(paths)} paths", err=True)
    sys.exit(invalid_paths)
| 34.770758 | 115 | 0.568966 |
acea9334ebf603549b3ffac832dc8ad770a98146 | 332 | py | Python | sleekxmpp/version.py | E-Tahta/sleekxmpp | ed067c9412835c5fe44bf203936262bcec09ced4 | [
"BSD-3-Clause"
] | 499 | 2015-01-04T21:45:16.000Z | 2022-02-14T13:04:08.000Z | sleekxmpp/version.py | E-Tahta/sleekxmpp | ed067c9412835c5fe44bf203936262bcec09ced4 | [
"BSD-3-Clause"
] | 159 | 2015-01-02T19:09:47.000Z | 2020-02-12T08:29:54.000Z | sleekxmpp/version.py | E-Tahta/sleekxmpp | ed067c9412835c5fe44bf203936262bcec09ced4 | [
"BSD-3-Clause"
] | 209 | 2015-01-07T16:23:16.000Z | 2022-01-26T13:02:20.000Z | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
# We don't want to have to import the entire library
# just to get the version info for setup.py
__version__ = '1.4.0'
# (major, minor, micro, release-level, serial) -- must mirror __version__.
__version_info__ = (1, 4, 0, '', 0)
| 23.714286 | 52 | 0.680723 |
acea940281ea1c5538d03ece94a15277e41b3235 | 18,036 | py | Python | sympy/matrices/tests/test_sparse.py | gxyd/sympy | 34ad4bd03e81f44ce4ed630efcc8c88b9e0b108f | [
"BSD-3-Clause"
] | 1 | 2015-07-04T12:40:41.000Z | 2015-07-04T12:40:41.000Z | sympy/matrices/tests/test_sparse.py | srihegde/sympy | 5d1d0a1602a3ed1ceb6405460e02c413bc2a83f7 | [
"BSD-3-Clause"
] | 1 | 2015-11-01T17:20:32.000Z | 2015-11-01T17:20:32.000Z | sympy/matrices/tests/test_sparse.py | srihegde/sympy | 5d1d0a1602a3ed1ceb6405460e02c413bc2a83f7 | [
"BSD-3-Clause"
] | 1 | 2018-10-22T09:17:11.000Z | 2018-10-22T09:17:11.000Z | from sympy import Abs, S, Symbol, I, Rational, PurePoly
from sympy.matrices import Matrix, SparseMatrix, eye, zeros, ShapeError
from sympy.utilities.pytest import raises
def test_sparse_matrix():
def sparse_eye(n):
return SparseMatrix.eye(n)
def sparse_zeros(n):
return SparseMatrix.zeros(n)
# creation args
raises(TypeError, lambda: SparseMatrix(1, 2))
a = SparseMatrix((
(1, 0),
(0, 1)
))
assert SparseMatrix(a) == a
from sympy.matrices import MutableSparseMatrix, MutableDenseMatrix
a = MutableSparseMatrix([])
b = MutableDenseMatrix([1, 2])
assert a.row_join(b) == b
assert a.col_join(b) == b
assert type(a.row_join(b)) == type(a)
assert type(a.col_join(b)) == type(a)
# test element assignment
a = SparseMatrix((
(1, 0),
(0, 1)
))
a[3] = 4
assert a[1, 1] == 4
a[3] = 1
a[0, 0] = 2
assert a == SparseMatrix((
(2, 0),
(0, 1)
))
a[1, 0] = 5
assert a == SparseMatrix((
(2, 0),
(5, 1)
))
a[1, 1] = 0
assert a == SparseMatrix((
(2, 0),
(5, 0)
))
assert a._smat == {(0, 0): 2, (1, 0): 5}
# test_multiplication
a = SparseMatrix((
(1, 2),
(3, 1),
(0, 6),
))
b = SparseMatrix((
(1, 2),
(3, 0),
))
c = a*b
assert c[0, 0] == 7
assert c[0, 1] == 2
assert c[1, 0] == 6
assert c[1, 1] == 6
assert c[2, 0] == 18
assert c[2, 1] == 0
try:
eval('c = a @ b')
except SyntaxError:
pass
else:
assert c[0, 0] == 7
assert c[0, 1] == 2
assert c[1, 0] == 6
assert c[1, 1] == 6
assert c[2, 0] == 18
assert c[2, 1] == 0
x = Symbol("x")
c = b * Symbol("x")
assert isinstance(c, SparseMatrix)
assert c[0, 0] == x
assert c[0, 1] == 2*x
assert c[1, 0] == 3*x
assert c[1, 1] == 0
c = 5 * b
assert isinstance(c, SparseMatrix)
assert c[0, 0] == 5
assert c[0, 1] == 2*5
assert c[1, 0] == 3*5
assert c[1, 1] == 0
#test_power
A = SparseMatrix([[2, 3], [4, 5]])
assert (A**5)[:] == [6140, 8097, 10796, 14237]
A = SparseMatrix([[2, 1, 3], [4, 2, 4], [6, 12, 1]])
assert (A**3)[:] == [290, 262, 251, 448, 440, 368, 702, 954, 433]
# test_creation
x = Symbol("x")
a = SparseMatrix([[x, 0], [0, 0]])
m = a
assert m.cols == m.rows
assert m.cols == 2
assert m[:] == [x, 0, 0, 0]
b = SparseMatrix(2, 2, [x, 0, 0, 0])
m = b
assert m.cols == m.rows
assert m.cols == 2
assert m[:] == [x, 0, 0, 0]
assert a == b
S = sparse_eye(3)
S.row_del(1)
assert S == SparseMatrix([
[1, 0, 0],
[0, 0, 1]])
S = sparse_eye(3)
S.col_del(1)
assert S == SparseMatrix([
[1, 0],
[0, 0],
[0, 1]])
S = SparseMatrix.eye(3)
S[2, 1] = 2
S.col_swap(1, 0)
assert S == SparseMatrix([
[0, 1, 0],
[1, 0, 0],
[2, 0, 1]])
a = SparseMatrix(1, 2, [1, 2])
b = a.copy()
c = a.copy()
assert a[0] == 1
a.row_del(0)
assert a == SparseMatrix(0, 2, [])
b.col_del(1)
assert b == SparseMatrix(1, 1, [1])
# test_determinant
x, y = Symbol('x'), Symbol('y')
assert SparseMatrix(1, 1, [0]).det() == 0
assert SparseMatrix([[1]]).det() == 1
assert SparseMatrix(((-3, 2), (8, -5))).det() == -1
assert SparseMatrix(((x, 1), (y, 2*y))).det() == 2*x*y - y
assert SparseMatrix(( (1, 1, 1),
(1, 2, 3),
(1, 3, 6) )).det() == 1
assert SparseMatrix(( ( 3, -2, 0, 5),
(-2, 1, -2, 2),
( 0, -2, 5, 0),
( 5, 0, 3, 4) )).det() == -289
assert SparseMatrix(( ( 1, 2, 3, 4),
( 5, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16) )).det() == 0
assert SparseMatrix(( (3, 2, 0, 0, 0),
(0, 3, 2, 0, 0),
(0, 0, 3, 2, 0),
(0, 0, 0, 3, 2),
(2, 0, 0, 0, 3) )).det() == 275
assert SparseMatrix(( (1, 0, 1, 2, 12),
(2, 0, 1, 1, 4),
(2, 1, 1, -1, 3),
(3, 2, -1, 1, 8),
(1, 1, 1, 0, 6) )).det() == -55
assert SparseMatrix(( (-5, 2, 3, 4, 5),
( 1, -4, 3, 4, 5),
( 1, 2, -3, 4, 5),
( 1, 2, 3, -2, 5),
( 1, 2, 3, 4, -1) )).det() == 11664
assert SparseMatrix(( ( 2, 7, -1, 3, 2),
( 0, 0, 1, 0, 1),
(-2, 0, 7, 0, 2),
(-3, -2, 4, 5, 3),
( 1, 0, 0, 0, 1) )).det() == 123
# test_slicing
m0 = sparse_eye(4)
assert m0[:3, :3] == sparse_eye(3)
assert m0[2:4, 0:2] == sparse_zeros(2)
m1 = SparseMatrix(3, 3, lambda i, j: i + j)
assert m1[0, :] == SparseMatrix(1, 3, (0, 1, 2))
assert m1[1:3, 1] == SparseMatrix(2, 1, (2, 3))
m2 = SparseMatrix(
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
assert m2[:, -1] == SparseMatrix(4, 1, [3, 7, 11, 15])
assert m2[-2:, :] == SparseMatrix([[8, 9, 10, 11], [12, 13, 14, 15]])
assert SparseMatrix([[1, 2], [3, 4]])[[1], [1]] == Matrix([[4]])
# test_submatrix_assignment
m = sparse_zeros(4)
m[2:4, 2:4] = sparse_eye(2)
assert m == SparseMatrix([(0, 0, 0, 0),
(0, 0, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1)])
assert len(m._smat) == 2
m[:2, :2] = sparse_eye(2)
assert m == sparse_eye(4)
m[:, 0] = SparseMatrix(4, 1, (1, 2, 3, 4))
assert m == SparseMatrix([(1, 0, 0, 0),
(2, 1, 0, 0),
(3, 0, 1, 0),
(4, 0, 0, 1)])
m[:, :] = sparse_zeros(4)
assert m == sparse_zeros(4)
m[:, :] = ((1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12), (13, 14, 15, 16))
assert m == SparseMatrix((( 1, 2, 3, 4),
( 5, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16)))
m[:2, 0] = [0, 0]
assert m == SparseMatrix((( 0, 2, 3, 4),
( 0, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16)))
# test_reshape
m0 = sparse_eye(3)
assert m0.reshape(1, 9) == SparseMatrix(1, 9, (1, 0, 0, 0, 1, 0, 0, 0, 1))
m1 = SparseMatrix(3, 4, lambda i, j: i + j)
assert m1.reshape(4, 3) == \
SparseMatrix([(0, 1, 2), (3, 1, 2), (3, 4, 2), (3, 4, 5)])
assert m1.reshape(2, 6) == \
SparseMatrix([(0, 1, 2, 3, 1, 2), (3, 4, 2, 3, 4, 5)])
# test_applyfunc
m0 = sparse_eye(3)
assert m0.applyfunc(lambda x: 2*x) == sparse_eye(3)*2
assert m0.applyfunc(lambda x: 0 ) == sparse_zeros(3)
# test__eval_Abs
assert abs(SparseMatrix(((x, 1), (y, 2*y)))) == SparseMatrix(((Abs(x), 1), (Abs(y), 2*Abs(y))))
# test_LUdecomp
testmat = SparseMatrix([[ 0, 2, 5, 3],
[ 3, 3, 7, 4],
[ 8, 4, 0, 2],
[-2, 6, 3, 4]])
L, U, p = testmat.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permute_rows(p, 'backward') - testmat == sparse_zeros(4)
testmat = SparseMatrix([[ 6, -2, 7, 4],
[ 0, 3, 6, 7],
[ 1, -2, 7, 4],
[-9, 2, 6, 3]])
L, U, p = testmat.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permute_rows(p, 'backward') - testmat == sparse_zeros(4)
x, y, z = Symbol('x'), Symbol('y'), Symbol('z')
M = Matrix(((1, x, 1), (2, y, 0), (y, 0, z)))
L, U, p = M.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permute_rows(p, 'backward') - M == sparse_zeros(3)
# test_LUsolve
A = SparseMatrix([[2, 3, 5],
[3, 6, 2],
[8, 3, 6]])
x = SparseMatrix(3, 1, [3, 7, 5])
b = A*x
soln = A.LUsolve(b)
assert soln == x
A = SparseMatrix([[0, -1, 2],
[5, 10, 7],
[8, 3, 4]])
x = SparseMatrix(3, 1, [-1, 2, 5])
b = A*x
soln = A.LUsolve(b)
assert soln == x
# test_inverse
A = sparse_eye(4)
assert A.inv() == sparse_eye(4)
assert A.inv(method="CH") == sparse_eye(4)
assert A.inv(method="LDL") == sparse_eye(4)
A = SparseMatrix([[2, 3, 5],
[3, 6, 2],
[7, 2, 6]])
Ainv = SparseMatrix(Matrix(A).inv())
assert A*Ainv == sparse_eye(3)
assert A.inv(method="CH") == Ainv
assert A.inv(method="LDL") == Ainv
A = SparseMatrix([[2, 3, 5],
[3, 6, 2],
[5, 2, 6]])
Ainv = SparseMatrix(Matrix(A).inv())
assert A*Ainv == sparse_eye(3)
assert A.inv(method="CH") == Ainv
assert A.inv(method="LDL") == Ainv
# test_cross
v1 = Matrix(1, 3, [1, 2, 3])
v2 = Matrix(1, 3, [3, 4, 5])
assert v1.cross(v2) == Matrix(1, 3, [-2, 4, -2])
assert v1.norm(2)**2 == 14
# conjugate
a = SparseMatrix(((1, 2 + I), (3, 4)))
assert a.C == SparseMatrix([
[1, 2 - I],
[3, 4]
])
# mul
assert a*Matrix(2, 2, [1, 0, 0, 1]) == a
assert a + Matrix(2, 2, [1, 1, 1, 1]) == SparseMatrix([
[2, 3 + I],
[4, 5]
])
# col join
assert a.col_join(sparse_eye(2)) == SparseMatrix([
[1, 2 + I],
[3, 4],
[1, 0],
[0, 1]
])
# symmetric
assert not a.is_symmetric(simplify=False)
# test_cofactor
assert sparse_eye(3) == sparse_eye(3).cofactor_matrix()
test = SparseMatrix([[1, 3, 2], [2, 6, 3], [2, 3, 6]])
assert test.cofactor_matrix() == \
SparseMatrix([[27, -6, -6], [-12, 2, 3], [-3, 1, 0]])
test = SparseMatrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert test.cofactor_matrix() == \
SparseMatrix([[-3, 6, -3], [6, -12, 6], [-3, 6, -3]])
# test_jacobian
x = Symbol('x')
y = Symbol('y')
L = SparseMatrix(1, 2, [x**2*y, 2*y**2 + x*y])
syms = [x, y]
assert L.jacobian(syms) == Matrix([[2*x*y, x**2], [y, 4*y + x]])
L = SparseMatrix(1, 2, [x, x**2*y**3])
assert L.jacobian(syms) == SparseMatrix([[1, 0], [2*x*y**3, x**2*3*y**2]])
# test_QR
A = Matrix([[1, 2], [2, 3]])
Q, S = A.QRdecomposition()
R = Rational
assert Q == Matrix([
[ 5**R(-1, 2), (R(2)/5)*(R(1)/5)**R(-1, 2)],
[2*5**R(-1, 2), (-R(1)/5)*(R(1)/5)**R(-1, 2)]])
assert S == Matrix([
[5**R(1, 2), 8*5**R(-1, 2)],
[ 0, (R(1)/5)**R(1, 2)]])
assert Q*S == A
assert Q.T * Q == sparse_eye(2)
R = Rational
# test nullspace
# first test reduced row-ech form
M = SparseMatrix([[5, 7, 2, 1],
[1, 6, 2, -1]])
out, tmp = M.rref()
assert out == Matrix([[1, 0, -R(2)/23, R(13)/23],
[0, 1, R(8)/23, R(-6)/23]])
M = SparseMatrix([[ 1, 3, 0, 2, 6, 3, 1],
[-2, -6, 0, -2, -8, 3, 1],
[ 3, 9, 0, 0, 6, 6, 2],
[-1, -3, 0, 1, 0, 9, 3]])
out, tmp = M.rref()
assert out == Matrix([[1, 3, 0, 0, 2, 0, 0],
[0, 0, 0, 1, 2, 0, 0],
[0, 0, 0, 0, 0, 1, R(1)/3],
[0, 0, 0, 0, 0, 0, 0]])
# now check the vectors
basis = M.nullspace()
assert basis[0] == Matrix([-3, 1, 0, 0, 0, 0, 0])
assert basis[1] == Matrix([0, 0, 1, 0, 0, 0, 0])
assert basis[2] == Matrix([-2, 0, 0, -2, 1, 0, 0])
assert basis[3] == Matrix([0, 0, 0, 0, 0, R(-1)/3, 1])
# test eigen
x = Symbol('x')
y = Symbol('y')
sparse_eye3 = sparse_eye(3)
assert sparse_eye3.charpoly(x) == PurePoly(((x - 1)**3))
assert sparse_eye3.charpoly(y) == PurePoly(((y - 1)**3))
# test values
M = Matrix([( 0, 1, -1),
( 1, 1, 0),
(-1, 0, 1)])
vals = M.eigenvals()
assert sorted(vals.keys()) == [-1, 1, 2]
R = Rational
M = Matrix([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
assert M.eigenvects() == [(1, 3, [
Matrix([1, 0, 0]),
Matrix([0, 1, 0]),
Matrix([0, 0, 1])])]
M = Matrix([[5, 0, 2],
[3, 2, 0],
[0, 0, 1]])
assert M.eigenvects() == [(1, 1, [Matrix([R(-1)/2, R(3)/2, 1])]),
(2, 1, [Matrix([0, 1, 0])]),
(5, 1, [Matrix([1, 1, 0])])]
assert M.zeros(3, 5) == SparseMatrix(3, 5, {})
A = SparseMatrix(10, 10, {(0, 0): 18, (0, 9): 12, (1, 4): 18, (2, 7): 16, (3, 9): 12, (4, 2): 19, (5, 7): 16, (6, 2): 12, (9, 7): 18})
assert A.row_list() == [(0, 0, 18), (0, 9, 12), (1, 4, 18), (2, 7, 16), (3, 9, 12), (4, 2, 19), (5, 7, 16), (6, 2, 12), (9, 7, 18)]
assert A.col_list() == [(0, 0, 18), (4, 2, 19), (6, 2, 12), (1, 4, 18), (2, 7, 16), (5, 7, 16), (9, 7, 18), (0, 9, 12), (3, 9, 12)]
assert SparseMatrix.eye(2).nnz() == 2
def test_transpose():
    """Transposing a SparseMatrix swaps rows and columns."""
    assert SparseMatrix(((1, 2), (3, 4))).transpose() == \
        SparseMatrix(((1, 3), (2, 4)))
def test_trace():
    """trace() sums the main-diagonal entries, including stored zeros."""
    assert SparseMatrix(((1, 2), (3, 4))).trace() == 5
    assert SparseMatrix(((0, 0), (0, 4))).trace() == 4
def test_CL_RL():
    """row_list()/col_list() return (row, col, value) triples in row- and column-major order."""
    assert SparseMatrix(((1, 2), (3, 4))).row_list() == \
        [(0, 0, 1), (0, 1, 2), (1, 0, 3), (1, 1, 4)]
    assert SparseMatrix(((1, 2), (3, 4))).col_list() == \
        [(0, 0, 1), (1, 0, 3), (0, 1, 2), (1, 1, 4)]
def test_add():
    """Sparse addition is elementwise, and cancellation can shrink storage."""
    assert SparseMatrix(((1, 0), (0, 1))) + SparseMatrix(((0, 1), (1, 0))) == \
        SparseMatrix(((1, 1), (1, 1)))
    a = SparseMatrix(100, 100, lambda i, j: int(j != 0 and i % j == 0))
    b = SparseMatrix(100, 100, lambda i, j: int(i != 0 and j % i == 0))
    # The sum stores fewer entries than the operands combined (overlap merges).
    assert (len(a._smat) + len(b._smat) - len((a + b)._smat) > 0)
def test_errors():
    """Invalid construction, indexing and shape mismatches raise the right exceptions."""
    raises(ValueError, lambda: SparseMatrix(1.4, 2, lambda i, j: 0))
    raises(TypeError, lambda: SparseMatrix([1, 2, 3], [1, 2]))
    raises(ValueError, lambda: SparseMatrix([[1, 2], [3, 4]])[(1, 2, 3)])
    raises(IndexError, lambda: SparseMatrix([[1, 2], [3, 4]])[5])
    raises(ValueError, lambda: SparseMatrix([[1, 2], [3, 4]])[1, 2, 3])
    raises(TypeError,
        lambda: SparseMatrix([[1, 2], [3, 4]]).copyin_list([0, 1], set([])))
    raises(
        IndexError, lambda: SparseMatrix([[1, 2], [3, 4]])[1, 2])
    raises(TypeError, lambda: SparseMatrix([1, 2, 3]).cross(1))
    raises(IndexError, lambda: SparseMatrix(1, 2, [1, 2])[3])
    raises(ShapeError,
        lambda: SparseMatrix(1, 2, [1, 2]) + SparseMatrix(2, 1, [2, 1]))
def test_len():
    """An empty SparseMatrix is falsy, and all empty constructions compare equal."""
    assert not SparseMatrix()
    assert SparseMatrix() == SparseMatrix([])
    assert SparseMatrix() == SparseMatrix([[]])
def test_sparse_zeros_sparse_eye():
    """eye()/zeros() match the generic constructors and store only nonzeros."""
    assert SparseMatrix.eye(3) == eye(3, cls=SparseMatrix)
    assert len(SparseMatrix.eye(3)._smat) == 3
    assert SparseMatrix.zeros(3) == zeros(3, cls=SparseMatrix)
    assert len(SparseMatrix.zeros(3)._smat) == 0
def test_copyin():
    """Element and slice assignment accepts scalars, lists and matrices."""
    s = SparseMatrix(3, 3, {})
    s[1, 0] = 1
    assert s[:, 0] == SparseMatrix(Matrix([0, 1, 0]))
    assert s[3] == 1
    assert s[3: 4] == [1]
    s[1, 1] = 42
    assert s[1, 1] == 42
    assert s[1, 1:] == SparseMatrix([[42, 0]])
    s[1, 1:] = Matrix([[5, 6]])
    assert s[1, :] == SparseMatrix([[1, 5, 6]])
    s[1, 1:] = [[42, 43]]
    assert s[1, :] == SparseMatrix([[1, 42, 43]])
    s[0, 0] = 17
    assert s[:, :1] == SparseMatrix([17, 1, 0])
    # Assigning a column-sized value to a single cell fills the whole column.
    s[0, 0] = [1, 1, 1]
    assert s[:, 0] == SparseMatrix([1, 1, 1])
    s[0, 0] = Matrix([1, 1, 1])
    assert s[:, 0] == SparseMatrix([1, 1, 1])
    s[0, 0] = SparseMatrix([1, 1, 1])
    assert s[:, 0] == SparseMatrix([1, 1, 1])
def test_sparse_solve():
    """Cholesky/LDL decompositions, inverses and (least-squares) solving."""
    from sympy.matrices import SparseMatrix
    A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
    assert A.cholesky() == Matrix([
        [ 5, 0, 0],
        [ 3, 3, 0],
        [-1, 1, 3]])
    assert A.cholesky() * A.cholesky().T == Matrix([
        [25, 15, -5],
        [15, 18, 0],
        [-5, 0, 11]])
    A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
    L, D = A.LDLdecomposition()
    assert 15*L == Matrix([
        [15, 0, 0],
        [ 9, 15, 0],
        [-3, 5, 15]])
    assert D == Matrix([
        [25, 0, 0],
        [ 0, 9, 0],
        [ 0, 0, 9]])
    assert L * D * L.T == A
    A = SparseMatrix(((3, 0, 2), (0, 0, 1), (1, 2, 0)))
    assert A.inv() * A == SparseMatrix(eye(3))
    A = SparseMatrix([
        [ 2, -1, 0],
        [-1, 2, -1],
        [ 0, 0, 2]])
    ans = SparseMatrix([
        [S(2)/3, S(1)/3, S(1)/6],
        [S(1)/3, S(2)/3, S(1)/3],
        [ 0, 0, S(1)/2]])
    assert A.inv(method='CH') == ans
    assert A.inv(method='LDL') == ans
    assert A * ans == SparseMatrix(eye(3))
    s = A.solve(A[:, 0], 'LDL')
    assert A*s == A[:, 0]
    s = A.solve(A[:, 0], 'CH')
    assert A*s == A[:, 0]
    # Overdetermined system: solve in the least-squares sense.
    A = A.col_join(A)
    s = A.solve_least_squares(A[:, 0], 'CH')
    assert A*s == A[:, 0]
    s = A.solve_least_squares(A[:, 0], 'LDL')
    assert A*s == A[:, 0]
def test_hermitian():
    """is_hermitian reflects edits, and is None when symbols make it undecidable."""
    x = Symbol('x')
    a = SparseMatrix([[0, I], [-I, 0]])
    assert a.is_hermitian
    a = SparseMatrix([[1, I], [-I, 1]])
    assert a.is_hermitian
    # A non-real diagonal entry breaks hermiticity.
    a[0, 0] = 2*I
    assert a.is_hermitian is False
    # With a free symbol on the diagonal the answer is unknown (None).
    a[0, 0] = x
    assert a.is_hermitian is None
    a[0, 1] = a[1, 0]*I
    assert a.is_hermitian is False
| 30.569492 | 139 | 0.428587 |
acea9433e22203d56f4ceb6cd92b681e35876a09 | 15,732 | py | Python | tensorflow/python/debug/wrappers/dumping_wrapper_test.py | harunpehlivan/tensorflow | 376e2cfdab31f4da251ea2e50992a9bf97fd171b | [
"Apache-2.0"
] | 22 | 2018-01-13T14:52:47.000Z | 2018-07-05T01:00:28.000Z | tensorflow/python/debug/wrappers/dumping_wrapper_test.py | hamzabekkouri/tensorflow | d87a9fbbc5f49ec5ae8eb52c62628f0b1a0bf67f | [
"Apache-2.0"
] | 3 | 2018-05-09T11:31:58.000Z | 2021-01-27T12:26:21.000Z | tensorflow/python/debug/wrappers/dumping_wrapper_test.py | hamzabekkouri/tensorflow | d87a9fbbc5f49ec5ae8eb52c62628f0b1a0bf67f | [
"Apache-2.0"
] | 13 | 2018-02-22T21:04:13.000Z | 2020-11-17T11:38:36.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit Tests for classes in dumping_wrapper.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
import tempfile
import threading
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import stepper
from tensorflow.python.debug.wrappers import dumping_wrapper
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.debug.wrappers import hooks
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
class DumpingDebugWrapperSessionTest(test_util.TensorFlowTestCase):
  def setUp(self):
    """Build a tiny graph (variable v plus inc/dec ops) and a fresh dump root."""
    self.session_root = tempfile.mkdtemp()
    self.v = variables.Variable(10.0, dtype=dtypes.float32, name="v")
    self.delta = constant_op.constant(1.0, dtype=dtypes.float32, name="delta")
    self.eta = constant_op.constant(-1.4, dtype=dtypes.float32, name="eta")
    self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")
    self.dec_v = state_ops.assign_add(self.v, self.eta, name="dec_v")
    self.ph = array_ops.placeholder(dtypes.float32, shape=(), name="ph")
    self.inc_w_ph = state_ops.assign_add(self.v, self.ph, name="inc_w_ph")
    self.sess = session.Session()
    self.sess.run(self.v.initializer)
  def tearDown(self):
    """Reset the default graph and remove the temporary dump directory."""
    ops.reset_default_graph()
    if os.path.isdir(self.session_root):
      shutil.rmtree(self.session_root)
  def _assert_correct_run_subdir_naming(self, run_subdir):
    """Assert a dump subdirectory name has the form run_<timestamp>_<suffix>."""
    self.assertStartsWith(run_subdir, "run_")
    self.assertEqual(2, run_subdir.count("_"))
    # The middle component is a positive (microsecond timestamp) integer.
    self.assertGreater(int(run_subdir.split("_")[1]), 0)
  def testConstructWrapperWithExistingNonEmptyRootDirRaisesException(self):
    """A non-empty session_root is rejected at construction time."""
    dir_path = os.path.join(self.session_root, "foo")
    os.mkdir(dir_path)
    self.assertTrue(os.path.isdir(dir_path))
    with self.assertRaisesRegexp(
        ValueError, "session_root path points to a non-empty directory"):
      dumping_wrapper.DumpingDebugWrapperSession(
          session.Session(), session_root=self.session_root, log_usage=False)
  def testConstructWrapperWithExistingFileDumpRootRaisesException(self):
    """A session_root pointing at a regular file is rejected."""
    file_path = os.path.join(self.session_root, "foo")
    open(file_path, "a").close()  # Create the file
    self.assertTrue(gfile.Exists(file_path))
    self.assertFalse(gfile.IsDirectory(file_path))
    with self.assertRaisesRegexp(ValueError,
                                 "session_root path points to a file"):
      dumping_wrapper.DumpingDebugWrapperSession(
          session.Session(), session_root=file_path, log_usage=False)
  def testConstructWrapperWithNonexistentSessionRootCreatesDirectory(self):
    """A missing session_root directory is created by the wrapper."""
    new_dir_path = os.path.join(tempfile.mkdtemp(), "new_dir")
    dumping_wrapper.DumpingDebugWrapperSession(
        session.Session(), session_root=new_dir_path, log_usage=False)
    self.assertTrue(gfile.IsDirectory(new_dir_path))
    # Cleanup.
    gfile.DeleteRecursively(new_dir_path)
  def testDumpingOnASingleRunWorks(self):
    """One run() produces one run_* dump dir containing v's pre-update value."""
    sess = dumping_wrapper.DumpingDebugWrapperSession(
        self.sess, session_root=self.session_root, log_usage=False)
    sess.run(self.inc_v)
    dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
    self.assertEqual(1, len(dump_dirs))
    self._assert_correct_run_subdir_naming(os.path.basename(dump_dirs[0]))
    dump = debug_data.DebugDumpDir(dump_dirs[0])
    self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
    self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
    self.assertEqual(repr(None), dump.run_feed_keys_info)
  def testDumpingOnASingleRunWorksWithRelativePathForDebugDumpDir(self):
    """DebugDumpDir can be loaded via a path relative to the current directory."""
    sess = dumping_wrapper.DumpingDebugWrapperSession(
        self.sess, session_root=self.session_root, log_usage=False)
    sess.run(self.inc_v)
    dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
    cwd = os.getcwd()
    try:
      # chdir so the dump dir can be addressed relatively; always restore cwd.
      os.chdir(self.session_root)
      dump = debug_data.DebugDumpDir(
          os.path.relpath(dump_dirs[0], self.session_root))
      self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
    finally:
      os.chdir(cwd)
  def testDumpingOnASingleRunWithFeedDictWorks(self):
    """The dump records the fed keys when run() is given a feed_dict."""
    sess = dumping_wrapper.DumpingDebugWrapperSession(
        self.sess, session_root=self.session_root, log_usage=False)
    feed_dict = {self.ph: 3.2}
    sess.run(self.inc_w_ph, feed_dict=feed_dict)
    dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
    self.assertEqual(1, len(dump_dirs))
    self._assert_correct_run_subdir_naming(os.path.basename(dump_dirs[0]))
    dump = debug_data.DebugDumpDir(dump_dirs[0])
    self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
    self.assertEqual(repr(self.inc_w_ph), dump.run_fetches_info)
    self.assertEqual(repr(feed_dict.keys()), dump.run_feed_keys_info)
  def testDumpingOnMultipleRunsWorks(self):
    """Each of three runs gets its own run_* dir with the incremented v value."""
    sess = dumping_wrapper.DumpingDebugWrapperSession(
        self.sess, session_root=self.session_root, log_usage=False)
    for _ in range(3):
      sess.run(self.inc_v)
    dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
    # Sort by the numeric (timestamp) component so dirs line up with run order.
    dump_dirs = sorted(
        dump_dirs, key=lambda x: int(os.path.basename(x).split("_")[1]))
    self.assertEqual(3, len(dump_dirs))
    for i, dump_dir in enumerate(dump_dirs):
      self._assert_correct_run_subdir_naming(os.path.basename(dump_dir))
      dump = debug_data.DebugDumpDir(dump_dir)
      self.assertAllClose([10.0 + 1.0 * i],
                          dump.get_tensors("v", 0, "DebugIdentity"))
      self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
      self.assertEqual(repr(None), dump.run_feed_keys_info)
  def testUsingNonCallableAsWatchFnRaisesTypeError(self):
    """A watch_fn that is not callable is rejected with TypeError."""
    bad_watch_fn = "bad_watch_fn"
    with self.assertRaisesRegexp(TypeError, "watch_fn is not callable"):
      dumping_wrapper.DumpingDebugWrapperSession(
          self.sess,
          session_root=self.session_root,
          watch_fn=bad_watch_fn,
          log_usage=False)
  def testDumpingWithLegacyWatchFnOnFetchesWorks(self):
    """Use a watch_fn that returns different whitelists for different runs."""
    def watch_fn(fetches, feeds):
      del feeds
      # A watch_fn that picks fetch name.
      if fetches.name == "inc_v:0":
        # If inc_v, watch everything.
        return "DebugIdentity", r".*", r".*"
      else:
        # If dec_v, watch nothing.
        return "DebugIdentity", r"$^", r"$^"
    sess = dumping_wrapper.DumpingDebugWrapperSession(
        self.sess,
        session_root=self.session_root,
        watch_fn=watch_fn,
        log_usage=False)
    for _ in range(3):
      sess.run(self.inc_v)
      sess.run(self.dec_v)
    dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
    dump_dirs = sorted(
        dump_dirs, key=lambda x: int(os.path.basename(x).split("_")[1]))
    self.assertEqual(6, len(dump_dirs))
    for i, dump_dir in enumerate(dump_dirs):
      self._assert_correct_run_subdir_naming(os.path.basename(dump_dir))
      dump = debug_data.DebugDumpDir(dump_dir)
      if i % 2 == 0:
        # Even-indexed dirs come from inc_v runs: everything was watched.
        self.assertGreater(dump.size, 0)
        self.assertAllClose([10.0 - 0.4 * (i / 2)],
                            dump.get_tensors("v", 0, "DebugIdentity"))
        self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
        self.assertEqual(repr(None), dump.run_feed_keys_info)
      else:
        # Odd-indexed dirs come from dec_v runs: nothing was watched.
        self.assertEqual(0, dump.size)
        self.assertEqual(repr(self.dec_v), dump.run_fetches_info)
        self.assertEqual(repr(None), dump.run_feed_keys_info)
  def testDumpingWithLegacyWatchFnWithNonDefaultDebugOpsWorks(self):
    """Use a watch_fn that specifies non-default debug ops."""
    def watch_fn(fetches, feeds):
      del fetches, feeds
      return ["DebugIdentity", "DebugNumericSummary"], r".*", r".*"
    sess = dumping_wrapper.DumpingDebugWrapperSession(
        self.sess,
        session_root=self.session_root,
        watch_fn=watch_fn,
        log_usage=False)
    sess.run(self.inc_v)
    dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
    self.assertEqual(1, len(dump_dirs))
    dump = debug_data.DebugDumpDir(dump_dirs[0])
    self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
    # DebugNumericSummary emits a fixed-length (14-element) summary vector.
    self.assertEqual(14,
                     len(dump.get_tensors("v", 0, "DebugNumericSummary")[0]))
  def testDumpingWithWatchFnWithNonDefaultDebugOpsWorks(self):
    """Use a watch_fn that specifies non-default debug ops."""
    def watch_fn(fetches, feeds):
      del fetches, feeds
      # Watch only ref-dtype tensors of nodes whose name starts with "v".
      return framework.WatchOptions(
          debug_ops=["DebugIdentity", "DebugNumericSummary"],
          node_name_regex_whitelist=r"^v.*",
          op_type_regex_whitelist=r".*",
          tensor_dtype_regex_whitelist=".*_ref")
    sess = dumping_wrapper.DumpingDebugWrapperSession(
        self.sess,
        session_root=self.session_root,
        watch_fn=watch_fn,
        log_usage=False)
    sess.run(self.inc_v)
    dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
    self.assertEqual(1, len(dump_dirs))
    dump = debug_data.DebugDumpDir(dump_dirs[0])
    self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
    self.assertEqual(14,
                     len(dump.get_tensors("v", 0, "DebugNumericSummary")[0]))
    # Nodes outside the name whitelist must not have been dumped.
    dumped_nodes = [dump.node_name for dump in dump.dumped_tensor_data]
    self.assertNotIn("inc_v", dumped_nodes)
    self.assertNotIn("delta", dumped_nodes)
  def testDumpingDebugHookWithoutWatchFnWorks(self):
    """DumpingDebugHook with default watching dumps one run_* dir per run."""
    dumping_hook = hooks.DumpingDebugHook(self.session_root, log_usage=False)
    mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])
    mon_sess.run(self.inc_v)
    dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
    self.assertEqual(1, len(dump_dirs))
    self._assert_correct_run_subdir_naming(os.path.basename(dump_dirs[0]))
    dump = debug_data.DebugDumpDir(dump_dirs[0])
    self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
    self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
    self.assertEqual(repr(None), dump.run_feed_keys_info)
  def testDumpingDebugHookWithStatefulWatchFnWorks(self):
    """A stateful watch_fn (WatchOptions form) can alternate watching on/off per run."""
    watch_fn_state = {"run_counter": 0}
    def counting_watch_fn(fetches, feed_dict):
      del fetches, feed_dict
      watch_fn_state["run_counter"] += 1
      if watch_fn_state["run_counter"] % 2 == 1:
        # If odd-index run (1-based), watch every ref-type tensor.
        return framework.WatchOptions(
            debug_ops="DebugIdentity",
            tensor_dtype_regex_whitelist=".*_ref")
      else:
        # If even-index run, watch nothing.
        return framework.WatchOptions(
            debug_ops="DebugIdentity",
            node_name_regex_whitelist=r"^$",
            op_type_regex_whitelist=r"^$")
    dumping_hook = hooks.DumpingDebugHook(
        self.session_root, watch_fn=counting_watch_fn, log_usage=False)
    mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])
    for _ in range(4):
      mon_sess.run(self.inc_v)
    dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
    dump_dirs = sorted(
        dump_dirs, key=lambda x: int(os.path.basename(x).split("_")[1]))
    self.assertEqual(4, len(dump_dirs))
    for i, dump_dir in enumerate(dump_dirs):
      self._assert_correct_run_subdir_naming(os.path.basename(dump_dir))
      dump = debug_data.DebugDumpDir(dump_dir)
      if i % 2 == 0:
        # Watched runs: ref-dtype tensor v is dumped, non-ref delta is not.
        self.assertAllClose([10.0 + 1.0 * i],
                            dump.get_tensors("v", 0, "DebugIdentity"))
        self.assertNotIn("delta",
                         [datum.node_name for datum in dump.dumped_tensor_data])
      else:
        self.assertEqual(0, dump.size)
      self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
      self.assertEqual(repr(None), dump.run_feed_keys_info)
  def testDumpingDebugHookWithStatefulLegacyWatchFnWorks(self):
    """Legacy watch_fns return a (debug_ops, node_regex, op_regex) tuple."""
    watch_fn_state = {"run_counter": 0}
    def counting_watch_fn(fetches, feed_dict):
      del fetches, feed_dict
      watch_fn_state["run_counter"] += 1
      if watch_fn_state["run_counter"] % 2 == 1:
        # If odd-index run (1-based), watch everything.
        return "DebugIdentity", r".*", r".*"
      else:
        # If even-index run, watch nothing (r"$^" matches no string).
        return "DebugIdentity", r"$^", r"$^"
    dumping_hook = hooks.DumpingDebugHook(
        self.session_root, watch_fn=counting_watch_fn, log_usage=False)
    mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])
    for _ in range(4):
      mon_sess.run(self.inc_v)
    # Sort run_* dirs numerically by the component after "run_".
    dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
    dump_dirs = sorted(
        dump_dirs, key=lambda x: int(os.path.basename(x).split("_")[1]))
    self.assertEqual(4, len(dump_dirs))
    for i, dump_dir in enumerate(dump_dirs):
      self._assert_correct_run_subdir_naming(os.path.basename(dump_dir))
      dump = debug_data.DebugDumpDir(dump_dir)
      if i % 2 == 0:
        # Odd (1-based) runs watched everything, so "v" was dumped.
        self.assertAllClose([10.0 + 1.0 * i],
                            dump.get_tensors("v", 0, "DebugIdentity"))
      else:
        # Even (1-based) runs watched nothing, so the dump is empty.
        self.assertEqual(0, dump.size)
      self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
      self.assertEqual(repr(None), dump.run_feed_keys_info)
  def testDumpingFromMultipleThreadsObeysThreadNameFilter(self):
    """Only runs issued from threads matching thread_name_filter are dumped."""
    sess = dumping_wrapper.DumpingDebugWrapperSession(
        self.sess, session_root=self.session_root, log_usage=False,
        thread_name_filter=r"MainThread$")
    self.assertAllClose(1.0, sess.run(self.delta))
    child_thread_result = []
    def child_thread_job():
      # Runs in "ChildThread", which does not match the filter above.
      child_thread_result.append(sess.run(self.eta))
    thread = threading.Thread(name="ChildThread", target=child_thread_job)
    thread.start()
    thread.join()
    # NOTE(review): assumes the fixture defines self.eta == -1.4 — confirm
    # against the (unseen) setUp.
    self.assertAllClose([-1.4], child_thread_result)
    dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
    self.assertEqual(1, len(dump_dirs))
    dump = debug_data.DebugDumpDir(dump_dirs[0])
    # Only the MainThread run (which fetched delta) produced a dump.
    self.assertEqual(1, dump.size)
    self.assertEqual("delta", dump.dumped_tensor_data[0].node_name)
def testCallingInvokeNodeStepperOnDumpingWrapperRaisesException(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
node_stepper = stepper.NodeStepper(self.sess, self.inc_v)
with self.assertRaisesRegexp(
NotImplementedError,
r"NonInteractiveDebugWrapperSession does not support node-stepper "
r"mode\."):
sess.invoke_node_stepper(node_stepper)
if __name__ == "__main__":
  # Delegate to the TensorFlow test runner when executed as a script.
  googletest.main()
| 39.827848 | 80 | 0.70417 |
acea94cf8f248ad77577d1bea35a626d0c309b30 | 973 | py | Python | openstack_dashboard/dashboards/admin/routers/urls.py | dreamhost/horizon | 55569d540e6c1a6957d5127f9bae6a699ed60823 | [
"Apache-2.0"
] | 1 | 2015-06-23T08:23:12.000Z | 2015-06-23T08:23:12.000Z | openstack_dashboard/dashboards/admin/routers/urls.py | dreamhost/horizon | 55569d540e6c1a6957d5127f9bae6a699ed60823 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/dashboards/admin/routers/urls.py | dreamhost/horizon | 55569d540e6c1a6957d5127f9bae6a699ed60823 | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls.defaults import patterns, url
from .views import (IndexView, DetailView)
# Route table for the admin "routers" panel: the index view at the panel
# root, plus a detail view keyed by router_id (any non-slash segment).
# NOTE(review): the 'horizon.dashboards.admin.routers.views' string prefix
# appears unused here because the views are referenced directly as classes
# — confirm before relying on it.
urlpatterns = patterns('horizon.dashboards.admin.routers.views',
    url(r'^$', IndexView.as_view(), name='index'),
    url(r'^(?P<router_id>[^/]+)/$',
        DetailView.as_view(),
        name='detail'),
)
| 34.75 | 78 | 0.699897 |
acea9502aaf0f4d4a9094e5d33a9f4b5a6ed8579 | 743 | py | Python | jobs/async.py | samarpan-rai/skedulord | 21de49785b71419aa433fb6e50bf04e362a83c19 | [
"MIT"
] | null | null | null | jobs/async.py | samarpan-rai/skedulord | 21de49785b71419aa433fb6e50bf04e362a83c19 | [
"MIT"
] | null | null | null | jobs/async.py | samarpan-rai/skedulord | 21de49785b71419aa433fb6e50bf04e362a83c19 | [
"MIT"
] | null | null | null | import time
import logging
import asyncio
from skedulord.job import JobRunner
# Configure root logging once at import time; the job functions below all
# log through the root logger with a timestamp/name prefix.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(message)s',
)
# Module-level logger (currently unused; the functions below call
# logging.info on the root logger directly).
logger = logging.getLogger(__name__)
async def timed_wait(sec):
    """Coroutine: pause for ``sec`` seconds, log the pause, return ``sec``."""
    await asyncio.sleep(sec)
    logging.info(f"slept for {sec}s")
    return sec
def do_many(n):
    """Run ``n`` concurrent 0.1 s timed waits and log the collected results.

    Fix: the inner ``collector`` coroutine previously awaited
    ``asyncio.gather`` without returning its value, so ``stuff`` was always
    ``None`` and the log line never showed the scores it claims to show.
    """
    async def collector():
        # Return the gathered results so the caller can log them.
        return await asyncio.gather(*[timed_wait(0.1) for _ in range(n)])
    stuff = asyncio.run(collector())
    logging.info(f"scores received {stuff}")
def do_stuff(sec=1):
    """Block synchronously for ``sec`` seconds (default one) and log it."""
    time.sleep(sec)
    logging.info(f"slept for {sec}s")
if __name__ == "__main__":
    # Chain several jobs through the skedulord JobRunner; the final .run()
    # with no arguments triggers execution of the queued jobs.
    (JobRunner()
     .run(do_stuff, sec=0.1)
     .run(do_many, n=10)
     .run(do_stuff, sec=0.1)
     .run())
| 19.552632 | 66 | 0.650067 |
acea9504e46f609bd485db76601c54d4d78043d8 | 6,263 | py | Python | examples/05_glm_second_level/plot_second_level_one_sample_test.py | ctw/nilearn | 932eee9c69cd8fbf40ee6af5cee77f8f93b25da3 | [
"BSD-2-Clause"
] | null | null | null | examples/05_glm_second_level/plot_second_level_one_sample_test.py | ctw/nilearn | 932eee9c69cd8fbf40ee6af5cee77f8f93b25da3 | [
"BSD-2-Clause"
] | null | null | null | examples/05_glm_second_level/plot_second_level_one_sample_test.py | ctw/nilearn | 932eee9c69cd8fbf40ee6af5cee77f8f93b25da3 | [
"BSD-2-Clause"
] | null | null | null | """
Second-level fMRI model: one sample test
========================================
Full step-by-step example of fitting a :term:`GLM` to perform a second-level analysis
(one-sample test) and visualizing the results.
More specifically:
1. A sequence of subject :term:`fMRI` button press contrasts is downloaded.
2. A mask of the useful brain volume is computed.
3. A one-sample t-test is applied to the brain maps.
We focus on a given contrast of the localizer dataset: the motor response to
left versus right button press. Both at the individual and group level, this is
expected to elicit activity in the motor cortex (positive in the right
hemisphere, negative in the left hemisphere).
"""
#########################################################################
# Fetch dataset
# --------------
# We download a list of left vs right button press :term:`contrasts<contrast>`
# from a localizer dataset. Note that we fetch individual t-maps that represent
# the :term:`Bold<BOLD>` activity estimate divided by the uncertainty about this
# estimate.
from nilearn.datasets import fetch_localizer_contrasts
n_subjects = 16
data = fetch_localizer_contrasts(
    ["left vs right button press"], n_subjects,
    get_tmaps=True, legacy_format=False
)
###########################################################################
# Display subject t_maps
# ----------------------
# We plot a grid with all the subjects t-maps thresholded at t = 2 for simple
# visualization purposes. The button press effect is visible among all
# subjects.
from nilearn import plotting
import matplotlib.pyplot as plt
subjects = [subject_data[0] for subject_data in data['ext_vars']]
# NOTE(review): the 4x4 grid of axes assumes n_subjects == 16; change both
# together if the subject count changes.
fig, axes = plt.subplots(nrows=4, ncols=4)
for cidx, tmap in enumerate(data['tmaps']):
    plotting.plot_glass_brain(tmap, colorbar=False, threshold=2.0,
                              title=subjects[cidx],
                              axes=axes[int(cidx / 4), int(cidx % 4)],
                              plot_abs=False, display_mode='z')
fig.suptitle('subjects t_map left-right button press')
plt.show()
############################################################################
# Estimate second level model
# ---------------------------
# We define the input maps and the design matrix for the second level model
# and fit it.
import pandas as pd
second_level_input = data['cmaps']
# One-sample test: the design matrix is a single constant intercept column.
design_matrix = pd.DataFrame([1] * len(second_level_input),
                             columns=['intercept'])
############################################################################
# Model specification and fit.
from nilearn.glm.second_level import SecondLevelModel
second_level_model = SecondLevelModel(smoothing_fwhm=8.0)
second_level_model = second_level_model.fit(second_level_input,
                                            design_matrix=design_matrix)
##########################################################################
# To estimate the :term:`contrast` is very simple. We can just provide the
# column name of the design matrix.
z_map = second_level_model.compute_contrast(output_type='z_score')
###########################################################################
# We threshold the second level contrast at uncorrected p < 0.001 and plot it.
from scipy.stats import norm
p_val = 0.001
p001_unc = norm.isf(p_val)
display = plotting.plot_glass_brain(
    z_map, threshold=p001_unc, colorbar=True, display_mode='z', plot_abs=False,
    title='group left-right button press (unc p<0.001)')
plotting.show()
###########################################################################
# As expected, we find the motor cortex.
##########################################################################
# Next, we compute the (corrected) p-values with a parametric test to compare
# them with the results from a nonparametric test.
import numpy as np
from nilearn.image import get_data, math_img
p_val = second_level_model.compute_contrast(output_type='p_value')
n_voxels = np.sum(get_data(second_level_model.masker_.mask_img_))
# Correcting the p-values for multiple testing (Bonferroni: multiply by the
# number of voxels, capped at 1) and taking the negative logarithm.
neg_log_pval = math_img("-np.log10(np.minimum(1, img * {}))"
                        .format(str(n_voxels)),
                        img=p_val)
###########################################################################
# Let us plot the (corrected) negative log p-values for the parametric test.
cut_coords = [0]
# Since we are plotting negative log p-values and using a threshold equal to 1,
# it corresponds to corrected p-values lower than 10%, meaning that there is
# less than 10% probability to make a single false discovery (90% chance that
# we make no false discovery at all). This threshold is much more conservative
# than the previous one.
threshold = 1
title = ('Group left-right button press: \n'
         'parametric test (FWER < 10%)')
display = plotting.plot_glass_brain(
    neg_log_pval, colorbar=True, display_mode='z', plot_abs=False, vmax=3,
    cut_coords=cut_coords, threshold=threshold, title=title)
plotting.show()
###########################################################################
# Now, we compute the (corrected) p-values with a permutation test.
from nilearn.glm.second_level import non_parametric_inference
neg_log_pvals_permuted_ols_unmasked = \
    non_parametric_inference(second_level_input,
                             design_matrix=design_matrix,
                             model_intercept=True, n_perm=1000,
                             two_sided_test=False,
                             smoothing_fwhm=8.0, n_jobs=1)
###########################################################################
# Let us plot the (corrected) negative log p-values for the nonparametric test.
title = ('Group left-right button press: \n'
         'permutation test (FWER < 10%)')
display = plotting.plot_glass_brain(
    neg_log_pvals_permuted_ols_unmasked, colorbar=True, vmax=3,
    display_mode='z', plot_abs=False, cut_coords=cut_coords,
    threshold=threshold, title=title)
plotting.show()
# The neg-log p-values obtained with nonparametric testing are capped at 3
# since the number of permutations is 1e3.
# The nonparametric test yields many more discoveries and is more powerful than
# the usual parametric procedure.
acea96303c5fded9aeca8c50411a1399fbd8d2fa | 1,322 | py | Python | ariia/weather_module.py | Pandhariix/ARIIA | 88f8a76bcf5d75ff929d2db869781ce2ff5a1945 | [
"MIT"
] | null | null | null | ariia/weather_module.py | Pandhariix/ARIIA | 88f8a76bcf5d75ff929d2db869781ce2ff5a1945 | [
"MIT"
] | null | null | null | ariia/weather_module.py | Pandhariix/ARIIA | 88f8a76bcf5d75ff929d2db869781ce2ff5a1945 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# !/usr/bin/env python
# MIT License
#
# Copyright (c) 2017 Maxime Busy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class WeatherModule:
    """Placeholder for the weather-monitoring component.

    Intended to retrieve weather data through the ``pyowm`` library;
    no behaviour is implemented yet.
    """

    def __init__(self):
        """Initialize an empty weather module (no state yet)."""
| 34.789474 | 80 | 0.754917 |
acea96a3c57ec93eb7b96219b1e658dc145abf42 | 175 | py | Python | optimism/inverse/DesignOpt.py | btalamini/optimism | 023e1b2a0b137900a7517e4c7ac5056255cf7bbe | [
"MIT"
] | null | null | null | optimism/inverse/DesignOpt.py | btalamini/optimism | 023e1b2a0b137900a7517e4c7ac5056255cf7bbe | [
"MIT"
] | 1 | 2022-03-12T00:01:12.000Z | 2022-03-12T00:01:12.000Z | optimism/inverse/DesignOpt.py | btalamini/optimism | 023e1b2a0b137900a7517e4c7ac5056255cf7bbe | [
"MIT"
] | 3 | 2021-12-23T19:53:31.000Z | 2022-03-27T23:12:03.000Z |
class DesignOpt:
    """Container for the data needed by a design-optimization run."""

    def __init__(self, mesh, dofManager, quadRule):
        """Store the mesh, DOF manager and quadrature rule.

        Fix: the original signature omitted ``self``, so calling
        ``DesignOpt(mesh, dofManager, quadRule)`` raised at instantiation
        (``self`` was undefined inside the body and the argument count was
        wrong). Adding ``self`` restores the intended interface.
        """
        self.mesh = mesh
        self.dofManager = dofManager
        self.quadRule = quadRule
| 15.909091 | 45 | 0.588571 |
acea976c672d2a3ef06fee40e5611e2e933c7215 | 4,696 | py | Python | tests/base.py | roskakori/nikola | d939b8864af4e55bde42eeeae93612555fc82eae | [
"MIT"
] | null | null | null | tests/base.py | roskakori/nikola | d939b8864af4e55bde42eeeae93612555fc82eae | [
"MIT"
] | null | null | null | tests/base.py | roskakori/nikola | d939b8864af4e55bde42eeeae93612555fc82eae | [
"MIT"
] | null | null | null | # coding: utf8
# Author: Rodrigo Bistolfi
# Date: 03/2013
""" Base class for Nikola test cases """
__all__ = ["BaseTestCase", "cd", "initialize_localeborg", "LOCALE_DEFAULT", "LOCALE_OTHER"]
import os
from contextlib import contextmanager
import unittest
import logbook
import nikola.utils
import nikola.shortcodes
from yapsy.PluginManager import PluginManager
from nikola.plugin_categories import (
Command,
Task,
LateTask,
TemplateSystem,
PageCompiler,
TaskMultiplier,
CompilerExtension,
MarkdownExtension,
RestExtension
)
# Capture Nikola's log records during tests instead of printing them.
nikola.utils.LOGGER.handlers.append(logbook.TestHandler())
# Single alias so every test module subclasses the same base class.
BaseTestCase = unittest.TestCase
@contextmanager
def cd(path):
    """Context manager: temporarily change the working directory to *path*.

    Fix: the restore is now done in a ``finally`` clause, so the previous
    working directory is restored even when the ``with`` body raises
    (the original version leaked the chdir on exceptions).
    """
    old_dir = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(old_dir)
# Locales exercised by the test suite; overridable via environment variables.
LOCALE_DEFAULT = os.environ.get('NIKOLA_LOCALE_DEFAULT', 'en')
LOCALE_OTHER = os.environ.get('NIKOLA_LOCALE_OTHER', 'pl')
def initialize_localeborg():
    """Reset LocaleBorg and re-initialize it with the default test locale."""
    nikola.utils.LocaleBorg.reset()
    nikola.utils.LocaleBorg.initialize({}, LOCALE_DEFAULT)
class FakePost(object):
    """Minimal stand-in for a Nikola post, used by the test suite."""

    def __init__(self, title, slug):
        """Create a fake post with the given title and slug."""
        self.default_lang = 'en'
        self._depfile = {}
        self._title = title
        self._slug = slug
        self._meta = {'slug': slug}

    def title(self):
        """Return the post title."""
        return self._title

    def meta(self, key):
        """Look up a metadata value by key."""
        return self._meta[key]

    def permalink(self):
        """Return the site-relative URL of the post."""
        return '/posts/' + self._slug
class FakeSite(object):
    """Minimal stand-in for the Nikola site object used by the tests.

    Provides just enough configuration, plugin management and shortcode
    support for compiler plugins to initialize against it.
    """
    def __init__(self):
        # Acts as its own (fake) template system; see render_template().
        # Assigned again at the end of __init__ on purpose: plugins
        # activated in between may read this attribute via set_site().
        self.template_system = self
        self.invariant = False
        self.debug = True
        # Minimal site configuration read by plugins during initialization.
        self.config = {
            'DISABLED_PLUGINS': [],
            'EXTRA_PLUGINS': [],
            'DEFAULT_LANG': 'en',
            'MARKDOWN_EXTENSIONS': ['markdown.extensions.fenced_code', 'markdown.extensions.codehilite'],
            'TRANSLATIONS_PATTERN': '{path}.{lang}.{ext}',
            'LISTINGS_FOLDERS': {'listings': 'listings'},
            'TRANSLATIONS': {'en': ''},
        }
        self.EXTRA_PLUGINS = self.config['EXTRA_PLUGINS']
        self.plugin_manager = PluginManager(categories_filter={
            "Command": Command,
            "Task": Task,
            "LateTask": LateTask,
            "TemplateSystem": TemplateSystem,
            "PageCompiler": PageCompiler,
            "TaskMultiplier": TaskMultiplier,
            "CompilerExtension": CompilerExtension,
            "MarkdownExtension": MarkdownExtension,
            "RestExtension": RestExtension
        })
        self.shortcode_registry = {}
        self.plugin_manager.setPluginInfoExtension('plugin')
        # Collect the plugins shipped inside the nikola package itself.
        places = [os.path.join(os.path.dirname(nikola.utils.__file__), 'plugins')]
        self.plugin_manager.setPluginPlaces(places)
        self.plugin_manager.collectPlugins()
        self.compiler_extensions = self._activate_plugins_of_category(
            "CompilerExtension")
        self.timeline = [
            FakePost(title='Fake post',
                     slug='fake-post')
        ]
        self.debug = True
        self.rst_transforms = []
        self.post_per_input_file = {}
        # This is to make plugin initialization happy
        self.template_system = self
        self.name = 'mako'
    def _activate_plugins_of_category(self, category):
        """Activate all the plugins of a given category and return them."""
        # this code duplicated in nikola/nikola.py
        plugins = []
        for plugin_info in self.plugin_manager.getPluginsOfCategory(category):
            if plugin_info.name in self.config.get('DISABLED_PLUGINS'):
                self.plugin_manager.removePluginFromCategory(
                    plugin_info, category)
            else:
                self.plugin_manager.activatePluginByName(plugin_info.name)
                plugin_info.plugin_object.set_site(self)
                plugins.append(plugin_info)
        return plugins
    def render_template(self, name, _, context):
        """Fake template rendering: always returns a fixed image tag."""
        return('<img src="IMG.jpg">')
    # this code duplicated in nikola/nikola.py
    def register_shortcode(self, name, f):
        """Register function f to handle shortcode "name"."""
        if name in self.shortcode_registry:
            nikola.utils.LOGGER.warn('Shortcode name conflict: %s', name)
            return
        self.shortcode_registry[name] = f
    def apply_shortcodes(self, data, *a, **kw):
        """Apply shortcodes from the registry on data."""
        return nikola.shortcodes.apply_shortcodes(
            data, self.shortcode_registry, **kw)
    def apply_shortcodes_uuid(self, data, shortcodes, *a, **kw):
        """Apply shortcodes from the registry on data."""
        # NOTE(review): the `shortcodes` argument is ignored here, unlike
        # the real implementation — presumably intentional for the fake.
        return nikola.shortcodes.apply_shortcodes(
            data, self.shortcode_registry, **kw)
| 30.493506 | 105 | 0.635221 |
acea9792c4cece473a0355aa2b5b4ce1f9d2bed6 | 22,087 | py | Python | sppas/sppas/src/annotations/Align/aligners/alignerio.py | mirfan899/MTTS | 3167b65f576abcc27a8767d24c274a04712bd948 | [
"MIT"
] | null | null | null | sppas/sppas/src/annotations/Align/aligners/alignerio.py | mirfan899/MTTS | 3167b65f576abcc27a8767d24c274a04712bd948 | [
"MIT"
] | null | null | null | sppas/sppas/src/annotations/Align/aligners/alignerio.py | mirfan899/MTTS | 3167b65f576abcc27a8767d24c274a04712bd948 | [
"MIT"
] | null | null | null | """
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
annotations.Align.aligners.alignerio.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import os
import codecs
import logging
from sppas.src.config import sg
from sppas.src.config import separators
from sppas.src.utils.makeunicode import sppasUnicode
# ---------------------------------------------------------------------------
class BaseAlignersReader(object):
    """Base class for readers/writers of time-aligned files.

    :author:       Brigitte Bigi
    :organization: Laboratoire Parole et Langage, Aix-en-Provence, France
    :contact:      develop@sppas.org
    :license:      GPL, v3
    :copyright:    Copyright (C) 2011-2018  Brigitte Bigi

    """

    def __init__(self):
        """Create a reader with an empty file extension."""
        self.extension = ""

    # -----------------------------------------------------------------------

    @staticmethod
    def read(filename):
        """Read a time-aligned file. Must be overridden by subclasses."""
        raise NotImplementedError

    # -----------------------------------------------------------------------

    @staticmethod
    def get_lines(filename):
        """Return the lines of a file.

        :param filename: (str) Input file name
        :returns: (list of str)

        """
        with codecs.open(filename, 'r', sg.__encoding__) as fp:
            lines = fp.readlines()
            fp.close()
        return lines

    # -----------------------------------------------------------------------

    @staticmethod
    def get_units_julius(lines):
        """Return the units of a palign/walign file (in frames).

        :param lines: (List of str)
        :returns: List of tuples (start, end)
        :raises: IOError if the forced-alignment markers are missing

        """
        units = list()

        # Search for the start of the forced-alignment section.
        # Fix: the index is now bound-checked before lines[i] is accessed;
        # the original guard (`if i > len(lines)`) could never fire before
        # the access raised IndexError, so a missing marker crashed with
        # IndexError instead of the intended IOError.
        i = 0
        while i < len(lines) and \
                "=== begin forced alignment ===" not in lines[i]:
            i += 1
        if i == len(lines):
            raise IOError('Time units not found')

        while "=== end forced alignment ===" not in lines[i]:
            i += 1
            if i >= len(lines):
                # ">=" (was ">"): the old off-by-one guard let lines[i]
                # raise IndexError when the end marker was missing.
                raise IOError('Time units not found in alignment result')
            if lines[i].startswith('['):
                # New phonemes
                line = lines[i].replace("[", "")
                line = line.replace("]", "")
                line = sppasUnicode(line).to_strip()
                tab = line.split()
                # tab 0: first frame
                # tab 1: last frame
                # tab 2: score of the segmentation (log proba)
                # tab 3: triphone used
                units.append((int(tab[0]), int(tab[1])))

        return units

    # -----------------------------------------------------------------------

    @staticmethod
    def get_phonemes_julius(lines):
        """Return the pronunciation of all words.

        :param lines: (List of str)
        :returns: List of tuples (ph1 ph2...phN)
        :raises: IOError if no phoneme sequence is found or it is empty

        """
        phonemes = list()

        # Bound-checked search (also handles an empty 'lines' list, which
        # previously raised IndexError).
        i = 0
        while i < len(lines) and not lines[i].startswith('phseq1'):
            i += 1
        if i == len(lines):
            raise IOError('Phonemes sequence not found.')

        # Strip the "phseq1:" prefix; words are separated by '|'.
        line = lines[i]
        line = line[7:].strip()
        if len(line) == 0:
            raise IOError('Empty phonemes sequence.')

        words = line.split('|')
        for phn in words:
            phn = phn.strip()
            phonemes.append(tuple(phn.split()))

        return phonemes

    # -----------------------------------------------------------------------

    @staticmethod
    def get_words_julius(lines):
        """Return all words.

        :param lines: (List of str)
        :returns: List
        :raises: IOError if no word sequence is found

        """
        # Bound-checked search (also handles an empty 'lines' list).
        i = 0
        while i < len(lines) and not lines[i].startswith('sentence1'):
            i += 1
        if i == len(lines):
            raise IOError('Words not found in alignment result')

        # Strip the "sentence1:" prefix.
        line = lines[i]
        line = line[10:]
        line = line.strip()
        return line.split()

    # -----------------------------------------------------------------------

    @staticmethod
    def get_word_scores_julius(lines):
        """Return all scores of words.

        :param lines: (List of str)
        :returns: List
        :raises: IOError if no score sequence is found

        """
        # Bound-checked search (also handles an empty 'lines' list).
        i = 0
        while i < len(lines) and not lines[i].startswith('cmscore1'):
            i += 1
        if i == len(lines):
            raise IOError('Scores not found in alignment result')

        # Strip the "cmscore1:" prefix.
        line = lines[i]
        line = line[9:]
        line = line.strip()
        return line.split()

    # -----------------------------------------------------------------------

    @staticmethod
    def units_to_time(units, samplerate):
        """Return the conversion of units.

        Convert units (in frames) into time values (in seconds).
        Each unit ends where the next one begins, so the resulting
        intervals are contiguous.

        :param units: (List of tuples) Frame units
        :param samplerate: (int) Sample rate to be applied to the units.
        :returns: List of tuples (start, end)

        NOTE: DANS LES VERSIONS PREC. ON DECALAIT TOUT DE 10ms A DROITE.

        """
        samplerate = float(samplerate)
        u = list()
        i = 0
        while i < len(units):
            # Fix the begin of this annotation
            s = round(float(units[i][0]) / samplerate, 3)
            if i+1 < len(units):
                # Fix the end of this annotation to the begin of the next one
                e = round(float(units[i+1][0]) / samplerate, 3)
            else:
                e = round(float(units[i][1]) / samplerate, 3)
            u.append((s, e))
            i += 1

        return u

    # -----------------------------------------------------------------------

    @staticmethod
    def shift_time_units(units, delta):
        """Return the units shifted of a delta time.

        The first start time and the last end time are not shifted, so the
        overall span of the annotation is preserved.

        :param units: (list of tuples) Time units
        :param delta: (float) Delta time value, clamped to [-0.02;0.02]
        :returns: List of tuples (start, end)

        """
        # Clamp the shift to +/- 20 ms.
        if delta > 0.02:
            delta = 0.02
        if delta < -0.02:
            delta = -0.02

        shifted = list()
        i = 0
        while i < len(units):
            start, end = units[i]
            if i > 0:
                start += delta
            if i + 1 < len(units):
                end += delta
            shifted.append((round(start, 3), round(end, 3)))
            i += 1

        return shifted

    # -----------------------------------------------------------------------

    @staticmethod
    def make_result(units, words, phonemes, scores):
        """Make a unique data structure from the given data.

        :param units: (List of tuples)
        :param words: (List of str)
        :param phonemes: (List of tuples)
        :param scores: (List of str, or None)
        :returns: Two data structures
            1. List of (start_time end_time phoneme None)
            2. List of (start_time end_time word score)
        :raises: IOError if phonemes and units do not match

        """
        if scores is None:
            scores = [None]*len(words)

        aligned_words = list()
        aligned_phones = list()
        i = 0
        for wd, phn_seq, sc in zip(words, phonemes, scores):
            # Each word spans the units of its phoneme sequence.
            start_wd = units[i][0]
            for phn in phn_seq:
                if i == len(units):
                    raise IOError('Phonemes/Units are not matching '
                                  'in alignment result')
                start_phn, end_phn = units[i]
                aligned_phones.append((start_phn, end_phn, phn, None))
                i += 1
            end_wd = units[i - 1][1]
            aligned_words.append((start_wd, end_wd, wd, sc))

        return aligned_phones, aligned_words
# ---------------------------------------------------------------------------
class palign(BaseAlignersReader):
    """palign reader/writer of time-aligned files (Julius CSR Engine).
    :author:       Brigitte Bigi
    :organization: Laboratoire Parole et Langage, Aix-en-Provence, France
    :contact:      develop@sppas.org
    :license:      GPL, v3
    :copyright:    Copyright (C) 2011-2018  Brigitte Bigi
    """
    def __init__(self):
        """Create a palign instance to read palign files of Julius."""
        super(palign, self).__init__()
        self.extension = "palign"
    # -----------------------------------------------------------------------
    @staticmethod
    def read(filename):
        """Read an alignment file in the format of Julius CSR engine.
        :param filename: (str) The input file name.
        :returns: 3 lists of tuples:
            1. List of (start-time end-time phoneme None)
            2. List of (start-time end-time word None)
            3. List of (start-time end-time pron_word score)
        :raises: IOError if phonemes, words and scores are inconsistent
        """
        b = BaseAlignersReader()
        lines = b.get_lines(filename)
        try:
            phonemes = b.get_phonemes_julius(lines)
        except IOError:
            logging.error('Got no time-aligned phonemes in file {:s}:'
                          ''.format(filename))
            raise
        words = b.get_words_julius(lines)
        # Pronunciation of each word: its phonemes joined by the separator.
        pron_words = [separators.phonemes.join(phn) for phn in phonemes]
        scores = b.get_word_scores_julius(lines)
        # Sanity checks: one phoneme sequence and one score per word.
        if len(words) != len(phonemes):
            logging.error('Words/Phonemes are not matching in file: {:s}'
                          ''.format(filename))
            logging.error(' - words: {}'.format(words))
            logging.error(' - phonemes: {}'.format(phonemes))
            raise IOError("Words/Phonemes are not matching "
                          "in alignment result of file {:s}".format(filename))
        if len(words) != len(scores):
            logging.error('Words/Scores are not matching in file: {:s}'
                          ''.format(filename))
            logging.error(' - words: {}'.format(words))
            logging.error(' - scores: {}'.format(scores))
            raise IOError("Words/Scores are not matching in alignment result "
                          "of file {:s}".format(filename))
        # Convert frame units into seconds and shift by 10 ms.
        # NOTE(review): the 100 here presumably reflects Julius' 10 ms
        # frame rate — confirm before changing.
        units = b.get_units_julius(lines)
        units = b.units_to_time(units, 100)
        units = b.shift_time_units(units, 0.01)
        data_phon, data_words = b.make_result(units, words, phonemes, None)
        # The phoneme list from this second call is discarded on purpose;
        # only the scored pronunciation tier is kept.
        d, data_pron = b.make_result(units, pron_words, phonemes, scores)
        return data_phon, data_words, data_pron
    # -----------------------------------------------------------------------
    @staticmethod
    def write(phoneslist, tokenslist, alignments, outputfilename):
        """Write an alignment output file.
        The output mimics the text format Julius itself produces, so that
        it can be re-read by palign.read().
        :param phoneslist: (list) The phonetization of each token
        :param tokenslist: (list) Each token
        :param alignments: (list) Tuples (start-time end-time phoneme)
        :param outputfilename: (str) Output file name (a Julius-like output).
        """
        with codecs.open(outputfilename, 'w', sg.__encoding__) as fp:
            fp.write("----------------------- System Information begin "
                     "---------------------\n")
            fp.write("\n")
            fp.write(" Basic Alignment\n")
            fp.write("\n")
            fp.write("----------------------- System Information end "
                     "-----------------------\n")
            fp.write("\n### Recognition: 1st pass\n")
            fp.write("pass1_best: ")
            fp.write("{:s}\n".format(" ".join(tokenslist)))
            fp.write("pass1_best_wordseq: ")
            fp.write("{:s}\n".format(" ".join(tokenslist)))
            fp.write("pass1_best_phonemeseq: ")
            fp.write("{:s}\n".format(" | ".join(phoneslist)))
            fp.write("\n### Recognition: 2nd pass\n")
            fp.write("ALIGN: === phoneme alignment begin ===\n")
            fp.write("sentence1: ")
            fp.write("{:s}\n".format(" ".join(tokenslist)))
            fp.write("wseq1: ")
            fp.write("{:s}\n".format(" ".join(tokenslist)))
            fp.write("phseq1: ")
            fp.write("{:s}\n".format(" | ".join(phoneslist)))
            fp.write("cmscore1: ")
            fp.write("{:s}\n".format("0.000 "*len(phoneslist)))
            fp.write("=== begin forced alignment ===\n")
            fp.write("-- phoneme alignment --\n")
            fp.write(" id: from  to    n_score    unit\n")
            fp.write(" ----------------------------------------\n")
            # One "[ start end] score phoneme" line per aligned unit; the
            # score is a constant placeholder.
            for tv1, tv2, phon in alignments:
                fp.write("[ {:d} ".format(tv1))
                fp.write(" {:d}]".format(tv2))
                fp.write(" -30.000000 " + str(phon) + "\n")
            fp.write("=== end forced alignment ===\n")
            fp.close()
# ---------------------------------------------------------------------------
class walign(BaseAlignersReader):
    """walign reader of time-aligned files (Julius CSR Engine).
    :author:       Brigitte Bigi
    :organization: Laboratoire Parole et Langage, Aix-en-Provence, France
    :contact:      develop@sppas.org
    :license:      GPL, v3
    :copyright:    Copyright (C) 2011-2018  Brigitte Bigi
    """
    def __init__(self):
        """Create a walign instance to read walign files of Julius."""
        super(walign, self).__init__()
        self.extension = "walign"
    # -----------------------------------------------------------------------
    @staticmethod
    def read(filename):
        """Read an alignment file in the format of Julius CSR engine.
        :param filename: (str) The input file name.
        :returns: A list of tuples (start-time end-time word score)
        :raises: IOError if words, scores and units are inconsistent
        """
        b = BaseAlignersReader()
        lines = b.get_lines(filename)
        words = b.get_words_julius(lines)
        scores = b.get_word_scores_julius(lines)
        # Sanity check: one score per word.
        if len(words) != len(scores):
            logging.error('Got words: {}'.format(words))
            logging.error('Got scores: {}'.format(scores))
            raise IOError("Words/Scores are not matching in alignment result")
        # Convert frame units into seconds and shift by 10 ms.
        # NOTE(review): the 100 presumably reflects Julius' 10 ms frame
        # rate — confirm before changing.
        units = b.get_units_julius(lines)
        units = b.units_to_time(units, 100)
        units = b.shift_time_units(units, 0.01)
        aligned_words = list()
        i = 0
        # In a walign file there is exactly one unit per word.
        for wd, sc in zip(words, scores):
            if i == len(units):
                logging.error('Got words: {}'.format(words))
                logging.error('Got units: {}'.format(units))
                raise IOError('Phonemes/Units are not matching '
                              'in alignment result')
            start_wd = units[i][0]
            end_wd = units[i][1]
            aligned_words.append((start_wd, end_wd, wd, sc))
            i += 1
        return aligned_words
# ---------------------------------------------------------------------------
class mlf(BaseAlignersReader):
    """mlf reader of time-aligned files (HTK Toolkit).

    :author: Brigitte Bigi
    :organization: Laboratoire Parole et Langage, Aix-en-Provence, France
    :contact: develop@sppas.org
    :license: GPL, v3
    :copyright: Copyright (C) 2011-2018 Brigitte Bigi

    When the -m option is used, the transcriptions output by HVITE contain
    both the model (phoneme) level and word level transcriptions, e.g.:

        7500000 8700000 f -1081.604736 FOUR 30.000000
        8700000 9800000 ao -903.821350
        9800000 10400000 r -665.931641
        10400000 10400000 sp -0.103585
        10400000 11700000 s -1266.470093 SEVEN 22.860001
        11700000 12500000 eh -765.568237
        12500000 13000000 v -476.323334
        13000000 14400000 n -1285.369629
        14400000 14400000 sp -0.103585

    A 5th column (the word name) marks the first phoneme of each new word.
    """

    def __init__(self):
        """Create a mlf instance to parse mlf files from HVite."""
        super(mlf, self).__init__()
        self.extension = "mlf"

    # -----------------------------------------------------------------------

    @staticmethod
    def is_integer(s):
        """Return True if the given string represents an integer.

        :param s: (str or unicode)
        :returns: (bool)
        """
        try:
            int(s)
        except ValueError:
            pass
        else:
            return True
        # fall back to unicode numeric characters
        try:
            import unicodedata
            unicodedata.numeric(s)
        except (TypeError, ValueError):
            return False
        return True

    # -----------------------------------------------------------------------

    @staticmethod
    def get_units(lines):
        """Return the time units of a mlf file (in HTK time units).

        :param lines: (List of str)
        :returns: List of tuples (start, end)
        """
        units = list()
        for entry in lines:
            columns = entry.split()
            if len(columns) <= 3:
                continue
            if mlf.is_integer(columns[0]) and mlf.is_integer(columns[1]):
                units.append((int(columns[0]), int(columns[1])))
        return units

    # -----------------------------------------------------------------------

    @staticmethod
    def get_phonemes(lines):
        """Return the phoneme sequence of each word.

        A data line with a 5th column starts a new word: the phonemes
        accumulated so far are flushed as one tuple.

        :param lines: (List of str)
        :returns: List of tuples (ph1 ph2...phN)
        """
        phonemes = list()
        current = list()
        for entry in lines:
            columns = entry.split()
            if len(columns) <= 3:
                continue
            if not (mlf.is_integer(columns[0]) and mlf.is_integer(columns[1])):
                continue
            if len(columns) >= 5 and len(current) > 0:
                phonemes.append(tuple(current))
                current = list()
            current.append(columns[2])
        if len(current) > 0:
            phonemes.append(tuple(current))
        return phonemes

    # -----------------------------------------------------------------------

    @staticmethod
    def get_words(lines):
        """Return all words.

        :param lines: (List of str)
        :returns: List
        """
        words = list()
        for entry in lines:
            columns = entry.split()
            if len(columns) < 5:
                continue
            if mlf.is_integer(columns[0]) and mlf.is_integer(columns[1]):
                words.append(columns[4])
        return words

    # -----------------------------------------------------------------------

    @staticmethod
    def read(filename):
        """Read an alignment file (a mlf file).

        :param filename: (str) the input file (a HVite mlf output file).
        :returns: 3 lists of tuples:
            - (start-time end-time phoneme None)
            - (start-time end-time word None)
            - (start-time end-time pronunciation None)
        """
        reader = BaseAlignersReader()
        lines = reader.get_lines(filename)

        # convert the HTK units to seconds, then apply the usual 10ms shift
        units = mlf.get_units(lines)
        units = reader.units_to_time(units, 10e6)
        units = reader.shift_time_units(units, 0.01)

        phonemes = mlf.get_phonemes(lines)
        words = mlf.get_words(lines)
        if len(words) != len(phonemes):
            logging.error('Got words: {}'.format(words))
            logging.error('Got phonemes: {}'.format(phonemes))
            raise IOError("Words/Phonemes are not matching "
                          "in alignment result")

        pron_words = [separators.phonemes.join(phn) for phn in phonemes]
        data_phon, data_words = reader.make_result(units, words, phonemes, None)
        data_phon, data_pron = reader.make_result(units, pron_words, phonemes, None)
        return data_phon, data_words, data_pron
# ---------------------------------------------------------------------------
class AlignerIO(object):
    """Reader/writer of the output files of the aligners.

    :author: Brigitte Bigi
    :organization: Laboratoire Parole et Langage, Aix-en-Provence, France
    :contact: develop@sppas.org
    :license: GPL, v3
    :copyright: Copyright (C) 2011-2018 Brigitte Bigi

    AlignerIO implements methods to read/write files of the external aligner
    systems.
    """

    # File extensions this class is able to read and/or write, mapped to
    # the reader/writer class to instantiate for that extension.
    EXTENSIONS_READ = {palign().extension: palign,
                       mlf().extension: mlf,
                       walign().extension: walign}
    EXTENSIONS_WRITE = {palign().extension: palign}

    # -----------------------------------------------------------------------

    @staticmethod
    def read_aligned(basename):
        """Find an aligned file and read it.

        :param basename: (str) File name without extension
        :returns: Two lists of tuples with phones and words
            - (start-time end-time phoneme score)
            - (start-time end-time word score)
            The score can be None.
        :raises: IOError when no time-aligned file exists for the basename.

        todo: The "phoneme" column can be a sequence of alternative phonemes.
        """
        # idiom fix: iterate the mapping's items and drop the 'is True'
        # identity comparison on the boolean result of os.path.isfile().
        for ext, reader_class in AlignerIO.EXTENSIONS_READ.items():
            track_name = basename + "." + ext
            if os.path.isfile(track_name):
                return reader_class().read(track_name)
        raise IOError('No time-aligned file was found for {:s}'
                      ''.format(basename))
| 33.313725 | 79 | 0.496401 |
acea984f97d146a7ec4fdc348e96d3df79616b1a | 1,890 | py | Python | sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/featurization_settings.py | dubiety/azure-sdk-for-python | 62ffa839f5d753594cf0fe63668f454a9d87a346 | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/featurization_settings.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | sdk/ml/azure-ai-ml/azure/ai/ml/_schema/automl/featurization_settings.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from marshmallow import fields as flds, post_load
from azure.ai.ml.constants import AutoMLConstants
from azure.ai.ml._schema import PatchedSchemaMeta, NestedField, StringTransformedEnum, UnionField
class ColumnTransformerSchema(metaclass=PatchedSchemaMeta):
    """Marshmallow schema for a single AutoML column transformer entry."""
    # Names of the dataset columns the transformer applies to.
    fields = flds.List(flds.Str())
    # Transformer parameters; each value may be a float or a string (null allowed).
    parameters = flds.Dict(
        keys=flds.Str(), values=UnionField([flds.Float(), flds.Str()], allow_none=True, missing=None)
    )
    @post_load
    def make(self, data, **kwargs):
        # Deserialize the validated dict into the SDK object.
        from azure.ai.ml.automl import ColumnTransformer

        return ColumnTransformer(**data)
class FeaturizationSettingsSchema(metaclass=PatchedSchemaMeta):
    """Base schema shared by the NLP and tabular featurization settings."""
    # Language of the dataset (ISO code string).
    dataset_language = flds.Str()
class NlpFeaturizationSettingsSchema(FeaturizationSettingsSchema):
    """Schema for AutoML NLP featurization settings."""
    # NOTE(review): redeclares dataset_language identically to the base
    # class -- appears redundant; confirm before removing.
    dataset_language = flds.Str()
    @post_load
    def make(self, data, **kwargs) -> "NlpFeaturizationSettings":
        # Deserialize into the SDK's NLP featurization settings object.
        from azure.ai.ml.automl import NlpFeaturizationSettings

        return NlpFeaturizationSettings(**data)
class TableFeaturizationSettingsSchema(FeaturizationSettingsSchema):
    """Schema for AutoML tabular featurization settings."""
    # Featurization mode; one of auto/off/custom, defaulting to auto.
    mode = StringTransformedEnum(
        allowed_values=[AutoMLConstants.AUTO, AutoMLConstants.OFF, AutoMLConstants.CUSTOM],
        load_default=AutoMLConstants.AUTO,
    )
    # Transformers to exclude from featurization.
    blocked_transformers = flds.List(flds.Str())
    # Mapping of column name -> declared column type.
    column_name_and_types = flds.Dict(keys=flds.Str(), values=flds.Str())
    # Mapping of transformer name -> list of ColumnTransformer entries.
    transformer_params = flds.Dict(keys=flds.Str(), values=flds.List(NestedField(ColumnTransformerSchema())))
    # Whether DNN-based featurization is enabled.
    enable_dnn_featurization = flds.Bool()
    @post_load
    def make(self, data, **kwargs) -> "TabularFeaturizationSettings":
        # Deserialize into the SDK's tabular featurization settings object.
        from azure.ai.ml.automl import TabularFeaturizationSettings

        return TabularFeaturizationSettings(**data)
acea9893ae7545ad5928f82303fbd00880adc147 | 7,269 | py | Python | vk_api/audio.py | burninggoose/vk_api | 1a03728b70821901e3b92506a468e12d3e682251 | [
"Apache-2.0"
] | 1 | 2020-08-08T17:58:25.000Z | 2020-08-08T17:58:25.000Z | vk_api/audio.py | burninggoose/vk_api | 1a03728b70821901e3b92506a468e12d3e682251 | [
"Apache-2.0"
] | null | null | null | vk_api/audio.py | burninggoose/vk_api | 1a03728b70821901e3b92506a468e12d3e682251 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
:authors: python273
:license: Apache License, Version 2.0, see LICENSE file
:copyright: (c) 2018 python273
"""
import re
from itertools import islice
from bs4 import BeautifulSoup
from .audio_url_decoder import decode_audio_url
from .exceptions import AccessDenied
RE_AUDIO_ID = re.compile(r'audio(-?\d+)_(\d+)')
RE_ALBUM_ID = re.compile(r'act=audio_playlist(-?\d+)_(\d+)')
TRACKS_PER_USER_PAGE = 50
TRACKS_PER_ALBUM_PAGE = 100
ALBUMS_PER_USER_PAGE = 100
class VkAudio(object):
    """ Module for fetching audio tracks without using the official API.

    :param vk: a :class:`VkApi` object
    """
    __slots__ = ('_vk', 'user_id')
    def __init__(self, vk):
        # id of the authenticated user, used to decode obfuscated track urls
        self.user_id = vk.method('users.get')[0]['id']
        self._vk = vk
    def get_iter(self, owner_id=None, album_id=None):
        """ Get a user's audio tracks, yielded page by page.

        :param owner_id: owner ID (negative values for groups)
        :param album_id: album ID
        """
        if owner_id is None:
            owner_id = self.user_id
        if album_id is not None:
            url = 'https://m.vk.com/audio?act=audio_playlist{}_{}'.format(
                owner_id, album_id
            )
            offset_diff = TRACKS_PER_ALBUM_PAGE
        else:
            url = 'https://m.vk.com/audios{}'.format(owner_id)
            offset_diff = TRACKS_PER_USER_PAGE
        offset = 0
        while True:
            response = self._vk.http.get(
                url,
                params={
                    'offset': offset
                },
                allow_redirects=False
            )
            # an empty body means the audio page redirected us away,
            # i.e. we are not allowed to see this user's audio
            if not response.text:
                raise AccessDenied(
                    'You don\'t have permissions to browse user\'s audio'
                )
            tracks = scrap_data(response.text, self.user_id)
            if not tracks:
                break
            for i in tracks:
                yield i
            offset += offset_diff
    def get(self, owner_id=None, album_id=None):
        """ Get the full list of a user's audio tracks.

        :param owner_id: owner ID (negative values for groups)
        :param album_id: album ID
        """
        return list(self.get_iter(owner_id, album_id))
    def get_albums_iter(self, owner_id=None):
        """ Get a user's albums, yielded page by page.

        :param owner_id: owner ID (negative values for groups)
        """
        if owner_id is None:
            owner_id = self.user_id
        offset = 0
        while True:
            response = self._vk.http.get(
                'https://m.vk.com/audio?act=audio_playlists{}'.format(
                    owner_id
                ),
                params={
                    'offset': offset
                },
                allow_redirects=False
            )
            if not response.text:
                raise AccessDenied(
                    'You don\'t have permissions to browse {}\'s albums'.format(
                        owner_id
                    )
                )
            albums = scrap_albums(response.text)
            if not albums:
                break
            for i in albums:
                yield i
            offset += ALBUMS_PER_USER_PAGE
    def get_albums(self, owner_id=None):
        """ Get the full list of a user's albums.

        :param owner_id: owner ID (negative values for groups)
        """
        return list(self.get_albums_iter(owner_id))
    def search_user(self, owner_id=None, q=''):
        """ Search within a user's audio tracks.

        :param owner_id: owner ID (negative values for groups)
        :param q: search query
        """
        if owner_id is None:
            owner_id = self.user_id
        response = self._vk.http.get(
            'https://m.vk.com/audio',
            params={
                'id': owner_id,
                'q': q
            },
            allow_redirects=False
        )
        if not response.text:
            raise AccessDenied(
                'You don\'t have permissions to browse {}\'s audio'.format(
                    owner_id
                )
            )
        # restrict scraping to the "found in owner's audio" section,
        # then keep only tracks actually owned by owner_id
        tracks = scrap_data(
            response.text,
            self.user_id,
            filter_root_el={'class_': 'AudioSerp__foundOwned'}
        )
        return [track for track in tracks if track['owner_id'] == owner_id]
    def search(self, q, count=50):
        """ Search audio tracks.

        :param q: search query
        :param count: maximum number of tracks to return
        """
        return islice(self.search_iter(q), count)
    def search_iter(self, q, offset=0):
        """ Search audio tracks (generator).

        :param q: search query
        :param offset: starting offset, advanced by 50 per page
        """
        while True:
            response = self._vk.http.get(
                'https://m.vk.com/audio',
                params={
                    'act': 'search',
                    'q': q,
                    'offset': offset
                }
            )
            tracks = scrap_data(response.text, self.user_id)
            if not tracks:
                break
            for track in tracks:
                yield track
            offset += 50
def scrap_data(html, user_id, filter_root_el=None):
    """ Parse the list of audio tracks out of an html page.

    :param html: page source to parse
    :param user_id: id used to decode obfuscated track urls
    :param filter_root_el: kwargs for soup.find() locating the root element
        (defaults to the ``au_search_items`` container)
    :returns: list of dicts with id, owner_id, url, artist, title, duration
    """
    root_filter = filter_root_el if filter_root_el is not None \
        else {'id': 'au_search_items'}
    root_el = BeautifulSoup(html, 'html.parser').find(**root_filter)

    tracks = []
    for item in root_el.find_all('div', {'class': 'audio_item'}):
        # skip tracks that VK marks as unavailable
        if 'audio_item_disabled' in item['class']:
            continue

        owner_id, audio_id = (
            int(g) for g in RE_AUDIO_ID.search(item['id']).groups()
        )

        link = item.select_one('.ai_body').input['value']
        if 'audio_api_unavailable' in link:
            # the direct url is obfuscated and must be decoded first
            link = decode_audio_url(link, user_id)

        tracks.append({
            'id': audio_id,
            'owner_id': owner_id,
            'url': link,
            'artist': item.select_one('.ai_artist').text,
            'title': item.select_one('.ai_title').text,
            'duration': int(item.select_one('.ai_dur')['data-dur']),
        })
    return tracks
def scrap_albums(html):
    """ Parse the list of albums out of an html page.

    :param html: page source to parse
    :returns: list of dicts with id, owner_id, url, title, plays
    """
    soup = BeautifulSoup(html, 'html.parser')
    albums = []
    for item in soup.find_all('div', {'class': 'audioPlaylistsPage__item'}):
        href = item.select_one('.audioPlaylistsPage__itemLink')['href']
        owner_id, album_id = (
            int(g) for g in RE_ALBUM_ID.search(href).groups()
        )

        # the stats line looks like "1 011 plays" (localized): drop the
        # trailing word and the thousands separators before converting
        stats_text = item.select_one('.audioPlaylistsPage__stats').text
        plays = int(stats_text.rsplit(' ', 1)[0].replace(' ', ''))

        albums.append({
            'id': album_id,
            'owner_id': owner_id,
            'url': 'https://m.vk.com/audio?act=audio_playlist{}_{}'.format(
                owner_id, album_id
            ),
            'title': item.select_one('.audioPlaylistsPage__title').text,
            'plays': plays
        })
    return albums
acea993fe494712e8da4dadc313ee8a158e4ebb0 | 1,475 | py | Python | model/dataset/dataset_sample.py | yacoubb/lang-classifier | d39a342cf8ad64b191ea235f9af3f833033f254a | [
"MIT"
] | 1 | 2019-07-03T11:28:55.000Z | 2019-07-03T11:28:55.000Z | model/dataset/dataset_sample.py | yacoubb/lang-classifier | d39a342cf8ad64b191ea235f9af3f833033f254a | [
"MIT"
] | null | null | null | model/dataset/dataset_sample.py | yacoubb/lang-classifier | d39a342cf8ad64b191ea235f9af3f833033f254a | [
"MIT"
] | null | null | null | import os
import sys
import random
import numpy as np
import shutil
from tqdm import tqdm
sys.path.append("/".join(os.path.abspath(__file__).split("/")[:-3]))
from model.dataset import generate_random
from model.dataset import utils
def sample_all_datasets(n=1000000):
    """Rebuild ``languages_train`` with ``n`` sampled words per language.

    Wipes and recreates the ``languages_train`` folder, regenerates the
    random dataset, then for every language file in ``languages_converted``
    keeps the words whose length is strictly between
    ``utils.min_word_length`` and ``utils.max_word_length`` and writes a
    random sample of ``n`` of them, one word per line.

    :param n: number of words to sample per language.
    """
    folder_path = "/".join(os.path.abspath(__file__).split("/")[:-1])
    if os.path.isdir(os.path.join(folder_path, "languages_train")):
        shutil.rmtree(os.path.join(folder_path, "languages_train"))
    os.mkdir(os.path.join(folder_path, "languages_train"))
    generate_random.generate_random_dataset(n)
    for lang in os.listdir(os.path.join(folder_path, "languages_converted")):
        print("loading", lang)
        with open(
            os.path.join(folder_path, "languages_converted", lang), "r", newline=""
        ) as original_lang_file:
            # bug fix: readlines() kept the trailing newline on every word,
            # which skewed the length filter and made the output contain a
            # blank line after each sampled word ("word\n\n").
            words = [line.rstrip("\r\n") for line in original_lang_file]
        words = [
            word
            for word in words
            if utils.min_word_length < len(word) < utils.max_word_length
        ]
        print(lang, "with", len(words), "words")
        assert len(words) > n
        sample = np.random.choice(words, size=n)
        with open(
            os.path.join(folder_path, "languages_train", lang), "w+"
        ) as lang_file:
            for word in sample:
                lang_file.write(word + "\n")
if __name__ == "__main__":
    # When run as a script, build a 500k-words-per-language training sample.
    sample_all_datasets(500000)
| 32.777778 | 83 | 0.616949 |
acea99a2d09b417fc3b0539b200c862401580398 | 1,031 | py | Python | main.py | paliwalvimal/ssl-expiry-check | 4985569419da6336f61783a027f7f59bad955e9b | [
"MIT"
] | null | null | null | main.py | paliwalvimal/ssl-expiry-check | 4985569419da6336f61783a027f7f59bad955e9b | [
"MIT"
] | null | null | null | main.py | paliwalvimal/ssl-expiry-check | 4985569419da6336f61783a027f7f59bad955e9b | [
"MIT"
] | null | null | null | import json
import logging
import os
import boto3
import ssl_expiry
sns = boto3.client("sns")
def lambda_handler(event, *args, **kwargs) -> list:
# use the env var HOSTLIST to define a default list of hostnames
HOST_LIST = os.environ['HOSTLIST'].split(',')
EXPIRY_BUFFER = int(os.environ['EXPIRY_BUFFER'])
# cleanup the host list
HOST_LIST = filter(None, (x.strip() for x in HOST_LIST))
response = [
ssl_expiry.test_host(host + ".xyz.com", buffer_days=EXPIRY_BUFFER)
for host in HOST_LIST
]
resp = []
for msg in response:
tmp = {}
if 'error' in msg or 'expire' in msg:
tmp = {
'message': 'Error',
"result": msg
}
resp.append(tmp)
if len(resp) > 0:
res = sns.publish(
TopicArn='',
Message=json.dumps(resp),
Subject='SSL Expiry Alert'
)
else:
print("SSL not expiring for any (sub-)domain in next 30 days.")
return resp
| 25.146341 | 74 | 0.56741 |
acea99eaa3a04e01c5bbcb6c7ca110e28fd53064 | 378 | py | Python | WebMirror/management/rss_parser_funcs/feed_parse_extractUniqueBooks.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 193 | 2016-08-02T22:04:35.000Z | 2022-03-09T20:45:41.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractUniqueBooks.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 533 | 2016-08-23T20:48:23.000Z | 2022-03-28T15:55:13.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractUniqueBooks.py | rrosajp/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 19 | 2015-08-13T18:01:08.000Z | 2021-07-12T17:13:09.000Z | def extractUniqueBooks(item):
"""
Unique Books
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if 'Allgemein' in item['tags']:
return buildReleaseMessageWithType(item, 'Survival of a Healer', vol, chp, frag=frag, postfix=postfix, tl_type='oel')
return False
| 34.363636 | 119 | 0.719577 |
acea9a55ea45a8b6b42a6aaf19cf35ac87061259 | 92 | py | Python | tests/test_speculative.py | Capybasilisk/Speculative-Fiction-Bot | 8572f88a56b1602187095a4358c90f49413ba106 | [
"BSD-3-Clause"
] | 5 | 2020-06-18T13:05:21.000Z | 2021-09-26T16:10:42.000Z | tests/test_speculative.py | Capybasilisk/Speculative-Fiction-Bot | 8572f88a56b1602187095a4358c90f49413ba106 | [
"BSD-3-Clause"
] | null | null | null | tests/test_speculative.py | Capybasilisk/Speculative-Fiction-Bot | 8572f88a56b1602187095a4358c90f49413ba106 | [
"BSD-3-Clause"
] | null | null | null | from speculative import __version__
def test_version():
assert __version__ == '0.1.0'
| 15.333333 | 35 | 0.728261 |
acea9be16281c4aa750227898b82c52974c93a90 | 2,110 | py | Python | Game5/modules/interfaces/switchLevelIterface.py | ttkaixin1998/pikachupythongames | 609a3a5a2be3f5a187c332c7980bb5bb14548f02 | [
"MIT"
] | 4,013 | 2018-06-16T08:00:02.000Z | 2022-03-30T11:48:14.000Z | Game5/modules/interfaces/switchLevelIterface.py | pigbearcat/Games | b8c47ef1bcce9a9db3f3730c162e6e8e08b508a2 | [
"MIT"
] | 22 | 2018-10-18T00:15:50.000Z | 2022-01-13T08:16:15.000Z | Game5/modules/interfaces/switchLevelIterface.py | pigbearcat/Games | b8c47ef1bcce9a9db3f3730c162e6e8e08b508a2 | [
"MIT"
] | 2,172 | 2018-07-20T04:03:14.000Z | 2022-03-31T14:18:29.000Z | '''
Function:
关卡切换界面
Author:
Charles
微信公众号:
Charles的皮卡丘
'''
import sys
import pygame
'''关卡切换界面'''
def switchLevelIterface(screen, cfg, level_next=1):
    """Show the level-switch loading screen, then return when "loading" ends.

    :param screen: pygame display surface to draw on.
    :param cfg: game config (image paths, font path, window size).
    :param level_next: number of the level about to start (shown in the hint).
    """
    background_img = pygame.image.load(cfg.OTHER_IMAGE_PATHS.get('background'))
    color_white = (255, 255, 255)
    color_gray = (192, 192, 192)
    font = pygame.font.Font(cfg.FONTPATH, cfg.WIDTH//20)
    logo_img = pygame.image.load(cfg.OTHER_IMAGE_PATHS.get('logo'))
    logo_img = pygame.transform.scale(logo_img, (446, 70))
    logo_rect = logo_img.get_rect()
    logo_rect.centerx, logo_rect.centery = cfg.WIDTH/2, cfg.HEIGHT//4
    # loading hint text
    font_render = font.render('Loading game data, You will enter Level-%s' % level_next, True, color_white)
    font_rect = font_render.get_rect()
    font_rect.centerx, font_rect.centery = cfg.WIDTH/2, cfg.HEIGHT/2
    # loading progress bar
    gamebar = pygame.image.load(cfg.OTHER_IMAGE_PATHS.get('gamebar')).convert_alpha()
    gamebar_rect = gamebar.get_rect()
    gamebar_rect.centerx, gamebar_rect.centery = cfg.WIDTH/2, cfg.HEIGHT/1.4
    tank_cursor = pygame.image.load(cfg.PLAYER_TANK_IMAGE_PATHS.get('player1')[0]).convert_alpha().subsurface((0, 144), (48, 48))
    tank_rect = tank_cursor.get_rect()
    tank_rect.left = gamebar_rect.left
    tank_rect.centery = gamebar_rect.centery
    # remaining "loading time", measured in pixels the tank cursor still travels
    load_time_left = gamebar_rect.right - tank_rect.right + 8
    # main loop
    clock = pygame.time.Clock()
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
        if load_time_left <= 0:
            return
        screen.blit(background_img, (0, 0))
        screen.blit(logo_img, logo_rect)
        screen.blit(font_render, font_rect)
        screen.blit(gamebar, gamebar_rect)
        screen.blit(tank_cursor, tank_rect)
        # fill the already-loaded part of the bar behind the moving cursor
        pygame.draw.rect(screen, color_gray, (gamebar_rect.left+8, gamebar_rect.top+8, tank_rect.left-gamebar_rect.left-8, tank_rect.bottom-gamebar_rect.top-16))
        tank_rect.left += 1
        load_time_left -= 1
        pygame.display.update()
        clock.tick(60)
acea9bfaa5226b076609bb671dc262a3bdc87b86 | 15,654 | py | Python | xfel/ui/components/run_stats_plotter.py | Anthchirp/cctbx | b8064f755b1dbadf05b8fbf806b7d50d73ef69bf | [
"BSD-3-Clause-LBNL"
] | null | null | null | xfel/ui/components/run_stats_plotter.py | Anthchirp/cctbx | b8064f755b1dbadf05b8fbf806b7d50d73ef69bf | [
"BSD-3-Clause-LBNL"
] | null | null | null | xfel/ui/components/run_stats_plotter.py | Anthchirp/cctbx | b8064f755b1dbadf05b8fbf806b7d50d73ef69bf | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import absolute_import, division, print_function
from six.moves import range
from dials.array_family import flex
from matplotlib import pyplot as plt
from xfel.ui.components.timeit import duration
import time
from six.moves import map
# get_hitrate_stats takes a tuple (run, trial, rungroup, d_min)
# and returns a tuple of flex arrays as follows:
# time (s) -- flex.double, timestamp of the shot,
# ratio -- flex.double, ratio of intensities at two angles in the radial average
# n_strong -- flex.int, number of strong spots identified by hitfinder,
# I_sig_I_low -- flex.double, the average I/sig(I) in the low res bin of each shot, if it indexed
# I_sig_I_high -- flex.double, the average I/sig(I) in the high res bin of each shot, if it indexed
def get_should_have_indexed_timestamps(timestamps,
                                       n_strong,
                                       isigi_low,
                                       n_strong_cutoff,
                                       indexed=False):
  """Select timestamps of shots with enough strong spots that either indexed
  (indexed=True) or failed to index (indexed=False).
  """
  # isigi_low is stored as 0 for shots that did not index
  indexing_mask = (isigi_low > 0) if indexed else (isigi_low == 0)
  enough_spots = n_strong >= n_strong_cutoff
  return timestamps.select(indexing_mask & enough_spots)
def get_multirun_should_have_indexed_timestamps(stats_by_run,
                                                run_numbers,
                                                d_min,
                                                n_strong_cutoff=16,
                                                indexed=False):
  """Collect, per run, the timestamps of shots that should (not) have indexed.

  stats_by_run holds per-run tuples as returned by get_hitrate_stats
  (timestamps at index 0, n_strong at 3, isigi_low at 4). d_min is accepted
  for interface compatibility but not used here.
  :returns: (run_numbers, list of timestamp arrays, one per run)
  """
  per_run_timestamps = []
  for run_stats in stats_by_run:
    run_ts = run_stats[0]
    if len(run_ts) == 0:
      # no shots recorded for this run
      per_run_timestamps.append(flex.double())
      continue
    per_run_timestamps.append(get_should_have_indexed_timestamps(
      run_ts, run_stats[3], run_stats[4], n_strong_cutoff, indexed=indexed))
  return (run_numbers, per_run_timestamps)
def get_string_from_timestamp(ts, long_form=False):
  """Format a POSIX timestamp (seconds, possibly fractional) as a UTC string.

  :param ts: (float) seconds since the epoch.
  :param long_form: when True return "YYYY-MM-DDTHH:MMZSS.mmm", otherwise
      the compact "YYYYMMDDHHMMSSmmm".
  :returns: (str) the formatted timestamp.
  """
  import time, math
  time_seconds = int(math.floor(ts))
  time_milliseconds = int(round((ts - time_seconds)*1000))
  if time_milliseconds >= 1000:
    # bug fix: rounding can push the fractional part up to a full second
    # (e.g. ts just below an integer), which previously produced a malformed
    # 4-digit milliseconds field; carry the overflow into the seconds.
    time_seconds += time_milliseconds // 1000
    time_milliseconds %= 1000
  time_obj = time.gmtime(time_seconds)
  if long_form:
    string = "%04d-%02d-%02dT%02d:%02dZ%02d.%03d" % (
      time_obj.tm_year,
      time_obj.tm_mon,
      time_obj.tm_mday,
      time_obj.tm_hour,
      time_obj.tm_min,
      time_obj.tm_sec,
      time_milliseconds)
  else:
    string = "%04d%02d%02d%02d%02d%02d%03d" % (
      time_obj.tm_year,
      time_obj.tm_mon,
      time_obj.tm_mday,
      time_obj.tm_hour,
      time_obj.tm_min,
      time_obj.tm_sec,
      time_milliseconds)
  return string
def get_strings_from_timestamps(timestamps, long_form=False):
  """Format each timestamp with get_string_from_timestamp.

  :param timestamps: iterable of POSIX timestamps (floats).
  :param long_form: passed through to get_string_from_timestamp.
  :returns: list of formatted timestamp strings.
  """
  # fix: removed an unused local `import os` and an unnecessary lambda
  return [get_string_from_timestamp(ts, long_form=long_form)
          for ts in timestamps]
def get_paths_from_timestamps(timestamps,
                              prepend="",
                              tag="idx",
                              ext="cbf",
                              long_form=False):
  """Build "<prepend>/<tag>-<timestamp>.<ext>" paths for the timestamps.

  :param timestamps: iterable of POSIX timestamps (floats).
  :param prepend: directory to join in front of each file name.
  :param tag: file name prefix.
  :param ext: file extension (without the dot).
  :param long_form: passed through to get_string_from_timestamp.
  :returns: list of path strings.
  """
  import os
  paths = []
  for ts in timestamps:
    stamp = get_string_from_timestamp(ts, long_form=long_form)
    file_name = "%s-%s.%s" % (tag, stamp, ext)
    paths.append(os.path.join(prepend, file_name))
  return paths
def get_run_stats(timestamps,
                  two_theta_low,
                  two_theta_high,
                  n_strong,
                  resolutions,
                  n_lattices,
                  tuple_of_timestamp_boundaries,
                  lengths,
                  run_numbers,
                  n_multiples=2,
                  ratio_cutoff=1,
                  n_strong_cutoff=16,
                  i_sigi_cutoff=1,
                  d_min=2,
                  ):
  """Compute per-shot hit/indexing statistics and sliding-window rates.

  Returns a 16-tuple consumed by plot_run_stats: timestamps, drop ratios and
  hits, sliding drop-hit rate, n_strong, crystal hits, indexing rate,
  multiple-lattice rate, high-quality rate, indexed/high-quality selections,
  resolutions, the window size, lengths, boundaries and run numbers.
  """
  print("")
  print("%d shots" % len(timestamps))
  print("%d first lattices" % (n_lattices >= 1).count(True))
  print("%d multiple lattices" % (n_lattices >= 2).count(True))
  print("%d total lattices" % (flex.sum(n_lattices)))
  iterator = range(len(resolutions))
  # hit rate of drops (observe solvent) or crystals (observe strong spots)
  # since -1 is used as a flag for "did not store this value", and we want a quotient,
  # set the numerator value to 0 whenever either the numerator or denominator is -1
  # NOTE(review): `or` short-circuits on the truthiness of the first flex
  # comparison rather than combining the two masks elementwise -- an
  # elementwise `|` may have been intended; confirm before changing.
  invalid = (two_theta_low <= 0) or (two_theta_high < 0) # <= to prevent /0
  # NOTE(review): set_selected appears to modify the arrays -- confirm
  # whether the caller's two_theta arrays are altered in place.
  numerator = two_theta_high.set_selected(invalid, 0)
  denominator = two_theta_low.set_selected(two_theta_low == 0, 1) # prevent /0
  drop_ratios = numerator/denominator
  drop_hits = drop_ratios >= ratio_cutoff
  xtal_hits = n_strong >= n_strong_cutoff
  # window half-widths: ~10% of the shots (capped at 50) for indexing rates,
  # a fixed 500 shots for the high-quality rate
  half_idx_rate_window = min(50, max(int(len(timestamps)//20), 1))
  half_hq_rate_window = 500
  indexed_sel = n_lattices > 0
  hq_sel = (resolutions > 0) & (resolutions <= d_min)
  # indexing and droplet hit rate in a sliding window
  idx_rate = flex.double()
  multiples_rate = flex.double()
  hq_rate = flex.double()
  drop_hit_rate = flex.double()
  for i in iterator:
    idx_min = max(0, i - half_idx_rate_window)
    idx_max = min(i + half_idx_rate_window, len(resolutions))
    n_lattices_local = n_lattices[idx_min:idx_max]
    shots_this_span = len(n_lattices_local)
    first_lattices_local = n_lattices_local >= 1
    idx_local_rate = first_lattices_local.count(True)/shots_this_span
    idx_rate.append(idx_local_rate)
    multiples_sel = n_lattices_local >= n_multiples
    multiples_local_rate = multiples_sel.count(True)/shots_this_span
    multiples_rate.append(multiples_local_rate)
    drop_sel = drop_hits[idx_min:idx_max]
    drop_local_rate = drop_sel.count(True)/shots_this_span
    drop_hit_rate.append(drop_local_rate)
    # different sliding window for "high quality" xtals
    hq_min = max(0, i - half_hq_rate_window)
    hq_max = min(i + half_hq_rate_window, len(resolutions))
    n_lattices_local_hq = n_lattices[hq_min:hq_max]
    first_lattices_local_hq = n_lattices_local_hq >= 1
    hq_high_sel = hq_sel[hq_min:hq_max].select(first_lattices_local_hq)
    n_first_lattices_local_hq = first_lattices_local_hq.count(True)
    if n_first_lattices_local_hq > 0:
      hq_rate.append(hq_high_sel.count(True)/n_first_lattices_local_hq)
    else:
      hq_rate.append(0)
  return (timestamps,
          drop_ratios,
          drop_hits,
          drop_hit_rate,
          n_strong,
          xtal_hits,
          idx_rate,
          multiples_rate,
          hq_rate,
          indexed_sel,
          hq_sel,
          resolutions,
          half_idx_rate_window*2,
          lengths,
          tuple_of_timestamp_boundaries,
          run_numbers)
def plot_run_stats(stats,
                   d_min,
                   n_multiples=2,
                   run_tags=[],
                   run_statuses=[],
                   minimalist=False,
                   interactive=True,
                   xsize=30,
                   ysize=10,
                   high_vis=False,
                   title=None,
                   ext='cbf',
                   figure=None,
                   ):
  """Render the multi-panel run-statistics figure.

  :param stats: the tuple produced by get_run_stats().
  :param d_min: resolution cutoff shown in the third panel.
  :param minimalist: when True, draw 3 panels without the per-run text panel.
  :param interactive: when True, clicking a point prints the image path for
      the nearest timestamp.
  :param figure: when given, draw into this matplotlib figure and save it to
      "runstats_tmp.png"; otherwise show the plot with plt.show().
  :returns: "runstats_tmp.png" (the file is only written when *figure* is
      given), or None when stats contains no timestamps.
  """
  t1 = time.time()
  if figure:
    f = figure
  else:
    f = plt.figure()
  # scale marker and font sizes from the requested figure size
  plot_ratio = max(min(xsize, ysize)/2.5, 3)
  if high_vis:
    spot_ratio = plot_ratio*4
    text_ratio = plot_ratio*4.5
  else:
    spot_ratio = plot_ratio*2
    text_ratio = plot_ratio*3
  t, drop_ratios, drop_hits, drop_hit_rate, n_strong, xtal_hits, \
  idx_rate, multiples_rate, hq_rate, indexed_sel, hq_sel, resolutions, \
  window, lengths, boundaries, run_numbers = stats
  if len(t) == 0:
    return None
  n_runs = len(boundaries)//2
  if len(run_tags) != n_runs:
    run_tags = [[] for i in range(n_runs)]
  if len(run_statuses) != n_runs:
    run_statuses = [None for i in range(n_runs)]
  if minimalist:
    print("Minimalist mode activated.")
    ax1, ax2, ax3 = f.subplots(3, sharex=True, sharey=False)
    axset = (ax1, ax2, ax3)
  else:
    ax1, ax2, ax3, ax4 = f.subplots(4, sharex=True, sharey=False)
    axset = (ax1, ax2, ax3, ax4)
  for a in axset:
    # NOTE(review): string 'off' for tick_params is deprecated in newer
    # matplotlib (booleans expected) -- confirm the pinned version.
    a.tick_params(axis='x', which='both', bottom='off', top='off')
  # panel 1: strong-spot counts, indexed shots in blue
  ax1.scatter(t.select(~indexed_sel), n_strong.select(~indexed_sel), edgecolors="none", color ='#d9d9d9', s=spot_ratio)
  ax1.scatter(t.select(indexed_sel), n_strong.select(indexed_sel), edgecolors="none", color='blue', s=spot_ratio)
  ax1.set_ylim(ymin=0)
  ax1.axis('tight')
  ax1.set_ylabel("strong spots\nblue: idx\ngray: did not idx", fontsize=text_ratio)
  # panel 2: indexing / multiple-lattice rates, solvent rate on twin axis
  ax2.plot(t, idx_rate*100)
  ax2.plot(t, multiples_rate*100, color='magenta')
  ax2_twin = ax2.twinx()
  ax2_twin.plot(t, drop_hit_rate*100, color='green')
  ax2_twin.set_ylim(ymin=0)
  ax2.axis('tight')
  ax2.set_ylabel("blue:%% idx\npink:%% %d lattices" % n_multiples, fontsize=text_ratio)
  ax2_twin.set_ylabel("green:\n% solvent", fontsize=text_ratio)
  # panel 3: per-shot resolution (inverted axis), high-quality rate on twin
  gtz = resolutions > 0 # no indexing solution stored as resolution of 0
  # ax3.semilogy()
  ax3.invert_yaxis()
  ax3.scatter(t.select(gtz), resolutions.select(gtz), edgecolors="none", color='orange', s=spot_ratio)
  ax3_twin = ax3.twinx()
  ax3_twin.plot(t, hq_rate*100, color='orange')
  ax3_twin.set_ylim(ymin=0)
  ax3.axis('tight')
  ax3.set_ylabel("high res\nbin (Ang)", fontsize=text_ratio)
  ax3_twin.set_ylabel("line:%% <=\n%.2f Ang" % d_min, fontsize=text_ratio)
  axset_with_twins = list(axset) + [ax2_twin, ax3_twin]
  for a in axset_with_twins:
    xlab = a.get_xticklabels()
    ylab = a.get_yticklabels()
    for l in xlab + ylab:
      l.set_fontsize(text_ratio)
  f.subplots_adjust(hspace=0)
  # add lines and text summaries at the timestamp boundaries
  if not minimalist:
    for boundary in boundaries:
      if boundary is not None:
        for a in (ax1, ax2, ax3):
          a.axvline(x=boundary, ymin=0, ymax=3, linewidth=1, color='k')
  run_starts = boundaries[0::2]
  run_ends = boundaries[1::2]
  start = 0
  end = -1
  # per-run summary text (panel 4): image/hit/indexing counts and rates
  for idx in range(len(run_numbers)):
    start_t = run_starts[idx]
    end_t = run_ends[idx]
    if start_t is None or end_t is None: continue
    end += lengths[idx]
    slice_t = t[start:end+1]
    slice_hits = xtal_hits[start:end+1]
    n_hits = slice_hits.count(True)
    slice_drops = drop_hits[start:end+1]
    n_drops = slice_drops.count(True)
    slice_indexed_sel = indexed_sel[start:end+1]
    slice_hq_sel = hq_sel[start:end+1]
    n_idx_low = slice_indexed_sel.count(True)
    n_idx_high = slice_hq_sel.count(True)
    tags = run_tags[idx]
    status = run_statuses[idx]
    if status == "DONE":
      status_color = 'blue'
    elif status in ["RUN", "PEND", "SUBMITTED"]:
      status_color = 'green'
    elif status is None:
      status_color = 'black'
    else:
      status_color = 'red'
    if minimalist:
      ax3.set_xlabel("timestamp (s)\n# images shown as all (%3.1f Angstroms)" % d_min, fontsize=text_ratio)
      ax3.set_yticks([])
    else:
      ax4.text(start_t, 3.85, " " + ", ".join(tags) + " [%s]" % status, fontsize=text_ratio, color=status_color, rotation=10)
      ax4.text(start_t, .85, "run %s" % str(run_numbers[idx]), fontsize=text_ratio)
      ax4.text(start_t, .65, "%d img/%d hit" % (lengths[idx], n_hits), fontsize=text_ratio)
      ax4.text(start_t, .45, "%d (%d) idx" % (n_idx_low, n_idx_high), fontsize=text_ratio)
      ax4.text(start_t, .25, "%-3.1f%% solv/%-3.1f%% xtal" % ((100*n_drops/lengths[idx]),(100*n_hits/lengths[idx])), fontsize=text_ratio)
      ax4.text(start_t, .05, "%-3.1f (%-3.1f)%% idx" % \
        (100*n_idx_low/lengths[idx], 100*n_idx_high/lengths[idx]), fontsize=text_ratio)
      ax4.set_xlabel("timestamp (s)\n# images shown as all (%3.1f Angstroms)" % d_min, fontsize=text_ratio)
      ax4.set_yticks([])
    for item in axset:
      item.tick_params(labelsize=text_ratio)
    start += lengths[idx]
  if title is not None:
    plt.title(title)
  if interactive:
    # clicking prints the image path for the timestamp nearest the click
    def onclick(event):
      import math
      ts = event.xdata
      if ts is None: return
      diffs = flex.abs(t - ts)
      ts = t[flex.first_index(diffs, flex.min(diffs))]
      print(get_paths_from_timestamps([ts], tag="shot", ext=ext)[0])
    if hasattr(f, '_cid'):
      f.canvas.mpl_disconnect(f._cid)
    f._cid = f.canvas.mpl_connect('button_press_event', onclick)
  if not figure:
    plt.show()
  else:
    f.set_size_inches(xsize, ysize)
    f.savefig("runstats_tmp.png", bbox_inches='tight', dpi=100)
    plt.close(f)
  t2 = time.time()
  # print "plot_run_stats took %s" % duration(t1, t2)
  return "runstats_tmp.png"
def plot_multirun_stats(runs,
                        run_numbers,
                        d_min,
                        n_multiples=2,
                        ratio_cutoff=1,
                        n_strong_cutoff=40,
                        i_sigi_cutoff=1,
                        run_tags=None,
                        run_statuses=None,
                        minimalist=False,
                        interactive=False,
                        easy_run=False,
                        compress_runs=True,
                        xsize=30,
                        ysize=10,
                        high_vis=False,
                        title=None,
                        figure=None):
  """Concatenate per-run statistics for several runs and plot them.

  Each element of ``runs`` is a tuple of parallel arrays
  (timestamps, two_theta_low, two_theta_high, n_strong, resolutions,
  n_lattices).  Runs are glued onto one shared time axis (optionally
  compressed so there are no gaps between runs), aggregated via
  ``get_run_stats`` and rendered via ``plot_run_stats``.

  Returns the png filename produced by plot_run_stats, or None if the
  easy_run sub-process path fails.

  Fix: ``run_tags``/``run_statuses`` previously used mutable list defaults
  (shared across calls); they now default to None and are normalised here.
  """
  if run_tags is None:
    run_tags = []
  if run_statuses is None:
    run_statuses = []
  # accumulators for the concatenated, (optionally) re-based time series
  tset = flex.double()
  two_theta_low_set = flex.double()
  two_theta_high_set = flex.double()
  nset = flex.int()
  resolutions_set = flex.double()
  n_lattices = flex.int()
  boundaries = []
  lengths = []
  runs_with_data = []
  run_tags_with_data = []
  run_statuses_with_data = []
  offset = 0
  for idx in range(len(runs)):
    r = runs[idx]
    if len(r[0]) == 0:
      continue  # run contributed no timestamps; skip it entirely
    if compress_runs:
      # re-base this run's timestamps so it starts right after the previous
      # run (plus one nominal 120 Hz frame spacing)
      tslice = r[0] - r[0][0] + offset
      offset += (r[0][-1] - r[0][0] + 1/120.)
    else:
      tslice = r[0]
    tset.extend(tslice)
    two_theta_low_set.extend(r[1])
    two_theta_high_set.extend(r[2])
    nset.extend(r[3])
    resolutions_set.extend(r[4])
    n_lattices.extend(r[5])
    boundaries.append(tslice[0])
    boundaries.append(tslice[-1])
    lengths.append(len(tslice))
    runs_with_data.append(run_numbers[idx])
    if run_tags:
      run_tags_with_data.append(run_tags[idx])
    if run_statuses:
      run_statuses_with_data.append(run_statuses[idx])
  stats_tuple = get_run_stats(tset,
                              two_theta_low_set,
                              two_theta_high_set,
                              nset,
                              resolutions_set,
                              n_lattices,
                              tuple(boundaries),
                              tuple(lengths),
                              runs_with_data,
                              n_multiples=n_multiples,
                              ratio_cutoff=ratio_cutoff,
                              n_strong_cutoff=n_strong_cutoff,
                              i_sigi_cutoff=i_sigi_cutoff,
                              d_min=d_min)
  if easy_run:
    # Plot in a subprocess: pickle the inputs and invoke the command-line
    # helper, reading the resulting png path from its last stdout line.
    from libtbx import easy_run, easy_pickle
    easy_pickle.dump("plot_run_stats_tmp.pickle",
                     (stats_tuple, d_min, n_multiples, run_tags_with_data,
                      run_statuses_with_data, minimalist, interactive,
                      xsize, ysize, high_vis, title))
    result = easy_run.fully_buffered(command="cctbx.xfel.plot_run_stats_from_stats_pickle plot_run_stats_tmp.pickle")
    try:
      png = result.stdout_lines[-1]
      if png == "None":
        return None
    except Exception:
      return None
  else:
    png = plot_run_stats(stats_tuple, d_min, n_multiples=n_multiples,
                         run_tags=run_tags_with_data,
                         run_statuses=run_statuses_with_data,
                         minimalist=minimalist, interactive=interactive,
                         xsize=xsize, ysize=ysize, high_vis=high_vis,
                         title=title, figure=figure)
  return png
| 38.651852 | 184 | 0.617861 |
acea9d3444efc1ca9cde73b05670e6cb879821b1 | 2,597 | py | Python | plug.py | null14/plugEditor | b13e88732d652ee123a0e5dfbb3f86d961a517c3 | [
"MIT"
] | null | null | null | plug.py | null14/plugEditor | b13e88732d652ee123a0e5dfbb3f86d961a517c3 | [
"MIT"
] | null | null | null | plug.py | null14/plugEditor | b13e88732d652ee123a0e5dfbb3f86d961a517c3 | [
"MIT"
] | null | null | null | from maya.api import OpenMaya as om
# --- Interactive scratch/demo code (meant to run inside a Maya session) -----
# NOTE(review): these statements execute at import time, but NodePlug and
# AttrPlug are defined *below* them, so importing this module as-is raises
# NameError.  The snippet only works when pasted into Maya's script editor
# after the class definitions exist.  The bare ``print`` statements are
# Python-2 syntax.
sList = om.MGlobal.getActiveSelectionList()
node = NodePlug("pSphere1")
plug = node.find_plug("rotate")
plug.source()
plug.destinations()
plugs = node.plugs()
print node.destinations(0)[0].name()
print node.destinations(1)[0].name()
print node.sources(0)[0].name()
print node.sources(1)[0].name()
srcnodes = node.source_nodes()
destnodes = node.destination_nodes()
srcnodes[0].plugs()
pplug = plug.parent()
aplug = AttrPlug(plug)
aplug.attr.keyable
class NodePlug:
    """Convenience wrapper around a Maya dependency node.

    Wraps an ``om.MFnDependencyNode`` and provides helpers for listing the
    node's plugs and walking their incoming/outgoing connections.
    NOTE(review): Python-2 era code (uses ``xrange``).
    """
    def __init__(self, node=""):
        # Accept either a node name (str) or an existing MObject.
        # NOTE(review): if ``node`` is neither, ``self.mobject`` is never
        # assigned and the MFnDependencyNode() call below raises
        # AttributeError.
        if isinstance(node, str):
            self.selnode = om.MGlobal.getSelectionListByName(node)
            self.mobject = self.selnode.getDependNode(0)
        elif isinstance(node, om.MObject):
            self.mobject = node
        self.dependency = om.MFnDependencyNode(self.mobject)
    def name(self):#str
        """Return the dependency node's name."""
        return self.dependency.name()
    def find_plug(self, attrname):#MPlug
        """Find a plug on this node by attribute name (or attribute MObject)."""
        return self.dependency.findPlug(attrname, False)
    def sources(self,connections=0):#[MPlug]
        """Connections where this node is the destination; see connected_all."""
        return self.connected_all(1,0,connections)
    def source_nodes(self):#[NodePlug]
        """Upstream nodes, each wrapped in a NodePlug."""
        result = []
        for plug in self.sources():
            result.append(NodePlug(plug.node()))
        return result
    def destinations(self, connections=0):#[MPlug]
        """Connections where this node is the source; see connected_all."""
        return self.connected_all(0,1,connections)
    def destination_nodes(self):#[NodePlug]
        """Downstream nodes, each wrapped in a NodePlug."""
        result = []
        for plug in self.destinations():
            result.append(NodePlug(plug.node()))
        return result
    def plugs(self):#[MPlug]
        """Return one MPlug per attribute on this node."""
        results = []
        count = self.dependency.attributeCount()
        for i in xrange(count):  # Python-2 builtin
            attrObj = self.dependency.attribute(i)
            plug = self.find_plug(attrObj)
            results.append(plug)
            #print plug.partialName(True, True, True, False, True, True)
        return results
    def connected_all(self, source=0, destination=0, connections=0):#[MPlug]
        """Collect connections across all plugs of this node.

        ``source``/``destination`` are forwarded to MPlug.connectedTo to
        select the direction; when ``connections`` is truthy the local plug
        is collected instead of the plug on the other end.
        """
        results = []
        for plug in self.plugs():
            for cnct in plug.connectedTo(source,destination):
                if connections:
                    results.append(plug)
                else:
                    results.append(cnct)
        return results
class AttrPlug:
    """Thin wrapper exposing the MFnAttribute of a plug (or attribute MObject).

    Accepts an ``om.MPlug`` (its attribute MObject is extracted) or an
    ``om.MObject`` directly; the function set is stored as ``self.attr``.
    """
    def __init__(self, mplug):
        if isinstance(mplug, om.MPlug):
            self.mobject = mplug.attribute()
        elif isinstance(mplug, om.MObject):
            self.mobject = mplug
        self.attr = om.MFnAttribute(self.mobject)
    def attribute(self):
        """Return the MFnAttribute function set for this attribute.

        Fix: previously returned ``self.mfnattr``, an attribute that is
        never assigned anywhere (the constructor stores the function set as
        ``self.attr``), so every call raised AttributeError.
        """
        return self.attr
acea9e89d272e08e2cecd40ecad435af3750fbde | 3,460 | py | Python | app.py | nirmalya8/api-flask | 5d262b0230aa3fa20c64b227c67407e1ba677e69 | [
"MIT"
] | null | null | null | app.py | nirmalya8/api-flask | 5d262b0230aa3fa20c64b227c67407e1ba677e69 | [
"MIT"
] | null | null | null | app.py | nirmalya8/api-flask | 5d262b0230aa3fa20c64b227c67407e1ba677e69 | [
"MIT"
] | null | null | null | from flask import Flask, json, jsonify
app = Flask(__name__)
# In-memory "database" for this demo API: the fixed menu, and the list of
# current order lines (take_orders() adds a 'Quantity' key to each line).
menucard = [{'Item' : 'Rice', 'Price':10},{'Item': 'Dal','Price':15},{'Item':'Chicken','Price':20},{'Item':'Mutton', 'Price':25},{'Item':'Fish','Price':20},{'Item':'IceCream','Price':10}]
orders = []
@app.route('/')
def hello_world():
    """Root endpoint: return a JSON greeting with HTTP 200."""
    greeting = jsonify('Hello world!')
    greeting.status_code = 200
    return greeting
@app.route('/showmenu')
def show_menu():
    """Return the whole menu as JSON with HTTP 200."""
    menu_response = jsonify({'Menu': menucard})
    menu_response.status_code = 200
    return menu_response
@app.route('/order/<int:id>', methods=['GET', 'POST'])
def take_orders(id):
    """Order menu item *id*: add a new order line or bump its quantity.

    Returns 404 when *id* is not a valid menu index, otherwise 200 with
    the added/updated item.

    Fix: the original appended the shared ``menucard[id]`` dict itself to
    ``orders`` and set a 'Quantity' key on it, permanently mutating the
    menu entry.  We now order a *copy* and match existing order lines by
    item name instead of dict identity.
    """
    if id >= len(menucard):
        response = jsonify({'Status': 'Not in menu'})
        response.status_code = 404
        return response
    item_name = menucard[id]['Item']
    existing = next((line for line in orders if line['Item'] == item_name), None)
    if existing is None:
        d = dict(menucard[id])  # copy: never mutate the shared menu entry
        d['Quantity'] = 1
        orders.append(d)
        response = jsonify({'Status': 'Added', 'Item': d})
        response.status_code = 200
    else:
        existing['Quantity'] += 1
        response = jsonify({'Status': 'Updated quantity', 'Item': menucard[id]})
        response.status_code = 200
    return response
@app.route('/show', methods=['GET'])
def show_orders():
    """List the current order lines; 404 with a message when empty."""
    if orders:
        listing = jsonify({'Your orders': orders})
        listing.status_code = 200
    else:
        listing = jsonify({'Your orders': 'Haven\'t ordered anything yet'})
        listing.status_code = 404
    return listing
@app.route('/price', methods=['GET'])
def show_price():
    """Return the current orders and their total price.

    404 with a zero price when nothing has been ordered yet; otherwise 200
    with the sum of Price * Quantity over all order lines.
    """
    if not orders:
        response = jsonify({'Orders': 'Haven\'t ordered yet', 'Price': 0})
        response.status_code = 404
    else:
        # sum() over a generator replaces the manual accumulator loop
        total = sum(line['Price'] * line['Quantity'] for line in orders)
        response = jsonify({'Orders': orders, 'Price': total})
        response.status_code = 200
    return response
@app.route('/delete/<int:delid>', methods=['GET', 'POST'])
def delete_order(delid):
    """Decrement the quantity of order line *delid*, dropping it at zero.

    404 when *delid* is not a valid index into the current orders.

    Fix: the original removed items from ``orders`` while iterating over
    it (skips elements) and used a pointless index-scan loop; we decrement
    directly and rebuild the list in place (slice assignment keeps the
    module-level ``orders`` object identity intact for the other views).
    """
    if 0 <= delid < len(orders):
        orders[delid]['Quantity'] -= 1
        orders[:] = [line for line in orders if line['Quantity'] > 0]
        response = jsonify({'Status': 'Successfully Deleted'})
        response.status_code = 200
    else:
        response = jsonify({'Status': 'Wasn\'t in the menu'})
        response.status_code = 404
    return response
@app.route('/additem', methods=['GET', 'PUT'])
def add_to_menu():
    """Add a hard-coded sample item to the menu.

    201 when the item was appended; 400 when an equal item already exists.
    The ``in`` operator performs the same equality scan the original
    flag-and-loop did.
    """
    item = {'Item': 'New Item', 'Price': 15}
    if item not in menucard:
        menucard.append(item)
        response = jsonify({'Status': 'Added', 'Item': item})
        response.status_code = 201
    else:
        response = jsonify({'Status': 'Already There', 'Item': item})
        response.status_code = 400
    return response
@app.route('/delitem', methods=['GET', 'DELETE'])
def delete_from_menu():
    """Remove a hard-coded sample item (the third menu entry) from the menu.

    Fix: the original built the 404 "Not in Menu" response after the loop
    but never returned it, so that path made the view return None and
    Flask raise an error.  Every path now returns a response.

    NOTE: raises IndexError if the menu has fewer than three entries,
    exactly as the original did.
    """
    item = menucard[2]
    if item in menucard:
        menucard.remove(item)
        response = jsonify({'Status': 'Deleted', 'Item': item})
        response.status_code = 200
    else:
        response = jsonify({'Status': 'Not in Menu', 'Item': item})
        response.status_code = 404
    return response
# Start the Flask development server when executed directly.
# debug=True enables the reloader/debugger -- development use only.
if __name__ == "__main__":
    app.run(debug=True)
| 30.350877 | 187 | 0.573988 |
acea9eb59f162b9dcc5610ba51ae8399a47b3b73 | 3,363 | py | Python | archive/tutorial_jy/tutorial_jy/settings.py | junyitt/fch-virus-combat | 8092a6a97034e48f422ea30de16004cf6afb30b0 | [
"MIT"
] | 1 | 2021-07-06T23:23:39.000Z | 2021-07-06T23:23:39.000Z | archive/tutorial_jy/tutorial_jy/settings.py | EnJunChoong/fch-virus-combat | 8092a6a97034e48f422ea30de16004cf6afb30b0 | [
"MIT"
] | null | null | null | archive/tutorial_jy/tutorial_jy/settings.py | EnJunChoong/fch-virus-combat | 8092a6a97034e48f422ea30de16004cf6afb30b0 | [
"MIT"
] | 1 | 2020-06-06T12:17:44.000Z | 2020-06-06T12:17:44.000Z | # -*- coding: utf-8 -*-
# Scrapy settings for tutorial_jy project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'tutorial_jy'
SPIDER_MODULES = ['tutorial_jy.spiders']
NEWSPIDER_MODULE = 'tutorial_jy.spiders'
# Custom project settings: MongoDB endpoint/collection, presumably consumed
# by the MongoPipeline registered in ITEM_PIPELINES below -- verify against
# tutorial_jy/pipelines.py.
MONGO_URI = "localhost:27017"
MONGO_DATABASE = "news"
SEBENARNYA_COLLECTION = "sebenarnya_v1_test2"
RECRAWL_FREQUENCY = 30 # after 30 seconds crawl the same page again
# LIMIT_PAGE = 3
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'tutorial_jy (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'tutorial_jy.middlewares.TutorialJySpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'tutorial_jy.middlewares.TutorialJyDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# Route scraped items through the Mongo pipeline; the integer is the
# pipeline execution order (lower runs first).
ITEM_PIPELINES = {
    # 'tutorial_jy.pipelines.TutorialJyPipeline': 300,
    'tutorial_jy.pipelines.MongoPipeline': 1
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 34.316327 | 103 | 0.77758 |
acea9fd79e4aa5f80ca34e2e7233af4d50c8ff41 | 9,038 | py | Python | kubernetes/client/models/v1_replication_controller_status.py | kevingessner/python | 3f4d09d260cf0839fae8173852c69e0419188454 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_replication_controller_status.py | kevingessner/python | 3f4d09d260cf0839fae8173852c69e0419188454 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_replication_controller_status.py | kevingessner/python | 3f4d09d260cf0839fae8173852c69e0419188454 | [
"Apache-2.0"
] | 1 | 2018-07-19T16:37:20.000Z | 2018-07-19T16:37:20.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.9.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ReplicationControllerStatus(object):
    """Swagger model for the observed status of a ReplicationController.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # Attribute name -> swagger type of that attribute.
    swagger_types = {
        'available_replicas': 'int',
        'conditions': 'list[V1ReplicationControllerCondition]',
        'fully_labeled_replicas': 'int',
        'observed_generation': 'int',
        'ready_replicas': 'int',
        'replicas': 'int'
    }

    # Attribute name -> JSON key used in the API definition.
    attribute_map = {
        'available_replicas': 'availableReplicas',
        'conditions': 'conditions',
        'fully_labeled_replicas': 'fullyLabeledReplicas',
        'observed_generation': 'observedGeneration',
        'ready_replicas': 'readyReplicas',
        'replicas': 'replicas'
    }

    def __init__(self, available_replicas=None, conditions=None, fully_labeled_replicas=None, observed_generation=None, ready_replicas=None, replicas=None):
        """V1ReplicationControllerStatus - a model defined in Swagger.

        ``replicas`` is required (None raises ValueError via its setter);
        every other field is optional and left unset (None) when omitted.
        """
        self._available_replicas = None
        self._conditions = None
        self._fully_labeled_replicas = None
        self._observed_generation = None
        self._ready_replicas = None
        self._replicas = None
        self.discriminator = None

        if available_replicas is not None:
            self.available_replicas = available_replicas
        if conditions is not None:
            self.conditions = conditions
        if fully_labeled_replicas is not None:
            self.fully_labeled_replicas = fully_labeled_replicas
        if observed_generation is not None:
            self.observed_generation = observed_generation
        if ready_replicas is not None:
            self.ready_replicas = ready_replicas
        self.replicas = replicas

    @property
    def available_replicas(self):
        """The number of available replicas (ready for at least
        minReadySeconds) for this replication controller.

        :rtype: int
        """
        return self._available_replicas

    @available_replicas.setter
    def available_replicas(self, available_replicas):
        """Set the number of available replicas."""
        self._available_replicas = available_replicas

    @property
    def conditions(self):
        """Latest available observations of the controller's current state.

        :rtype: list[V1ReplicationControllerCondition]
        """
        return self._conditions

    @conditions.setter
    def conditions(self, conditions):
        """Set the observed condition list."""
        self._conditions = conditions

    @property
    def fully_labeled_replicas(self):
        """The number of pods whose labels match the labels of the pod
        template of the replication controller.

        :rtype: int
        """
        return self._fully_labeled_replicas

    @fully_labeled_replicas.setter
    def fully_labeled_replicas(self, fully_labeled_replicas):
        """Set the number of fully-labelled replicas."""
        self._fully_labeled_replicas = fully_labeled_replicas

    @property
    def observed_generation(self):
        """The generation of the most recently observed replication
        controller.

        :rtype: int
        """
        return self._observed_generation

    @observed_generation.setter
    def observed_generation(self, observed_generation):
        """Set the observed generation."""
        self._observed_generation = observed_generation

    @property
    def ready_replicas(self):
        """The number of ready replicas for this replication controller.

        :rtype: int
        """
        return self._ready_replicas

    @ready_replicas.setter
    def ready_replicas(self, ready_replicas):
        """Set the number of ready replicas."""
        self._ready_replicas = ready_replicas

    @property
    def replicas(self):
        """The most recently observed number of replicas. More info:
        https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller

        :rtype: int
        """
        return self._replicas

    @replicas.setter
    def replicas(self, replicas):
        """Set the replica count; the field is required, None is rejected."""
        if replicas is None:
            raise ValueError("Invalid value for `replicas`, must not be `None`")

        self._replicas = replicas

    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into
        nested models (anything exposing ``to_dict``)."""
        out = {}

        for name, _ in iteritems(self.swagger_types):
            val = getattr(self, name)
            if isinstance(val, list):
                out[name] = [item.to_dict() if hasattr(item, "to_dict") else item
                             for item in val]
            elif hasattr(val, "to_dict"):
                out[name] = val.to_dict()
            elif isinstance(val, dict):
                out[name] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                             for k, v in val.items()}
            else:
                out[name] = val

        return out

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when *other* is the same model type with equal fields."""
        if not isinstance(other, V1ReplicationControllerStatus):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """True when the two objects are not equal."""
        return not self == other
| 33.723881 | 188 | 0.65634 |
aceaa0758af96c4e76af2c4a926a93788b6b1b4e | 13,679 | py | Python | datacube/testutils/__init__.py | Kirill888/datacube-core | 996b395e15f975decb77c0ca9fa0555177674b2f | [
"Apache-2.0"
] | 1 | 2020-04-15T16:10:27.000Z | 2020-04-15T16:10:27.000Z | datacube/testutils/__init__.py | Kirill888/datacube-core | 996b395e15f975decb77c0ca9fa0555177674b2f | [
"Apache-2.0"
] | null | null | null | datacube/testutils/__init__.py | Kirill888/datacube-core | 996b395e15f975decb77c0ca9fa0555177674b2f | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
"""
Useful methods for tests (particularly: reading/writing and checking files)
"""
import atexit
import os
import shutil
import tempfile
import json
import uuid
import numpy as np
import xarray as xr
from datetime import datetime
from collections.abc import Sequence, Mapping
import pathlib
from affine import Affine
from datacube import Datacube
from datacube.model import Measurement
from datacube.utils.dates import mk_time_coord
from datacube.model import Dataset, DatasetType, MetadataType
from datacube.ui.common import get_metadata_path
from datacube.utils import read_documents, SimpleDocNav
from datacube.utils.geometry import GeoBox, CRS
from datacube.model.fields import parse_search_field
# Unique sentinel used by dataset_maker() to distinguish "argument omitted"
# from an explicit None.
_DEFAULT = object()
def assert_file_structure(folder, expected_structure, root=''):
    """
    Assert that the contents of a folder (filenames and subfolder names,
    recursively) match the given nested dictionary structure.

    :type folder: pathlib.Path
    :type expected_structure: dict[str,str|dict]
    """
    wanted = set(expected_structure)
    found = {child.name for child in folder.iterdir()}
    if wanted != found:
        missing_text = 'Missing: %r' % (sorted(wanted - found))
        added_text = 'Extra  : %r' % (sorted(found - wanted))
        raise AssertionError('Folder mismatch of %r\n\t%s\n\t%s' % (root, missing_text, added_text))

    for entry, spec in expected_structure.items():
        ident = '%s/%s' % (root, entry) if root else entry
        child = folder.joinpath(entry)
        if isinstance(spec, Mapping):
            # dict value => subdirectory, recurse into it
            assert child.is_dir(), "%s is not a dir" % (ident,)
            assert_file_structure(child, spec, ident)
        elif isinstance(spec, (str, Sequence)):
            assert child.is_file(), "%s is not a file" % (ident,)
        else:
            assert False, "Only strings|[strings] and dicts expected when defining a folder structure."
def write_files(file_dict):
    """
    Convenience method for writing a bunch of files to a temporary directory.

    Dict format is "filename": "text content"; if a value is another dict,
    it is created recursively as a subdirectory in the same manner.

        write_files({'test.txt': 'contents of text file'})

    :type file_dict: dict
    :rtype: pathlib.Path
    :return: Created temporary directory path
    """
    tmp_dir = tempfile.mkdtemp(suffix='neotestrun')
    _write_files_to_dir(tmp_dir, file_dict)

    def remove_if_exists(path):
        if os.path.exists(path):
            shutil.rmtree(path)

    # best-effort cleanup when the interpreter exits
    atexit.register(remove_if_exists, tmp_dir)
    return pathlib.Path(tmp_dir)
def _write_files_to_dir(directory_path, file_dict):
"""
Convenience method for writing a bunch of files to a given directory.
:type directory_path: str
:type file_dict: dict
"""
for filename, contents in file_dict.items():
path = os.path.join(directory_path, filename)
if isinstance(contents, Mapping):
os.mkdir(path)
_write_files_to_dir(path, contents)
else:
with open(path, 'w') as f:
if isinstance(contents, str):
f.write(contents)
elif isinstance(contents, Sequence):
f.writelines(contents)
else:
raise ValueError('Unexpected file contents: %s' % type(contents))
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """
    Test approximate equality of two floats (PEP 485 semantics).

    Kept for backwards compatibility: the hand-rolled formula this used to
    implement -- abs(a-b) <= max(rel_tol*max(abs(a), abs(b)), abs_tol) --
    is exactly what the standard library provides since Python 3.5, so we
    delegate to :func:`math.isclose`.
    """
    import math
    return math.isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol)
def geobox_to_gridspatial(geobox):
    """Convert a GeoBox into an eo-style ``grid_spatial`` document fragment.

    Returns an empty dict when *geobox* is None, so the result can be
    spliced into a dataset document with ``**``.
    """
    if geobox is None:
        return {}

    left, bottom, right, top = geobox.extent.boundingbox
    corners = {
        "ll": {"x": left, "y": bottom},
        "lr": {"x": right, "y": bottom},
        "ul": {"x": left, "y": top},
        "ur": {"x": right, "y": top},
    }
    return {"grid_spatial": {
        "projection": {"geo_ref_points": corners,
                       "spatial_reference": str(geobox.crs)}}}
def mk_sample_product(name,
                      description='Sample',
                      measurements=('red', 'green', 'blue'),
                      with_grid_spec=False,
                      storage=None):
    """Construct a sample DatasetType ("product") for tests.

    Measurements may be given as plain names, (name, dtype, nodata) tuples,
    or full measurement dicts; missing fields are filled with common
    defaults (int16, nodata=-999, units='1').  When ``with_grid_spec`` is
    True and no ``storage`` is supplied, a default EPSG:3577 / 25m storage
    spec is attached.
    """
    if storage is None and with_grid_spec is True:
        storage = {'crs': 'EPSG:3577',
                   'resolution': {'x': 25, 'y': -25},
                   'tile_size': {'x': 100000.0, 'y': 100000.0}}
    # Minimal 'eo' metadata type: offsets into the dataset document plus a
    # single 'time' search field.
    eo_type = MetadataType({
        'name': 'eo',
        'description': 'Sample',
        'dataset': dict(
            id=['id'],
            label=['ga_label'],
            creation_time=['creation_dt'],
            measurements=['image', 'bands'],
            sources=['lineage', 'source_datasets'],
            format=['format', 'name'],
            grid_spatial=['grid_spatial', 'projection'],
        )
    }, dataset_search_fields={
        'time': parse_search_field({
            'type': 'datetime-range',
            'min_offset': [['time']],
            'max_offset': [['time']],
        }),
    })
    # Defaults merged into every measurement definition.
    common = dict(dtype='int16',
                  nodata=-999,
                  units='1',
                  aliases=[])

    def mk_measurement(m):
        # Normalise the three accepted measurement spellings to a dict.
        if isinstance(m, str):
            return dict(name=m, **common)
        elif isinstance(m, tuple):
            name, dtype, nodata = m
            m = common.copy()
            m.update(name=name, dtype=dtype, nodata=nodata)
            return m
        elif isinstance(m, dict):
            m_merged = common.copy()
            m_merged.update(m)
            return m_merged
        else:
            raise ValueError('Only support str|dict|(name, dtype, nodata)')

    measurements = [mk_measurement(m) for m in measurements]
    definition = dict(
        name=name,
        description=description,
        metadata_type='eo',
        metadata={},
        measurements=measurements
    )
    if storage is not None:
        definition['storage'] = storage
    return DatasetType(eo_type, definition)
def mk_sample_dataset(bands,
                      uri='file:///tmp',
                      product_name='sample',
                      format='GeoTiff',
                      timestamp=None,
                      id='3a1df9e0-8484-44fc-8102-79184eab85dd',
                      geobox=None,
                      product_opts=None):
    """Construct a sample Dataset for tests.

    *bands* is a sequence of measurement dicts; path/layer/band keys go
    into the dataset's image document, dtype/units/nodata/aliases/name
    into the product's measurement definitions.  ``uri`` may be a single
    uri string, a list of uris, or None for no uris.
    """
    # pylint: disable=redefined-builtin
    image_bands_keys = 'path layer band'.split(' ')
    measurement_keys = 'dtype units nodata aliases name'.split(' ')

    def with_keys(d, keys):
        # subset of d restricted to the given keys (missing keys skipped)
        return dict((k, d[k]) for k in keys if k in d)

    measurements = [with_keys(m, measurement_keys) for m in bands]
    image_bands = dict((m['name'], with_keys(m, image_bands_keys)) for m in bands)
    if product_opts is None:
        product_opts = {}
    ds_type = mk_sample_product(product_name,
                                measurements=measurements,
                                **product_opts)
    if timestamp is None:
        timestamp = '2018-06-29'
    if uri is None:
        uris = []
    elif isinstance(uri, list):
        uris = uri.copy()
    else:
        uris = [uri]
    return Dataset(ds_type, {
        'id': id,
        'format': {'name': format},
        'image': {'bands': image_bands},
        'time': timestamp,
        **geobox_to_gridspatial(geobox),
    }, uris=uris)
def make_graph_abcde(node):
    """Build a small 5-node DAG using the supplied ``node`` factory.

    A -> B
    |    |
    |    v
    +--> C -> D
    |
    +--> E

    ``node(name, **edges)`` is called once per vertex; edge keyword names
    are the concatenated endpoint labels (e.g. ``bc=C``).
    """
    node_d = node('D')
    node_e = node('E')
    node_c = node('C', cd=node_d)
    node_b = node('B', bc=node_c)
    node_a = node('A', ab=node_b, ac=node_c, ae=node_e)
    return node_a, node_b, node_c, node_d, node_e
def dataset_maker(idx, t=None):
    """ Return function that generates "dataset documents"

    (name, sources={}, **kwargs) -> dict

    ``idx`` (or None) selects a deterministic label suffix, uuid5 id and
    creation timestamp, so documents are reproducible across runs.
    """
    namespace = uuid.UUID('c0fefefe-2470-3b03-803f-e7599f39ceff')
    suffix = '' if idx is None else '{:04d}'.format(idx)

    if t is None:
        # one day per idx starting at ordinal 736637
        t = datetime.fromordinal(736637 + (0 if idx is None else idx))
    timestamp = t.isoformat()

    def make(name, sources=_DEFAULT, **kwargs):
        if sources is _DEFAULT:
            sources = {}
        labelled = name + suffix
        return dict(id=str(uuid.uuid5(namespace, labelled)),
                    label=labelled,
                    creation_dt=timestamp,
                    n=idx,
                    lineage=dict(source_datasets=sources),
                    **kwargs)

    return make
def gen_dataset_test_dag(idx, t=None, force_tree=False):
    """Build document suitable for consumption by dataset add.

    When ``force_tree`` is True the object graph is pumped through a JSON
    serialise/deserialise round-trip; this converts the DAG into a tree
    (no object sharing, copies instead).
    """
    def mk_node_factory(n, when):
        base = dataset_maker(n, when)

        def node(name, **sources):
            return base(name, product_type=name, sources=sources)

        return node

    root = make_graph_abcde(mk_node_factory(idx, t))[0]
    if force_tree:
        # round-trip through JSON: breaks object sharing, yields a pure tree
        return json.loads(json.dumps(root))
    return root
def load_dataset_definition(path):
    """Load the first dataset document found at *path*.

    *path* may be a str or pathlib.Path; the metadata file is resolved
    via ``get_metadata_path``.  Returns a ``SimpleDocNav`` wrapping the
    first document, or None when the file contains no documents.
    """
    if not isinstance(path, pathlib.Path):
        path = pathlib.Path(path)
    fname = get_metadata_path(path)
    for _, doc in read_documents(fname):
        return SimpleDocNav(doc)  # only the first document is used
def mk_test_image(w, h,
                  dtype='int16',
                  nodata=-999,
                  nodata_width=4):
    """
    Create a 2d ndarray where each pixel value encodes its own coordinates.

    For integer dtypes the x coordinate is packed into the upper half of
    the pixel value and y into the lower half, so for uint16:

       im[y, x] == (x<<8) | y    IF abs(x-y) >= nodata_width
       im[y, x] == nodata        IF abs(x-y) <  nodata_width

    (really: im[y, x] == ((x & 0xFF) << 8) | (y & 0xFF))

    For floating point dtypes:

       im[y, x] = x + ((y % 1024) / 1024)

    Pixels along the diagonal are set to the nodata value; pass
    nodata_width=0 (or nodata=None) to disable that.
    """
    dtype = np.dtype(dtype)
    xx, yy = np.meshgrid(np.arange(w), np.arange(h))
    if dtype.kind == 'f':
        im = xx.astype(dtype) + (yy.astype(dtype) % 1024.0) / 1024.0
    else:
        half_bits = dtype.itemsize * 4  # == itemsize*8 // 2
        low_mask = (1 << half_bits) - 1
        im = (((xx & low_mask) << half_bits) | (yy & low_mask)).astype(dtype)
    if nodata is not None:
        # blank a diagonal band where |x - y| < nodata_width
        im[abs(xx - yy) < nodata_width] = nodata
    return im
def split_test_image(aa):
    """
    Separate an image created by mk_test_image back into (x, y) components.
    """
    if aa.dtype.kind == 'f':
        # fractional part encodes y/1024, integer part encodes x
        yy = np.round((aa % 1) * 1024)
        xx = np.floor(aa)
    else:
        half_bits = (aa.dtype.itemsize * 8) // 2
        low_mask = (1 << half_bits) - 1
        yy = aa & low_mask
        xx = aa >> half_bits
    return xx, yy
def gen_tiff_dataset(bands,
                     base_folder,
                     prefix='',
                     timestamp='2018-07-19',
                     **kwargs):
    """Write bands to GeoTIFF files and build a matching sample Dataset.

       each band:
         .name   - string
         .values - ndarray
         .nodata - numeric|None

    Extra ``**kwargs`` are forwarded to ``write_gtiff``.

    :returns: (Dataset, GeoBox)
    """
    from .io import write_gtiff
    from pathlib import Path
    if not isinstance(bands, Sequence):
        bands = (bands,)
    # write arrays to disk and construct compatible measurement definitions
    gbox = None
    mm = []
    for band in bands:
        name = band.name
        fname = prefix + name + '.tiff'
        meta = write_gtiff(base_folder/fname, band.values,
                           nodata=band.nodata,
                           overwrite=True,
                           **kwargs)
        gbox = meta.gbox  # NOTE: last band's geobox wins; bands assumed aligned
        mm.append(dict(name=name,
                       path=fname,
                       layer=1,
                       dtype=meta.dtype))
    uri = Path(base_folder/'metadata.yaml').absolute().as_uri()
    ds = mk_sample_dataset(mm,
                           uri=uri,
                           timestamp=timestamp,
                           geobox=gbox)
    return ds, gbox
def mk_sample_xr_dataset(crs="EPSG:3578",
                         shape=(33, 74),
                         resolution=None,
                         xy=(0, 0),
                         time='2020-02-13T11:12:13.1234567Z',
                         name='band',
                         dtype='int16',
                         nodata=-999,
                         units='1'):
    """ Note that resolution is in Y,X order to match that of GeoBox.

        shape (height, width)
        resolution (y: float, x: float) - in YX, to match GeoBox/shape notation

        xy (x: float, y: float) -- location of the top-left corner of the top-left pixel in CRS units

        Returns the storage created by ``Datacube.create_storage`` with a
        single measurement; pass ``time=None`` to omit the time coordinate.
    """
    if isinstance(crs, str):
        crs = CRS(crs)
    if resolution is None:
        # default pixel size: 10 units for projected CRSs, 0.01 degrees otherwise
        resolution = (-10, 10) if crs is None or crs.projected else (-0.01, 0.01)
    t_coords = {}
    if time is not None:
        t_coords['time'] = mk_time_coord([time])
    # Affine.scale expects (x, y), resolution is (y, x) -- hence the [::-1]
    transform = Affine.translation(*xy)*Affine.scale(*resolution[::-1])
    h, w = shape
    geobox = GeoBox(w, h, transform, crs)
    return Datacube.create_storage(t_coords, geobox, [Measurement(name=name, dtype=dtype, nodata=nodata, units=units)])
def remove_crs(xx):
    """Strip CRS information from an xarray object.

    Drops the ``spatial_ref`` coordinate and removes the ``crs`` attribute
    from the object itself, from every coordinate, and (for Datasets) from
    every data variable. Returns the cleaned object.
    """
    out = xx.reset_coords(['spatial_ref'], drop=True)
    out.attrs.pop('crs', None)

    for coord in out.coords.values():
        coord.attrs.pop('crs', None)

    if isinstance(out, xr.Dataset):
        for dv in out.data_vars.values():
            dv.attrs.pop('crs', None)

    return out
| 29.608225 | 119 | 0.560494 |
aceaa0dff5388f86b46902071065baf25c9dc800 | 61 | py | Python | shoottikala/__init__.py | conikuvat/shootti-ilmo | bf5ab15e20173994bac25e6b5cd3aec42f671f05 | [
"MIT"
] | null | null | null | shoottikala/__init__.py | conikuvat/shootti-ilmo | bf5ab15e20173994bac25e6b5cd3aec42f671f05 | [
"MIT"
] | 9 | 2017-02-15T20:36:49.000Z | 2017-05-26T12:10:43.000Z | shoottikala/__init__.py | conikuvat/shootti-ilmo | bf5ab15e20173994bac25e6b5cd3aec42f671f05 | [
"MIT"
] | null | null | null | default_app_config = 'shoottikala.apps.ShoottikalaAppConfig'
| 30.5 | 60 | 0.868852 |
aceaa131f155553f79eb31caa8715ef74f27f676 | 126,874 | py | Python | third-party/llvm/llvm-src/tools/clang/bindings/python/clang/cindex.py | jhh67/chapel | f041470e9b88b5fc4914c75aa5a37efcb46aa08f | [
"ECL-2.0",
"Apache-2.0"
] | 1,602 | 2015-01-06T11:26:31.000Z | 2022-03-30T06:17:21.000Z | third-party/llvm/llvm-src/tools/clang/bindings/python/clang/cindex.py | jhh67/chapel | f041470e9b88b5fc4914c75aa5a37efcb46aa08f | [
"ECL-2.0",
"Apache-2.0"
] | 11,789 | 2015-01-05T04:50:15.000Z | 2022-03-31T23:39:19.000Z | third-party/llvm/llvm-src/tools/clang/bindings/python/clang/cindex.py | jhh67/chapel | f041470e9b88b5fc4914c75aa5a37efcb46aa08f | [
"ECL-2.0",
"Apache-2.0"
] | 498 | 2015-01-08T18:58:18.000Z | 2022-03-20T15:37:45.000Z | #===- cindex.py - Python Indexing Library Bindings -----------*- python -*--===#
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===------------------------------------------------------------------------===#
r"""
Clang Indexing Library Bindings
===============================
This module provides an interface to the Clang indexing library. It is a
low-level interface to the indexing library which attempts to match the Clang
API directly while also being "pythonic". Notable differences from the C API
are:
* string results are returned as Python strings, not CXString objects.
* null cursors are translated to None.
* access to child cursors is done via iteration, not visitation.
The major indexing objects are:
Index
The top-level object which manages some global library state.
TranslationUnit
High-level object encapsulating the AST for a single translation unit. These
can be loaded from .ast files or parsed on the fly.
Cursor
Generic object for representing a node in the AST.
SourceRange, SourceLocation, and File
Objects representing information about the input source.
Most object information is exposed using properties, when the underlying API
call is efficient.
"""
from __future__ import absolute_import, division, print_function
# TODO
# ====
#
# o API support for invalid translation units. Currently we can't even get the
# diagnostics on failure because they refer to locations in an object that
# will have been invalidated.
#
# o fix memory management issues (currently client must hold on to index and
# translation unit, or risk crashes).
#
# o expose code completion APIs.
#
# o cleanup ctypes wrapping, would be nice to separate the ctypes details more
# clearly, and hide from the external interface (i.e., help(cindex)).
#
# o implement additional SourceLocation, SourceRange, and File methods.
from ctypes import *
import clang.enumerations
import os
import sys
if sys.version_info[0] == 3:
    # Python 3 strings are unicode, translate them to/from utf8 for C-interop.
    class c_interop_string(c_char_p):
        # ctypes c_char_p subclass that transparently encodes/decodes utf8 so
        # the rest of the module can traffic in native Python 3 str objects.
        def __init__(self, p=None):
            if p is None:
                p = ""
            if isinstance(p, str):
                p = p.encode("utf8")
            super(c_char_p, self).__init__(p)
        def __str__(self):
            return self.value
        @property
        def value(self):
            # A NULL char* maps to None rather than raising.
            if super(c_char_p, self).value is None:
                return None
            return super(c_char_p, self).value.decode("utf8")
        @classmethod
        def from_param(cls, param):
            # ctypes conversion hook: accept str, bytes or None from callers.
            if isinstance(param, str):
                return cls(param)
            if isinstance(param, bytes):
                return cls(param)
            if param is None:
                # Support passing null to C functions expecting char arrays
                return None
            raise TypeError("Cannot convert '{}' to '{}'".format(type(param).__name__, cls.__name__))
        @staticmethod
        def to_python_string(x, *args):
            # Unwrap a C string result to a Python str (presumably installed
            # as a ctypes errcheck/result handler — verify at call sites).
            return x.value
    def b(x):
        # Return x as utf8-encoded bytes (no-op if already bytes).
        if isinstance(x, bytes):
            return x
        return x.encode('utf8')
elif sys.version_info[0] == 2:
    # Python 2 strings are utf8 byte strings, no translation is needed for
    # C-interop.
    c_interop_string = c_char_p
    def _to_python_string(x, *args):
        # Identity: Python 2 strings are already byte strings.
        return x
    c_interop_string.to_python_string = staticmethod(_to_python_string)
    def b(x):
        # Identity on Python 2; mirrors the Python 3 helper above.
        return x
# Importing ABC-s directly from collections is deprecated since Python 3.7,
# will stop working in Python 3.8.
# See: https://docs.python.org/dev/whatsnew/3.7.html#id3
if sys.version_info[:2] >= (3, 7):
    from collections import abc as collections_abc
else:
    import collections as collections_abc
# We only support PathLike objects on Python version with os.fspath present
# to be consistent with the Python standard library. On older Python versions
# we only support strings and we have dummy fspath to just pass them through.
try:
    fspath = os.fspath
except AttributeError:
    def fspath(x):
        # os.fspath is unavailable (pre-3.6): pass plain strings through.
        return x
# ctypes doesn't implicitly convert c_void_p to the appropriate wrapper
# object. This is a problem, because it means that from_parameter will see an
# integer and pass the wrong value on platforms where int != void*. Work around
# this by marshalling object arguments as void**.
c_object_p = POINTER(c_void_p)
# Shared registry of ctypes callback types; populated elsewhere in the module
# (outside this chunk).
callbacks = {}
class TranslationUnitLoadError(Exception):
    """Raised when a TranslationUnit could not be created.

    libclang reported a failure while loading/parsing the translation unit;
    no further detail is available from the library.

    FIXME: Make libclang expose additional error information in this scenario.
    """
class TranslationUnitSaveError(Exception):
    """Raised when saving a TranslationUnit fails.

    The enumerated cause of the failure is stored on the instance as
    ``save_error`` and can be compared against the ERROR_ constants below.
    """

    # An unknown error occurred, typically an I/O failure during save.
    ERROR_UNKNOWN = 1

    # Errors during translation prevented saving; see the unit's diagnostics.
    ERROR_TRANSLATION_ERRORS = 2

    # The translation unit itself was somehow invalid.
    ERROR_INVALID_TU = 3

    def __init__(self, enumeration, message):
        assert isinstance(enumeration, int)

        if not 1 <= enumeration <= 3:
            raise Exception("Encountered undefined TranslationUnit save error "
                            "constant: %d. Please file a bug to have this "
                            "value supported." % enumeration)

        self.save_error = enumeration
        Exception.__init__(self, 'Error %d: %s' % (enumeration, message))
### Structures and Utility Classes ###
class CachedProperty(object):
    """Decorator that lazy-loads the value of a property.

    The first time the property is accessed, the original property function
    is executed. The value it returns is set as the new value of that
    instance's property, replacing the original method.
    """

    def __init__(self, wrapped):
        self.wrapped = wrapped
        try:
            self.__doc__ = wrapped.__doc__
        except AttributeError:
            # Not every callable exposes __doc__; the docstring is purely
            # cosmetic, so skip it. (Was a bare `except:`, which would also
            # swallow KeyboardInterrupt/SystemExit — narrowed per PEP 8.)
            pass

    def __get__(self, instance, instance_type=None):
        # Accessed through the class rather than an instance: return the
        # descriptor itself (mirrors the behavior of `property`).
        if instance is None:
            return self

        value = self.wrapped(instance)
        # Shadow the descriptor with the computed value on the instance so
        # subsequent lookups bypass __get__ entirely.
        setattr(instance, self.wrapped.__name__, value)

        return value
class _CXString(Structure):
    """Helper for transforming CXString results."""
    _fields_ = [("spelling", c_char_p), ("free", c_int)]
    def __del__(self):
        # Release the CXString's backing storage via libclang.
        conf.lib.clang_disposeString(self)
    @staticmethod
    def from_result(res, fn=None, args=None):
        # Result-conversion hook: unwrap a CXString return value into a
        # Python string via clang_getCString.
        assert isinstance(res, _CXString)
        return conf.lib.clang_getCString(res)
class SourceLocation(Structure):
    """
    A SourceLocation represents a particular location within a source file.
    """
    _fields_ = [("ptr_data", c_void_p * 2), ("int_data", c_uint)]
    # Cached (File|None, line, column, offset) tuple, filled lazily.
    _data = None
    def _get_instantiation(self):
        # Expand this location via libclang once and cache the result; the
        # underlying location data is immutable.
        if self._data is None:
            f, l, c, o = c_object_p(), c_uint(), c_uint(), c_uint()
            conf.lib.clang_getInstantiationLocation(self, byref(f), byref(l),
                                                    byref(c), byref(o))
            if f:
                f = File(f)
            else:
                # NULL file pointer: this location is not tied to a file.
                f = None
            self._data = (f, int(l.value), int(c.value), int(o.value))
        return self._data
    @staticmethod
    def from_position(tu, file, line, column):
        """
        Retrieve the source location associated with a given file/line/column in
        a particular translation unit.
        """
        return conf.lib.clang_getLocation(tu, file, line, column)
    @staticmethod
    def from_offset(tu, file, offset):
        """Retrieve a SourceLocation from a given character offset.
        tu -- TranslationUnit file belongs to
        file -- File instance to obtain offset from
        offset -- Integer character offset within file
        """
        return conf.lib.clang_getLocationForOffset(tu, file, offset)
    @property
    def file(self):
        """Get the file represented by this source location."""
        return self._get_instantiation()[0]
    @property
    def line(self):
        """Get the line represented by this source location."""
        return self._get_instantiation()[1]
    @property
    def column(self):
        """Get the column represented by this source location."""
        return self._get_instantiation()[2]
    @property
    def offset(self):
        """Get the file offset represented by this source location."""
        return self._get_instantiation()[3]
    def __eq__(self, other):
        # Equality is delegated to libclang rather than comparing raw fields.
        return conf.lib.clang_equalLocations(self, other)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __repr__(self):
        if self.file:
            filename = self.file.name
        else:
            filename = None
        return "<SourceLocation file %r, line %r, column %r>" % (
            filename, self.line, self.column)
class SourceRange(Structure):
    """
    A SourceRange describes a range of source locations within the source
    code.
    """
    _fields_ = [
        ("ptr_data", c_void_p * 2),
        ("begin_int_data", c_uint),
        ("end_int_data", c_uint)]
    # FIXME: Eliminate this and make normal constructor? Requires hiding ctypes
    # object.
    @staticmethod
    def from_locations(start, end):
        # Build a SourceRange spanning two SourceLocations via libclang.
        return conf.lib.clang_getRange(start, end)
    @property
    def start(self):
        """
        Return a SourceLocation representing the first character within a
        source range.
        """
        return conf.lib.clang_getRangeStart(self)
    @property
    def end(self):
        """
        Return a SourceLocation representing the last character within a
        source range.
        """
        return conf.lib.clang_getRangeEnd(self)
    def __eq__(self, other):
        return conf.lib.clang_equalRanges(self, other)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __contains__(self, other):
        """Useful to detect the Token/Lexer bug"""
        # Only SourceLocations can be contained in a range.
        if not isinstance(other, SourceLocation):
            return False
        # NOTE(review): if exactly one of other.file / self.start.file is
        # None, the .name accesses below raise AttributeError — confirm
        # callers never mix file-less and file-backed locations.
        if other.file is None and self.start.file is None:
            pass
        elif ( self.start.file.name != other.file.name or
               other.file.name != self.end.file.name):
            # same file name
            return False
        # same file, in between lines
        if self.start.line < other.line < self.end.line:
            return True
        elif self.start.line == other.line:
            # same file first line
            # NOTE(review): for a single-line range (start.line == end.line)
            # this branch does not also check other.column <= end.column, so
            # locations past the range end on that line appear contained —
            # verify this is intended.
            if self.start.column <= other.column:
                return True
        elif other.line == self.end.line:
            # same file last line
            if other.column <= self.end.column:
                return True
        return False
    def __repr__(self):
        return "<SourceRange start %r, end %r>" % (self.start, self.end)
class Diagnostic(object):
    """
    A Diagnostic is a single instance of a Clang diagnostic. It includes the
    diagnostic severity, the message, the location the diagnostic occurred, as
    well as additional source ranges and associated fix-it hints.
    """
    # Severity levels returned by the `severity` property.
    Ignored = 0
    Note = 1
    Warning = 2
    Error = 3
    Fatal = 4
    # Flags accepted by format(); combinable with bitwise OR.
    DisplaySourceLocation = 0x01
    DisplayColumn = 0x02
    DisplaySourceRanges = 0x04
    DisplayOption = 0x08
    DisplayCategoryId = 0x10
    DisplayCategoryName = 0x20
    # Union of all valid Display* flags, used to validate format() options.
    _FormatOptionsMask = 0x3f
    def __init__(self, ptr):
        # ptr is the underlying CXDiagnostic handle; released in __del__.
        self.ptr = ptr
    def __del__(self):
        conf.lib.clang_disposeDiagnostic(self)
    @property
    def severity(self):
        """Severity level; compare against the constants defined above."""
        return conf.lib.clang_getDiagnosticSeverity(self)
    @property
    def location(self):
        """SourceLocation at which the diagnostic was reported."""
        return conf.lib.clang_getDiagnosticLocation(self)
    @property
    def spelling(self):
        """The diagnostic's message text."""
        return conf.lib.clang_getDiagnosticSpelling(self)
    @property
    def ranges(self):
        """Sequence-like accessor for the source ranges of this diagnostic."""
        class RangeIterator(object):
            def __init__(self, diag):
                self.diag = diag
            def __len__(self):
                return int(conf.lib.clang_getDiagnosticNumRanges(self.diag))
            def __getitem__(self, key):
                if (key >= len(self)):
                    raise IndexError
                return conf.lib.clang_getDiagnosticRange(self.diag, key)
        return RangeIterator(self)
    @property
    def fixits(self):
        """Sequence-like accessor for the FixIt hints of this diagnostic."""
        class FixItIterator(object):
            def __init__(self, diag):
                self.diag = diag
            def __len__(self):
                return int(conf.lib.clang_getDiagnosticNumFixIts(self.diag))
            def __getitem__(self, key):
                range = SourceRange()
                value = conf.lib.clang_getDiagnosticFixIt(self.diag, key,
                                                          byref(range))
                # An empty replacement string is treated as "no such fix-it".
                if len(value) == 0:
                    raise IndexError
                return FixIt(range, value)
        return FixItIterator(self)
    @property
    def children(self):
        """Sequence-like accessor for the child diagnostics of this one."""
        class ChildDiagnosticsIterator(object):
            def __init__(self, diag):
                self.diag_set = conf.lib.clang_getChildDiagnostics(diag)
            def __len__(self):
                return int(conf.lib.clang_getNumDiagnosticsInSet(self.diag_set))
            def __getitem__(self, key):
                diag = conf.lib.clang_getDiagnosticInSet(self.diag_set, key)
                if not diag:
                    raise IndexError
                return Diagnostic(diag)
        return ChildDiagnosticsIterator(self)
    @property
    def category_number(self):
        """The category number for this diagnostic or 0 if unavailable."""
        return conf.lib.clang_getDiagnosticCategory(self)
    @property
    def category_name(self):
        """The string name of the category for this diagnostic."""
        return conf.lib.clang_getDiagnosticCategoryText(self)
    @property
    def option(self):
        """The command-line option that enables this diagnostic."""
        return conf.lib.clang_getDiagnosticOption(self, None)
    @property
    def disable_option(self):
        """The command-line option that disables this diagnostic."""
        disable = _CXString()
        conf.lib.clang_getDiagnosticOption(self, byref(disable))
        return _CXString.from_result(disable)
    def format(self, options=None):
        """
        Format this diagnostic for display. The options argument takes
        Diagnostic.Display* flags, which can be combined using bitwise OR. If
        the options argument is not provided, the default display options will
        be used.
        """
        if options is None:
            options = conf.lib.clang_defaultDiagnosticDisplayOptions()
        # Reject flags outside the documented Display* set.
        if options & ~Diagnostic._FormatOptionsMask:
            raise ValueError('Invalid format options')
        return conf.lib.clang_formatDiagnostic(self, options)
    def __repr__(self):
        return "<Diagnostic severity %r, location %r, spelling %r>" % (
            self.severity, self.location, self.spelling)
    def __str__(self):
        return self.format()
    def from_param(self):
        # ctypes conversion hook: pass the raw CXDiagnostic pointer to C.
        return self.ptr
class FixIt(object):
    """A single fix-it hint attached to a diagnostic.

    The hint is applied by replacing the source text covered by `range`
    with `value`.
    """

    def __init__(self, range, value):
        self.range = range
        self.value = value

    def __repr__(self):
        return "<FixIt range {!r}, value {!r}>".format(self.range, self.value)
class TokenGroup(object):
    """Helper class to facilitate token management.
    Tokens are allocated from libclang in chunks. They must be disposed of as a
    collective group.
    One purpose of this class is for instances to represent groups of allocated
    tokens. Each token in a group contains a reference back to an instance of
    this class. When all tokens from a group are garbage collected, it allows
    this class to be garbage collected. When this class is garbage collected,
    it calls the libclang destructor which invalidates all tokens in the group.
    You should not instantiate this class outside of this module.
    """
    def __init__(self, tu, memory, count):
        self._tu = tu
        self._memory = memory
        self._count = count
    def __del__(self):
        # Free the whole chunk of token memory in a single libclang call.
        conf.lib.clang_disposeTokens(self._tu, self._memory, self._count)
    @staticmethod
    def get_tokens(tu, extent):
        """Helper method to return all tokens in an extent.
        This functionality is needed multiple places in this module. We define
        it here because it seems like a logical place.
        """
        tokens_memory = POINTER(Token)()
        tokens_count = c_uint()
        conf.lib.clang_tokenize(tu, extent, byref(tokens_memory),
                                byref(tokens_count))
        count = int(tokens_count.value)
        # If we get no tokens, no memory was allocated. Be sure not to return
        # anything and potentially call a destructor on nothing.
        if count < 1:
            return
        tokens_array = cast(tokens_memory, POINTER(Token * count)).contents
        token_group = TokenGroup(tu, tokens_memory, tokens_count)
        for i in range(0, count):
            token = Token()
            token.int_data = tokens_array[i].int_data
            token.ptr_data = tokens_array[i].ptr_data
            token._tu = tu
            # Back-reference keeps the TokenGroup (and the underlying token
            # memory) alive for as long as any yielded token is alive.
            token._group = token_group
            yield token
class TokenKind(object):
    """Describes a specific type of a Token."""

    _value_map = {}  # int -> TokenKind

    def __init__(self, value, name):
        """Create a new TokenKind instance from a numeric value and a name."""
        self.value = value
        self.name = name

    def __repr__(self):
        return 'TokenKind.%s' % (self.name,)

    @staticmethod
    def from_value(value):
        """Obtain a registered TokenKind instance from its value."""
        kind = TokenKind._value_map.get(value)
        if kind is None:
            raise ValueError('Unknown TokenKind: %d' % value)
        return kind

    @staticmethod
    def register(value, name):
        """Register a new TokenKind enumeration.

        Intended to be called only at module load time by code within this
        package; the kind also becomes available as TokenKind.<name>.
        """
        if value in TokenKind._value_map:
            raise ValueError('TokenKind already registered: %d' % value)

        kind = TokenKind(value, name)
        TokenKind._value_map[value] = kind
        setattr(TokenKind, name, kind)
### Cursor Kinds ###
class BaseEnumeration(object):
    """
    Common base class for named enumerations held in sync with Index.h values.

    Subclasses must define their own _kinds and _name_map members, as:

        _kinds = []
        _name_map = None

    These values hold the per-subclass instances and value-to-name mappings,
    respectively.
    """

    def __init__(self, value):
        cls = self.__class__
        # Grow the registry so that `value` is always a valid index.
        if value >= len(cls._kinds):
            cls._kinds += [None] * (value - len(cls._kinds) + 1)
        if cls._kinds[value] is not None:
            raise ValueError('{0} value {1} already loaded'.format(
                str(cls), value))
        self.value = value
        cls._kinds[value] = self
        # Invalidate the cached reverse mapping; it is rebuilt lazily.
        cls._name_map = None

    def from_param(self):
        # ctypes conversion hook: marshal as the underlying integer value.
        return self.value

    @property
    def name(self):
        """Get the enumeration name of this cursor kind."""
        if self._name_map is None:
            # Build a reverse map from the registered class attributes.
            self._name_map = {
                member: attr
                for attr, member in self.__class__.__dict__.items()
                if isinstance(member, self.__class__)
            }
        return self._name_map[self]

    @classmethod
    def from_id(cls, id):
        """Look up a registered enumeration instance by its numeric value."""
        if id >= len(cls._kinds) or cls._kinds[id] is None:
            raise ValueError('Unknown template argument kind %d' % id)
        return cls._kinds[id]

    def __repr__(self):
        return '%s.%s' % (self.__class__, self.name,)
class CursorKind(BaseEnumeration):
    """
    A CursorKind describes the kind of entity that a cursor points to.
    """

    # The required BaseEnumeration declarations.
    _kinds = []
    _name_map = None

    @staticmethod
    def get_all_kinds():
        """Return all CursorKind enumeration instances."""
        # Unpopulated registry slots hold None; skip them.
        # (Idiom fix: `x is not None` instead of `not x is None`, per PEP 8.)
        return [x for x in CursorKind._kinds if x is not None]

    def is_declaration(self):
        """Test if this is a declaration kind."""
        return conf.lib.clang_isDeclaration(self)

    def is_reference(self):
        """Test if this is a reference kind."""
        return conf.lib.clang_isReference(self)

    def is_expression(self):
        """Test if this is an expression kind."""
        return conf.lib.clang_isExpression(self)

    def is_statement(self):
        """Test if this is a statement kind."""
        return conf.lib.clang_isStatement(self)

    def is_attribute(self):
        """Test if this is an attribute kind."""
        return conf.lib.clang_isAttribute(self)

    def is_invalid(self):
        """Test if this is an invalid kind."""
        return conf.lib.clang_isInvalid(self)

    def is_translation_unit(self):
        """Test if this is a translation unit kind."""
        return conf.lib.clang_isTranslationUnit(self)

    def is_preprocessing(self):
        """Test if this is a preprocessing kind."""
        return conf.lib.clang_isPreprocessing(self)

    def is_unexposed(self):
        """Test if this is an unexposed kind."""
        return conf.lib.clang_isUnexposed(self)

    def __repr__(self):
        return 'CursorKind.%s' % (self.name,)
###
# Declaration Kinds
# A declaration whose specific kind is not exposed via this interface.
#
# Unexposed declarations have the same operations as any other kind of
# declaration; one can extract their location information, spelling, find their
# definitions, etc. However, the specific kind of the declaration is not
# reported.
CursorKind.UNEXPOSED_DECL = CursorKind(1)
# A C or C++ struct.
CursorKind.STRUCT_DECL = CursorKind(2)
# A C or C++ union.
CursorKind.UNION_DECL = CursorKind(3)
# A C++ class.
CursorKind.CLASS_DECL = CursorKind(4)
# An enumeration.
CursorKind.ENUM_DECL = CursorKind(5)
# A field (in C) or non-static data member (in C++) in a struct, union, or C++
# class.
CursorKind.FIELD_DECL = CursorKind(6)
# An enumerator constant.
CursorKind.ENUM_CONSTANT_DECL = CursorKind(7)
# A function.
CursorKind.FUNCTION_DECL = CursorKind(8)
# A variable.
CursorKind.VAR_DECL = CursorKind(9)
# A function or method parameter.
CursorKind.PARM_DECL = CursorKind(10)
# An Objective-C @interface.
CursorKind.OBJC_INTERFACE_DECL = CursorKind(11)
# An Objective-C @interface for a category.
CursorKind.OBJC_CATEGORY_DECL = CursorKind(12)
# An Objective-C @protocol declaration.
CursorKind.OBJC_PROTOCOL_DECL = CursorKind(13)
# An Objective-C @property declaration.
CursorKind.OBJC_PROPERTY_DECL = CursorKind(14)
# An Objective-C instance variable.
CursorKind.OBJC_IVAR_DECL = CursorKind(15)
# An Objective-C instance method.
CursorKind.OBJC_INSTANCE_METHOD_DECL = CursorKind(16)
# An Objective-C class method.
CursorKind.OBJC_CLASS_METHOD_DECL = CursorKind(17)
# An Objective-C @implementation.
CursorKind.OBJC_IMPLEMENTATION_DECL = CursorKind(18)
# An Objective-C @implementation for a category.
CursorKind.OBJC_CATEGORY_IMPL_DECL = CursorKind(19)
# A typedef.
CursorKind.TYPEDEF_DECL = CursorKind(20)
# A C++ class method.
CursorKind.CXX_METHOD = CursorKind(21)
# A C++ namespace.
CursorKind.NAMESPACE = CursorKind(22)
# A linkage specification, e.g. 'extern "C"'.
CursorKind.LINKAGE_SPEC = CursorKind(23)
# A C++ constructor.
CursorKind.CONSTRUCTOR = CursorKind(24)
# A C++ destructor.
CursorKind.DESTRUCTOR = CursorKind(25)
# A C++ conversion function.
CursorKind.CONVERSION_FUNCTION = CursorKind(26)
# A C++ template type parameter
CursorKind.TEMPLATE_TYPE_PARAMETER = CursorKind(27)
# A C++ non-type template parameter.
CursorKind.TEMPLATE_NON_TYPE_PARAMETER = CursorKind(28)
# A C++ template template parameter.
CursorKind.TEMPLATE_TEMPLATE_PARAMETER = CursorKind(29)
# A C++ function template.
CursorKind.FUNCTION_TEMPLATE = CursorKind(30)
# A C++ class template.
CursorKind.CLASS_TEMPLATE = CursorKind(31)
# A C++ class template partial specialization.
CursorKind.CLASS_TEMPLATE_PARTIAL_SPECIALIZATION = CursorKind(32)
# A C++ namespace alias declaration.
CursorKind.NAMESPACE_ALIAS = CursorKind(33)
# A C++ using directive
CursorKind.USING_DIRECTIVE = CursorKind(34)
# A C++ using declaration
CursorKind.USING_DECLARATION = CursorKind(35)
# A Type alias decl.
CursorKind.TYPE_ALIAS_DECL = CursorKind(36)
# A Objective-C synthesize decl
CursorKind.OBJC_SYNTHESIZE_DECL = CursorKind(37)
# A Objective-C dynamic decl
CursorKind.OBJC_DYNAMIC_DECL = CursorKind(38)
# A C++ access specifier decl.
CursorKind.CXX_ACCESS_SPEC_DECL = CursorKind(39)
###
# Reference Kinds
CursorKind.OBJC_SUPER_CLASS_REF = CursorKind(40)
CursorKind.OBJC_PROTOCOL_REF = CursorKind(41)
CursorKind.OBJC_CLASS_REF = CursorKind(42)
# A reference to a type declaration.
#
# A type reference occurs anywhere where a type is named but not
# declared. For example, given:
# typedef unsigned size_type;
# size_type size;
#
# The typedef is a declaration of size_type (CXCursor_TypedefDecl),
# while the type of the variable "size" is referenced. The cursor
# referenced by the type of size is the typedef for size_type.
CursorKind.TYPE_REF = CursorKind(43)
CursorKind.CXX_BASE_SPECIFIER = CursorKind(44)
# A reference to a class template, function template, template
# template parameter, or class template partial specialization.
CursorKind.TEMPLATE_REF = CursorKind(45)
# A reference to a namespace or namepsace alias.
CursorKind.NAMESPACE_REF = CursorKind(46)
# A reference to a member of a struct, union, or class that occurs in
# some non-expression context, e.g., a designated initializer.
CursorKind.MEMBER_REF = CursorKind(47)
# A reference to a labeled statement.
CursorKind.LABEL_REF = CursorKind(48)
# A reference to a set of overloaded functions or function templates
# that has not yet been resolved to a specific function or function template.
CursorKind.OVERLOADED_DECL_REF = CursorKind(49)
# A reference to a variable that occurs in some non-expression
# context, e.g., a C++ lambda capture list.
CursorKind.VARIABLE_REF = CursorKind(50)
###
# Invalid/Error Kinds
CursorKind.INVALID_FILE = CursorKind(70)
CursorKind.NO_DECL_FOUND = CursorKind(71)
CursorKind.NOT_IMPLEMENTED = CursorKind(72)
CursorKind.INVALID_CODE = CursorKind(73)
###
# Expression Kinds
# An expression whose specific kind is not exposed via this interface.
#
# Unexposed expressions have the same operations as any other kind of
# expression; one can extract their location information, spelling, children,
# etc. However, the specific kind of the expression is not reported.
CursorKind.UNEXPOSED_EXPR = CursorKind(100)
# An expression that refers to some value declaration, such as a function,
# variable, or enumerator.
CursorKind.DECL_REF_EXPR = CursorKind(101)
# An expression that refers to a member of a struct, union, class, Objective-C
# class, etc.
CursorKind.MEMBER_REF_EXPR = CursorKind(102)
# An expression that calls a function.
CursorKind.CALL_EXPR = CursorKind(103)
# An expression that sends a message to an Objective-C object or class.
CursorKind.OBJC_MESSAGE_EXPR = CursorKind(104)
# An expression that represents a block literal.
CursorKind.BLOCK_EXPR = CursorKind(105)
# An integer literal.
CursorKind.INTEGER_LITERAL = CursorKind(106)
# A floating point number literal.
CursorKind.FLOATING_LITERAL = CursorKind(107)
# An imaginary number literal.
CursorKind.IMAGINARY_LITERAL = CursorKind(108)
# A string literal.
CursorKind.STRING_LITERAL = CursorKind(109)
# A character literal.
CursorKind.CHARACTER_LITERAL = CursorKind(110)
# A parenthesized expression, e.g. "(1)".
#
# This AST node is only formed if full location information is requested.
CursorKind.PAREN_EXPR = CursorKind(111)
# This represents the unary-expression's (except sizeof and
# alignof).
CursorKind.UNARY_OPERATOR = CursorKind(112)
# [C99 6.5.2.1] Array Subscripting.
CursorKind.ARRAY_SUBSCRIPT_EXPR = CursorKind(113)
# A builtin binary operation expression such as "x + y" or
# "x <= y".
CursorKind.BINARY_OPERATOR = CursorKind(114)
# Compound assignment such as "+=".
CursorKind.COMPOUND_ASSIGNMENT_OPERATOR = CursorKind(115)
# The ?: ternary operator.
CursorKind.CONDITIONAL_OPERATOR = CursorKind(116)
# An explicit cast in C (C99 6.5.4) or a C-style cast in C++
# (C++ [expr.cast]), which uses the syntax (Type)expr.
#
# For example: (int)f.
CursorKind.CSTYLE_CAST_EXPR = CursorKind(117)
# [C99 6.5.2.5]
CursorKind.COMPOUND_LITERAL_EXPR = CursorKind(118)
# Describes an C or C++ initializer list.
CursorKind.INIT_LIST_EXPR = CursorKind(119)
# The GNU address of label extension, representing &&label.
CursorKind.ADDR_LABEL_EXPR = CursorKind(120)
# This is the GNU Statement Expression extension: ({int X=4; X;})
CursorKind.StmtExpr = CursorKind(121)
# Represents a C11 generic selection.
CursorKind.GENERIC_SELECTION_EXPR = CursorKind(122)
# Implements the GNU __null extension, which is a name for a null
# pointer constant that has integral type (e.g., int or long) and is the same
# size and alignment as a pointer.
#
# The __null extension is typically only used by system headers, which define
# NULL as __null in C++ rather than using 0 (which is an integer that may not
# match the size of a pointer).
CursorKind.GNU_NULL_EXPR = CursorKind(123)
# C++'s static_cast<> expression.
CursorKind.CXX_STATIC_CAST_EXPR = CursorKind(124)
# C++'s dynamic_cast<> expression.
CursorKind.CXX_DYNAMIC_CAST_EXPR = CursorKind(125)
# C++'s reinterpret_cast<> expression.
CursorKind.CXX_REINTERPRET_CAST_EXPR = CursorKind(126)
# C++'s const_cast<> expression.
CursorKind.CXX_CONST_CAST_EXPR = CursorKind(127)
# Represents an explicit C++ type conversion that uses "functional"
# notion (C++ [expr.type.conv]).
#
# Example:
# \code
# x = int(0.5);
# \endcode
CursorKind.CXX_FUNCTIONAL_CAST_EXPR = CursorKind(128)
# A C++ typeid expression (C++ [expr.typeid]).
CursorKind.CXX_TYPEID_EXPR = CursorKind(129)
# [C++ 2.13.5] C++ Boolean Literal.
CursorKind.CXX_BOOL_LITERAL_EXPR = CursorKind(130)
# [C++0x 2.14.7] C++ Pointer Literal.
CursorKind.CXX_NULL_PTR_LITERAL_EXPR = CursorKind(131)
# Represents the "this" expression in C++
CursorKind.CXX_THIS_EXPR = CursorKind(132)
# [C++ 15] C++ Throw Expression.
#
# This handles 'throw' and 'throw' assignment-expression. When
# assignment-expression isn't present, Op will be null.
CursorKind.CXX_THROW_EXPR = CursorKind(133)
# A new expression for memory allocation and constructor calls, e.g:
# "new CXXNewExpr(foo)".
CursorKind.CXX_NEW_EXPR = CursorKind(134)
# A delete expression for memory deallocation and destructor calls,
# e.g. "delete[] pArray".
CursorKind.CXX_DELETE_EXPR = CursorKind(135)
# Represents a unary expression.
CursorKind.CXX_UNARY_EXPR = CursorKind(136)
# ObjCStringLiteral, used for Objective-C string literals i.e. "foo".
CursorKind.OBJC_STRING_LITERAL = CursorKind(137)
# ObjCEncodeExpr, used for in Objective-C.
CursorKind.OBJC_ENCODE_EXPR = CursorKind(138)
# ObjCSelectorExpr used for in Objective-C.
CursorKind.OBJC_SELECTOR_EXPR = CursorKind(139)
# Objective-C's protocol expression.
CursorKind.OBJC_PROTOCOL_EXPR = CursorKind(140)
# An Objective-C "bridged" cast expression, which casts between
# Objective-C pointers and C pointers, transferring ownership in the process.
#
# \code
# NSString *str = (__bridge_transfer NSString *)CFCreateString();
# \endcode
CursorKind.OBJC_BRIDGE_CAST_EXPR = CursorKind(141)
# Represents a C++0x pack expansion that produces a sequence of
# expressions.
#
# A pack expansion expression contains a pattern (which itself is an
# expression) followed by an ellipsis. For example:
CursorKind.PACK_EXPANSION_EXPR = CursorKind(142)
# Represents an expression that computes the length of a parameter
# pack.
CursorKind.SIZE_OF_PACK_EXPR = CursorKind(143)
# Represents a C++ lambda expression that produces a local function
# object.
#
# \code
# void abssort(float *x, unsigned N) {
# std::sort(x, x + N,
# [](float a, float b) {
# return std::abs(a) < std::abs(b);
# });
# }
# \endcode
CursorKind.LAMBDA_EXPR = CursorKind(144)
# Objective-c Boolean Literal.
CursorKind.OBJ_BOOL_LITERAL_EXPR = CursorKind(145)
# Represents the "self" expression in a ObjC method.
CursorKind.OBJ_SELF_EXPR = CursorKind(146)
# OpenMP 4.0 [2.4, Array Section].
CursorKind.OMP_ARRAY_SECTION_EXPR = CursorKind(147)
# Represents an @available(...) check.
CursorKind.OBJC_AVAILABILITY_CHECK_EXPR = CursorKind(148)
# A statement whose specific kind is not exposed via this interface.
#
# Unexposed statements have the same operations as any other kind of statement;
# one can extract their location information, spelling, children, etc. However,
# the specific kind of the statement is not reported.
CursorKind.UNEXPOSED_STMT = CursorKind(200)

# A labelled statement in a function.
CursorKind.LABEL_STMT = CursorKind(201)

# A compound statement
CursorKind.COMPOUND_STMT = CursorKind(202)

# A case statement.
CursorKind.CASE_STMT = CursorKind(203)

# A default statement.
CursorKind.DEFAULT_STMT = CursorKind(204)

# An if statement.
CursorKind.IF_STMT = CursorKind(205)

# A switch statement.
CursorKind.SWITCH_STMT = CursorKind(206)

# A while statement.
CursorKind.WHILE_STMT = CursorKind(207)

# A do statement.
CursorKind.DO_STMT = CursorKind(208)

# A for statement.
CursorKind.FOR_STMT = CursorKind(209)

# A goto statement.
CursorKind.GOTO_STMT = CursorKind(210)

# An indirect goto statement.
CursorKind.INDIRECT_GOTO_STMT = CursorKind(211)

# A continue statement.
CursorKind.CONTINUE_STMT = CursorKind(212)

# A break statement.
CursorKind.BREAK_STMT = CursorKind(213)

# A return statement.
CursorKind.RETURN_STMT = CursorKind(214)

# A GNU-style inline assembler statement.
CursorKind.ASM_STMT = CursorKind(215)

# Objective-C's overall @try-@catch-@finally statement.
CursorKind.OBJC_AT_TRY_STMT = CursorKind(216)

# Objective-C's @catch statement.
CursorKind.OBJC_AT_CATCH_STMT = CursorKind(217)

# Objective-C's @finally statement.
CursorKind.OBJC_AT_FINALLY_STMT = CursorKind(218)

# Objective-C's @throw statement.
CursorKind.OBJC_AT_THROW_STMT = CursorKind(219)

# Objective-C's @synchronized statement.
CursorKind.OBJC_AT_SYNCHRONIZED_STMT = CursorKind(220)

# Objective-C's autorelease pool statement.
CursorKind.OBJC_AUTORELEASE_POOL_STMT = CursorKind(221)

# Objective-C's for collection statement.
CursorKind.OBJC_FOR_COLLECTION_STMT = CursorKind(222)

# C++'s catch statement.
CursorKind.CXX_CATCH_STMT = CursorKind(223)

# C++'s try statement.
CursorKind.CXX_TRY_STMT = CursorKind(224)

# C++'s for (* : *) statement.
CursorKind.CXX_FOR_RANGE_STMT = CursorKind(225)

# Windows Structured Exception Handling's try statement.
CursorKind.SEH_TRY_STMT = CursorKind(226)

# Windows Structured Exception Handling's except statement.
CursorKind.SEH_EXCEPT_STMT = CursorKind(227)

# Windows Structured Exception Handling's finally statement.
CursorKind.SEH_FINALLY_STMT = CursorKind(228)

# A MS inline assembly statement extension.
CursorKind.MS_ASM_STMT = CursorKind(229)

# The null statement.
CursorKind.NULL_STMT = CursorKind(230)

# Adaptor class for mixing declarations with statements and expressions.
CursorKind.DECL_STMT = CursorKind(231)

# OpenMP parallel directive.
CursorKind.OMP_PARALLEL_DIRECTIVE = CursorKind(232)

# OpenMP SIMD directive.
CursorKind.OMP_SIMD_DIRECTIVE = CursorKind(233)

# OpenMP for directive.
CursorKind.OMP_FOR_DIRECTIVE = CursorKind(234)

# OpenMP sections directive.
CursorKind.OMP_SECTIONS_DIRECTIVE = CursorKind(235)

# OpenMP section directive.
CursorKind.OMP_SECTION_DIRECTIVE = CursorKind(236)

# OpenMP single directive.
CursorKind.OMP_SINGLE_DIRECTIVE = CursorKind(237)

# OpenMP parallel for directive.
CursorKind.OMP_PARALLEL_FOR_DIRECTIVE = CursorKind(238)

# OpenMP parallel sections directive.
CursorKind.OMP_PARALLEL_SECTIONS_DIRECTIVE = CursorKind(239)

# OpenMP task directive.
CursorKind.OMP_TASK_DIRECTIVE = CursorKind(240)

# OpenMP master directive.
CursorKind.OMP_MASTER_DIRECTIVE = CursorKind(241)

# OpenMP critical directive.
CursorKind.OMP_CRITICAL_DIRECTIVE = CursorKind(242)

# OpenMP taskyield directive.
CursorKind.OMP_TASKYIELD_DIRECTIVE = CursorKind(243)

# OpenMP barrier directive.
CursorKind.OMP_BARRIER_DIRECTIVE = CursorKind(244)

# OpenMP taskwait directive.
CursorKind.OMP_TASKWAIT_DIRECTIVE = CursorKind(245)

# OpenMP flush directive.
CursorKind.OMP_FLUSH_DIRECTIVE = CursorKind(246)

# Windows Structured Exception Handling's leave statement.
CursorKind.SEH_LEAVE_STMT = CursorKind(247)

# OpenMP ordered directive.
CursorKind.OMP_ORDERED_DIRECTIVE = CursorKind(248)

# OpenMP atomic directive.
CursorKind.OMP_ATOMIC_DIRECTIVE = CursorKind(249)

# OpenMP for SIMD directive.
CursorKind.OMP_FOR_SIMD_DIRECTIVE = CursorKind(250)

# OpenMP parallel for SIMD directive.
CursorKind.OMP_PARALLELFORSIMD_DIRECTIVE = CursorKind(251)

# OpenMP target directive.
CursorKind.OMP_TARGET_DIRECTIVE = CursorKind(252)

# OpenMP teams directive.
CursorKind.OMP_TEAMS_DIRECTIVE = CursorKind(253)

# OpenMP taskgroup directive.
CursorKind.OMP_TASKGROUP_DIRECTIVE = CursorKind(254)

# OpenMP cancellation point directive.
CursorKind.OMP_CANCELLATION_POINT_DIRECTIVE = CursorKind(255)

# OpenMP cancel directive.
CursorKind.OMP_CANCEL_DIRECTIVE = CursorKind(256)

# OpenMP target data directive.
CursorKind.OMP_TARGET_DATA_DIRECTIVE = CursorKind(257)

# OpenMP taskloop directive.
CursorKind.OMP_TASK_LOOP_DIRECTIVE = CursorKind(258)

# OpenMP taskloop simd directive.
CursorKind.OMP_TASK_LOOP_SIMD_DIRECTIVE = CursorKind(259)

# OpenMP distribute directive.
CursorKind.OMP_DISTRIBUTE_DIRECTIVE = CursorKind(260)

# OpenMP target enter data directive.
CursorKind.OMP_TARGET_ENTER_DATA_DIRECTIVE = CursorKind(261)

# OpenMP target exit data directive.
CursorKind.OMP_TARGET_EXIT_DATA_DIRECTIVE = CursorKind(262)

# OpenMP target parallel directive.
CursorKind.OMP_TARGET_PARALLEL_DIRECTIVE = CursorKind(263)

# OpenMP target parallel for directive.
CursorKind.OMP_TARGET_PARALLELFOR_DIRECTIVE = CursorKind(264)

# OpenMP target update directive.
CursorKind.OMP_TARGET_UPDATE_DIRECTIVE = CursorKind(265)

# OpenMP distribute parallel for directive.
CursorKind.OMP_DISTRIBUTE_PARALLELFOR_DIRECTIVE = CursorKind(266)

# OpenMP distribute parallel for simd directive.
CursorKind.OMP_DISTRIBUTE_PARALLEL_FOR_SIMD_DIRECTIVE = CursorKind(267)

# OpenMP distribute simd directive.
CursorKind.OMP_DISTRIBUTE_SIMD_DIRECTIVE = CursorKind(268)

# OpenMP target parallel for simd directive.
CursorKind.OMP_TARGET_PARALLEL_FOR_SIMD_DIRECTIVE = CursorKind(269)

# OpenMP target simd directive.
CursorKind.OMP_TARGET_SIMD_DIRECTIVE = CursorKind(270)

# OpenMP teams distribute directive.
CursorKind.OMP_TEAMS_DISTRIBUTE_DIRECTIVE = CursorKind(271)
###
# Other Kinds

# Cursor that represents the translation unit itself.
#
# The translation unit cursor exists primarily to act as the root cursor for
# traversing the contents of a translation unit.
CursorKind.TRANSLATION_UNIT = CursorKind(300)

###
# Attributes

# An attribute whose specific kind is not exposed via this interface.
CursorKind.UNEXPOSED_ATTR = CursorKind(400)

CursorKind.IB_ACTION_ATTR = CursorKind(401)
CursorKind.IB_OUTLET_ATTR = CursorKind(402)
CursorKind.IB_OUTLET_COLLECTION_ATTR = CursorKind(403)

CursorKind.CXX_FINAL_ATTR = CursorKind(404)
CursorKind.CXX_OVERRIDE_ATTR = CursorKind(405)
CursorKind.ANNOTATE_ATTR = CursorKind(406)
CursorKind.ASM_LABEL_ATTR = CursorKind(407)
CursorKind.PACKED_ATTR = CursorKind(408)
CursorKind.PURE_ATTR = CursorKind(409)
CursorKind.CONST_ATTR = CursorKind(410)
CursorKind.NODUPLICATE_ATTR = CursorKind(411)
CursorKind.CUDACONSTANT_ATTR = CursorKind(412)
CursorKind.CUDADEVICE_ATTR = CursorKind(413)
CursorKind.CUDAGLOBAL_ATTR = CursorKind(414)
CursorKind.CUDAHOST_ATTR = CursorKind(415)
CursorKind.CUDASHARED_ATTR = CursorKind(416)

CursorKind.VISIBILITY_ATTR = CursorKind(417)

CursorKind.DLLEXPORT_ATTR = CursorKind(418)
CursorKind.DLLIMPORT_ATTR = CursorKind(419)
# NOTE: the id range jumps from 419 to 438; the intermediate values are simply
# not exposed by this binding.
CursorKind.CONVERGENT_ATTR = CursorKind(438)
CursorKind.WARN_UNUSED_ATTR = CursorKind(439)
CursorKind.WARN_UNUSED_RESULT_ATTR = CursorKind(440)
CursorKind.ALIGNED_ATTR = CursorKind(441)

###
# Preprocessing
CursorKind.PREPROCESSING_DIRECTIVE = CursorKind(500)
CursorKind.MACRO_DEFINITION = CursorKind(501)
CursorKind.MACRO_INSTANTIATION = CursorKind(502)
CursorKind.INCLUSION_DIRECTIVE = CursorKind(503)

###
# Extra declaration

# A module import declaration.
CursorKind.MODULE_IMPORT_DECL = CursorKind(600)
# A type alias template declaration
CursorKind.TYPE_ALIAS_TEMPLATE_DECL = CursorKind(601)
# A static_assert or _Static_assert node
CursorKind.STATIC_ASSERT = CursorKind(602)
# A friend declaration
CursorKind.FRIEND_DECL = CursorKind(603)

# A code completion overload candidate.
CursorKind.OVERLOAD_CANDIDATE = CursorKind(700)
### Template Argument Kinds ###
class TemplateArgumentKind(BaseEnumeration):
    """
    A TemplateArgumentKind describes the kind of entity that a template argument
    represents.
    """

    # The required BaseEnumeration declarations.
    _kinds = []
    _name_map = None

# The numeric ids presumably mirror libclang's CXTemplateArgumentKind values
# — confirm against Index.h before adding entries.
TemplateArgumentKind.NULL = TemplateArgumentKind(0)
TemplateArgumentKind.TYPE = TemplateArgumentKind(1)
TemplateArgumentKind.DECLARATION = TemplateArgumentKind(2)
TemplateArgumentKind.NULLPTR = TemplateArgumentKind(3)
TemplateArgumentKind.INTEGRAL = TemplateArgumentKind(4)
### Exception Specification Kinds ###
class ExceptionSpecificationKind(BaseEnumeration):
    """
    An ExceptionSpecificationKind describes the kind of exception specification
    that a function has.
    """

    # The required BaseEnumeration declarations.
    _kinds = []
    _name_map = None

    def __repr__(self):
        return 'ExceptionSpecificationKind.{}'.format(self.name)

ExceptionSpecificationKind.NONE = ExceptionSpecificationKind(0)
ExceptionSpecificationKind.DYNAMIC_NONE = ExceptionSpecificationKind(1)
ExceptionSpecificationKind.DYNAMIC = ExceptionSpecificationKind(2)
ExceptionSpecificationKind.MS_ANY = ExceptionSpecificationKind(3)
ExceptionSpecificationKind.BASIC_NOEXCEPT = ExceptionSpecificationKind(4)
ExceptionSpecificationKind.COMPUTED_NOEXCEPT = ExceptionSpecificationKind(5)
ExceptionSpecificationKind.UNEVALUATED = ExceptionSpecificationKind(6)
ExceptionSpecificationKind.UNINSTANTIATED = ExceptionSpecificationKind(7)
ExceptionSpecificationKind.UNPARSED = ExceptionSpecificationKind(8)
### Cursors ###

class Cursor(Structure):
    """
    The Cursor class represents a reference to an element within the AST. It
    acts as a kind of iterator.

    Each cursor stores a reference to its owning TranslationUnit in ``_tu``
    so the unit cannot be garbage collected while the cursor is alive.
    """

    # Mirrors the layout of the CXCursor struct used by libclang.
    _fields_ = [("_kind_id", c_int), ("xdata", c_int), ("data", c_void_p * 3)]

    @staticmethod
    def from_location(tu, location):
        """Return the cursor for the entity at `location` in `tu`."""
        # We store a reference to the TU in the instance so the TU won't get
        # collected before the cursor.
        cursor = conf.lib.clang_getCursor(tu, location)
        cursor._tu = tu

        return cursor

    def __eq__(self, other):
        # Equality is delegated to libclang so logically-equal cursors
        # compare equal even if the raw struct contents differ.
        return conf.lib.clang_equalCursors(self, other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Fix: a class that overrides __eq__ without defining __hash__ is
        # unhashable on Python 3 (its __hash__ is set to None). Delegate to
        # libclang's cursor hash (the `hash` property below) so cursors that
        # compare equal also hash equal.
        return self.hash

    def is_definition(self):
        """
        Returns true if the declaration pointed at by the cursor is also a
        definition of that entity.
        """
        return conf.lib.clang_isCursorDefinition(self)

    def is_const_method(self):
        """Returns True if the cursor refers to a C++ member function or member
        function template that is declared 'const'.
        """
        return conf.lib.clang_CXXMethod_isConst(self)

    def is_converting_constructor(self):
        """Returns True if the cursor refers to a C++ converting constructor.
        """
        return conf.lib.clang_CXXConstructor_isConvertingConstructor(self)

    def is_copy_constructor(self):
        """Returns True if the cursor refers to a C++ copy constructor.
        """
        return conf.lib.clang_CXXConstructor_isCopyConstructor(self)

    def is_default_constructor(self):
        """Returns True if the cursor refers to a C++ default constructor.
        """
        return conf.lib.clang_CXXConstructor_isDefaultConstructor(self)

    def is_move_constructor(self):
        """Returns True if the cursor refers to a C++ move constructor.
        """
        return conf.lib.clang_CXXConstructor_isMoveConstructor(self)

    def is_default_method(self):
        """Returns True if the cursor refers to a C++ member function or member
        function template that is declared '= default'.
        """
        return conf.lib.clang_CXXMethod_isDefaulted(self)

    def is_mutable_field(self):
        """Returns True if the cursor refers to a C++ field that is declared
        'mutable'.
        """
        return conf.lib.clang_CXXField_isMutable(self)

    def is_pure_virtual_method(self):
        """Returns True if the cursor refers to a C++ member function or member
        function template that is declared pure virtual.
        """
        return conf.lib.clang_CXXMethod_isPureVirtual(self)

    def is_static_method(self):
        """Returns True if the cursor refers to a C++ member function or member
        function template that is declared 'static'.
        """
        return conf.lib.clang_CXXMethod_isStatic(self)

    def is_virtual_method(self):
        """Returns True if the cursor refers to a C++ member function or member
        function template that is declared 'virtual'.
        """
        return conf.lib.clang_CXXMethod_isVirtual(self)

    def is_abstract_record(self):
        """Returns True if the cursor refers to a C++ record declaration
        that has pure virtual member functions.
        """
        return conf.lib.clang_CXXRecord_isAbstract(self)

    def is_scoped_enum(self):
        """Returns True if the cursor refers to a scoped enum declaration.
        """
        return conf.lib.clang_EnumDecl_isScoped(self)

    def get_definition(self):
        """
        If the cursor is a reference to a declaration or a declaration of
        some entity, return a cursor that points to the definition of that
        entity.
        """
        # TODO: Should probably check that this is either a reference or
        # declaration prior to issuing the lookup.
        return conf.lib.clang_getCursorDefinition(self)

    def get_usr(self):
        """Return the Unified Symbol Resolution (USR) for the entity referenced
        by the given cursor (or None).

        A Unified Symbol Resolution (USR) is a string that identifies a
        particular entity (function, class, variable, etc.) within a
        program. USRs can be compared across translation units to determine,
        e.g., when references in one translation refer to an entity defined in
        another translation unit."""
        return conf.lib.clang_getCursorUSR(self)

    def get_included_file(self):
        """Returns the File that is included by the current inclusion cursor."""
        assert self.kind == CursorKind.INCLUSION_DIRECTIVE

        return conf.lib.clang_getIncludedFile(self)

    @property
    def kind(self):
        """Return the CursorKind of this cursor."""
        return CursorKind.from_id(self._kind_id)

    @property
    def spelling(self):
        """Return the spelling of the entity pointed at by the cursor."""
        # Lazily computed and cached on the instance.
        if not hasattr(self, '_spelling'):
            self._spelling = conf.lib.clang_getCursorSpelling(self)

        return self._spelling

    @property
    def displayname(self):
        """
        Return the display name for the entity referenced by this cursor.

        The display name contains extra information that helps identify the
        cursor, such as the parameters of a function or template or the
        arguments of a class template specialization.
        """
        if not hasattr(self, '_displayname'):
            self._displayname = conf.lib.clang_getCursorDisplayName(self)

        return self._displayname

    @property
    def mangled_name(self):
        """Return the mangled name for the entity referenced by this cursor."""
        if not hasattr(self, '_mangled_name'):
            self._mangled_name = conf.lib.clang_Cursor_getMangling(self)

        return self._mangled_name

    @property
    def location(self):
        """
        Return the source location (the starting character) of the entity
        pointed at by the cursor.
        """
        if not hasattr(self, '_loc'):
            self._loc = conf.lib.clang_getCursorLocation(self)

        return self._loc

    @property
    def linkage(self):
        """Return the linkage (a LinkageKind) of this cursor."""
        if not hasattr(self, '_linkage'):
            self._linkage = conf.lib.clang_getCursorLinkage(self)

        return LinkageKind.from_id(self._linkage)

    @property
    def tls_kind(self):
        """Return the thread-local storage (TLS) kind of this cursor."""
        if not hasattr(self, '_tls_kind'):
            self._tls_kind = conf.lib.clang_getCursorTLSKind(self)

        return TLSKind.from_id(self._tls_kind)

    @property
    def extent(self):
        """
        Return the source range (the range of text) occupied by the entity
        pointed at by the cursor.
        """
        if not hasattr(self, '_extent'):
            self._extent = conf.lib.clang_getCursorExtent(self)

        return self._extent

    @property
    def storage_class(self):
        """
        Retrieves the storage class (if any) of the entity pointed at by the
        cursor.
        """
        if not hasattr(self, '_storage_class'):
            self._storage_class = conf.lib.clang_Cursor_getStorageClass(self)

        return StorageClass.from_id(self._storage_class)

    @property
    def availability(self):
        """
        Retrieves the availability of the entity pointed at by the cursor.
        """
        if not hasattr(self, '_availability'):
            self._availability = conf.lib.clang_getCursorAvailability(self)

        return AvailabilityKind.from_id(self._availability)

    @property
    def access_specifier(self):
        """
        Retrieves the access specifier (if any) of the entity pointed at by the
        cursor.
        """
        if not hasattr(self, '_access_specifier'):
            self._access_specifier = conf.lib.clang_getCXXAccessSpecifier(self)

        return AccessSpecifier.from_id(self._access_specifier)

    @property
    def type(self):
        """
        Retrieve the Type (if any) of the entity pointed at by the cursor.
        """
        if not hasattr(self, '_type'):
            self._type = conf.lib.clang_getCursorType(self)

        return self._type

    @property
    def canonical(self):
        """Return the canonical Cursor corresponding to this Cursor.

        The canonical cursor is the cursor which is representative for the
        underlying entity. For example, if you have multiple forward
        declarations for the same class, the canonical cursor for the forward
        declarations will be identical.
        """
        if not hasattr(self, '_canonical'):
            self._canonical = conf.lib.clang_getCanonicalCursor(self)

        return self._canonical

    @property
    def result_type(self):
        """Retrieve the Type of the result for this Cursor."""
        if not hasattr(self, '_result_type'):
            self._result_type = conf.lib.clang_getCursorResultType(self)

        return self._result_type

    @property
    def exception_specification_kind(self):
        """
        Retrieve the exception specification kind, which is one of the values
        from the ExceptionSpecificationKind enumeration.
        """
        if not hasattr(self, '_exception_specification_kind'):
            exc_kind = conf.lib.clang_getCursorExceptionSpecificationType(self)
            self._exception_specification_kind = \
                ExceptionSpecificationKind.from_id(exc_kind)

        return self._exception_specification_kind

    @property
    def underlying_typedef_type(self):
        """Return the underlying type of a typedef declaration.

        Returns a Type for the typedef this cursor is a declaration for. If
        the current cursor is not a typedef, this raises.
        """
        if not hasattr(self, '_underlying_type'):
            assert self.kind.is_declaration()
            self._underlying_type = \
                conf.lib.clang_getTypedefDeclUnderlyingType(self)

        return self._underlying_type

    @property
    def enum_type(self):
        """Return the integer type of an enum declaration.

        Returns a Type corresponding to an integer. If the cursor is not for an
        enum, this raises.
        """
        if not hasattr(self, '_enum_type'):
            assert self.kind == CursorKind.ENUM_DECL
            self._enum_type = conf.lib.clang_getEnumDeclIntegerType(self)

        return self._enum_type

    @property
    def enum_value(self):
        """Return the value of an enum constant."""
        if not hasattr(self, '_enum_value'):
            assert self.kind == CursorKind.ENUM_CONSTANT_DECL
            # Figure out the underlying type of the enum to know if it
            # is a signed or unsigned quantity.
            underlying_type = self.type
            if underlying_type.kind == TypeKind.ENUM:
                underlying_type = underlying_type.get_declaration().enum_type
            if underlying_type.kind in (TypeKind.CHAR_U,
                                        TypeKind.UCHAR,
                                        TypeKind.CHAR16,
                                        TypeKind.CHAR32,
                                        TypeKind.USHORT,
                                        TypeKind.UINT,
                                        TypeKind.ULONG,
                                        TypeKind.ULONGLONG,
                                        TypeKind.UINT128):
                self._enum_value = \
                    conf.lib.clang_getEnumConstantDeclUnsignedValue(self)
            else:
                self._enum_value = conf.lib.clang_getEnumConstantDeclValue(self)

        return self._enum_value

    @property
    def objc_type_encoding(self):
        """Return the Objective-C type encoding as a str."""
        if not hasattr(self, '_objc_type_encoding'):
            self._objc_type_encoding = \
                conf.lib.clang_getDeclObjCTypeEncoding(self)

        return self._objc_type_encoding

    @property
    def hash(self):
        """Returns a hash of the cursor as an int."""
        if not hasattr(self, '_hash'):
            self._hash = conf.lib.clang_hashCursor(self)

        return self._hash

    @property
    def semantic_parent(self):
        """Return the semantic parent for this cursor."""
        if not hasattr(self, '_semantic_parent'):
            self._semantic_parent = conf.lib.clang_getCursorSemanticParent(self)

        return self._semantic_parent

    @property
    def lexical_parent(self):
        """Return the lexical parent for this cursor."""
        if not hasattr(self, '_lexical_parent'):
            self._lexical_parent = conf.lib.clang_getCursorLexicalParent(self)

        return self._lexical_parent

    @property
    def translation_unit(self):
        """Returns the TranslationUnit to which this Cursor belongs."""
        # If this triggers an AttributeError, the instance was not properly
        # created.
        return self._tu

    @property
    def referenced(self):
        """
        For a cursor that is a reference, returns a cursor
        representing the entity that it references.
        """
        if not hasattr(self, '_referenced'):
            self._referenced = conf.lib.clang_getCursorReferenced(self)

        return self._referenced

    @property
    def brief_comment(self):
        """Returns the brief comment text associated with that Cursor"""
        return conf.lib.clang_Cursor_getBriefCommentText(self)

    @property
    def raw_comment(self):
        """Returns the raw comment text associated with that Cursor"""
        return conf.lib.clang_Cursor_getRawCommentText(self)

    def get_arguments(self):
        """Return an iterator for accessing the arguments of this cursor."""
        num_args = conf.lib.clang_Cursor_getNumArguments(self)
        for i in range(num_args):
            yield conf.lib.clang_Cursor_getArgument(self, i)

    def get_num_template_arguments(self):
        """Returns the number of template args associated with this cursor."""
        return conf.lib.clang_Cursor_getNumTemplateArguments(self)

    def get_template_argument_kind(self, num):
        """Returns the TemplateArgumentKind for the indicated template
        argument."""
        return conf.lib.clang_Cursor_getTemplateArgumentKind(self, num)

    def get_template_argument_type(self, num):
        """Returns the CXType for the indicated template argument."""
        return conf.lib.clang_Cursor_getTemplateArgumentType(self, num)

    def get_template_argument_value(self, num):
        """Returns the value of the indicated arg as a signed 64b integer."""
        return conf.lib.clang_Cursor_getTemplateArgumentValue(self, num)

    def get_template_argument_unsigned_value(self, num):
        """Returns the value of the indicated arg as an unsigned 64b integer."""
        return conf.lib.clang_Cursor_getTemplateArgumentUnsignedValue(self, num)

    def get_children(self):
        """Return an iterator for accessing the children of this cursor."""
        # FIXME: Expose iteration from CIndex, PR6125.
        def visitor(child, parent, children):
            # FIXME: Document this assertion in API.
            # FIXME: There should just be an isNull method.
            assert child != conf.lib.clang_getNullCursor()

            # Create reference to TU so it isn't GC'd before Cursor.
            child._tu = self._tu
            children.append(child)
            return 1  # continue

        children = []
        conf.lib.clang_visitChildren(self, callbacks['cursor_visit'](visitor),
                                     children)
        return iter(children)

    def walk_preorder(self):
        """Depth-first preorder walk over the cursor and its descendants.

        Yields cursors.
        """
        yield self
        for child in self.get_children():
            for descendant in child.walk_preorder():
                yield descendant

    def get_tokens(self):
        """Obtain the Token instances that compose this Cursor.

        This is a generator for Token instances. It returns all tokens which
        occupy the extent this cursor occupies.
        """
        return TokenGroup.get_tokens(self._tu, self.extent)

    def get_field_offsetof(self):
        """Returns the offset of the FIELD_DECL pointed to by this Cursor."""
        return conf.lib.clang_Cursor_getOffsetOfField(self)

    def is_anonymous(self):
        """
        Check if the record is anonymous.
        """
        # For a field, the question is really about its declared record type.
        if self.kind == CursorKind.FIELD_DECL:
            return self.type.get_declaration().is_anonymous()
        return conf.lib.clang_Cursor_isAnonymous(self)

    def is_bitfield(self):
        """
        Check if the field is a bitfield.
        """
        return conf.lib.clang_Cursor_isBitField(self)

    def get_bitfield_width(self):
        """
        Retrieve the width of a bitfield.
        """
        return conf.lib.clang_getFieldDeclBitWidth(self)

    @staticmethod
    def from_result(res, fn, args):
        """ctypes errcheck callback: attach the owning TranslationUnit."""
        assert isinstance(res, Cursor)
        # FIXME: There should just be an isNull method.
        if res == conf.lib.clang_getNullCursor():
            return None

        # Store a reference to the TU in the Python object so it won't get GC'd
        # before the Cursor.
        tu = None
        for arg in args:
            if isinstance(arg, TranslationUnit):
                tu = arg
                break

            if hasattr(arg, 'translation_unit'):
                tu = arg.translation_unit
                break

        assert tu is not None

        res._tu = tu
        return res

    @staticmethod
    def from_cursor_result(res, fn, args):
        """ctypes errcheck callback for functions whose first arg is a Cursor:
        propagate the TU reference from that argument."""
        assert isinstance(res, Cursor)
        if res == conf.lib.clang_getNullCursor():
            return None

        res._tu = args[0]._tu
        return res
class StorageClass(object):
    """
    Describes the storage class of a declaration.

    Instances act as interned enumeration constants: constructing one
    registers it in a class-level table keyed by its integer value.
    """

    # Registry of unique kind objects, indexed by id.
    _kinds = []
    _name_map = None

    def __init__(self, value):
        # Grow the registry so `value` is a valid index, then claim the slot.
        shortfall = value - len(StorageClass._kinds) + 1
        if shortfall > 0:
            StorageClass._kinds.extend([None] * shortfall)
        if StorageClass._kinds[value] is not None:
            raise ValueError('StorageClass already loaded')
        self.value = value
        StorageClass._kinds[value] = self
        # Invalidate the cached value -> name mapping.
        StorageClass._name_map = None

    def from_param(self):
        # ctypes conversion hook: pass the raw integer to C.
        return self.value

    @property
    def name(self):
        """Get the enumeration name of this storage class."""
        if self._name_map is None:
            mapping = {}
            for attr_name, attr in StorageClass.__dict__.items():
                if isinstance(attr, StorageClass):
                    mapping[attr] = attr_name
            self._name_map = mapping
        return self._name_map[self]

    @staticmethod
    def from_id(id):
        """Look up the StorageClass registered for `id`; raise if unknown."""
        kinds = StorageClass._kinds
        if id >= len(kinds) or not kinds[id]:
            raise ValueError('Unknown storage class %d' % id)
        return kinds[id]

    def __repr__(self):
        return 'StorageClass.%s' % (self.name,)
# The storage class constants. The ids presumably mirror libclang's
# CX_StorageClass enumeration — confirm against Index.h before editing.
StorageClass.INVALID = StorageClass(0)
StorageClass.NONE = StorageClass(1)
StorageClass.EXTERN = StorageClass(2)
StorageClass.STATIC = StorageClass(3)
StorageClass.PRIVATEEXTERN = StorageClass(4)
StorageClass.OPENCLWORKGROUPLOCAL = StorageClass(5)
StorageClass.AUTO = StorageClass(6)
StorageClass.REGISTER = StorageClass(7)
### Availability Kinds ###

class AvailabilityKind(BaseEnumeration):
    """
    Describes the availability of an entity.
    """

    # The unique kind objects, indexed by id.
    _kinds = []
    _name_map = None

    def __repr__(self):
        return 'AvailabilityKind.%s' % (self.name,)

AvailabilityKind.AVAILABLE = AvailabilityKind(0)
AvailabilityKind.DEPRECATED = AvailabilityKind(1)
AvailabilityKind.NOT_AVAILABLE = AvailabilityKind(2)
AvailabilityKind.NOT_ACCESSIBLE = AvailabilityKind(3)
### C++ access specifiers ###

class AccessSpecifier(BaseEnumeration):
    """
    Describes the access of a C++ class member
    """

    # The unique kind objects, index by id.
    _kinds = []
    _name_map = None

    def from_param(self):
        # ctypes conversion hook: pass the raw integer value to C.
        return self.value

    def __repr__(self):
        return 'AccessSpecifier.%s' % (self.name,)

AccessSpecifier.INVALID = AccessSpecifier(0)
AccessSpecifier.PUBLIC = AccessSpecifier(1)
AccessSpecifier.PROTECTED = AccessSpecifier(2)
AccessSpecifier.PRIVATE = AccessSpecifier(3)
AccessSpecifier.NONE = AccessSpecifier(4)
### Type Kinds ###

class TypeKind(BaseEnumeration):
    """
    Describes the kind of type.
    """

    # The unique kind objects, indexed by id.
    _kinds = []
    _name_map = None

    @property
    def spelling(self):
        """Retrieve the spelling of this TypeKind."""
        return conf.lib.clang_getTypeKindSpelling(self.value)

    def __repr__(self):
        return 'TypeKind.%s' % (self.name,)

# The numeric ids presumably mirror libclang's CXTypeKind values — confirm
# against Index.h before editing. Builtin types occupy 0-31, derived and
# OpenCL types start at 100.
TypeKind.INVALID = TypeKind(0)
TypeKind.UNEXPOSED = TypeKind(1)
TypeKind.VOID = TypeKind(2)
TypeKind.BOOL = TypeKind(3)
TypeKind.CHAR_U = TypeKind(4)
TypeKind.UCHAR = TypeKind(5)
TypeKind.CHAR16 = TypeKind(6)
TypeKind.CHAR32 = TypeKind(7)
TypeKind.USHORT = TypeKind(8)
TypeKind.UINT = TypeKind(9)
TypeKind.ULONG = TypeKind(10)
TypeKind.ULONGLONG = TypeKind(11)
TypeKind.UINT128 = TypeKind(12)
TypeKind.CHAR_S = TypeKind(13)
TypeKind.SCHAR = TypeKind(14)
TypeKind.WCHAR = TypeKind(15)
TypeKind.SHORT = TypeKind(16)
TypeKind.INT = TypeKind(17)
TypeKind.LONG = TypeKind(18)
TypeKind.LONGLONG = TypeKind(19)
TypeKind.INT128 = TypeKind(20)
TypeKind.FLOAT = TypeKind(21)
TypeKind.DOUBLE = TypeKind(22)
TypeKind.LONGDOUBLE = TypeKind(23)
TypeKind.NULLPTR = TypeKind(24)
TypeKind.OVERLOAD = TypeKind(25)
TypeKind.DEPENDENT = TypeKind(26)
TypeKind.OBJCID = TypeKind(27)
TypeKind.OBJCCLASS = TypeKind(28)
TypeKind.OBJCSEL = TypeKind(29)
TypeKind.FLOAT128 = TypeKind(30)
TypeKind.HALF = TypeKind(31)
TypeKind.COMPLEX = TypeKind(100)
TypeKind.POINTER = TypeKind(101)
TypeKind.BLOCKPOINTER = TypeKind(102)
TypeKind.LVALUEREFERENCE = TypeKind(103)
TypeKind.RVALUEREFERENCE = TypeKind(104)
TypeKind.RECORD = TypeKind(105)
TypeKind.ENUM = TypeKind(106)
TypeKind.TYPEDEF = TypeKind(107)
TypeKind.OBJCINTERFACE = TypeKind(108)
TypeKind.OBJCOBJECTPOINTER = TypeKind(109)
TypeKind.FUNCTIONNOPROTO = TypeKind(110)
TypeKind.FUNCTIONPROTO = TypeKind(111)
TypeKind.CONSTANTARRAY = TypeKind(112)
TypeKind.VECTOR = TypeKind(113)
TypeKind.INCOMPLETEARRAY = TypeKind(114)
TypeKind.VARIABLEARRAY = TypeKind(115)
TypeKind.DEPENDENTSIZEDARRAY = TypeKind(116)
TypeKind.MEMBERPOINTER = TypeKind(117)
TypeKind.AUTO = TypeKind(118)
TypeKind.ELABORATED = TypeKind(119)
TypeKind.PIPE = TypeKind(120)
# OpenCL image types: RO = read-only, WO = write-only, RW = read-write.
TypeKind.OCLIMAGE1DRO = TypeKind(121)
TypeKind.OCLIMAGE1DARRAYRO = TypeKind(122)
TypeKind.OCLIMAGE1DBUFFERRO = TypeKind(123)
TypeKind.OCLIMAGE2DRO = TypeKind(124)
TypeKind.OCLIMAGE2DARRAYRO = TypeKind(125)
TypeKind.OCLIMAGE2DDEPTHRO = TypeKind(126)
TypeKind.OCLIMAGE2DARRAYDEPTHRO = TypeKind(127)
TypeKind.OCLIMAGE2DMSAARO = TypeKind(128)
TypeKind.OCLIMAGE2DARRAYMSAARO = TypeKind(129)
TypeKind.OCLIMAGE2DMSAADEPTHRO = TypeKind(130)
TypeKind.OCLIMAGE2DARRAYMSAADEPTHRO = TypeKind(131)
TypeKind.OCLIMAGE3DRO = TypeKind(132)
TypeKind.OCLIMAGE1DWO = TypeKind(133)
TypeKind.OCLIMAGE1DARRAYWO = TypeKind(134)
TypeKind.OCLIMAGE1DBUFFERWO = TypeKind(135)
TypeKind.OCLIMAGE2DWO = TypeKind(136)
TypeKind.OCLIMAGE2DARRAYWO = TypeKind(137)
TypeKind.OCLIMAGE2DDEPTHWO = TypeKind(138)
TypeKind.OCLIMAGE2DARRAYDEPTHWO = TypeKind(139)
TypeKind.OCLIMAGE2DMSAAWO = TypeKind(140)
TypeKind.OCLIMAGE2DARRAYMSAAWO = TypeKind(141)
TypeKind.OCLIMAGE2DMSAADEPTHWO = TypeKind(142)
TypeKind.OCLIMAGE2DARRAYMSAADEPTHWO = TypeKind(143)
TypeKind.OCLIMAGE3DWO = TypeKind(144)
TypeKind.OCLIMAGE1DRW = TypeKind(145)
TypeKind.OCLIMAGE1DARRAYRW = TypeKind(146)
TypeKind.OCLIMAGE1DBUFFERRW = TypeKind(147)
TypeKind.OCLIMAGE2DRW = TypeKind(148)
TypeKind.OCLIMAGE2DARRAYRW = TypeKind(149)
TypeKind.OCLIMAGE2DDEPTHRW = TypeKind(150)
TypeKind.OCLIMAGE2DARRAYDEPTHRW = TypeKind(151)
TypeKind.OCLIMAGE2DMSAARW = TypeKind(152)
TypeKind.OCLIMAGE2DARRAYMSAARW = TypeKind(153)
TypeKind.OCLIMAGE2DMSAADEPTHRW = TypeKind(154)
TypeKind.OCLIMAGE2DARRAYMSAADEPTHRW = TypeKind(155)
TypeKind.OCLIMAGE3DRW = TypeKind(156)
TypeKind.OCLSAMPLER = TypeKind(157)
TypeKind.OCLEVENT = TypeKind(158)
TypeKind.OCLQUEUE = TypeKind(159)
TypeKind.OCLRESERVEID = TypeKind(160)
TypeKind.EXTVECTOR = TypeKind(176)
TypeKind.ATOMIC = TypeKind(177)
class RefQualifierKind(BaseEnumeration):
    """Describes a specific ref-qualifier of a type."""

    # The unique kind objects, indexed by id.
    _kinds = []
    _name_map = None

    def from_param(self):
        # ctypes conversion hook: pass the raw integer value to C.
        return self.value

    def __repr__(self):
        return 'RefQualifierKind.%s' % (self.name,)

RefQualifierKind.NONE = RefQualifierKind(0)
RefQualifierKind.LVALUE = RefQualifierKind(1)
RefQualifierKind.RVALUE = RefQualifierKind(2)
class LinkageKind(BaseEnumeration):
    """Describes the kind of linkage of a cursor."""

    # The unique kind objects, indexed by id.
    _kinds = []
    _name_map = None

    def from_param(self):
        # ctypes conversion hook: pass the raw integer value to C.
        return self.value

    def __repr__(self):
        return 'LinkageKind.%s' % (self.name,)

LinkageKind.INVALID = LinkageKind(0)
LinkageKind.NO_LINKAGE = LinkageKind(1)
LinkageKind.INTERNAL = LinkageKind(2)
LinkageKind.UNIQUE_EXTERNAL = LinkageKind(3)
LinkageKind.EXTERNAL = LinkageKind(4)
class TLSKind(BaseEnumeration):
    """Describes the kind of thread-local storage (TLS) of a cursor."""

    # The unique kind objects, indexed by id.
    _kinds = []
    _name_map = None

    def from_param(self):
        # ctypes conversion hook: pass the raw integer value to C.
        return self.value

    def __repr__(self):
        return 'TLSKind.%s' % (self.name,)

TLSKind.NONE = TLSKind(0)
TLSKind.DYNAMIC = TLSKind(1)
TLSKind.STATIC = TLSKind(2)
class Type(Structure):
"""
The type of an element in the abstract syntax tree.
"""
_fields_ = [("_kind_id", c_int), ("data", c_void_p * 2)]
    @property
    def kind(self):
        """Return the kind of this type."""
        # The raw enum id comes straight from the underlying CXType struct.
        return TypeKind.from_id(self._kind_id)
    def argument_types(self):
        """Retrieve a container for the non-variadic arguments for this type.

        The returned object is iterable and indexable. Each item in the
        container is a Type instance.

        Asserts that this type is a function prototype.
        """
        # Lazy sequence view over clang_getArgType; the length is queried
        # once on first use.
        class ArgumentsIterator(collections_abc.Sequence):
            def __init__(self, parent):
                self.parent = parent
                self.length = None

            def __len__(self):
                if self.length is None:
                    self.length = conf.lib.clang_getNumArgTypes(self.parent)

                return self.length

            def __getitem__(self, key):
                # FIXME Support slice objects.
                if not isinstance(key, int):
                    raise TypeError("Must supply a non-negative int.")

                if key < 0:
                    raise IndexError("Only non-negative indexes are accepted.")

                if key >= len(self):
                    raise IndexError("Index greater than container length: "
                                     "%d > %d" % ( key, len(self) ))

                result = conf.lib.clang_getArgType(self.parent, key)
                if result.kind == TypeKind.INVALID:
                    raise IndexError("Argument could not be retrieved.")

                return result

        assert self.kind == TypeKind.FUNCTIONPROTO
        return ArgumentsIterator(self)
    @property
    def element_type(self):
        """Retrieve the Type of elements within this Type.

        If accessed on a type that is not an array, complex, or vector type, an
        exception will be raised.
        """
        result = conf.lib.clang_getElementType(self)
        # libclang signals "no element type" by returning an invalid type.
        if result.kind == TypeKind.INVALID:
            raise Exception('Element type not available on this type.')

        return result
@property
def element_count(self):
"""Retrieve the number of elements in this type.
Returns an int.
If the Type is not an array or vector, this raises.
"""
result = conf.lib.clang_getNumElements(self)
if result < 0:
raise Exception('Type does not have elements.')
return result
@property
def translation_unit(self):
"""The TranslationUnit to which this Type is associated."""
# If this triggers an AttributeError, the instance was not properly
# instantiated.
return self._tu
@staticmethod
def from_result(res, fn, args):
assert isinstance(res, Type)
tu = None
for arg in args:
if hasattr(arg, 'translation_unit'):
tu = arg.translation_unit
break
assert tu is not None
res._tu = tu
return res
def get_num_template_arguments(self):
return conf.lib.clang_Type_getNumTemplateArguments(self)
def get_template_argument_type(self, num):
return conf.lib.clang_Type_getTemplateArgumentAsType(self, num)
def get_canonical(self):
"""
Return the canonical type for a Type.
Clang's type system explicitly models typedefs and all the
ways a specific type can be represented. The canonical type
is the underlying type with all the "sugar" removed. For
example, if 'T' is a typedef for 'int', the canonical type for
'T' would be 'int'.
"""
return conf.lib.clang_getCanonicalType(self)
def is_const_qualified(self):
"""Determine whether a Type has the "const" qualifier set.
This does not look through typedefs that may have added "const"
at a different level.
"""
return conf.lib.clang_isConstQualifiedType(self)
def is_volatile_qualified(self):
"""Determine whether a Type has the "volatile" qualifier set.
This does not look through typedefs that may have added "volatile"
at a different level.
"""
return conf.lib.clang_isVolatileQualifiedType(self)
def is_restrict_qualified(self):
"""Determine whether a Type has the "restrict" qualifier set.
This does not look through typedefs that may have added "restrict" at
a different level.
"""
return conf.lib.clang_isRestrictQualifiedType(self)
def is_function_variadic(self):
"""Determine whether this function Type is a variadic function type."""
assert self.kind == TypeKind.FUNCTIONPROTO
return conf.lib.clang_isFunctionTypeVariadic(self)
def get_address_space(self):
return conf.lib.clang_getAddressSpace(self)
def get_typedef_name(self):
return conf.lib.clang_getTypedefName(self)
def is_pod(self):
"""Determine whether this Type represents plain old data (POD)."""
return conf.lib.clang_isPODType(self)
def get_pointee(self):
"""
For pointer types, returns the type of the pointee.
"""
return conf.lib.clang_getPointeeType(self)
def get_declaration(self):
"""
Return the cursor for the declaration of the given type.
"""
return conf.lib.clang_getTypeDeclaration(self)
def get_result(self):
"""
Retrieve the result type associated with a function type.
"""
return conf.lib.clang_getResultType(self)
def get_array_element_type(self):
"""
Retrieve the type of the elements of the array type.
"""
return conf.lib.clang_getArrayElementType(self)
def get_array_size(self):
"""
Retrieve the size of the constant array.
"""
return conf.lib.clang_getArraySize(self)
def get_class_type(self):
"""
Retrieve the class type of the member pointer type.
"""
return conf.lib.clang_Type_getClassType(self)
def get_named_type(self):
"""
Retrieve the type named by the qualified-id.
"""
return conf.lib.clang_Type_getNamedType(self)
def get_align(self):
"""
Retrieve the alignment of the record.
"""
return conf.lib.clang_Type_getAlignOf(self)
def get_size(self):
"""
Retrieve the size of the record.
"""
return conf.lib.clang_Type_getSizeOf(self)
def get_offset(self, fieldname):
"""
Retrieve the offset of a field in the record.
"""
return conf.lib.clang_Type_getOffsetOf(self, fieldname)
def get_ref_qualifier(self):
"""
Retrieve the ref-qualifier of the type.
"""
return RefQualifierKind.from_id(
conf.lib.clang_Type_getCXXRefQualifier(self))
def get_fields(self):
"""Return an iterator for accessing the fields of this type."""
def visitor(field, children):
assert field != conf.lib.clang_getNullCursor()
# Create reference to TU so it isn't GC'd before Cursor.
field._tu = self._tu
fields.append(field)
return 1 # continue
fields = []
conf.lib.clang_Type_visitFields(self,
callbacks['fields_visit'](visitor), fields)
return iter(fields)
def get_exception_specification_kind(self):
"""
Return the kind of the exception specification; a value from
the ExceptionSpecificationKind enumeration.
"""
return ExceptionSpecificationKind.from_id(
conf.lib.clang.getExceptionSpecificationType(self))
@property
def spelling(self):
"""Retrieve the spelling of this Type."""
return conf.lib.clang_getTypeSpelling(self)
def __eq__(self, other):
if type(other) != type(self):
return False
return conf.lib.clang_equalTypes(self, other)
def __ne__(self, other):
return not self.__eq__(other)
## CIndex Objects ##
# CIndex objects (derived from ClangObject) are essentially lightweight
# wrappers attached to some underlying object, which is exposed via CIndex as
# a void*.
class ClangObject(object):
    """Base wrapper around an opaque CIndex object pointer.

    Acts as an intermediary between the ctypes layer and the Clang CIndex
    library: the raw pointer is exposed both as ``obj`` and as
    ``_as_parameter_`` (the ctypes argument-conversion hook).
    """

    def __init__(self, obj):
        assert isinstance(obj, c_object_p) and obj
        self._as_parameter_ = obj
        self.obj = self._as_parameter_

    def from_param(self):
        return self._as_parameter_
class _CXUnsavedFile(Structure):
"""Helper for passing unsaved file arguments."""
_fields_ = [("name", c_char_p), ("contents", c_char_p), ('length', c_ulong)]
# Calling through the Python/ctypes interface is comparatively slow.
# Fortunately, most chunk kinds have a fixed punctuation spelling that never
# changes, so those spellings are served from this cache instead of a
# per-call library lookup.
SpellingCache = {
    # Kinds 0-5 (Optional, TypedText, Text, Placeholder, Informative,
    # CurrentParameter), 15 (ResultType) and 20 (VerticalSpace) have
    # dynamic spellings and are intentionally absent.
    6: '(',    # LeftParen
    7: ')',    # RightParen
    8: '[',    # LeftBracket
    9: ']',    # RightBracket
    10: '{',   # LeftBrace
    11: '}',   # RightBrace
    12: '<',   # LeftAngle
    13: '>',   # RightAngle
    14: ', ',  # Comma
    16: ':',   # Colon
    17: ';',   # SemiColon
    18: '=',   # Equal
    19: ' ',   # HorizontalSpace
}
class CompletionChunk(object):
    """One chunk (text fragment) of a code-completion string."""

    class Kind(object):
        """Named kind of a completion chunk (e.g. TypedText, Placeholder)."""

        def __init__(self, name):
            self.name = name

        def __str__(self):
            return self.name

        def __repr__(self):
            return "<ChunkKind: %s>" % self

    def __init__(self, completionString, key):
        self.cs = completionString
        self.key = key
        # Cache for the C-level chunk kind; -1 means "not fetched yet".
        self.__kindNumberCache = -1

    def __repr__(self):
        return "{'" + self.spelling + "', " + str(self.kind) + "}"

    @CachedProperty
    def spelling(self):
        # Punctuation-like chunks have a fixed spelling; avoid a C call.
        if self.__kindNumber in SpellingCache:
            return SpellingCache[self.__kindNumber]
        return conf.lib.clang_getCompletionChunkText(self.cs, self.key)

    # We do not use @CachedProperty here, as the manual implementation is
    # apparently still significantly faster. Please profile carefully if you
    # would like to add CachedProperty back.
    @property
    def __kindNumber(self):
        if self.__kindNumberCache == -1:
            self.__kindNumberCache = \
                conf.lib.clang_getCompletionChunkKind(self.cs, self.key)
        return self.__kindNumberCache

    @CachedProperty
    def kind(self):
        return completionChunkKindMap[self.__kindNumber]

    @CachedProperty
    def string(self):
        """The nested CompletionString of this chunk, or None if absent."""
        res = conf.lib.clang_getCompletionChunkCompletionString(self.cs,
                                                                self.key)
        # Bug fix: the original ended with a bare ``None`` expression
        # statement; make the null-result path an explicit ``return None``.
        if res:
            return CompletionString(res)
        return None

    def isKindOptional(self):
        return self.__kindNumber == 0

    def isKindTypedText(self):
        return self.__kindNumber == 1

    def isKindPlaceHolder(self):
        return self.__kindNumber == 3

    def isKindInformative(self):
        return self.__kindNumber == 4

    def isKindResultType(self):
        return self.__kindNumber == 15
# Maps the numeric chunk kind reported by libclang (CXCompletionChunkKind)
# onto a shared CompletionChunk.Kind object.
completionChunkKindMap = {
    number: CompletionChunk.Kind(name)
    for number, name in enumerate([
        "Optional",
        "TypedText",
        "Text",
        "Placeholder",
        "Informative",
        "CurrentParameter",
        "LeftParen",
        "RightParen",
        "LeftBracket",
        "RightBracket",
        "LeftBrace",
        "RightBrace",
        "LeftAngle",
        "RightAngle",
        "Comma",
        "ResultType",
        "Colon",
        "SemiColon",
        "Equal",
        "HorizontalSpace",
        "VerticalSpace",
    ])
}
class CompletionString(ClangObject):
    """A semantic string of chunks describing one code-completion result."""

    class Availability(object):
        """Named availability level of a completion result."""

        def __init__(self, name):
            self.name = name

        def __str__(self):
            return self.name

        def __repr__(self):
            return "<Availability: %s>" % self

    def __len__(self):
        return self.num_chunks

    @CachedProperty
    def num_chunks(self):
        return conf.lib.clang_getNumCompletionChunks(self.obj)

    def __getitem__(self, key):
        if key >= self.num_chunks:
            raise IndexError
        return CompletionChunk(self.obj, key)

    @property
    def priority(self):
        return conf.lib.clang_getCompletionPriority(self.obj)

    @property
    def availability(self):
        return availabilityKinds[conf.lib.clang_getCompletionAvailability(self.obj)]

    @property
    def briefComment(self):
        # Only available when the loaded libclang exports the symbol.
        if not conf.function_exists("clang_getCompletionBriefComment"):
            return _CXString()
        return conf.lib.clang_getCompletionBriefComment(self.obj)

    def __repr__(self):
        chunks = " | ".join(str(chunk) for chunk in self)
        return (chunks
                + " || Priority: " + str(self.priority)
                + " || Availability: " + str(self.availability)
                + " || Brief comment: " + str(self.briefComment))
# Maps the numeric CXAvailabilityKind reported by libclang onto a shared
# kind object.
availabilityKinds = {
    number: CompletionChunk.Kind(name)
    for number, name in enumerate(
        ["Available", "Deprecated", "NotAvailable", "NotAccessible"])
}
class CodeCompletionResult(Structure):
    """ctypes mirror of a single CXCompletionResult entry."""

    _fields_ = [('cursorKind', c_int), ('completionString', c_object_p)]

    def __repr__(self):
        return str(self.string)

    @property
    def kind(self):
        return CursorKind.from_id(self.cursorKind)

    @property
    def string(self):
        return CompletionString(self.completionString)
class CCRStructure(Structure):
    """ctypes mirror of CXCodeCompleteResults (array + count)."""

    _fields_ = [('results', POINTER(CodeCompletionResult)),
                ('numResults', c_int)]

    def __len__(self):
        return self.numResults

    def __getitem__(self, key):
        if key >= len(self):
            raise IndexError

        return self.results[key]
class CodeCompletionResults(ClangObject):
    """Owns a CXCodeCompleteResults pointer and exposes its diagnostics."""

    def __init__(self, ptr):
        assert isinstance(ptr, POINTER(CCRStructure)) and ptr
        self._as_parameter_ = ptr
        self.ptr = ptr

    def from_param(self):
        return self._as_parameter_

    def __del__(self):
        # Release the C-side results when the wrapper is collected.
        conf.lib.clang_disposeCodeCompleteResults(self)

    @property
    def results(self):
        return self.ptr.contents

    @property
    def diagnostics(self):
        class DiagnosticsItr(object):
            def __init__(self, ccr):
                self.ccr = ccr

            def __len__(self):
                return int(
                    conf.lib.clang_codeCompleteGetNumDiagnostics(self.ccr))

            def __getitem__(self, key):
                return conf.lib.clang_codeCompleteGetDiagnostic(self.ccr, key)

        return DiagnosticsItr(self)
class Index(ClangObject):
    """
    The Index type provides the primary interface to the Clang CIndex library,
    primarily by providing an interface for reading and parsing translation
    units.
    """

    @staticmethod
    def create(excludeDecls=False):
        """
        Create a new Index.

        Parameters:
        excludeDecls -- Exclude local declarations from translation units.
        """
        return Index(conf.lib.clang_createIndex(excludeDecls, 0))

    def __del__(self):
        conf.lib.clang_disposeIndex(self)

    def read(self, path):
        """Load a TranslationUnit from the given AST file."""
        return TranslationUnit.from_ast_file(path, self)

    def parse(self, path, args=None, unsaved_files=None, options=0):
        """Load the translation unit from the given source code file by running
        clang and generating the AST before loading. Additional command line
        parameters can be passed to clang via the args parameter.

        In-memory contents for files can be provided by passing a list of pairs
        to as unsaved_files, the first item should be the filenames to be mapped
        and the second should be the contents to be substituted for the
        file. The contents may be passed as strings or file objects.

        If an error was encountered during parsing, a TranslationUnitLoadError
        will be raised.
        """
        return TranslationUnit.from_source(path, args, unsaved_files,
                                           options, self)
class TranslationUnit(ClangObject):
    """Represents a source code translation unit.
    This is one of the main types in the API. Any time you wish to interact
    with Clang's representation of a source file, you typically start with a
    translation unit.
    """
    # The PARSE_* constants mirror the CXTranslationUnit_* flags in
    # clang-c/Index.h and may be OR'd together for the ``options`` arguments.
    # Default parsing mode.
    PARSE_NONE = 0
    # Instruct the parser to create a detailed processing record containing
    # metadata not normally retained.
    PARSE_DETAILED_PROCESSING_RECORD = 1
    # Indicates that the translation unit is incomplete. This is typically used
    # when parsing headers.
    PARSE_INCOMPLETE = 2
    # Instruct the parser to create a pre-compiled preamble for the translation
    # unit. This caches the preamble (included files at top of source file).
    # This is useful if the translation unit will be reparsed and you don't
    # want to incur the overhead of reparsing the preamble.
    PARSE_PRECOMPILED_PREAMBLE = 4
    # Cache code completion information on parse. This adds time to parsing but
    # speeds up code completion.
    PARSE_CACHE_COMPLETION_RESULTS = 8
    # Flags with values 16 and 32 are deprecated and intentionally omitted.
    # Do not parse function bodies. This is useful if you only care about
    # searching for declarations/definitions.
    PARSE_SKIP_FUNCTION_BODIES = 64
    # Used to indicate that brief documentation comments should be included
    # into the set of code completions returned from this translation unit.
    PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION = 128
    @classmethod
    def from_source(cls, filename, args=None, unsaved_files=None, options=0,
                    index=None):
        """Create a TranslationUnit by parsing source.
        This is capable of processing source code both from files on the
        filesystem as well as in-memory contents.
        Command-line arguments that would be passed to clang are specified as
        a list via args. These can be used to specify include paths, warnings,
        etc. e.g. ["-Wall", "-I/path/to/include"].
        In-memory file content can be provided via unsaved_files. This is an
        iterable of 2-tuples. The first element is the filename (str or
        PathLike). The second element defines the content. Content can be
        provided as str source code or as file objects (anything with a read()
        method). If a file object is being used, content will be read until EOF
        and the read cursor will not be reset to its original position.
        options is a bitwise or of TranslationUnit.PARSE_XXX flags which will
        control parsing behavior.
        index is an Index instance to utilize. If not provided, a new Index
        will be created for this TranslationUnit.
        To parse source from the filesystem, the filename of the file to parse
        is specified by the filename argument. Or, filename could be None and
        the args list would contain the filename(s) to parse.
        To parse source from an in-memory buffer, set filename to the virtual
        filename you wish to associate with this source (e.g. "test.c"). The
        contents of that file are then provided in unsaved_files.
        If an error occurs, a TranslationUnitLoadError is raised.
        Please note that a TranslationUnit with parser errors may be returned.
        It is the caller's responsibility to check tu.diagnostics for errors.
        Also note that Clang infers the source language from the extension of
        the input filename. If you pass in source code containing a C++ class
        declaration with the filename "test.c" parsing will fail.
        """
        if args is None:
            args = []
        if unsaved_files is None:
            unsaved_files = []
        if index is None:
            index = Index.create()
        # Convert the Python argument list into a C array of byte strings.
        args_array = None
        if len(args) > 0:
            args_array = (c_char_p * len(args))(*[b(x) for x in args])
        # Convert in-memory file contents into an array of _CXUnsavedFile.
        # File-like objects are read eagerly here (cursor is not reset).
        unsaved_array = None
        if len(unsaved_files) > 0:
            unsaved_array = (_CXUnsavedFile * len(unsaved_files))()
            for i, (name, contents) in enumerate(unsaved_files):
                if hasattr(contents, "read"):
                    contents = contents.read()
                contents = b(contents)
                unsaved_array[i].name = b(fspath(name))
                unsaved_array[i].contents = contents
                unsaved_array[i].length = len(contents)
        ptr = conf.lib.clang_parseTranslationUnit(index,
                                fspath(filename) if filename is not None else None,
                                args_array,
                                len(args), unsaved_array,
                                len(unsaved_files), options)
        # A null pointer signals that parsing failed outright.
        if not ptr:
            raise TranslationUnitLoadError("Error parsing translation unit.")
        return cls(ptr, index=index)
    @classmethod
    def from_ast_file(cls, filename, index=None):
        """Create a TranslationUnit instance from a saved AST file.
        A previously-saved AST file (provided with -emit-ast or
        TranslationUnit.save()) is loaded from the filename specified.
        If the file cannot be loaded, a TranslationUnitLoadError will be
        raised.
        index is optional and is the Index instance to use. If not provided,
        a default Index will be created.
        filename can be str or PathLike.
        """
        if index is None:
            index = Index.create()
        ptr = conf.lib.clang_createTranslationUnit(index, fspath(filename))
        if not ptr:
            raise TranslationUnitLoadError(filename)
        return cls(ptr=ptr, index=index)
    def __init__(self, ptr, index):
        """Create a TranslationUnit instance.
        TranslationUnits should be created using one of the from_* @classmethod
        functions above. __init__ is only called internally.
        """
        assert isinstance(index, Index)
        # Hold the Index so it outlives this TranslationUnit.
        self.index = index
        ClangObject.__init__(self, ptr)
    def __del__(self):
        conf.lib.clang_disposeTranslationUnit(self)
    @property
    def cursor(self):
        """Retrieve the cursor that represents the given translation unit."""
        return conf.lib.clang_getTranslationUnitCursor(self)
    @property
    def spelling(self):
        """Get the original translation unit source file name."""
        return conf.lib.clang_getTranslationUnitSpelling(self)
    def get_includes(self):
        """
        Return an iterable sequence of FileInclusion objects that describe the
        sequence of inclusions in a translation unit. The first object in
        this sequence is always the input file. Note that this method will not
        recursively iterate over header files included through precompiled
        headers.
        """
        # Visitor invoked by libclang once per inclusion; depth 0 is the
        # input file itself and is skipped here.
        def visitor(fobj, lptr, depth, includes):
            if depth > 0:
                loc = lptr.contents
                includes.append(FileInclusion(loc.file, File(fobj), loc, depth))
        # Automatically adapt CIndex/ctype pointers to python objects
        includes = []
        conf.lib.clang_getInclusions(self,
                callbacks['translation_unit_includes'](visitor), includes)
        return iter(includes)
    def get_file(self, filename):
        """Obtain a File from this translation unit."""
        return File.from_name(self, filename)
    def get_location(self, filename, position):
        """Obtain a SourceLocation for a file in this translation unit.
        The position can be specified by passing:
          - Integer file offset. Initial file offset is 0.
          - 2-tuple of (line number, column number). Initial file position is
            (0, 0)
        """
        f = self.get_file(filename)
        if isinstance(position, int):
            return SourceLocation.from_offset(self, f, position)
        return SourceLocation.from_position(self, f, position[0], position[1])
    def get_extent(self, filename, locations):
        """Obtain a SourceRange from this translation unit.
        The bounds of the SourceRange must ultimately be defined by a start and
        end SourceLocation. For the locations argument, you can pass:
          - 2 SourceLocation instances in a 2-tuple or list.
          - 2 int file offsets via a 2-tuple or list.
          - 2 2-tuple or lists of (line, column) pairs in a 2-tuple or list.
        e.g.
        get_extent('foo.c', (5, 10))
        get_extent('foo.c', ((1, 1), (1, 15)))
        """
        f = self.get_file(filename)
        if len(locations) < 2:
            raise Exception('Must pass object with at least 2 elements')
        start_location, end_location = locations
        # Each bound may be a (line, column) pair or an int offset; normalize
        # both to SourceLocation instances.
        if hasattr(start_location, '__len__'):
            start_location = SourceLocation.from_position(self, f,
                start_location[0], start_location[1])
        elif isinstance(start_location, int):
            start_location = SourceLocation.from_offset(self, f,
                start_location)
        if hasattr(end_location, '__len__'):
            end_location = SourceLocation.from_position(self, f,
                end_location[0], end_location[1])
        elif isinstance(end_location, int):
            end_location = SourceLocation.from_offset(self, f, end_location)
        assert isinstance(start_location, SourceLocation)
        assert isinstance(end_location, SourceLocation)
        return SourceRange.from_locations(start_location, end_location)
    @property
    def diagnostics(self):
        """
        Return an iterable (and indexable) object containing the diagnostics.
        """
        class DiagIterator(object):
            def __init__(self, tu):
                self.tu = tu
            def __len__(self):
                return int(conf.lib.clang_getNumDiagnostics(self.tu))
            def __getitem__(self, key):
                diag = conf.lib.clang_getDiagnostic(self.tu, key)
                if not diag:
                    raise IndexError
                return Diagnostic(diag)
        return DiagIterator(self)
    def reparse(self, unsaved_files=None, options=0):
        """
        Reparse an already parsed translation unit.
        In-memory contents for files can be provided by passing a list of pairs
        as unsaved_files, the first items should be the filenames to be mapped
        and the second should be the contents to be substituted for the
        file. The contents may be passed as strings or file objects.
        """
        if unsaved_files is None:
            unsaved_files = []
        unsaved_files_array = 0
        if len(unsaved_files):
            unsaved_files_array = (_CXUnsavedFile * len(unsaved_files))()
            for i,(name,contents) in enumerate(unsaved_files):
                if hasattr(contents, "read"):
                    contents = contents.read()
                contents = b(contents)
                unsaved_files_array[i].name = b(fspath(name))
                unsaved_files_array[i].contents = contents
                unsaved_files_array[i].length = len(contents)
        # NOTE(review): the int returned by clang_reparseTranslationUnit is
        # assigned but never checked; a nonzero value appears to indicate a
        # failed reparse — consider verifying it. TODO confirm against the
        # libclang documentation before changing behavior.
        ptr = conf.lib.clang_reparseTranslationUnit(self, len(unsaved_files),
                unsaved_files_array, options)
    def save(self, filename):
        """Saves the TranslationUnit to a file.
        This is equivalent to passing -emit-ast to the clang frontend. The
        saved file can be loaded back into a TranslationUnit. Or, if it
        corresponds to a header, it can be used as a pre-compiled header file.
        If an error occurs while saving, a TranslationUnitSaveError is raised.
        If the error was TranslationUnitSaveError.ERROR_INVALID_TU, this means
        the constructed TranslationUnit was not valid at time of save. In this
        case, the reason(s) why should be available via
        TranslationUnit.diagnostics().
        filename -- The path to save the translation unit to (str or PathLike).
        """
        options = conf.lib.clang_defaultSaveOptions(self)
        result = int(conf.lib.clang_saveTranslationUnit(self, fspath(filename),
                                                        options))
        if result != 0:
            raise TranslationUnitSaveError(result,
                'Error saving TranslationUnit.')
    def codeComplete(self, path, line, column, unsaved_files=None,
                     include_macros=False, include_code_patterns=False,
                     include_brief_comments=False):
        """
        Code complete in this translation unit.
        In-memory contents for files can be provided by passing a list of pairs
        as unsaved_files, the first items should be the filenames to be mapped
        and the second should be the contents to be substituted for the
        file. The contents may be passed as strings or file objects.
        """
        # Build the CXCodeComplete_* options bitmask from the boolean flags.
        options = 0
        if include_macros:
            options += 1
        if include_code_patterns:
            options += 2
        if include_brief_comments:
            options += 4
        if unsaved_files is None:
            unsaved_files = []
        unsaved_files_array = 0
        if len(unsaved_files):
            unsaved_files_array = (_CXUnsavedFile * len(unsaved_files))()
            for i,(name,contents) in enumerate(unsaved_files):
                if hasattr(contents, "read"):
                    contents = contents.read()
                contents = b(contents)
                unsaved_files_array[i].name = b(fspath(name))
                unsaved_files_array[i].contents = contents
                unsaved_files_array[i].length = len(contents)
        ptr = conf.lib.clang_codeCompleteAt(self, fspath(path), line, column,
                unsaved_files_array, len(unsaved_files), options)
        # A null result means completion failed; callers receive None.
        if ptr:
            return CodeCompletionResults(ptr)
        return None
    def get_tokens(self, locations=None, extent=None):
        """Obtain tokens in this translation unit.
        This is a generator for Token instances. The caller specifies a range
        of source code to obtain tokens for. The range can be specified as a
        2-tuple of SourceLocation or as a SourceRange. If both are defined,
        behavior is undefined.
        """
        if locations is not None:
            extent = SourceRange(start=locations[0], end=locations[1])
        return TokenGroup.get_tokens(self, extent)
class File(ClangObject):
    """
    The File class represents a particular source file that is part of a
    translation unit.
    """

    @staticmethod
    def from_name(translation_unit, file_name):
        """Retrieve a file handle within the given translation unit."""
        return File(conf.lib.clang_getFile(translation_unit,
                                           fspath(file_name)))

    @property
    def name(self):
        """Return the complete file and path name of the file."""
        return conf.lib.clang_getFileName(self)

    @property
    def time(self):
        """Return the last modification time of the file."""
        return conf.lib.clang_getFileTime(self)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<File: %s>" % (self.name)

    @staticmethod
    def from_result(res, fn, args):
        """ctypes errcheck: wrap the raw pointer and pin the owning TU."""
        assert isinstance(res, c_object_p)
        wrapped = File(res)

        # Copy a reference to the TranslationUnit to prevent premature GC.
        wrapped._tu = args[0]._tu
        return wrapped
class FileInclusion(object):
    """
    Records a single inclusion event: one source file pulling in another via
    a '#include' directive, or the translation unit's input file itself.
    Exposes the including file, the included file, the location of the
    '#include' directive, and the depth in the inclusion stack (the input
    file has depth 0).
    """

    def __init__(self, src, tgt, loc, depth):
        self.depth = depth
        self.location = loc
        self.include = tgt
        self.source = src

    @property
    def is_input_file(self):
        """True if the included file is the input file."""
        return self.depth == 0
class CompilationDatabaseError(Exception):
    """Raised when an operation on a CompilationDatabase fails.

    The enumerated cause is stored on ``e.cdb_error`` and can be compared
    against the ERROR_ constants defined on this class.
    """

    ERROR_UNKNOWN = 0            # an unknown error occurred
    ERROR_CANNOTLOADDATABASE = 1  # the database could not be loaded

    def __init__(self, enumeration, message):
        assert isinstance(enumeration, int)

        # Reject values outside the known enumeration range.
        if enumeration > 1:
            raise Exception("Encountered undefined CompilationDatabase error "
                            "constant: %d. Please file a bug to have this "
                            "value supported." % enumeration)

        self.cdb_error = enumeration
        Exception.__init__(self, 'Error %d: %s' % (enumeration, message))
class CompileCommand(object):
    """Represents the compile command used to build a file"""

    def __init__(self, cmd, ccmds):
        self.cmd = cmd
        # Keep a reference to the originating CompileCommands
        # to prevent garbage collection
        self.ccmds = ccmds

    @property
    def directory(self):
        """Get the working directory for this CompileCommand"""
        return conf.lib.clang_CompileCommand_getDirectory(self.cmd)

    @property
    def filename(self):
        """Get the working filename for this CompileCommand"""
        return conf.lib.clang_CompileCommand_getFilename(self.cmd)

    @property
    def arguments(self):
        """
        Get an iterable object providing each argument in the
        command line for the compiler invocation as a _CXString.
        Invariant : the first argument is the compiler executable
        """
        for index in range(conf.lib.clang_CompileCommand_getNumArgs(self.cmd)):
            yield conf.lib.clang_CompileCommand_getArg(self.cmd, index)
class CompileCommands(object):
    """
    Iterable collection of every CompileCommand that can be used to build a
    specific source file.
    """

    def __init__(self, ccmds):
        self.ccmds = ccmds

    def __del__(self):
        # Release the underlying CXCompileCommands on collection.
        conf.lib.clang_CompileCommands_dispose(self.ccmds)

    def __len__(self):
        return int(conf.lib.clang_CompileCommands_getSize(self.ccmds))

    def __getitem__(self, i):
        command = conf.lib.clang_CompileCommands_getCommand(self.ccmds, i)
        if not command:
            raise IndexError
        return CompileCommand(command, self)

    @staticmethod
    def from_result(res, fn, args):
        """ctypes errcheck: a NULL result means "no commands", not an error."""
        return CompileCommands(res) if res else None
class CompilationDatabase(ClangObject):
    """
    Wrapper around clang::tooling::CompilationDatabase.
    It enables querying how a specific source file can be built.
    """

    def __del__(self):
        conf.lib.clang_CompilationDatabase_dispose(self)

    @staticmethod
    def from_result(res, fn, args):
        """ctypes errcheck: a NULL database pointer is a load failure."""
        if not res:
            raise CompilationDatabaseError(0,
                                           "CompilationDatabase loading failed")
        return CompilationDatabase(res)

    @staticmethod
    def fromDirectory(buildDir):
        """Builds a CompilationDatabase from the database found in buildDir"""
        errorCode = c_uint()
        try:
            return conf.lib.clang_CompilationDatabase_fromDirectory(
                fspath(buildDir), byref(errorCode))
        except CompilationDatabaseError:
            # Re-raise with the error code reported by libclang.
            raise CompilationDatabaseError(int(errorCode.value),
                                           "CompilationDatabase loading failed")

    def getCompileCommands(self, filename):
        """
        Get an iterable object providing all the CompileCommands available to
        build filename. Returns None if filename is not found in the database.
        """
        return conf.lib.clang_CompilationDatabase_getCompileCommands(
            self, fspath(filename))

    def getAllCompileCommands(self):
        """
        Get an iterable object providing all the CompileCommands available from
        the database.
        """
        return conf.lib.clang_CompilationDatabase_getAllCompileCommands(self)
class Token(Structure):
    """Represents a single token from the preprocessor.
    Tokens are effectively segments of source code. Source code is first parsed
    into tokens before being converted into the AST and Cursors.
    Tokens are obtained from parsed TranslationUnit instances. You currently
    can't create tokens manually.
    """

    # Mirrors the C-side CXToken struct layout.
    _fields_ = [
        ('int_data', c_uint * 4),
        ('ptr_data', c_void_p)
    ]

    @property
    def spelling(self):
        """The textual representation of this token in the source."""
        return conf.lib.clang_getTokenSpelling(self._tu, self)

    @property
    def kind(self):
        """The TokenKind of this token."""
        return TokenKind.from_value(conf.lib.clang_getTokenKind(self))

    @property
    def location(self):
        """The SourceLocation at which this Token occurs."""
        return conf.lib.clang_getTokenLocation(self._tu, self)

    @property
    def extent(self):
        """The SourceRange occupied by this Token."""
        return conf.lib.clang_getTokenExtent(self._tu, self)

    @property
    def cursor(self):
        """The Cursor this Token corresponds to."""
        result = Cursor()
        result._tu = self._tu
        conf.lib.clang_annotateTokens(self._tu, byref(self), 1, byref(result))
        return result
# Now comes the plumbing to hook up the C library.
# Register callback types in common container.
callbacks['translation_unit_includes'] = CFUNCTYPE(None, c_object_p,
POINTER(SourceLocation), c_uint, py_object)
callbacks['cursor_visit'] = CFUNCTYPE(c_int, Cursor, Cursor, py_object)
callbacks['fields_visit'] = CFUNCTYPE(c_int, Cursor, py_object)
# Functions strictly alphabetical order.
functionList = [
("clang_annotateTokens",
[TranslationUnit, POINTER(Token), c_uint, POINTER(Cursor)]),
("clang_CompilationDatabase_dispose",
[c_object_p]),
("clang_CompilationDatabase_fromDirectory",
[c_interop_string, POINTER(c_uint)],
c_object_p,
CompilationDatabase.from_result),
("clang_CompilationDatabase_getAllCompileCommands",
[c_object_p],
c_object_p,
CompileCommands.from_result),
("clang_CompilationDatabase_getCompileCommands",
[c_object_p, c_interop_string],
c_object_p,
CompileCommands.from_result),
("clang_CompileCommands_dispose",
[c_object_p]),
("clang_CompileCommands_getCommand",
[c_object_p, c_uint],
c_object_p),
("clang_CompileCommands_getSize",
[c_object_p],
c_uint),
("clang_CompileCommand_getArg",
[c_object_p, c_uint],
_CXString,
_CXString.from_result),
("clang_CompileCommand_getDirectory",
[c_object_p],
_CXString,
_CXString.from_result),
("clang_CompileCommand_getFilename",
[c_object_p],
_CXString,
_CXString.from_result),
("clang_CompileCommand_getNumArgs",
[c_object_p],
c_uint),
("clang_codeCompleteAt",
[TranslationUnit, c_interop_string, c_int, c_int, c_void_p, c_int, c_int],
POINTER(CCRStructure)),
("clang_codeCompleteGetDiagnostic",
[CodeCompletionResults, c_int],
Diagnostic),
("clang_codeCompleteGetNumDiagnostics",
[CodeCompletionResults],
c_int),
("clang_createIndex",
[c_int, c_int],
c_object_p),
("clang_createTranslationUnit",
[Index, c_interop_string],
c_object_p),
("clang_CXXConstructor_isConvertingConstructor",
[Cursor],
bool),
("clang_CXXConstructor_isCopyConstructor",
[Cursor],
bool),
("clang_CXXConstructor_isDefaultConstructor",
[Cursor],
bool),
("clang_CXXConstructor_isMoveConstructor",
[Cursor],
bool),
("clang_CXXField_isMutable",
[Cursor],
bool),
("clang_CXXMethod_isConst",
[Cursor],
bool),
("clang_CXXMethod_isDefaulted",
[Cursor],
bool),
("clang_CXXMethod_isPureVirtual",
[Cursor],
bool),
("clang_CXXMethod_isStatic",
[Cursor],
bool),
("clang_CXXMethod_isVirtual",
[Cursor],
bool),
("clang_CXXRecord_isAbstract",
[Cursor],
bool),
("clang_EnumDecl_isScoped",
[Cursor],
bool),
("clang_defaultDiagnosticDisplayOptions",
[],
c_uint),
("clang_defaultSaveOptions",
[TranslationUnit],
c_uint),
("clang_disposeCodeCompleteResults",
[CodeCompletionResults]),
# ("clang_disposeCXTUResourceUsage",
# [CXTUResourceUsage]),
("clang_disposeDiagnostic",
[Diagnostic]),
("clang_disposeIndex",
[Index]),
("clang_disposeString",
[_CXString]),
("clang_disposeTokens",
[TranslationUnit, POINTER(Token), c_uint]),
("clang_disposeTranslationUnit",
[TranslationUnit]),
("clang_equalCursors",
[Cursor, Cursor],
bool),
("clang_equalLocations",
[SourceLocation, SourceLocation],
bool),
("clang_equalRanges",
[SourceRange, SourceRange],
bool),
("clang_equalTypes",
[Type, Type],
bool),
("clang_formatDiagnostic",
[Diagnostic, c_uint],
_CXString,
_CXString.from_result),
("clang_getArgType",
[Type, c_uint],
Type,
Type.from_result),
("clang_getArrayElementType",
[Type],
Type,
Type.from_result),
("clang_getArraySize",
[Type],
c_longlong),
("clang_getFieldDeclBitWidth",
[Cursor],
c_int),
("clang_getCanonicalCursor",
[Cursor],
Cursor,
Cursor.from_cursor_result),
("clang_getCanonicalType",
[Type],
Type,
Type.from_result),
("clang_getChildDiagnostics",
[Diagnostic],
c_object_p),
("clang_getCompletionAvailability",
[c_void_p],
c_int),
("clang_getCompletionBriefComment",
[c_void_p],
_CXString,
_CXString.from_result),
("clang_getCompletionChunkCompletionString",
[c_void_p, c_int],
c_object_p),
("clang_getCompletionChunkKind",
[c_void_p, c_int],
c_int),
("clang_getCompletionChunkText",
[c_void_p, c_int],
_CXString,
_CXString.from_result),
("clang_getCompletionPriority",
[c_void_p],
c_int),
("clang_getCString",
[_CXString],
c_interop_string,
c_interop_string.to_python_string),
("clang_getCursor",
[TranslationUnit, SourceLocation],
Cursor),
("clang_getCursorAvailability",
[Cursor],
c_int),
("clang_getCursorDefinition",
[Cursor],
Cursor,
Cursor.from_result),
("clang_getCursorDisplayName",
[Cursor],
_CXString,
_CXString.from_result),
("clang_getCursorExtent",
[Cursor],
SourceRange),
("clang_getCursorLexicalParent",
[Cursor],
Cursor,
Cursor.from_cursor_result),
("clang_getCursorLocation",
[Cursor],
SourceLocation),
("clang_getCursorReferenced",
[Cursor],
Cursor,
Cursor.from_result),
("clang_getCursorReferenceNameRange",
[Cursor, c_uint, c_uint],
SourceRange),
("clang_getCursorResultType",
[Cursor],
Type,
Type.from_result),
("clang_getCursorSemanticParent",
[Cursor],
Cursor,
Cursor.from_cursor_result),
("clang_getCursorSpelling",
[Cursor],
_CXString,
_CXString.from_result),
("clang_getCursorType",
[Cursor],
Type,
Type.from_result),
("clang_getCursorUSR",
[Cursor],
_CXString,
_CXString.from_result),
("clang_Cursor_getMangling",
[Cursor],
_CXString,
_CXString.from_result),
# ("clang_getCXTUResourceUsage",
# [TranslationUnit],
# CXTUResourceUsage),
("clang_getCXXAccessSpecifier",
[Cursor],
c_uint),
("clang_getDeclObjCTypeEncoding",
[Cursor],
_CXString,
_CXString.from_result),
("clang_getDiagnostic",
[c_object_p, c_uint],
c_object_p),
("clang_getDiagnosticCategory",
[Diagnostic],
c_uint),
("clang_getDiagnosticCategoryText",
[Diagnostic],
_CXString,
_CXString.from_result),
("clang_getDiagnosticFixIt",
[Diagnostic, c_uint, POINTER(SourceRange)],
_CXString,
_CXString.from_result),
("clang_getDiagnosticInSet",
[c_object_p, c_uint],
c_object_p),
("clang_getDiagnosticLocation",
[Diagnostic],
SourceLocation),
("clang_getDiagnosticNumFixIts",
[Diagnostic],
c_uint),
("clang_getDiagnosticNumRanges",
[Diagnostic],
c_uint),
("clang_getDiagnosticOption",
[Diagnostic, POINTER(_CXString)],
_CXString,
_CXString.from_result),
("clang_getDiagnosticRange",
[Diagnostic, c_uint],
SourceRange),
("clang_getDiagnosticSeverity",
[Diagnostic],
c_int),
("clang_getDiagnosticSpelling",
[Diagnostic],
_CXString,
_CXString.from_result),
("clang_getElementType",
[Type],
Type,
Type.from_result),
("clang_getEnumConstantDeclUnsignedValue",
[Cursor],
c_ulonglong),
("clang_getEnumConstantDeclValue",
[Cursor],
c_longlong),
("clang_getEnumDeclIntegerType",
[Cursor],
Type,
Type.from_result),
("clang_getFile",
[TranslationUnit, c_interop_string],
c_object_p),
("clang_getFileName",
[File],
_CXString,
_CXString.from_result),
("clang_getFileTime",
[File],
c_uint),
("clang_getIBOutletCollectionType",
[Cursor],
Type,
Type.from_result),
("clang_getIncludedFile",
[Cursor],
c_object_p,
File.from_result),
("clang_getInclusions",
[TranslationUnit, callbacks['translation_unit_includes'], py_object]),
("clang_getInstantiationLocation",
[SourceLocation, POINTER(c_object_p), POINTER(c_uint), POINTER(c_uint),
POINTER(c_uint)]),
("clang_getLocation",
[TranslationUnit, File, c_uint, c_uint],
SourceLocation),
("clang_getLocationForOffset",
[TranslationUnit, File, c_uint],
SourceLocation),
("clang_getNullCursor",
None,
Cursor),
("clang_getNumArgTypes",
[Type],
c_uint),
("clang_getNumCompletionChunks",
[c_void_p],
c_int),
("clang_getNumDiagnostics",
[c_object_p],
c_uint),
("clang_getNumDiagnosticsInSet",
[c_object_p],
c_uint),
("clang_getNumElements",
[Type],
c_longlong),
("clang_getNumOverloadedDecls",
[Cursor],
c_uint),
("clang_getOverloadedDecl",
[Cursor, c_uint],
Cursor,
Cursor.from_cursor_result),
("clang_getPointeeType",
[Type],
Type,
Type.from_result),
("clang_getRange",
[SourceLocation, SourceLocation],
SourceRange),
("clang_getRangeEnd",
[SourceRange],
SourceLocation),
("clang_getRangeStart",
[SourceRange],
SourceLocation),
("clang_getResultType",
[Type],
Type,
Type.from_result),
("clang_getSpecializedCursorTemplate",
[Cursor],
Cursor,
Cursor.from_cursor_result),
("clang_getTemplateCursorKind",
[Cursor],
c_uint),
("clang_getTokenExtent",
[TranslationUnit, Token],
SourceRange),
("clang_getTokenKind",
[Token],
c_uint),
("clang_getTokenLocation",
[TranslationUnit, Token],
SourceLocation),
("clang_getTokenSpelling",
[TranslationUnit, Token],
_CXString,
_CXString.from_result),
("clang_getTranslationUnitCursor",
[TranslationUnit],
Cursor,
Cursor.from_result),
("clang_getTranslationUnitSpelling",
[TranslationUnit],
_CXString,
_CXString.from_result),
("clang_getTUResourceUsageName",
[c_uint],
c_interop_string,
c_interop_string.to_python_string),
("clang_getTypeDeclaration",
[Type],
Cursor,
Cursor.from_result),
("clang_getTypedefDeclUnderlyingType",
[Cursor],
Type,
Type.from_result),
("clang_getTypedefName",
[Type],
_CXString,
_CXString.from_result),
("clang_getTypeKindSpelling",
[c_uint],
_CXString,
_CXString.from_result),
("clang_getTypeSpelling",
[Type],
_CXString,
_CXString.from_result),
("clang_hashCursor",
[Cursor],
c_uint),
("clang_isAttribute",
[CursorKind],
bool),
("clang_isConstQualifiedType",
[Type],
bool),
("clang_isCursorDefinition",
[Cursor],
bool),
("clang_isDeclaration",
[CursorKind],
bool),
("clang_isExpression",
[CursorKind],
bool),
("clang_isFileMultipleIncludeGuarded",
[TranslationUnit, File],
bool),
("clang_isFunctionTypeVariadic",
[Type],
bool),
("clang_isInvalid",
[CursorKind],
bool),
("clang_isPODType",
[Type],
bool),
("clang_isPreprocessing",
[CursorKind],
bool),
("clang_isReference",
[CursorKind],
bool),
("clang_isRestrictQualifiedType",
[Type],
bool),
("clang_isStatement",
[CursorKind],
bool),
("clang_isTranslationUnit",
[CursorKind],
bool),
("clang_isUnexposed",
[CursorKind],
bool),
("clang_isVirtualBase",
[Cursor],
bool),
("clang_isVolatileQualifiedType",
[Type],
bool),
("clang_parseTranslationUnit",
[Index, c_interop_string, c_void_p, c_int, c_void_p, c_int, c_int],
c_object_p),
("clang_reparseTranslationUnit",
[TranslationUnit, c_int, c_void_p, c_int],
c_int),
("clang_saveTranslationUnit",
[TranslationUnit, c_interop_string, c_uint],
c_int),
("clang_tokenize",
[TranslationUnit, SourceRange, POINTER(POINTER(Token)), POINTER(c_uint)]),
("clang_visitChildren",
[Cursor, callbacks['cursor_visit'], py_object],
c_uint),
("clang_Cursor_getNumArguments",
[Cursor],
c_int),
("clang_Cursor_getArgument",
[Cursor, c_uint],
Cursor,
Cursor.from_result),
("clang_Cursor_getNumTemplateArguments",
[Cursor],
c_int),
("clang_Cursor_getTemplateArgumentKind",
[Cursor, c_uint],
TemplateArgumentKind.from_id),
("clang_Cursor_getTemplateArgumentType",
[Cursor, c_uint],
Type,
Type.from_result),
("clang_Cursor_getTemplateArgumentValue",
[Cursor, c_uint],
c_longlong),
("clang_Cursor_getTemplateArgumentUnsignedValue",
[Cursor, c_uint],
c_ulonglong),
("clang_Cursor_isAnonymous",
[Cursor],
bool),
("clang_Cursor_isBitField",
[Cursor],
bool),
("clang_Cursor_getBriefCommentText",
[Cursor],
_CXString,
_CXString.from_result),
("clang_Cursor_getRawCommentText",
[Cursor],
_CXString,
_CXString.from_result),
("clang_Cursor_getOffsetOfField",
[Cursor],
c_longlong),
("clang_Type_getAlignOf",
[Type],
c_longlong),
("clang_Type_getClassType",
[Type],
Type,
Type.from_result),
("clang_Type_getNumTemplateArguments",
[Type],
c_int),
("clang_Type_getTemplateArgumentAsType",
[Type, c_uint],
Type,
Type.from_result),
("clang_Type_getOffsetOf",
[Type, c_interop_string],
c_longlong),
("clang_Type_getSizeOf",
[Type],
c_longlong),
("clang_Type_getCXXRefQualifier",
[Type],
c_uint),
("clang_Type_getNamedType",
[Type],
Type,
Type.from_result),
("clang_Type_visitFields",
[Type, callbacks['fields_visit'], py_object],
c_uint),
]
class LibclangError(Exception):
    """Raised when libclang cannot be loaded or lacks a required function."""
    def __init__(self, message):
        self.m = message
    def __str__(self):
        return self.m
def register_function(lib, item, ignore_errors):
    """Register a single function prototype with a loaded library.
    :param lib: the ctypes library object to register the function on.
    :param item: a tuple of (name, argtypes[, restype[, errcheck]]).
    :param ignore_errors: if True, silently skip functions that the loaded
        library does not export.
    :raises LibclangError: if the function is missing and ignore_errors is
        False.
    """
    # A function may not exist, if these bindings are used with an older or
    # incompatible version of libclang.so.
    try:
        func = getattr(lib, item[0])
    except AttributeError as e:
        msg = str(e) + ". Please ensure that your python bindings are "\
                       "compatible with your libclang.so version."
        if ignore_errors:
            return
        # Chain the original AttributeError so debugging retains the cause.
        raise LibclangError(msg) from e
    if len(item) >= 2:
        func.argtypes = item[1]
    if len(item) >= 3:
        func.restype = item[2]
    if len(item) == 4:
        func.errcheck = item[3]
def register_functions(lib, ignore_errors):
    """Register function prototypes with a libclang library instance.
    This must be called as part of library instantiation so Python knows how
    to call out to the shared library.
    """
    # Register every prototype declared in the module-level functionList.
    for prototype in functionList:
        register_function(lib, prototype, ignore_errors)
class Config(object):
    """Global configuration for locating and loading libclang.
    All state is class-level; the module creates a single `conf` instance
    below, through which the loaded library is accessed as `conf.lib`.
    """
    library_path = None          # Directory in which to search for libclang.
    library_file = None          # Exact filename/path of the library, if set.
    compatibility_check = True   # Verify bindings match the loaded library.
    loaded = False               # True once the library has been loaded.
    @staticmethod
    def set_library_path(path):
        """Set the path in which to search for libclang"""
        if Config.loaded:
            raise Exception("library path must be set before using " \
                            "any other functionalities in libclang.")
        Config.library_path = fspath(path)
    @staticmethod
    def set_library_file(filename):
        """Set the exact location of libclang"""
        if Config.loaded:
            raise Exception("library file must be set before using " \
                            "any other functionalities in libclang.")
        Config.library_file = fspath(filename)
    @staticmethod
    def set_compatibility_check(check_status):
        """ Perform compatibility check when loading libclang
        The python bindings are only tested and evaluated with the version of
        libclang they are provided with. To ensure correct behavior a (limited)
        compatibility check is performed when loading the bindings. This check
        will throw an exception, as soon as it fails.
        In case these bindings are used with an older version of libclang, parts
        that have been stable between releases may still work. Users of the
        python bindings can disable the compatibility check. This will cause
        the python bindings to load, even though they are written for a newer
        version of libclang. Failures now arise if unsupported or incompatible
        features are accessed. The user is required to test themselves if the
        features they are using are available and compatible between different
        libclang versions.
        """
        if Config.loaded:
            raise Exception("compatibility_check must be set before " \
                            "using any other functionalities in libclang.")
        Config.compatibility_check = check_status
    @CachedProperty
    def lib(self):
        """The loaded libclang library (loaded lazily on first access)."""
        lib = self.get_cindex_library()
        register_functions(lib, not Config.compatibility_check)
        Config.loaded = True
        return lib
    def get_filename(self):
        """Return the platform-appropriate filename or path of libclang."""
        if Config.library_file:
            return Config.library_file
        import platform
        name = platform.system()
        if name == 'Darwin':
            file = 'libclang.dylib'
        elif name == 'Windows':
            file = 'libclang.dll'
        else:
            file = 'libclang.so'
        if Config.library_path:
            file = Config.library_path + '/' + file
        return file
    def get_cindex_library(self):
        """Load and return the libclang shared library via ctypes.
        :raises LibclangError: if the library cannot be loaded.
        """
        try:
            library = cdll.LoadLibrary(self.get_filename())
        except OSError as e:
            msg = str(e) + ". To provide a path to libclang use " \
                           "Config.set_library_path() or " \
                           "Config.set_library_file()."
            raise LibclangError(msg) from e
        return library
    def function_exists(self, name):
        """Return True if the loaded library exports the named function."""
        try:
            getattr(self.lib, name)
        except AttributeError:
            return False
        return True
def register_enumerations():
    # Populate TokenKind's registry from the static table declared in
    # clang.enumerations.
    for name, value in clang.enumerations.TokenKinds:
        TokenKind.register(value, name)
conf = Config()
register_enumerations()
__all__ = [
'AvailabilityKind',
'Config',
'CodeCompletionResults',
'CompilationDatabase',
'CompileCommands',
'CompileCommand',
'CursorKind',
'Cursor',
'Diagnostic',
'File',
'FixIt',
'Index',
'LinkageKind',
'SourceLocation',
'SourceRange',
'TLSKind',
'TokenKind',
'Token',
'TranslationUnitLoadError',
'TranslationUnit',
'TypeKind',
'Type',
]
| 30.057806 | 101 | 0.670839 |
aceaa2663e91e1e8f940dcf65bae3e13fec2034b | 10,779 | py | Python | pygExp/torch_geometric/datasets/reordertest.py | JovanWang/AdaptiveStreamSpTTMFramework | 3ba43efe0d78a3f93c14c03cb959db0b00927ab4 | [
"BSD-3-Clause"
] | null | null | null | pygExp/torch_geometric/datasets/reordertest.py | JovanWang/AdaptiveStreamSpTTMFramework | 3ba43efe0d78a3f93c14c03cb959db0b00927ab4 | [
"BSD-3-Clause"
] | null | null | null | pygExp/torch_geometric/datasets/reordertest.py | JovanWang/AdaptiveStreamSpTTMFramework | 3ba43efe0d78a3f93c14c03cb959db0b00927ab4 | [
"BSD-3-Clause"
] | null | null | null | import json
import os.path as osp
import torch
import numpy as np
import scipy.sparse as sp
from google_drive_downloader import GoogleDriveDownloader as gdd
from torch_geometric.data import Dataset,InMemoryDataset, Data
class Reordertest(InMemoryDataset):
    r"""Graph dataset for node elimination-order regression experiments.
    Reads Cholesky elimination graphs from two whitespace-delimited text
    files in ``raw_dir``:
    * ``Chol_Edge.txt`` -- rows of ``[graph_no, src, dst]`` directed edges.
    * ``Chol_Nodey.txt`` -- rows of ``[graph_no, node_no, degree, ...,
      elimination_position]``.
    Each graph becomes a :class:`torch_geometric.data.Data` object with
    normalised node features and a normalised node->elimination-position
    target.  The graphs are randomly split into train/val/test collections
    which are saved as three separate processed files.
    Args:
        root (string): Root directory where the dataset should be saved.
        train_type (int): Which split to load: 0 = train, 1 = validation,
            2 = test; any other value falls back to the train split.
            (default: :obj:`0`)
        transform (callable, optional): A function/transform that takes in an
            :obj:`torch_geometric.data.Data` object and returns a transformed
            version. The data object will be transformed before every access.
            (default: :obj:`None`)
        pre_transform (callable, optional): A function/transform that takes in
            an :obj:`torch_geometric.data.Data` object and returns a
            transformed version. The data object will be transformed before
            being saved to disk. (default: :obj:`None`)
    """
    def __init__(self, root,
                 train_type=0, transform=None, pre_transform=None):
        super(Reordertest, self).__init__(root, transform, pre_transform)
        # 1 and 2 select the val/test files; anything else loads train.
        split_index = train_type if train_type in (1, 2) else 0
        self.data, self.slices = torch.load(self.processed_paths[split_index])
    @property
    def raw_file_names(self):
        # NOTE(review): process() actually reads 'Chol_Edge.txt' and
        # 'Chol_Nodey.txt'; these names look stale -- confirm before relying
        # on raw-file presence checks.
        return ['reordertest.cites', 'reordertest.content']
    @property
    def processed_file_names(self):
        return ['{}.pt'.format(s) for s in ['train', 'val', 'test']]
    def download(self):
        # Raw files are expected to be placed in raw_dir manually.
        pass
    def process(self):
        """Parse the raw files, split the graphs, and save each split."""
        edges = np.loadtxt(osp.join(self.raw_dir, 'Chol_Edge.txt'))
        nodes = np.loadtxt(osp.join(self.raw_dir, 'Chol_Nodey.txt'))
        # Randomly partition graph indices into train (1/3), val (1/6) and
        # test (1/2).  NOTE: no RNG seed is set, so the split differs between
        # runs of process().
        num_graphs = int(np.max(nodes[:, 0]))
        graph_ids = np.arange(0, num_graphs, 1)
        np.random.shuffle(graph_ids)
        train_ids = graph_ids[0:int(num_graphs / 3)]
        val_ids = graph_ids[int(num_graphs / 3):int(num_graphs / 2)]
        test_ids = graph_ids[int(num_graphs / 2):num_graphs]
        train_list, val_list, test_list = [], [], []
        for i in range(num_graphs):
            # TODO(review): debug truncation preserved from the original --
            # only the first 101 graphs are ever processed.
            if i > 100:
                continue
            data = self._build_graph(edges, nodes, i)
            if self.pre_transform is not None:
                data = self.pre_transform(data)
            if i in train_ids:
                train_list.append(data)
            if i in val_ids:
                val_list.append(data)
            if i in test_ids:
                test_list.append(data)
        torch.save(self.collate(train_list), self.processed_paths[0])
        torch.save(self.collate(val_list), self.processed_paths[1])
        torch.save(self.collate(test_list), self.processed_paths[2])
    def _build_graph(self, edges, nodes, graph_no):
        """Build the Data object for a single graph number."""
        # Directed edge list for this graph (columns: [graph, src, dst]).
        graph_edges = edges[np.where(edges[:, 0] == graph_no)]
        row = torch.from_numpy(graph_edges[:, 1]).to(torch.long)
        col = torch.from_numpy(graph_edges[:, 2]).to(torch.long)
        edge_index = torch.stack([row, col], dim=0)
        # Node rows for this graph, sorted by node number.
        graph_nodes = nodes[np.where(nodes[:, 0] == graph_no)]
        graph_nodes = graph_nodes[graph_nodes[:, 1].argsort()]
        # Features: node number and degree, normalised per column by the
        # column maximum, with the graph number prepended as column 0.
        feats = graph_nodes[:, 1:3]
        feats = feats / feats.max(axis=0)
        x_np = np.hstack((graph_nodes[:, 0:1], feats))
        x = torch.from_numpy(x_np).to(torch.float)
        # The last column maps elimination position -> node; invert it to a
        # node -> position mapping, then normalise into (0, 1).
        order = graph_nodes[:, -1]
        y_np = np.zeros(len(order))
        for position, node in enumerate(order):
            y_np[int(node)] = position
        y_np = (y_np + 1) / (y_np.max(axis=0) + 2)
        y = torch.from_numpy(y_np).to(torch.float)
        return Data(x=x, edge_index=edge_index, y=y)
    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)
aceaa2ae16c5c6840d9880426825688f0790cce0 | 11,501 | py | Python | bmo_log_parse.py | zaneb/bmo-log-parse | 431cc44ceb23b48b65c603c90f901b5d167b3349 | [
"Apache-2.0"
] | 1 | 2021-05-27T10:34:14.000Z | 2021-05-27T10:34:14.000Z | bmo_log_parse.py | zaneb/bmo-log-parse | 431cc44ceb23b48b65c603c90f901b5d167b3349 | [
"Apache-2.0"
] | null | null | null | bmo_log_parse.py | zaneb/bmo-log-parse | 431cc44ceb23b48b65c603c90f901b5d167b3349 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility for filtering and displaying logs from the Metal³ baremetal-operator.
Written by Zane Bitter.
"""
import collections
import contextlib
import datetime
import functools
import itertools
import json
import re
import sys
import autopage
try:
import yaml
except ImportError:
pretty_print = functools.partial(json.dumps, indent=2)
else:
pretty_print = functools.partial(yaml.safe_dump, default_flow_style=False)
LOGGERS = (
COMMAND,
RUNTIME,
CONTROLLER,
PROVISIONER,
) = (
{'cmd', 'setup', ''},
{'controller-runtime'},
{'controller', 'baremetalhost', 'controllers'},
{'baremetalhost_ironic', 'provisioner'},
)
LEVELS = (
INFO, ERROR,
) = (
'info', 'error',
)
_matcher = re.compile(r'''
(?:
20[0-9]{2}-[0-1][0-9]-[0-3][0-9] # ISO8601 date
T[0-2][0-9]:[0-5][0-9]:[0-6][0-9](?:\.[0-9]+)Z # ISO8601 time
[ ])? # drop any leading datetime
(\{.*?\}) # match JSON object
\n''', re.VERBOSE).fullmatch
class ParseException(Exception):
    """Raised when a line that looks like a JSON record fails to parse."""
    def __init__(self, error, match, lineno=None):
        self.err_msg = error.msg
        self.lineno = lineno
        # Report the column relative to the whole line by offsetting the
        # JSON decoder's column with the start of the matched JSON group.
        self.column = error.colno + match.start(1)
        self.line = match.group(0)
        location = f'column {self.column}'
        if self.lineno is not None:
            location = f'line {self.lineno}, ' + location
        super().__init__(f'Record parse error: {self.err_msg} '
                         f'(at {location}): {self.line}')
def _parse_record(indexed_match):
    """Decode one (line number, regex match) pair into a dict, or None.
    Lines that did not match the record pattern pass through as None;
    matched lines that fail JSON decoding raise ParseException.
    """
    lineno, match = indexed_match
    if match is None:
        return None
    payload = match.group(1)
    try:
        return json.loads(payload)
    except json.JSONDecodeError as err:
        raise ParseException(err, match, lineno)
def read_records(logstream):
    """Yield a Record for each parseable log line in the stream."""
    matches = map(_matcher, logstream)
    parsed = map(_parse_record, enumerate(matches))
    return (Record(payload) for payload in parsed if payload is not None)
class Record:
    """Class representing a single log record."""
    # Keys shared by every zap-style JSON log record.
    COMMON_FIELDS = (
        LEVEL, TIMESTAMP, LOGGER, MESSAGE,
    ) = (
        'level', 'ts', 'logger', 'msg',
    )
    def __init__(self, data):
        """Initialise from the (JSON) log text."""
        self.level = data.pop(self.LEVEL)
        ts = float(data.pop(self.TIMESTAMP))
        utc = datetime.timezone.utc
        self.timestamp = datetime.datetime.fromtimestamp(ts, tz=utc)
        # Only the first dotted component identifies the logging module.
        self.logger = data.pop(self.LOGGER, '').split('.', 1)[0]
        self.message = data.pop(self.MESSAGE)
        self.context = None
        # The fully-qualified "namespace/name" of the host appears under
        # different keys depending on which module emitted the record; the
        # provisioner uses a '~'-separated "host" key instead.
        fq_name = (data.get('baremetalhost',
                            data.get('Request.Name',
                                     data.get('name')))
                   if self.logger not in PROVISIONER
                   else data.get('host', '').replace('~', '/', 1) or None)
        if 'stacktrace' in data:
            # Error records carry a stack trace; fall back to the 'request'
            # key for the host name if none was found above.
            if fq_name is None:
                fq_name = data.get('request')
            self.context = data.pop('stacktrace')
        elif (self.message == 'received introspection data' and
                'data' in data):
            # Pretty-print the (potentially large) introspection payload.
            self.context = pretty_print(data.pop('data'))
        ns_name = fq_name.split('/', 1) if fq_name is not None else (None,
                                                                     None)
        self.name = ns_name[-1]
        # Prefer explicit namespace keys; otherwise take the prefix of the
        # fully-qualified name when one was present.
        self.namespace = data.get('namespace',
                                  data.get('Request.Namespace',
                                           ns_name[0] if len(ns_name) > 1
                                           else None))
        self.error = data.pop('error', None) if self.level == ERROR else None
        # Drop noisy keys that add no information to the display.
        data.pop('errorVerbose', None)
        data.pop('reconciler group', None)
        data.pop('reconciler kind', None)
        self.data = data
    def format(self, highlight=False):
        """
        Format the log record as a human-readable string.
        :param highlight: Use ANSI escape codes to set colours.
        """
        # esc is a (dim, bright, reset) triple of ANSI codes (or empty
        # strings when highlighting is disabled).
        if highlight:
            if self.level == ERROR:
                esc = ('\033[91m', '\033[31m', '\033[39m')
            else:
                esc = ('\033[37m', '\033[39m', '\033[39m')
        else:
            esc = ('', '', '')
        extra_data = ''
        if self.data:
            items = ', '.join(f'{k}: {repr(v)}' for k, v in self.data.items())
            extra_data = f' {{{items}}}'
            if highlight:
                extra_data = f'{esc[0]}{extra_data}{esc[2]}'
        else:
            if highlight:
                extra_data = esc[2]
        if self.error is not None:
            err = f'{esc[1]}{self.error}{esc[2]}'
            extra_data = '\n'.join([extra_data, err])
        if self.context is not None:
            ct = self.context
            if highlight:
                ct = '\n'.join(f'\033[90m{l}\033[39m' for l in ct.splitlines())
            extra_data = '\n'.join([extra_data, ct])
        # Trim the '+00:00' UTC offset from the ISO8601 timestamp.
        timestamp = self.timestamp.isoformat(timespec='milliseconds')[:-6]
        return f'{esc[0]}{timestamp} {esc[1]}{self.message}{extra_data}'
    def __str__(self):
        return self.format()
# A Filter pairs a filtering strategy (filter/dropwhile/takewhile) with the
# predicate it applies to each Record.
Filter = collections.namedtuple('Filter', ['filterfunc', 'predicate'])
def filtered_records(logstream, filters):
    """Iterate over all log Records in the stream that match the filters."""
    records = read_records(logstream)
    for flt in filters:
        records = flt.filterfunc(flt.predicate, records)
    return records
def process_log(input_stream, filters, output_stream=sys.stdout,
                highlight=False):
    """Write each filtered record from the input log to the output stream.
    :param highlight: Use ANSI escape codes to set colours.
    """
    for record in filtered_records(input_stream, filters):
        line = record.format(highlight)
        output_stream.write(line + '\n')
def list_host_names(input_stream, filters, output_stream=sys.stdout):
    """Write each distinct host name, in order of first appearance."""
    seen = set()
    for record in filtered_records(input_stream, filters):
        name = record.name
        if name is None or name in seen:
            continue
        output_stream.write(f'{name}\n')
        seen.add(name)
def get_filters(options):
    """Yield the Filters specified by the given CLI options.
    Order matters: the start-time filter must run first (dropwhile) and the
    end-time filter last (takewhile) over the already-filtered stream.
    """
    utc = datetime.timezone.utc
    if options.start is not None:
        start = options.start
        if start.tzinfo is None:
            start = start.replace(tzinfo=utc)
        yield Filter(itertools.dropwhile,
                     lambda record, cutoff=start: record.timestamp < cutoff)
    if options.error:
        yield Filter(filter, lambda record: record.level == ERROR)
    if options.controller_only:
        yield Filter(filter, lambda record: record.logger in CONTROLLER)
    if options.provisioner_only:
        yield Filter(filter, lambda record: record.logger in PROVISIONER)
    if options.name is not None:
        yield Filter(filter,
                     lambda record, wanted=options.name:
                         record.name == wanted)
    if options.namespace is not None:
        yield Filter(filter,
                     lambda record, wanted=options.namespace:
                         record.namespace == wanted)
    if options.end is not None:
        end = options.end
        if end.tzinfo is None:
            end = end.replace(tzinfo=utc)
        yield Filter(itertools.takewhile,
                     lambda record, cutoff=end: record.timestamp <= cutoff)
def parse_datetime(dtstr):
    """Parse an ISO8601-style date/time string into a datetime.
    On Python versions without datetime.fromisoformat (< 3.7), fall back to
    building a strptime format matching the parts present in the string.
    """
    if hasattr(datetime.datetime, 'fromisoformat'):
        return datetime.datetime.fromisoformat(dtstr)
    parts = ['%Y-%m-%d']
    if 'T' in dtstr:
        parts.append('T%H:%M')
    if dtstr.count(':') > 1:
        parts.append(':%S')
    if '.' in dtstr:
        parts.append('.%f')
    return datetime.datetime.strptime(dtstr, ''.join(parts))
def get_options(args=None):
    """Parse the CLI arguments into options."""
    from autopage import argparse
    import pydoc
    # Reuse the module docstring as the --help description.
    description = pydoc.getdoc(sys.modules[__name__])
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('logfile', nargs='?', default='-',
                        help='Input logfile (or "-" to read stdin)')
    # The per-module filters are mutually exclusive.
    module_group = parser.add_mutually_exclusive_group()
    module_group.add_argument('-c', '--controller-only', action='store_true',
                              help='Include only controller module logs')
    module_group.add_argument('-p', '--provisioner-only', action='store_true',
                              help='Include only provisioner module logs')
    parser.add_argument('--error', action='store_true',
                        help='Include only logs at ERROR level')
    parser.add_argument('-n', '--name', default=None,
                        help='Filter by a particular host name')
    parser.add_argument('--namespace', default=None,
                        help='Filter by a particular host namespace')
    parser.add_argument('-s', '--start', default=None, type=parse_datetime,
                        help='Skip ahead to a given time')
    parser.add_argument('-e', '--end', default=None, type=parse_datetime,
                        help='Stop reading at a given time')
    parser.add_argument('--list-names', action='store_true',
                        help='List the names of hosts in the log')
    return parser.parse_args(args)
def input_stream(filename):
    """
    Return a context manager for an input stream given the filename option.

    Returns stdin if the filename is '-'.
    """
    if filename != '-':
        return open(filename)

    @contextlib.contextmanager
    def passthrough(stream):
        # Hand back stdin without closing it when the `with` block exits.
        yield stream

    return passthrough(sys.stdin)
def _report_error(message, stream=None):
    """Write *message* plus a newline to *stream* (default stderr).

    On a TTY the message is wrapped in ANSI codes to colour it yellow.
    """
    out = sys.stderr if stream is None else stream
    if out.isatty():
        out.write(f'\033[93m{message}\033[39m\n')
    else:
        out.write(f'{message}\n')
def main():
    """Run the log parser, reading options from the command line."""
    try:
        options = get_options()
    except Exception as exc:
        # Bad arguments (e.g. unparseable dates) are reported, not raised.
        _report_error(str(exc))
        return 1
    filters = get_filters(options)
    with input_stream(options.logfile) as logstream:
        if logstream.isatty():
            # stdin is an interactive terminal, so nothing was piped in.
            _report_error('No input found.')
            return 1
        # Match the pager's buffering to the input stream so output appears
        # promptly when following a live stream.
        line_buffer = autopage.line_buffer_from_input(logstream)
        error_strategy = autopage.ErrorStrategy.BACKSLASH_REPLACE
        pager = autopage.AutoPager(line_buffering=line_buffer,
                                   reset_on_exit=True,
                                   errors=error_strategy)
        # Only colourise when output actually goes to a terminal/pager.
        highlight = pager.to_terminal()
        try:
            with pager as output_stream:
                if options.list_names:
                    list_host_names(logstream, filters, output_stream)
                else:
                    process_log(logstream, filters, output_stream, highlight)
        except KeyboardInterrupt:
            # Quitting the pager mid-stream is a normal way to exit.
            pass
        except ParseException as exc:
            _report_error(str(exc))
    return pager.exit_code()
if __name__ == '__main__':
    # Propagate the parser/pager exit status to the shell.
    sys.exit(main())
| 33.628655 | 79 | 0.578037 |
aceaa31256b6c3d07e5739cb7070348e40448836 | 732 | py | Python | bookshelf/apps/users/serializers.py | Vostbur/bookshelf | cfa7a739ccf80078d48aa6ab9c3f1b8d1d3c778b | [
"MIT"
] | null | null | null | bookshelf/apps/users/serializers.py | Vostbur/bookshelf | cfa7a739ccf80078d48aa6ab9c3f1b8d1d3c778b | [
"MIT"
] | null | null | null | bookshelf/apps/users/serializers.py | Vostbur/bookshelf | cfa7a739ccf80078d48aa6ab9c3f1b8d1d3c778b | [
"MIT"
] | null | null | null | from rest_framework import serializers
from .models import User
class UserRegistrSerializer(serializers.ModelSerializer):
    """Registration serializer that validates a password confirmation field."""

    # Confirmation field; collected from the client but not a User model field.
    password2 = serializers.CharField()

    class Meta:
        model = User
        fields = ['email', 'username', 'password', 'password2']

    def save(self, *args, **kwargs):
        """Create and return a new ``User`` after checking the two passwords match.

        Raises:
            serializers.ValidationError: if ``password`` and ``password2`` differ.
        """
        user = User(
            email=self.validated_data['email'],
            username=self.validated_data['username'],
        )
        password = self.validated_data['password']
        password2 = self.validated_data['password2']
        if password != password2:
            # Bug fix: the error dict was keyed by the *password value*
            # ({password: ...}), which both mislabeled the error and leaked
            # the plaintext password in the response. Key by field name.
            raise serializers.ValidationError({'password': "Пароль не совпадает"})
        # Hash the password instead of storing it as plaintext.
        user.set_password(password)
        user.save()
        return user
| 30.5 | 80 | 0.639344 |
aceaa37d616ef5a4dd49915a7d13ca731ed58a83 | 2,940 | py | Python | qidata/qidataaudiofile.py | aldebaran/qidata | 8798f3e488467ac929a981853a95a6045668c786 | [
"BSD-3-Clause"
] | 1 | 2017-11-10T09:50:33.000Z | 2017-11-10T09:50:33.000Z | qidata/qidataaudiofile.py | aldebaran/qidata | 8798f3e488467ac929a981853a95a6045668c786 | [
"BSD-3-Clause"
] | 2 | 2017-10-02T13:54:13.000Z | 2017-10-06T14:09:30.000Z | qidata/qidataaudiofile.py | aldebaran/qidata | 8798f3e488467ac929a981853a95a6045668c786 | [
"BSD-3-Clause"
] | 4 | 2017-10-02T08:59:03.000Z | 2022-02-13T20:44:06.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Softbank Robotics Europe
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
QiDataSensorFile specialization for audio files
"""
# Local modules
from qidata import DataType
from qidata.qidatasensorfile import QiDataSensorFile
class QiDataAudioFile(QiDataSensorFile):
    """QiDataSensorFile specialization for audio files."""

    # ───────────
    # Constructor

    def __init__(self, file_path, mode = "r"):
        # All opening/parsing is delegated to the generic sensor-file base.
        QiDataSensorFile.__init__(self, file_path, mode)

    # ──────────
    # Properties

    @property
    def type(self):
        # Fall back to the generic AUDIO type when the base class has no
        # specific type stored for this file.
        _t = QiDataSensorFile.type.fget(self)
        return _t if _t else DataType.AUDIO

    @type.setter
    def type(self, new_type):
        # Only AUDIO* data types are legal for an audio file.
        if not str(new_type).startswith("AUDIO"):
            raise TypeError("Cannot convert %s to %s"%(self.type, new_type))
        QiDataSensorFile.type.fset(self, new_type)

    @property
    def raw_data(self):
        """
        Returns the raw data of audio file

        :raise: NotImplementedError
        """
        raise NotImplementedError

    def _isLocationValid(self, location):
        """
        Checks if a location given with an annotation is correct

        :param location: The location to evaluate
        :type location: list or None

        .. note::
            The location is expected to be of the form [0,0]. It represents a
            subset of samples, from the first, included, to the last, excluded.
        """
        # ``None`` means the annotation covers the whole file.
        if location is None: return True
        try:
            return (
                isinstance(location, list)\
                and len(location) == 2\
                and isinstance(location[0],int)\
                and isinstance(location[1],int)
            )
        except Exception:
            return False
aceaa428bf2934efe9c363c3c68e73c02fc7bc55 | 12,990 | py | Python | sdk/security/azure-mgmt-security/azure/mgmt/security/operations/_iot_alert_types_operations.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/security/azure-mgmt-security/azure/mgmt/security/operations/_iot_alert_types_operations.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/security/azure-mgmt-security/azure/mgmt/security/operations/_iot_alert_types_operations.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class IotAlertTypesOperations(object):
"""IotAlertTypesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.security.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
solution_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.IotAlertTypeList"
"""List IoT alert types.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param solution_name: The name of the IoT Security solution.
:type solution_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IotAlertTypeList, or the result of cls(response)
:rtype: ~azure.mgmt.security.models.IotAlertTypeList
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotAlertTypeList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'solutionName': self._serialize.url("solution_name", solution_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IotAlertTypeList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}/iotAlertTypes'} # type: ignore
def get(
self,
resource_group_name, # type: str
solution_name, # type: str
iot_alert_type_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.IotAlertType"
"""Get IoT alert type.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param solution_name: The name of the IoT Security solution.
:type solution_name: str
:param iot_alert_type_name: Name of the alert type.
:type iot_alert_type_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IotAlertType, or the result of cls(response)
:rtype: ~azure.mgmt.security.models.IotAlertType
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotAlertType"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'solutionName': self._serialize.url("solution_name", solution_name, 'str'),
'iotAlertTypeName': self._serialize.url("iot_alert_type_name", iot_alert_type_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IotAlertType', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}/iotAlertTypes/{iotAlertTypeName}'} # type: ignore
def list_at_subscription_scope(
self,
**kwargs # type: Any
):
# type: (...) -> "_models.IotAlertTypeList"
"""List IoT alert types.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IotAlertTypeList, or the result of cls(response)
:rtype: ~azure.mgmt.security.models.IotAlertTypeList
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotAlertTypeList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-06-preview"
accept = "application/json"
# Construct URL
url = self.list_at_subscription_scope.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IotAlertTypeList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_at_subscription_scope.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/iotAlertTypes'} # type: ignore
def get_at_subscription_scope(
self,
iot_alert_type_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.IotAlertType"
"""Get IoT alert type.
:param iot_alert_type_name: Name of the alert type.
:type iot_alert_type_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IotAlertType, or the result of cls(response)
:rtype: ~azure.mgmt.security.models.IotAlertType
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotAlertType"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-06-preview"
accept = "application/json"
# Construct URL
url = self.get_at_subscription_scope.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'iotAlertTypeName': self._serialize.url("iot_alert_type_name", iot_alert_type_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IotAlertType', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_subscription_scope.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/iotAlertTypes/{iotAlertTypeName}'} # type: ignore
| 47.065217 | 210 | 0.666128 |
aceaa524a86ddbacc01694e0ca76e66b86d52691 | 729 | py | Python | webapp/user/decorators.py | Ilya-Galaktionov/Home-barman | 6b7fc9dcf1095cd2d36ba9109dbe1c7a3996ba38 | [
"MIT"
] | null | null | null | webapp/user/decorators.py | Ilya-Galaktionov/Home-barman | 6b7fc9dcf1095cd2d36ba9109dbe1c7a3996ba38 | [
"MIT"
] | null | null | null | webapp/user/decorators.py | Ilya-Galaktionov/Home-barman | 6b7fc9dcf1095cd2d36ba9109dbe1c7a3996ba38 | [
"MIT"
] | 1 | 2022-03-18T19:19:25.000Z | 2022-03-18T19:19:25.000Z | from functools import wraps
from flask import current_app, flash, request, redirect, url_for
from flask_login import config, current_user
def admin_required(func):
@wraps(func)
def decorated_view(*args, **kwargs):
if request.method in config.EXEMPT_METHODS:
return func(*args, **kwargs)
elif current_app.config.get('LOGIN_DISABLED'):
return func(*args, **kwargs)
elif not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
elif not current_user.is_admin:
flash('Эта страница доступна только админам')
return redirect(url_for('index'))
return func(*args, **kwargs)
return decorated_view
| 34.714286 | 64 | 0.679012 |
aceaa5ab4bd27fb2c361e9da446acf5e2b170dea | 263 | py | Python | MentalMessaging/routing.py | DhyeyLalseta/MentalMessaging | c3c040acb31215ee49d3a0ac931994870bf0d055 | [
"MIT"
] | 1 | 2020-09-14T21:10:40.000Z | 2020-09-14T21:10:40.000Z | MentalMessaging/routing.py | ArnavJain23/MentalMessagingWebApp | e0cd65ce25e843b73f99a02a4ff2c8c19873d6c7 | [
"MIT"
] | null | null | null | MentalMessaging/routing.py | ArnavJain23/MentalMessagingWebApp | e0cd65ce25e843b73f99a02a4ff2c8c19873d6c7 | [
"MIT"
] | null | null | null | from channels.routing import ProtocolTypeRouter, URLRouter
from channels.auth import AuthMiddlewareStack
import chat.routing
application = ProtocolTypeRouter(
{
"websocket": AuthMiddlewareStack(URLRouter(chat.routing.websocket_urlpatterns)),
}
)
| 26.3 | 88 | 0.790875 |
aceaa6fda4df4009160646bef12f2a35bf6e248c | 844 | py | Python | python/conferenceattendee.py | tthatcher95/Astro-Budget-Site | a304dbad91d51f98d3658a454d60ae39c2a21804 | [
"Unlicense"
] | null | null | null | python/conferenceattendee.py | tthatcher95/Astro-Budget-Site | a304dbad91d51f98d3658a454d60ae39c2a21804 | [
"Unlicense"
] | null | null | null | python/conferenceattendee.py | tthatcher95/Astro-Budget-Site | a304dbad91d51f98d3658a454d60ae39c2a21804 | [
"Unlicense"
] | null | null | null | from sqlalchemy import Column, Integer, String, Float, ForeignKey, Sequence
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class ConferenceAttendee(Base):
__tablename__ = 'conferenceattendee'
conferenceattendeeid = Column (Integer, Sequence('conferenceattendee_conferenceattendeeid_seq'), primary_key=True)
conferenceid = Column (Integer, ForeignKey("conferences.conferenceid"), nullable=False)
proposalid = Column (Integer, ForeignKey("proposals.proposalid"), nullable=False)
peopleid = Column (Integer, ForeignKey("people.peopleid"), nullable=False)
meetindays = Column (Integer)
traveldays = Column (Integer)
def __repr__(self):
return "<ConferenceAttendee(meetingdays='%d', traveldays='%d')>" % (self.meetingdays, self.traveldays)
| 46.888889 | 116 | 0.734597 |
aceaa7228c9e3705e9f5af4a723f430beeb76560 | 1,874 | py | Python | test/parser/unit_operators/testcases/Sinh/Sinh_generator.py | k4rth33k/dnnc-operators | a7fe3f1240c12b3438558def71fbfcd4520446c3 | [
"Apache-2.0"
] | 5 | 2019-08-16T14:35:17.000Z | 2020-07-11T23:59:22.000Z | test/parser/unit_operators/testcases/Sinh/Sinh_generator.py | k4rth33k/dnnc-operators | a7fe3f1240c12b3438558def71fbfcd4520446c3 | [
"Apache-2.0"
] | 6 | 2019-08-12T04:38:14.000Z | 2019-09-04T16:32:13.000Z | test/parser/unit_operators/testcases/Sinh/Sinh_generator.py | k4rth33k/dnnc-operators | a7fe3f1240c12b3438558def71fbfcd4520446c3 | [
"Apache-2.0"
] | 7 | 2019-08-15T13:29:00.000Z | 2019-09-09T17:08:04.000Z |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import os, sys
import numpy as np
separator = os.path.sep
from onnx import *
sys.path.append(".."+separator+".."+separator+".."+separator+".."+separator+"python/parser")
from onnx_parser import *
op_name = 'Sinh'
inputs = [helper.make_tensor_value_info("0", TensorProto.FLOAT, (2, 3, 4))]
outputs = [helper.make_tensor_value_info("1", TensorProto.FLOAT, (2, 3, 4))]
nodes = []
nodes.append(helper.make_node("Sinh", ["0"], ["1"]))
graph = helper.make_graph(nodes, op_name+"_graph", inputs, outputs)
opset = (OperatorSetIdProto(version=11),)
model = helper.make_model(graph, opset_imports=opset)
onnx.checker.check_model(model)
t_prefix = ".." + separator + "testcases" + separator + op_name + separator + op_name
g_prefix = ".." + separator + "gold_files" + separator + op_name
onnx.save(model, t_prefix+".onnx")
parse(t_prefix+".onnx", g_prefix+".sym", onnx_output_file=t_prefix+".txt")
| 39.87234 | 93 | 0.723052 |
aceaa7b778a7adeef340c38d074559fdd018e8b4 | 1,084 | py | Python | common/config.py | jedimasterbot/ipf-analyzer | 72d55f3fcaa7ec0e3abe6736993fbdd069e8225f | [
"Apache-2.0"
] | 7 | 2021-04-07T11:30:35.000Z | 2022-03-31T02:22:47.000Z | common/config.py | jedimasterbot/ipf-analyzer | 72d55f3fcaa7ec0e3abe6736993fbdd069e8225f | [
"Apache-2.0"
] | null | null | null | common/config.py | jedimasterbot/ipf-analyzer | 72d55f3fcaa7ec0e3abe6736993fbdd069e8225f | [
"Apache-2.0"
] | 4 | 2021-08-18T00:34:37.000Z | 2022-02-11T03:14:32.000Z | # Empty Dictionary that gets populated with data
# Accumulator dictionary, populated at runtime with gathered results.
empty = {}

# AbuseIPDB: IP reputation check endpoint.
abuseUrl = 'https://api.abuseipdb.com/api/v2/check'

# CAPE sandbox: extended task search endpoint.
capeUrl = 'https://www.capesandbox.com/apiv2/tasks/extendedsearch/'

# Hybrid Analysis: hash search, URL quick-scan and report endpoints
# (the report URL takes a job id via its %s placeholder).
hybridUrlSearch = 'https://www.hybrid-analysis.com/api/v2/search/hash'
hybridUrlScan = 'https://www.hybrid-analysis.com/api/v2/quick-scan/url'
hybridUrlReport = 'https://www.hybrid-analysis.com/api/v2/report/%s/summary'

# MalShare: sample details endpoint (API key and hash via %s placeholders).
malshareUrl = 'https://malshare.com/api.php?api_key=%s&action=details&hash=%s'

# URLhaus: bulk query API root.
urlhausUrl = 'https://urlhaus-api.abuse.ch/v1/'

# VirusTotal v2: file/IP/URL report endpoints plus the URL scan submitter.
vtHashUrl = 'https://www.virustotal.com/vtapi/v2/file/report'
vtIpUrl = 'https://www.virustotal.com/vtapi/v2/ip-address/report'
vtUrlUrl = 'https://www.virustotal.com/vtapi/v2/url/report'
vtUrlScan = 'https://www.virustotal.com/vtapi/v2/url/scan'

# Result keys produced by the lookups. NOTE(review): these look like report
# field names rather than engine names — confirm against the consumers.
engine_list = ['AbuseLink', 'AbuseScore', 'VTLink', 'VTScore', 'UrlScanLink', 'UrlScanScore']
| 38.714286 | 93 | 0.751845 |
aceaa85cc797bfcc4688476dec37e6ad9f513774 | 12,339 | py | Python | src/transformers/tokenization_gpt2.py | Liang813/transformers | 08f534d2da47875a4b7eb1c125cfa7f0f3b79642 | [
"Apache-2.0"
] | 6 | 2021-11-30T06:41:00.000Z | 2022-03-24T04:18:43.000Z | src/transformers/tokenization_gpt2.py | Liang813/transformers | 08f534d2da47875a4b7eb1c125cfa7f0f3b79642 | [
"Apache-2.0"
] | 1 | 2021-03-21T03:28:23.000Z | 2021-03-21T06:06:39.000Z | src/transformers/tokenization_gpt2.py | Liang813/transformers | 08f534d2da47875a4b7eb1c125cfa7f0f3b79642 | [
"Apache-2.0"
] | 2 | 2021-05-25T19:59:13.000Z | 2022-02-28T18:11:12.000Z | # coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
import json
import os
import warnings
from functools import lru_cache
from typing import Optional, Tuple
import regex as re
from .tokenization_utils import AddedToken, PreTrainedTokenizer
from .utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json",
"gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-vocab.json",
"gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-vocab.json",
"gpt2-xl": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-vocab.json",
"distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-vocab.json",
},
"merges_file": {
"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt",
"gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-merges.txt",
"gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-merges.txt",
"gpt2-xl": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-merges.txt",
"distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt2": 1024,
"gpt2-medium": 1024,
"gpt2-large": 1024,
"gpt2-xl": 1024,
"distilgpt2": 1024,
}
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control
characters the bpe code barfs on.
The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
decent coverage. This is a signficant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
tables between utf-8 bytes and unicode strings.
"""
bs = (
list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2 ** 8):
if b not in bs:
bs.append(b)
cs.append(2 ** 8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""
Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
class GPT2Tokenizer(PreTrainedTokenizer):
"""
Construct a GPT-2 tokenizer. Based on byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
::
>>> from transformers import GPT2Tokenizer
>>> tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
>>> tokenizer("Hello world")['input_ids']
[15496, 995]
>>> tokenizer(" Hello world")['input_ids']
[18435, 995]
You can get around that behavior by passing ``add_prefix_space=True`` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
.. note::
When used with ``is_split_into_words=True``, this tokenizer will add a space before each word (even the first
one).
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
Path to the vocabulary file.
merges_file (:obj:`str`):
Path to the merges file.
errors (:obj:`str`, `optional`, defaults to :obj:`"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See `bytes.decode
<https://docs.python.org/3/library/stdtypes.html#bytes.decode>`__ for more information.
unk_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`):
The beginning of sequence token.
eos_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`):
The end of sequence token.
add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
other word. (GPT2 tokenizer detect beginning of words by the preceding space).
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs
    ):
        # Wrap plain-string special tokens in AddedToken without stripping the
        # surrounding spaces (lstrip/rstrip both False).
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            errors=errors,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        # token -> id mapping loaded from the json vocab file, plus its inverse.
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        # Byte-level alphabet: maps every possible byte value to a printable
        # unicode character (and back), so BPE never sees raw control bytes.
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        # Merge table: first line of the merges file is a version header and the
        # last split element is an empty trailing line, both are dropped.
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # NOTE: re.IGNORECASE was not added, so BPE merges do not happen for
        # capitalized versions of contractions ('S, 'T, ...).
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    def vocab_size(self):
        """Size of the base vocabulary (excluding tokens added after loading)."""
        return len(self.encoder)
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        """Run byte-pair encoding on a single byte-level-remapped token.

        Greedily merges the adjacent symbol pair with the best (lowest) rank in
        ``self.bpe_ranks`` until no known pair remains, then returns the merged
        symbols joined by single spaces. Results are memoized in ``self.cache``.
        """
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            # Single-character token: nothing to merge.
            return token
        while True:
            # Unknown pairs rank as infinity, so min() picks a mergeable pair if one exists.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    # No further occurrence of `first`: keep the remainder unchanged.
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    # Found the bigram: merge it into one symbol.
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
def _tokenize(self, text):
""" Tokenize a string. """
bpe_tokens = []
for token in re.findall(self.pat, text):
token = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")
) # Maps all our bytes to unicode strings, avoiding controle tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
return bpe_tokens
def _convert_token_to_id(self, token):
""" Converts a token (str) in an id using the vocab. """
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index)
def convert_tokens_to_string(self, tokens):
""" Converts a sequence of tokens (string) in a single string. """
text = "".join(tokens)
text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary (json) and the BPE merges (text) into *save_directory*.

        Returns the two written file paths, or None (with an error logged) when
        *save_directory* is not an existing directory.
        """
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # Merges are written in rank order; any gap in the ranks means the
            # in-memory merge table is corrupted, which is only warned about.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        "Saving vocabulary to {}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!".format(merge_file)
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
if "is_pretokenized" in kwargs:
warnings.warn(
"`is_pretokenized` is deprecated and will be removed in a future version, use `is_split_into_words` instead.",
FutureWarning,
)
is_split_into_words = kwargs.pop("is_pretokenized")
add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
if is_split_into_words or add_prefix_space:
text = " " + text
return (text, kwargs)
| 40.192182 | 126 | 0.620472 |
aceaa9238cc646ee260a8790b303d2b380974540 | 2,471 | py | Python | agent/tests/test_input/test_1/test_postgres_http.py | anodot/daria | d475899309f56cd85347be0f7001a0dd97dd197a | [
"Apache-2.0"
] | 16 | 2019-04-03T08:31:54.000Z | 2021-01-24T17:12:04.000Z | agent/tests/test_input/test_1/test_postgres_http.py | anodot/daria | d475899309f56cd85347be0f7001a0dd97dd197a | [
"Apache-2.0"
] | 10 | 2020-01-20T14:59:06.000Z | 2022-01-21T10:19:16.000Z | agent/tests/test_input/test_1/test_postgres_http.py | anodot/daria | d475899309f56cd85347be0f7001a0dd97dd197a | [
"Apache-2.0"
] | 5 | 2021-01-08T19:23:03.000Z | 2021-11-09T14:15:49.000Z | from datetime import datetime
from agent import source, cli
from ..test_zpipeline_base import TestInputBase
class TestPostgreSQL(TestInputBase):
    """CLI integration tests: create a Postgres source and pipelines through the agent CLI.

    The multi-line `input=` strings feed answers to the CLI's interactive prompts,
    one answer per newline; empty lines accept prompt defaults.
    """
    __test__ = True

    # Parametrization consumed by the TestInputBase/pytest hooks; each key names a test method.
    params = {
        'test_source_create': [{'name': 'test_jdbc_postgres', 'type': 'postgres', 'conn': 'postgresql://postgres:5432/test'}],
        'test_create': [
            {'name': 'test_postgres', 'source': 'test_jdbc_postgres', 'timestamp_type': '', 'timestamp_name': 'timestamp_unix'},
            {'name': 'test_postgres_timestamp_ms', 'source': 'test_jdbc_postgres', 'timestamp_type': 'unix_ms',
             'timestamp_name': 'timestamp_unix_ms'},
            {'name': 'test_postgres_timestamp_datetime', 'source': 'test_jdbc_postgres', 'timestamp_type': 'datetime',
             'timestamp_name': 'timestamp_datetime'}],
        'test_create_advanced': [{'name': 'test_postgres_advanced', 'source': 'test_jdbc_postgres'}],
        'test_create_with_file': [{'file_name': 'jdbc_pipelines_postgres'}],
        'test_create_source_with_file': [{'file_name': 'postgres_sources'}],
    }

    def test_source_create(self, cli_runner, name, type, conn):
        """Create a Postgres source via the CLI and check it is persisted."""
        result = cli_runner.invoke(cli.source.create, catch_exceptions=False,
                                   input=f"{type}\n{name}\n{conn}\npostgres\npassword\n\n")
        assert result.exit_code == 0
        assert source.repository.exists(name)

    def test_create(self, cli_runner, name, source, timestamp_type, timestamp_name):
        """Create a basic pipeline; backfill window reaches back to 2017-12-10."""
        days_to_backfill = (datetime.now() - datetime(year=2017, month=12, day=10)).days + 1
        result = cli_runner.invoke(cli.pipeline.create, catch_exceptions=False,
                                   input=f'{source}\n{name}\nSELECT * FROM test WHERE {{TIMESTAMP_CONDITION}}\n\n86400\n{days_to_backfill}\n1\n{timestamp_name}\n{timestamp_type}\n\nclicks:gauge impressions:gauge\nadsize country\n\n\n\n')
        assert result.exit_code == 0

    def test_create_advanced(self, cli_runner, name, source):
        """Create a pipeline in advanced (-a) mode, with a filtered query and static properties."""
        days_to_backfill = (datetime.now() - datetime(year=2017, month=12, day=10)).days + 1
        result = cli_runner.invoke(cli.pipeline.create, ['-a'], catch_exceptions=False,
                                   input=f'{source}\n{name}\nSELECT * FROM test WHERE {{TIMESTAMP_CONDITION}} AND country = \'USA\'\n\n86400\n{days_to_backfill}\n1\ntimestamp_unix\nunix\ny\ntest\nclicks:gauge impressions:gauge\nadsize country\nkey1:val1 key2:val2\n\n\n\n')
        assert result.exit_code == 0
| 65.026316 | 273 | 0.669769 |
aceaa99046756da4011bedb06ff2adcce90dcfb4 | 175,292 | py | Python | bpytop.py | dragonxtek/bpytop | 8f9e8e852d3553393a0201afa6ee989a83051d36 | [
"Apache-2.0"
] | 1 | 2020-09-15T03:39:16.000Z | 2020-09-15T03:39:16.000Z | bpytop.py | dragonxtek/bpytop | 8f9e8e852d3553393a0201afa6ee989a83051d36 | [
"Apache-2.0"
] | null | null | null | bpytop.py | dragonxtek/bpytop | 8f9e8e852d3553393a0201afa6ee989a83051d36 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# pylint: disable=not-callable, no-member
# indent = tab
# tab-size = 4
# Copyright 2020 Aristocratos (jakob@qvantnet.com)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys, threading, signal, re, subprocess, logging, logging.handlers, argparse
import urllib.request
from time import time, sleep, strftime, localtime
from datetime import timedelta
from _thread import interrupt_main
from collections import defaultdict
from select import select
from distutils.util import strtobool
from string import Template
from math import ceil, floor
from random import randint
from shutil import which
from typing import List, Set, Dict, Tuple, Optional, Union, Any, Callable, ContextManager, Iterable, Type, NamedTuple
errors: List[str] = []
try: import fcntl, termios, tty
except Exception as e: errors.append(f'{e}')
try: import psutil # type: ignore
except Exception as e: errors.append(f'{e}')
# Timestamp of program start, used for uptime-style reporting.
SELF_START = time()

# Coarse platform detection; some collectors behave differently per platform.
SYSTEM: str
if "linux" in sys.platform: SYSTEM = "Linux"
elif "bsd" in sys.platform: SYSTEM = "BSD"
elif "darwin" in sys.platform: SYSTEM = "MacOS"
else: SYSTEM = "Other"

# Abort early if required modules failed to import or the platform is unsupported.
if errors:
	print ("ERROR!")
	for error in errors:
		print(error)
	if SYSTEM == "Other":
		print("\nUnsupported platform!\n")
	else:
		print("\nInstall required modules!\n")
	raise SystemExit(1)

VERSION: str = "1.0.32"
#? Argument parser ------------------------------------------------------------------------------->
args = argparse.ArgumentParser()
args.add_argument("-f" , "--full"	,action="store_true" ,help ="Start in full mode showing all boxes [default]")
args.add_argument("-p" , "--proc"	,action="store_true" ,help ="Start in minimal mode without memory and net boxes")
args.add_argument("-s" , "--stat"	,action="store_true" ,help ="Start in minimal mode without process box")
args.add_argument("-v" , "--version"	,action="store_true" ,help ="Show version info and exit")
args.add_argument("--debug"	,action="store_true" ,help ="Start with loglevel set to DEBUG overriding value set in config")
stdargs = args.parse_args()

# --version prints and exits before anything else is initialized.
if stdargs.version:
	print(f'bpytop version: {VERSION}\n'
		f'psutil version: {".".join(str(x) for x in psutil.version_info)}')
	raise SystemExit(0)

# View-mode flags are mutually exclusive in effect; last-checked wins here.
# An empty ARG_MODE means "use the mode from the config file".
ARG_MODE: str = ""
if stdargs.full:
	ARG_MODE = "full"
elif stdargs.proc:
	ARG_MODE = "proc"
elif stdargs.stat:
	ARG_MODE = "stat"

# --debug forces DEBUG loglevel regardless of the config file.
if stdargs.debug:
	DEBUG = True
else:
	DEBUG = False
#? Variables ------------------------------------------------------------------------------------->
BANNER_SRC: List[Tuple[str, str, str]] = [
("#ffa50a", "#0fd7ff", "██████╗ ██████╗ ██╗ ██╗████████╗ ██████╗ ██████╗"),
("#f09800", "#00bfe6", "██╔══██╗██╔══██╗╚██╗ ██╔╝╚══██╔══╝██╔═══██╗██╔══██╗"),
("#db8b00", "#00a6c7", "██████╔╝██████╔╝ ╚████╔╝ ██║ ██║ ██║██████╔╝"),
("#c27b00", "#008ca8", "██╔══██╗██╔═══╝ ╚██╔╝ ██║ ██║ ██║██╔═══╝ "),
("#a86b00", "#006e85", "██████╔╝██║ ██║ ██║ ╚██████╔╝██║"),
("#000000", "#000000", "╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝"),
]
#*?This is the template used to create the config file
DEFAULT_CONF: Template = Template(f'#? Config file for bpytop v. {VERSION}' + '''
#* Color theme, looks for a .theme file in "/usr/[local/]share/bpytop/themes" and "~/.config/bpytop/themes", "Default" for builtin default theme.
#* Prefix name by a plus sign (+) for a theme located in user themes folder, i.e. color_theme="+monokai"
color_theme="$color_theme"
#* If the theme set background should be shown, set to False if you want terminal background transparency
theme_background=$theme_background
#* Set bpytop view mode, "full" for everything shown, "proc" for cpu stats and processes, "stat" for cpu, mem, disks and net stats shown.
view_mode=$view_mode
#* Update time in milliseconds, increases automatically if set below internal loops processing time, recommended 2000 ms or above for better sample times for graphs.
update_ms=$update_ms
#* Processes sorting, "pid" "program" "arguments" "threads" "user" "memory" "cpu lazy" "cpu responsive",
#* "cpu lazy" updates top process over time, "cpu responsive" updates top process directly.
proc_sorting="$proc_sorting"
#* Reverse sorting order, True or False.
proc_reversed=$proc_reversed
#* Show processes as a tree
proc_tree=$proc_tree
#* Which depth the tree view should auto collapse processes at
tree_depth=$tree_depth
#* Use the cpu graph colors in the process list.
proc_colors=$proc_colors
#* Use a darkening gradient in the process list.
proc_gradient=$proc_gradient
#* If process cpu usage should be of the core it's running on or usage of the total available cpu power.
proc_per_core=$proc_per_core
#* Show process memory as bytes instead of percent
proc_mem_bytes=$proc_mem_bytes
#* Check cpu temperature, needs "osx-cpu-temp" on MacOS X.
check_temp=$check_temp
#* Draw a clock at top of screen, formatting according to strftime, empty string to disable.
draw_clock="$draw_clock"
#* Update main ui in background when menus are showing, set this to false if the menus is flickering too much for comfort.
background_update=$background_update
#* Custom cpu model name, empty string to disable.
custom_cpu_name="$custom_cpu_name"
#* Optional filter for shown disks, should be last folder in path of a mountpoint, "root" replaces "/", separate multiple values with comma.
#* Begin line with "exclude=" to change to exclude filter, oterwise defaults to "most include" filter. Example: disks_filter="exclude=boot, home"
disks_filter="$disks_filter"
#* Show graphs instead of meters for memory values.
mem_graphs=$mem_graphs
#* If swap memory should be shown in memory box.
show_swap=$show_swap
#* Show swap as a disk, ignores show_swap value above, inserts itself after first disk.
swap_disk=$swap_disk
#* If mem box should be split to also show disks info.
show_disks=$show_disks
#* Set fixed values for network graphs, default "10M" = 10 Mibibytes, possible units "K", "M", "G", append with "bit" for bits instead of bytes, i.e "100mbit"
net_download="$net_download"
net_upload="$net_upload"
#* Start in network graphs auto rescaling mode, ignores any values set above and rescales down to 10 Kibibytes at the lowest.
net_auto=$net_auto
#* Sync the scaling for download and upload to whichever currently has the highest scale
net_sync=$net_sync
#* If the network graphs color gradient should scale to bandwith usage or auto scale, bandwith usage is based on "net_download" and "net_upload" values
net_color_fixed=$net_color_fixed
#* Show battery stats in top right if battery is present
show_battery=$show_battery
#* Show init screen at startup, the init screen is purely cosmetical
show_init=$show_init
#* Enable check for new version from github.com/aristocratos/bpytop at start.
update_check=$update_check
#* Set loglevel for "~/.config/bpytop/error.log" levels are: "ERROR" "WARNING" "INFO" "DEBUG".
#* The level set includes all lower levels, i.e. "DEBUG" will show all logging info.
log_level=$log_level
''')
CONFIG_DIR: str = f'{os.path.expanduser("~")}/.config/bpytop'
if not os.path.isdir(CONFIG_DIR):
try:
os.makedirs(CONFIG_DIR)
os.mkdir(f'{CONFIG_DIR}/themes')
except PermissionError:
print(f'ERROR!\nNo permission to write to "{CONFIG_DIR}" directory!')
raise SystemExit(1)
CONFIG_FILE: str = f'{CONFIG_DIR}/bpytop.conf'
THEME_DIR: str = ""
if os.path.isdir(f'{os.path.dirname(__file__)}/bpytop-themes'):
THEME_DIR = f'{os.path.dirname(__file__)}/bpytop-themes'
else:
for td in ["/usr/local/", "/usr/", "/snap/bpytop/current/usr/"]:
if os.path.isdir(f'{td}share/bpytop/themes'):
THEME_DIR = f'{td}share/bpytop/themes'
break
USER_THEME_DIR: str = f'{CONFIG_DIR}/themes'
CORES: int = psutil.cpu_count(logical=False) or 1
THREADS: int = psutil.cpu_count(logical=True) or 1
THREAD_ERROR: int = 0
DEFAULT_THEME: Dict[str, str] = {
"main_bg" : "",
"main_fg" : "#cc",
"title" : "#ee",
"hi_fg" : "#969696",
"selected_bg" : "#7e2626",
"selected_fg" : "#ee",
"inactive_fg" : "#40",
"graph_text" : "#60",
"meter_bg" : "#40",
"proc_misc" : "#0de756",
"cpu_box" : "#3d7b46",
"mem_box" : "#8a882e",
"net_box" : "#423ba5",
"proc_box" : "#923535",
"div_line" : "#30",
"temp_start" : "#4897d4",
"temp_mid" : "#5474e8",
"temp_end" : "#ff40b6",
"cpu_start" : "#50f095",
"cpu_mid" : "#f2e266",
"cpu_end" : "#fa1e1e",
"free_start" : "#223014",
"free_mid" : "#b5e685",
"free_end" : "#dcff85",
"cached_start" : "#0b1a29",
"cached_mid" : "#74e6fc",
"cached_end" : "#26c5ff",
"available_start" : "#292107",
"available_mid" : "#ffd77a",
"available_end" : "#ffb814",
"used_start" : "#3b1f1c",
"used_mid" : "#d9626d",
"used_end" : "#ff4769",
"download_start" : "#231a63",
"download_mid" : "#4f43a3",
"download_end" : "#b0a9de",
"upload_start" : "#510554",
"upload_mid" : "#7d4180",
"upload_end" : "#dcafde",
"process_start" : "#80d0a3",
"process_mid" : "#dcd179",
"process_end" : "#d45454",
}
MENUS: Dict[str, Dict[str, Tuple[str, ...]]] = {
"options" : {
"normal" : (
"┌─┐┌─┐┌┬┐┬┌─┐┌┐┌┌─┐",
"│ │├─┘ │ ││ ││││└─┐",
"└─┘┴ ┴ ┴└─┘┘└┘└─┘"),
"selected" : (
"╔═╗╔═╗╔╦╗╦╔═╗╔╗╔╔═╗",
"║ ║╠═╝ ║ ║║ ║║║║╚═╗",
"╚═╝╩ ╩ ╩╚═╝╝╚╝╚═╝") },
"help" : {
"normal" : (
"┬ ┬┌─┐┬ ┌─┐",
"├─┤├┤ │ ├─┘",
"┴ ┴└─┘┴─┘┴ "),
"selected" : (
"╦ ╦╔═╗╦ ╔═╗",
"╠═╣║╣ ║ ╠═╝",
"╩ ╩╚═╝╩═╝╩ ") },
"quit" : {
"normal" : (
"┌─┐ ┬ ┬ ┬┌┬┐",
"│─┼┐│ │ │ │ ",
"└─┘└└─┘ ┴ ┴ "),
"selected" : (
"╔═╗ ╦ ╦ ╦╔╦╗ ",
"║═╬╗║ ║ ║ ║ ",
"╚═╝╚╚═╝ ╩ ╩ ") }
}
MENU_COLORS: Dict[str, Tuple[str, ...]] = {
"normal" : ("#0fd7ff", "#00bfe6", "#00a6c7", "#008ca8"),
"selected" : ("#ffa50a", "#f09800", "#db8b00", "#c27b00")
}
#? Units for floating_humanizer function
UNITS: Dict[str, Tuple[str, ...]] = {
"bit" : ("bit", "Kib", "Mib", "Gib", "Tib", "Pib", "Eib", "Zib", "Yib", "Bib", "GEb"),
"byte" : ("Byte", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "BiB", "GEB")
}
#? Setup error logger ---------------------------------------------------------------->
try:
errlog = logging.getLogger("ErrorLogger")
errlog.setLevel(logging.DEBUG)
eh = logging.handlers.RotatingFileHandler(f'{CONFIG_DIR}/error.log', maxBytes=1048576, backupCount=4)
eh.setLevel(logging.DEBUG)
eh.setFormatter(logging.Formatter("%(asctime)s | %(levelname)s: %(message)s", datefmt="%d/%m/%y (%X)"))
errlog.addHandler(eh)
except PermissionError:
print(f'ERROR!\nNo permission to write to "{CONFIG_DIR}" directory!')
raise SystemExit(1)
#? Timers for testing and debugging -------------------------------------------------------------->
class TimeIt:
	"""Named stopwatch collection used for debug timing.

	start()/stop() bracket a measurement; pause() parks the elapsed time so a
	later stop() of the same name folds it back in. Totals go to the debug log.
	"""
	timers: Dict[str, float] = {}  # name -> start timestamp of running timers
	paused: Dict[str, float] = {}  # name -> elapsed seconds accumulated before a pause

	@classmethod
	def start(cls, name):
		"""Begin (or restart) the timer called *name*."""
		cls.timers[name] = time()

	@classmethod
	def pause(cls, name):
		"""Park the elapsed time of *name* and remove it from the running set."""
		started = cls.timers.pop(name, None)
		if started is not None:
			cls.paused[name] = time() - started

	@classmethod
	def stop(cls, name):
		"""Finish timer *name*, include any paused time, and log the total."""
		started = cls.timers.pop(name, None)
		if started is None:
			return
		total: float = time() - started
		# Fold in time accumulated before a pause, if any.
		total += cls.paused.pop(name, 0.0)
		errlog.debug(f'{name} completed in {total:.6f} seconds')
def timeit_decorator(func):
	"""Decorator that logs, at debug level, how long each call to *func* takes."""
	def wrapper(*args, **kwargs):
		start_stamp = time()
		result = func(*args, **kwargs)
		errlog.debug(f'{func.__name__} completed in {time() - start_stamp:.6f} seconds')
		return result
	return wrapper
#? Set up config class and load config ----------------------------------------------------------->
class Config:
	'''Holds all config variables and functions for loading from and saving to disk'''
	keys: List[str] = ["color_theme", "update_ms", "proc_sorting", "proc_reversed", "proc_tree", "check_temp", "draw_clock", "background_update", "custom_cpu_name",
						"proc_colors", "proc_gradient", "proc_per_core", "proc_mem_bytes", "disks_filter", "update_check", "log_level", "mem_graphs", "show_swap",
						"swap_disk", "show_disks", "net_download", "net_upload", "net_auto", "net_color_fixed", "show_init", "view_mode", "theme_background",
						"net_sync", "show_battery", "tree_depth"]
	conf_dict: Dict[str, Union[str, int, bool]] = {}
	color_theme: str = "Default"
	theme_background: bool = True
	update_ms: int = 2000
	proc_sorting: str = "cpu lazy"
	proc_reversed: bool = False
	proc_tree: bool = False
	tree_depth: int = 3
	proc_colors: bool = True
	proc_gradient: bool = True
	proc_per_core: bool = False
	proc_mem_bytes: bool = True
	check_temp: bool = True
	draw_clock: str = "%X"
	background_update: bool = True
	custom_cpu_name: str = ""
	disks_filter: str = ""
	update_check: bool = True
	mem_graphs: bool = True
	show_swap: bool = True
	swap_disk: bool = True
	show_disks: bool = True
	net_download: str = "10M"
	net_upload: str = "10M"
	net_color_fixed: bool = False
	net_auto: bool = True
	net_sync: bool = False
	show_battery: bool = True
	show_init: bool = True
	view_mode: str = "full"
	log_level: str = "WARNING"

	warnings: List[str] = []	# collected to be logged once logging is configured
	info: List[str] = []

	sorting_options: List[str] = ["pid", "program", "arguments", "threads", "user", "memory", "cpu lazy", "cpu responsive"]
	log_levels: List[str] = ["ERROR", "WARNING", "INFO", "DEBUG"]
	view_modes: List[str] = ["full", "proc", "stat"]

	changed: bool = False		# True once any attribute differs from what was loaded
	recreate: bool = False		# True when the config file must be rewritten on exit
	config_file: str = ""
	_initialized: bool = False

	def __init__(self, path: str):
		"""Load config values from *path*, flagging the file for recreation on
		missing/mismatched version or malformed values."""
		self.config_file = path
		conf: Dict[str, Union[str, int, bool]] = self.load_config()
		if not "version" in conf.keys():
			self.recreate = True
			self.info.append(f'Config file malformatted or missing, will be recreated on exit!')
		elif conf["version"] != VERSION:
			self.recreate = True
			self.info.append(f'Config file version and bpytop version missmatch, will be recreated on exit!')
		for key in self.keys:
			if key in conf.keys() and conf[key] != "_error_":
				setattr(self, key, conf[key])
			else:
				self.recreate = True
				self.conf_dict[key] = getattr(self, key)
		self._initialized = True

	def __setattr__(self, name, value):
		# After init, any attribute change marks the config dirty and mirrors
		# the value into conf_dict so save_config() writes the current state.
		if self._initialized:
			object.__setattr__(self, "changed", True)
		object.__setattr__(self, name, value)
		if name not in ["_initialized", "recreate", "changed"]:
			self.conf_dict[name] = value

	def load_config(self) -> Dict[str, Union[str, int, bool]]:
		'''Load config from file, set correct types for values and return a dict'''
		new_config: Dict[str,Union[str, int, bool]] = {}
		conf_file: str = ""
		if os.path.isfile(self.config_file):
			conf_file = self.config_file
		elif os.path.isfile("/etc/bpytop.conf"):
			conf_file = "/etc/bpytop.conf"
		else:
			return new_config
		try:
			with open(conf_file, "r") as f:
				for line in f:
					line = line.strip()
					if line.startswith("#? Config"):
						new_config["version"] = line[line.find("v. ") + 3:]
					for key in self.keys:
						if line.startswith(key):
							line = line.replace(key + "=", "")
							if line.startswith('"'):
								line = line.strip('"')
							# NOTE: type() comparison (not isinstance) is deliberate:
							# bool is a subclass of int, so isinstance would
							# misclassify bool defaults as int.
							if type(getattr(self, key)) == int:
								try:
									new_config[key] = int(line)
								except ValueError:
									self.warnings.append(f'Config key "{key}" should be an integer!')
							if type(getattr(self, key)) == bool:
								try:
									new_config[key] = bool(strtobool(line))
								except ValueError:
									self.warnings.append(f'Config key "{key}" can only be True or False!')
							if type(getattr(self, key)) == str:
								new_config[key] = str(line)
		except Exception as e:
			errlog.exception(str(e))
		if "proc_sorting" in new_config and not new_config["proc_sorting"] in self.sorting_options:
			new_config["proc_sorting"] = "_error_"
			self.warnings.append(f'Config key "proc_sorted" didn\'t get an acceptable value!')
		if "log_level" in new_config and not new_config["log_level"] in self.log_levels:
			new_config["log_level"] = "_error_"
			self.warnings.append(f'Config key "log_level" didn\'t get an acceptable value!')
		if "view_mode" in new_config and not new_config["view_mode"] in self.view_modes:
			new_config["view_mode"] = "_error_"
			self.warnings.append(f'Config key "view_mode" didn\'t get an acceptable value!')
		# Fix: guard with .get() — a config file without an "update_ms" line
		# previously raised KeyError here.
		if isinstance(new_config.get("update_ms"), int) and new_config["update_ms"] < 100:
			new_config["update_ms"] = 100
			self.warnings.append(f'Config key "update_ms" can\'t be lower than 100!')
		for net_name in ["net_download", "net_upload"]:
			# Fix: slice instead of indexing — an empty value string previously
			# raised IndexError on [0]; ""[:1].isdigit() is simply False.
			if net_name in new_config and not str(new_config[net_name])[:1].isdigit():
				new_config[net_name] = "_error_"
		return new_config

	def save_config(self):
		'''Save current config to config file if difference in values or version, creates a new file if not found'''
		if not self.changed and not self.recreate: return
		try:
			with open(self.config_file, "w" if os.path.isfile(self.config_file) else "x") as f:
				f.write(DEFAULT_CONF.substitute(self.conf_dict))
		except Exception as e:
			errlog.exception(str(e))
# Instantiate the global config and wire the chosen loglevel into the error logger.
try:
	CONFIG: Config = Config(CONFIG_FILE)
	if DEBUG:
		errlog.setLevel(logging.DEBUG)
	else:
		errlog.setLevel(getattr(logging, CONFIG.log_level))
		if CONFIG.log_level == "DEBUG": DEBUG = True
	errlog.info(f'New instance of bpytop version {VERSION} started with pid {os.getpid()}')
	errlog.info(f'Loglevel set to {"DEBUG" if DEBUG else CONFIG.log_level}')
	errlog.debug(f'Using psutil version {".".join(str(x) for x in psutil.version_info)}')
	errlog.debug(f'CMD: {" ".join(sys.argv)}')
	# Flush messages queued by Config before logging was configured.
	if CONFIG.info:
		for info in CONFIG.info:
			errlog.info(info)
		CONFIG.info = []
	if CONFIG.warnings:
		for warning in CONFIG.warnings:
			errlog.warning(warning)
		CONFIG.warnings = []
except Exception as e:
	errlog.exception(f'{e}')
	raise SystemExit(1)

# Older psutil versions lack APIs bpytop relies on; warn but keep running.
if psutil.version_info[0] < 5 or (psutil.version_info[0] == 5 and psutil.version_info[1] < 7):
	warn = f'psutil version {".".join(str(x) for x in psutil.version_info)} detected, version 5.7.0 or later required for full functionality!'
	print("WARNING!", warn)
	errlog.warning(warn)
#? Classes --------------------------------------------------------------------------------------->
class Term:
	"""Terminal info and commands"""
	width: int = 0		# last accepted terminal width
	height: int = 0		# last accepted terminal height
	resized: bool = False
	_w : int = 0		# freshly polled width (may differ from accepted)
	_h : int = 0		# freshly polled height
	fg: str = "" 												#* Default foreground color
	bg: str = "" 												#* Default background color
	hide_cursor 		= "\033[?25l"							#* Hide terminal cursor
	show_cursor 		= "\033[?25h"							#* Show terminal cursor
	alt_screen 			= "\033[?1049h"							#* Switch to alternate screen
	normal_screen 		= "\033[?1049l"							#* Switch to normal screen
	clear				= "\033[2J\033[0;0f"					#* Clear screen and set cursor to position 0,0
	mouse_on			= "\033[?1002h\033[?1015h\033[?1006h" 	#* Enable reporting of mouse position on click and release
	mouse_off			= "\033[?1002l" 						#* Disable mouse reporting
	mouse_direct_on		= "\033[?1003h"							#* Enable reporting of mouse position at any movement
	mouse_direct_off	= "\033[?1003l"							#* Disable direct mouse reporting
	winch = threading.Event()	# set by the SIGWINCH handler on terminal resize

	@classmethod
	def refresh(cls, *args, force: bool = False):
		"""Update width, height and set resized flag if terminal has been resized"""
		if cls.resized: cls.winch.set(); return
		cls._w, cls._h = os.get_terminal_size()
		if (cls._w, cls._h) == (cls.width, cls.height) and not force: return
		if force: Collector.collect_interrupt = True
		# Loop until the polled size stabilizes and meets the 80x24 minimum.
		while (cls._w, cls._h) != (cls.width, cls.height) or (cls._w < 80 or cls._h < 24):
			if Init.running: Init.resized = True
			CpuBox.clock_block = True
			cls.resized = True
			Collector.collect_interrupt = True
			cls.width, cls.height = cls._w, cls._h
			Draw.now(Term.clear)
			Draw.now(f'{create_box(cls._w // 2 - 25, cls._h // 2 - 2, 50, 3, "resizing", line_color=Colors.green, title_color=Colors.white)}',
				f'{Mv.r(12)}{Colors.default}{Colors.black_bg}{Fx.b}Width : {cls._w}   Height: {cls._h}{Fx.ub}{Term.bg}{Term.fg}')
			if cls._w < 80 or cls._h < 24:
				# Too small: show a warning box and wait for further resizes.
				while cls._w < 80 or cls._h < 24:
					Draw.now(Term.clear)
					Draw.now(f'{create_box(cls._w // 2 - 25, cls._h // 2 - 2, 50, 4, "warning", line_color=Colors.red, title_color=Colors.white)}',
						f'{Mv.r(12)}{Colors.default}{Colors.black_bg}{Fx.b}Width: {Colors.red if cls._w < 80 else Colors.green}{cls._w}   ',
						f'{Colors.default}Height: {Colors.red if cls._h < 24 else Colors.green}{cls._h}{Term.bg}{Term.fg}',
						f'{Mv.to(cls._h // 2, cls._w // 2 - 23)}{Colors.default}{Colors.black_bg}Width and Height needs to be at least 80 x 24 !{Fx.ub}{Term.bg}{Term.fg}')
					cls.winch.wait(0.3)
					cls.winch.clear()
					cls._w, cls._h = os.get_terminal_size()
			else:
				cls.winch.wait(0.3)
				cls.winch.clear()
			cls._w, cls._h = os.get_terminal_size()
		Key.mouse = {}			# clickable regions are stale after a resize
		Box.calc_sizes()
		if Init.running: cls.resized = False; return
		if Menu.active: Menu.resized = True
		Box.draw_bg(now=False)
		cls.resized = False
		Timer.finish()

	@staticmethod
	def echo(on: bool):
		"""Toggle input echo"""
		(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(sys.stdin.fileno())
		if on:
			lflag |= termios.ECHO # type: ignore
		else:
			lflag &= ~termios.ECHO # type: ignore
		new_attr = [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]
		termios.tcsetattr(sys.stdin.fileno(), termios.TCSANOW, new_attr)

	@staticmethod
	def title(text: str = "") -> str:
		"""Return the escape sequence that sets the terminal window title."""
		if text: text = f' {text}'
		return f'\033]0;{os.environ.get("TERMINAL_TITLE", "")}{text}\a'
class Fx:
	"""Text effects
	* trans(string: str): Replace whitespace with escape move right to not overwrite background behind whitespace.
	* uncolor(string: str) : Removes all 24-bit color and returns string ."""
	start					= "\033["			#* Escape sequence start
	sep						= ";"				#* Escape sequence separator
	end						= "m"				#* Escape sequence end
	reset = rs				= "\033[0m"			#* Reset foreground/background color and text effects
	bold = b				= "\033[1m"			#* Bold on
	unbold = ub				= "\033[22m"		#* Bold off
	dark = d				= "\033[2m"			#* Dark on
	undark = ud				= "\033[22m"		#* Dark off
	italic = i				= "\033[3m"			#* Italic on
	unitalic = ui			= "\033[23m"		#* Italic off
	underline = u			= "\033[4m"			#* Underline on
	ununderline = uu		= "\033[24m"		#* Underline off
	blink = bl 				= "\033[5m"			#* Blink on
	unblink = ubl			= "\033[25m"		#* Blink off
	strike = s 				= "\033[9m"			#* Strike / crossed-out on
	unstrike = us			= "\033[29m"		#* Strike / crossed-out off

	#* Precompiled regex for finding a 24-bit color escape sequence in a string
	color_re = re.compile(r"\033\[\d+;\d?;?\d*;?\d*;?\d*m")

	@staticmethod
	def trans(string: str):
		"""Swap every space for a one-column cursor-right escape so the background shows through."""
		return "\033[1C".join(string.split(" "))

	@classmethod
	def uncolor(cls, string: str) -> str:
		"""Strip all 24-bit color escape sequences from *string*."""
		return cls.color_re.sub("", string)
class Raw(object):
	"""Set raw input mode for device"""
	# Context manager: puts the tty into cbreak mode on enter (keys are
	# delivered immediately, without waiting for Enter) and restores the
	# saved terminal attributes on exit.
	def __init__(self, stream):
		self.stream = stream
		self.fd = self.stream.fileno()
	def __enter__(self):
		# Save current attributes so they can be restored in __exit__.
		self.original_stty = termios.tcgetattr(self.stream)
		tty.setcbreak(self.stream)
	def __exit__(self, type, value, traceback):
		termios.tcsetattr(self.stream, termios.TCSANOW, self.original_stty)
class Nonblocking(object):
	"""Set nonblocking mode for device"""
	# Context manager: sets O_NONBLOCK on the stream's file descriptor on
	# enter and restores the original flags on exit.
	def __init__(self, stream):
		self.stream = stream
		self.fd = self.stream.fileno()
	def __enter__(self):
		self.orig_fl = fcntl.fcntl(self.fd, fcntl.F_GETFL)
		fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl | os.O_NONBLOCK)
	def __exit__(self, *args):
		fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl)
class Mv:
	"""Cursor movement escape-sequence helpers.

	.t[o](line, column) | .r[ight](columns) | .l[eft](columns) |
	.u[p](lines) | .d[own](lines) | .save() | .restore()
	"""
	@staticmethod
	def to(line: int, col: int) -> str:
		"""Move cursor to absolute position *line*, *col*."""
		return f'\033[{line};{col}f'
	@staticmethod
	def right(x: int) -> str:
		"""Move cursor right *x* columns."""
		return f'\033[{x}C'
	@staticmethod
	def left(x: int) -> str:
		"""Move cursor left *x* columns."""
		return f'\033[{x}D'
	@staticmethod
	def up(x: int) -> str:
		"""Move cursor up *x* lines."""
		return f'\033[{x}A'
	@staticmethod
	def down(x: int) -> str:
		"""Move cursor down *x* lines."""
		return f'\033[{x}B'

	save: str = "\033[s"		#* Save cursor position
	restore: str = "\033[u"		#* Restore saved cursor postion

	#* Single-letter aliases used throughout the drawing code.
	t = to
	r = right
	l = left
	u = up
	d = down
class Key:
	"""Handles the threaded input reader for keypresses and mouse events"""
	list: List[str] = []                               # FIFO queue of decoded key names, capped at 10 entries
	mouse: Dict[str, List[List[int]]] = {}             # clickable areas: key name -> list of [x, y] screen positions
	mouse_pos: Tuple[int, int] = (0, 0)                # last reported mouse position (column, line)
	#* Maps an escape sequence (or tuple of sequence variants) to a readable key name
	escape: Dict[Union[str, Tuple[str, str]], str] = {
		"\n" :					"enter",
		("\x7f", "\x08") :		"backspace",
		("[A", "OA") :			"up",
		("[B", "OB") :			"down",
		("[D", "OD") :			"left",
		("[C", "OC") :			"right",
		"[2~" :					"insert",
		"[3~" :					"delete",
		"[H" :					"home",
		"[F" :					"end",
		"[5~" :					"page_up",
		"[6~" :					"page_down",
		"\t" :					"tab",
		"[Z" :					"shift_tab",
		"OP" :					"f1",
		"OQ" :					"f2",
		"OR" :					"f3",
		"OS" :					"f4",
		"[15" :					"f5",
		"[17" :					"f6",
		"[18" :					"f7",
		"[19" :					"f8",
		"[20" :					"f9",
		"[21" :					"f10",
		"[23" :					"f11",
		"[24" :					"f12"
	}
	new = threading.Event()         # set when new input (key or mouse event) is available
	idle = threading.Event()        # cleared while the reader is mid-sequence, to hold off Draw
	mouse_move = threading.Event()  # set when a mouse move is seen in direct-report mode
	mouse_report: bool = False
	idle.set()
	stopping: bool = False          # signals the reader thread to exit its loop
	started: bool = False           # True once start() has spawned the reader thread
	reader: threading.Thread        # the input reader thread, created by start()
	@classmethod
	def start(cls):
		"""Spawn the input reader thread running _get_key()."""
		cls.stopping = False
		cls.reader = threading.Thread(target=cls._get_key)
		cls.reader.start()
		cls.started = True
	@classmethod
	def stop(cls):
		"""Signal the reader thread to stop and wait for it to finish."""
		if cls.started and cls.reader.is_alive():
			cls.stopping = True
			try:
				cls.reader.join()
			except:
				pass
	@classmethod
	def last(cls) -> str:
		"""Pop and return the newest queued key, or "" if the queue is empty."""
		if cls.list: return cls.list.pop()
		else: return ""
	@classmethod
	def get(cls) -> str:
		"""Pop and return the oldest queued key, or "" if the queue is empty."""
		if cls.list: return cls.list.pop(0)
		else: return ""
	@classmethod
	def get_mouse(cls) -> Tuple[int, int]:
		"""Return the last reported mouse position; consumes the new-input flag if set."""
		if cls.new.is_set():
			cls.new.clear()
		return cls.mouse_pos
	@classmethod
	def mouse_moved(cls) -> bool:
		"""Return True once per detected mouse move (the flag is consumed)."""
		if cls.mouse_move.is_set():
			cls.mouse_move.clear()
			return True
		else:
			return False
	@classmethod
	def has_key(cls) -> bool:
		"""Return True if any key is waiting in the queue."""
		if cls.list: return True
		else: return False
	@classmethod
	def clear(cls):
		"""Drop all queued keys."""
		cls.list = []
	@classmethod
	def input_wait(cls, sec: float = 0.0, mouse: bool = False) -> bool:
		'''Returns True if key is detected else waits out timer and returns False'''
		if cls.list: return True
		if mouse: Draw.now(Term.mouse_direct_on)  # enable direct mouse reporting only while waiting
		cls.new.wait(sec if sec > 0 else 0.0)
		if mouse: Draw.now(Term.mouse_direct_off, Term.mouse_on)
		if cls.new.is_set():
			cls.new.clear()
			return True
		else:
			return False
	@classmethod
	def break_wait(cls):
		"""Interrupt a pending input_wait() by injecting a dummy "_null" key."""
		cls.list.append("_null")
		cls.new.set()
		sleep(0.01)
		cls.new.clear()
	@classmethod
	def _get_key(cls):
		"""Get a key or escape sequence from stdin, convert to readable format and save to keys list. Meant to be run in it's own thread."""
		input_key: str = ""
		clean_key: str = ""
		try:
			while not cls.stopping:
				with Raw(sys.stdin):
					if not select([sys.stdin], [], [], 0.1)[0]: #* Wait 100ms for input on stdin then restart loop to check for stop flag
						continue
					input_key += sys.stdin.read(1) #* Read 1 key safely with blocking on
					if input_key == "\033": #* If first character is a escape sequence keep reading
						cls.idle.clear() #* Report IO block in progress to prevent Draw functions from getting a IO Block error
						Draw.idle.wait() #* Wait for Draw function to finish if busy
						with Nonblocking(sys.stdin): #* Set non blocking to prevent read stall
							input_key += sys.stdin.read(20)
							if input_key.startswith("\033[<"):
								_ = sys.stdin.read(1000) #* Discard any flood of queued mouse sequences
						cls.idle.set() #* Report IO blocking done
					#errlog.debug(f'{repr(input_key)}')
					if input_key == "\033": clean_key = "escape" #* Key is "escape" key if only containing \033
					elif input_key.startswith(("\033[<0;", "\033[<35;", "\033[<64;", "\033[<65;")): #* Detected mouse event
						try:
							cls.mouse_pos = (int(input_key.split(";")[1]), int(input_key.split(";")[2].rstrip("mM")))
						except:
							pass
						else:
							if input_key.startswith("\033[<35;"): #* Detected mouse move in mouse direct mode
								cls.mouse_move.set()
								cls.new.set()
							elif input_key.startswith("\033[<64;"): #* Detected mouse scroll up
								clean_key = "mouse_scroll_up"
							elif input_key.startswith("\033[<65;"): #* Detected mouse scroll down
								clean_key = "mouse_scroll_down"
							elif input_key.startswith("\033[<0;") and input_key.endswith("m"): #* Detected mouse click release
								if Menu.active:
									clean_key = "mouse_click"
								else:
									for key_name, positions in cls.mouse.items(): #* Check if mouse position is clickable
										if list(cls.mouse_pos) in positions:
											clean_key = key_name
											break
									else:
										clean_key = "mouse_click"
					elif input_key == "\\": clean_key = "\\" #* Clean up "\" to not return escaped
					else:
						for code in cls.escape.keys(): #* Go trough dict of escape codes to get the cleaned key name
							if input_key.lstrip("\033").startswith(code):
								clean_key = cls.escape[code]
								break
						else: #* If not found in escape dict and length of key is 1, assume regular character
							if len(input_key) == 1:
								clean_key = input_key
					if clean_key:
						cls.list.append(clean_key) #* Store up to 10 keys in input queue for later processing
						if len(cls.list) > 10: del cls.list[0]
						clean_key = ""
						cls.new.set() #* Set threading event to interrupt main thread sleep
					input_key = ""
		except Exception as e:
			errlog.exception(f'Input thread failed with exception: {e}')
			cls.idle.set()
			cls.list.clear()
			clean_quit(1, thread=True)
class Draw:
	'''Holds the draw buffer and manages IO blocking queue
	* .buffer([+]name[!], *args, append=False, now=False, z=100) : Add *args to buffer
	* - Adding "+" prefix to name sets append to True and appends to name's current string
	* - Adding "!" suffix to name sets now to True and print name's current string
	* .out(clear=False) : Print all strings in buffer, clear=True clear all buffers after
	* .now(*args) : Prints all arguments as a string
	* .clear(*names) : Clear named buffers, all if no argument
	* .last_screen() : Prints all saved buffers
	'''
	strings: Dict[str, str] = {}   # named draw buffers awaiting output
	z_order: Dict[str, int] = {}   # z index per buffer name; higher z is printed first
	saved: Dict[str, str] = {}     # last output per buffer name, kept for redraws
	save: Dict[str, bool] = {}     # whether a buffer's output should be copied to saved
	once: Dict[str, bool] = {}     # whether a buffer is cleared right after its first output
	idle = threading.Event()       # cleared while a print to the terminal is in progress
	idle.set()
	@classmethod
	def now(cls, *args):
		'''Wait for input reader and self to be idle then print to screen'''
		Key.idle.wait()
		cls.idle.wait()
		cls.idle.clear()
		try:
			print(*args, sep="", end="", flush=True)
		except BlockingIOError:
			pass
			#* Retry once after the input reader releases stdin/stdout
			Key.idle.wait()
			print(*args, sep="", end="", flush=True)
		cls.idle.set()
	@classmethod
	def buffer(cls, name: str, *args: str, append: bool = False, now: bool = False, z: int = 100, only_save: bool = False, no_save: bool = False, once: bool = False):
		"""Add joined *args to the named buffer; a "+" prefix on name forces append, a "!" suffix forces immediate output."""
		string: str = ""
		if name.startswith("+"):
			name = name.lstrip("+")
			append = True
		if name.endswith("!"):
			name = name.rstrip("!")
			now = True
		cls.save[name] = not no_save
		cls.once[name] = once
		if not name in cls.z_order or z != 100: cls.z_order[name] = z
		if args: string = "".join(args)
		if only_save:
			#* Store for later redraws only, don't queue for printing
			if name not in cls.saved or not append: cls.saved[name] = ""
			cls.saved[name] += string
		else:
			if name not in cls.strings or not append: cls.strings[name] = ""
			cls.strings[name] += string
		if now:
			cls.out(name)
	@classmethod
	def out(cls, *names: str, clear = False):
		"""Print the named buffers (or all buffers when no names given) in descending z order."""
		out: str = ""
		if not cls.strings: return
		if names:
			for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True):
				if name in names and name in cls.strings:
					out += cls.strings[name]
					if cls.save[name]:
						cls.saved[name] = cls.strings[name]
					if clear or cls.once[name]:
						cls.clear(name)
			cls.now(out)
		else:
			for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True):
				if name in cls.strings:
					out += cls.strings[name]
					if cls.save[name]:
						#* NOTE(review): stores the accumulated output so far, not just this
						#* buffer's own string — differs from the named branch above; confirm intentional
						cls.saved[name] = out
					if cls.once[name] and not clear:
						cls.clear(name)
			if clear:
				cls.clear()
			cls.now(out)
	@classmethod
	def saved_buffer(cls) -> str:
		"""Return all saved buffer contents joined in descending z order."""
		out: str = ""
		for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True):
			if name in cls.saved:
				out += cls.saved[name]
		return out
	@classmethod
	def clear(cls, *names, saved: bool = False):
		"""Clear the named buffers (all buffers when no names); saved=True also drops saved copies and z order."""
		if names:
			for name in names:
				if name in cls.strings:
					del cls.strings[name]
				if name in cls.save:
					del cls.save[name]
				if name in cls.once:
					del cls.once[name]
				if saved:
					if name in cls.saved:
						del cls.saved[name]
					if name in cls.z_order:
						del cls.z_order[name]
		else:
			cls.strings = {}
			cls.save = {}
			cls.once = {}
			if saved:
				cls.saved = {}
				cls.z_order = {}
class Color:
	'''Holds representations for a 24-bit color value
	__init__(color, depth="fg", default=False)
	-- color accepts 6 digit hexadecimal: string "#RRGGBB", 2 digit hexadecimal: string "#FF" or decimal RGB "255 255 255" as a string.
	-- depth accepts "fg" or "bg"
	__call__(*args) joins str arguments to a string and apply color
	__str__ returns escape sequence to set color
	__iter__ returns iteration over red, green and blue in integer values of 0-255.
	* Values: .hexa: str | .dec: Tuple[int, int, int] | .red: int | .green: int | .blue: int | .depth: str | .escape: str
	'''
	hexa: str; dec: Tuple[int, int, int]; red: int; green: int; blue: int; depth: str; escape: str; default: bool
	def __init__(self, color: str, depth: str = "fg", default: bool = False):
		self.depth = depth
		self.default = default
		try:
			if not color:
				#* Empty color: produces no escape except resetting a default background
				self.dec = (-1, -1, -1)
				self.hexa = ""
				self.red = self.green = self.blue = -1
				self.escape = "\033[49m" if depth == "bg" and default else ""
				return
			elif color.startswith("#"):
				self.hexa = color
				if len(self.hexa) == 3:
					#* Two-digit greyscale form "#FF" -> expand to "#FFFFFF"
					self.hexa += self.hexa[1:3] + self.hexa[1:3]
					c = int(self.hexa[1:3], base=16)
					self.dec = (c, c, c)
				elif len(self.hexa) == 7:
					self.dec = (int(self.hexa[1:3], base=16), int(self.hexa[3:5], base=16), int(self.hexa[5:7], base=16))
				else:
					raise ValueError(f'Incorrectly formatted hexadecimal rgb string: {self.hexa}')
			else:
				#* Decimal "R G B" form; fix: initialize hexa so it is computed below
				#* instead of raising AttributeError (it was never set on this path)
				self.hexa = ""
				c_t = tuple(map(int, color.split(" ")))
				if len(c_t) == 3:
					self.dec = c_t #type: ignore
				else:
					raise ValueError(f'RGB dec should be "0-255 0-255 0-255"')
			#* Fix: validate each component instead of the sum, which accepted
			#* out-of-range values like "300 0 0" (sum check only caught extremes)
			if not all(0 <= c <= 255 for c in self.dec):
				raise ValueError(f'RGB values out of range: {color}')
		except Exception as e:
			errlog.exception(str(e))
			#* Robust fallback: leave the object in the same state as an empty color
			#* so later attribute access (e.g. .dec in gradient code) cannot crash
			self.dec = (-1, -1, -1)
			self.hexa = ""
			self.red = self.green = self.blue = -1
			self.escape = ""
			return
		#* Note: hexa derived from decimal input carries no leading "#" (legacy format)
		if self.dec and not self.hexa: self.hexa = f'{self.dec[0]:02x}{self.dec[1]:02x}{self.dec[2]:02x}'
		if self.dec and self.hexa:
			self.red, self.green, self.blue = self.dec
			self.escape = f'\033[{38 if self.depth == "fg" else 48};2;{";".join(str(c) for c in self.dec)}m'
	def __str__(self) -> str:
		return self.escape
	def __repr__(self) -> str:
		return repr(self.escape)
	def __iter__(self) -> Iterable:
		for c in self.dec: yield c
	def __call__(self, *args: str) -> str:
		"""Wrap the joined arguments in this color's escape and the terminal's reset for this depth."""
		if len(args) < 1: return ""
		return f'{self.escape}{"".join(args)}{getattr(Term, self.depth)}'
	@staticmethod
	def escape_color(hexa: str = "", r: int = 0, g: int = 0, b: int = 0, depth: str = "fg") -> str:
		"""Returns escape sequence to set color
		* accepts either 6 digit hexadecimal hexa="#RRGGBB", 2 digit hexadecimal: hexa="#FF"
		* or decimal RGB: r=0-255, g=0-255, b=0-255
		* depth="fg" or "bg"
		"""
		dint: int = 38 if depth == "fg" else 48
		color: str = ""
		if hexa:
			try:
				if len(hexa) == 3:
					c = int(hexa[1:], base=16)
					color = f'\033[{dint};2;{c};{c};{c}m'
				elif len(hexa) == 7:
					color = f'\033[{dint};2;{int(hexa[1:3], base=16)};{int(hexa[3:5], base=16)};{int(hexa[5:7], base=16)}m'
			except ValueError as e:
				errlog.exception(f'{e}')
		else:
			color = f'\033[{dint};2;{r};{g};{b}m'
		return color
	@classmethod
	def fg(cls, *args) -> str:
		"""Foreground escape from either (r, g, b) ints or a single hex string."""
		if len(args) > 2: return cls.escape_color(r=args[0], g=args[1], b=args[2], depth="fg")
		else: return cls.escape_color(hexa=args[0], depth="fg")
	@classmethod
	def bg(cls, *args) -> str:
		"""Background escape from either (r, g, b) ints or a single hex string."""
		if len(args) > 2: return cls.escape_color(r=args[0], g=args[1], b=args[2], depth="bg")
		else: return cls.escape_color(hexa=args[0], depth="bg")
class Colors:
	'''Standard colors for menus and dialogs'''
	default = Color("#cc")               # light grey, default text color
	white = Color("#ff")
	red = Color("#bf3636")
	green = Color("#68bf36")
	blue = Color("#0fd7ff")
	yellow = Color("#db8b00")
	black_bg = Color("#00", depth="bg")  # black background
	null = Color("")                     # empty color, produces no escape sequence
class Theme:
	'''__init__ accepts a dict containing { "color_element" : "color" }'''
	themes: Dict[str, str] = {}    # available theme names -> file paths ("+" prefix marks user themes)
	cached: Dict[str, Dict[str, str]] = { "Default" : DEFAULT_THEME }  # parsed theme files cached by name
	current: str = ""              # name of the currently loaded theme
	#* One attribute per themeable UI element, set to Color instances by _load_theme()
	main_bg = main_fg = title = hi_fg = selected_bg = selected_fg = inactive_fg = proc_misc = cpu_box = mem_box = net_box = proc_box = div_line = temp_start = temp_mid = temp_end = cpu_start = cpu_mid = cpu_end = free_start = free_mid = free_end = cached_start = cached_mid = cached_end = available_start = available_mid = available_end = used_start = used_mid = used_end = download_start = download_mid = download_end = upload_start = upload_mid = upload_end = graph_text = meter_bg = process_start = process_mid = process_end = NotImplemented
	#* 101-step color gradients (indexed 0-100) built by _load_theme()
	gradient: Dict[str, List[str]] = {
		"temp" : [],
		"cpu" : [],
		"free" : [],
		"cached" : [],
		"available" : [],
		"used" : [],
		"download" : [],
		"upload" : [],
		"proc" : [],
		"proc_color" : [],
		"process" : [],
	}
	def __init__(self, theme: str):
		self.refresh()
		self._load_theme(theme)
	def __call__(self, theme: str):
		'''Switch to another theme, resetting all gradients first'''
		for k in self.gradient.keys(): self.gradient[k] = []
		self._load_theme(theme)
	def _load_theme(self, theme: str):
		'''Load the named theme (falling back to "Default"), set all color attributes and rebuild gradients'''
		tdict: Dict[str, str]
		if theme in self.cached:
			tdict = self.cached[theme]
		elif theme in self.themes:
			tdict = self._load_file(self.themes[theme])
			self.cached[theme] = tdict
		else:
			errlog.warning(f'No theme named "{theme}" found!')
			theme = "Default"
			CONFIG.color_theme = theme
			tdict = DEFAULT_THEME
		self.current = theme
		#if CONFIG.color_theme != theme: CONFIG.color_theme = theme
		#* Fill in optional colors from related ones when missing from the theme file
		if not "graph_text" in tdict and "inactive_fg" in tdict:
			tdict["graph_text"] = tdict["inactive_fg"]
		if not "meter_bg" in tdict and "inactive_fg" in tdict:
			tdict["meter_bg"] = tdict["inactive_fg"]
		if not "process_start" in tdict and "cpu_start" in tdict:
			tdict["process_start"] = tdict["cpu_start"]
			tdict["process_mid"] = tdict.get("cpu_mid", "")
			tdict["process_end"] = tdict.get("cpu_end", "")
		#* Get key names from DEFAULT_THEME dict to not leave any color unset if missing from theme dict
		for item, value in DEFAULT_THEME.items():
			default = False if item not in ["main_fg", "main_bg"] else True
			depth = "fg" if item not in ["main_bg", "selected_bg"] else "bg"
			if item in tdict:
				setattr(self, item, Color(tdict[item], depth=depth, default=default))
			else:
				setattr(self, item, Color(value, depth=depth, default=default))
		#* Create color gradients from one, two or three colors, 101 values indexed 0-100
		self.proc_start, self.proc_mid, self.proc_end = self.main_fg, Colors.null, self.inactive_fg
		self.proc_color_start, self.proc_color_mid, self.proc_color_end = self.inactive_fg, Colors.null, self.process_start
		rgb: Dict[str, Tuple[int, int, int]]
		colors: List[List[int]] = []
		for name in self.gradient:
			rgb = { "start" : getattr(self, f'{name}_start').dec, "mid" : getattr(self, f'{name}_mid').dec, "end" : getattr(self, f'{name}_end').dec }
			colors = [ list(getattr(self, f'{name}_start')) ]
			if rgb["end"][0] >= 0:
				#* Interpolate linearly in two halves (start->mid, mid->end) or one pass (start->end) if no mid color
				r = 50 if rgb["mid"][0] >= 0 else 100
				for first, second in ["start", "mid" if r == 50 else "end"], ["mid", "end"]:
					for i in range(r):
						colors += [[rgb[first][n] + i * (rgb[second][n] - rgb[first][n]) // r for n in range(3)]]
					if r == 100:
						break
				self.gradient[name] += [ Color.fg(*color) for color in colors ]
			else:
				#* Single color theme entry: repeat it for all 101 gradient steps
				c = Color.fg(*rgb["start"])
				for _ in range(101):
					self.gradient[name] += [c]
		#* Set terminal colors
		Term.fg = self.main_fg
		Term.bg = self.main_bg if CONFIG.theme_background else "\033[49m"
		Draw.now(self.main_fg, self.main_bg)
	@classmethod
	def refresh(cls):
		'''Sets themes dict with names and paths to all found themes'''
		cls.themes = { "Default" : "Default" }
		try:
			for d in (THEME_DIR, USER_THEME_DIR):
				if not d: continue
				for f in os.listdir(d):
					if f.endswith(".theme"):
						cls.themes[f'{"" if d == THEME_DIR else "+"}{f[:-6]}'] = f'{d}/{f}'
		except Exception as e:
			errlog.exception(str(e))
	@staticmethod
	def _load_file(path: str) -> Dict[str, str]:
		'''Load a bashtop formatted theme file and return a dict'''
		new_theme: Dict[str, str] = {}
		try:
			with open(path) as f:
				for line in f:
					#* Only lines of the form theme[key]="value" are used
					if not line.startswith("theme["): continue
					key = line[6:line.find("]")]
					s = line.find('"')
					value = line[s + 1:line.find('"', s + 1)]
					new_theme[key] = value
		except Exception as e:
			errlog.exception(str(e))
		return new_theme
class Banner:
	'''Holds the bpytop banner, .draw(line, [col=0], [center=False], [now=False])'''
	out: List[str] = []   # colorized banner lines, rendered once when the class body runs
	c_color: str = ""     # current color escape while rendering
	length: int = 0       # width of the widest banner line
	if not out:
		#* Pre-render the banner: colorize each BANNER_SRC line character by character
		for num, (color, color2, line) in enumerate(BANNER_SRC):
			if len(line) > length: length = len(line)
			out_var = ""
			line_color = Color.fg(color)
			line_color2 = Color.fg(color2)
			line_dark = Color.fg(f'#{80 - num * 6}')  # darker shade for non-block characters, fading per line
			for n, letter in enumerate(line):
				if letter == "█" and c_color != line_color:
					#* Use the secondary color for the middle section (columns 6-24) of each line
					if n > 5 and n < 25: c_color = line_color2
					else: c_color = line_color
					out_var += c_color
				elif letter == " ":
					#* Replace spaces with a cursor move so the background shows through
					letter = f'{Mv.r(1)}'
					c_color = ""
				elif letter != "█" and c_color != line_dark:
					c_color = line_dark
					out_var += line_dark
				out_var += letter
			out.append(out_var)
	@classmethod
	def draw(cls, line: int, col: int = 0, center: bool = False, now: bool = False):
		"""Position the banner at (line, col); prints via Draw when now=True, otherwise returns the string."""
		out: str = ""
		if center: col = Term.width // 2 - cls.length // 2
		for n, o in enumerate(cls.out):
			out += f'{Mv.to(line + n, col)}{o}'
		out += f'{Term.fg}'
		if now: Draw.out(out)
		else: return out
class Symbol:
	'''Box drawing, arrow and braille graph characters used for drawing the UI'''
	h_line: str = "─"
	v_line: str = "│"
	left_up: str = "┌"
	right_up: str = "┐"
	left_down: str = "└"
	right_down: str = "┘"
	title_left: str = "┤"    # bracket drawn left of a title embedded in a box line
	title_right: str = "├"   # bracket drawn right of a title embedded in a box line
	div_up: str = "┬"
	div_down: str = "┴"
	#* Braille graph cells keyed as left_level + right_level / 10, each level 0-4;
	#* dots fill from the bottom of the cell (used for upward graphs)
	graph_up: Dict[float, str] = {
	0.0 : " ", 0.1 : "⢀", 0.2 : "⢠", 0.3 : "⢰", 0.4 : "⢸",
	1.0 : "⡀", 1.1 : "⣀", 1.2 : "⣠", 1.3 : "⣰", 1.4 : "⣸",
	2.0 : "⡄", 2.1 : "⣄", 2.2 : "⣤", 2.3 : "⣴", 2.4 : "⣼",
	3.0 : "⡆", 3.1 : "⣆", 3.2 : "⣦", 3.3 : "⣶", 3.4 : "⣾",
	4.0 : "⡇", 4.1 : "⣇", 4.2 : "⣧", 4.3 : "⣷", 4.4 : "⣿"
	}
	graph_up_small = graph_up.copy()
	graph_up_small[0.0] = "\033[1C"   # single-line graphs skip empty cells with a cursor move
	#* Same encoding with dots filling from the top of the cell (for inverted graphs)
	graph_down: Dict[float, str] = {
	0.0 : " ", 0.1 : "⠈", 0.2 : "⠘", 0.3 : "⠸", 0.4 : "⢸",
	1.0 : "⠁", 1.1 : "⠉", 1.2 : "⠙", 1.3 : "⠹", 1.4 : "⢹",
	2.0 : "⠃", 2.1 : "⠋", 2.2 : "⠛", 2.3 : "⠻", 2.4 : "⢻",
	3.0 : "⠇", 3.1 : "⠏", 3.2 : "⠟", 3.3 : "⠿", 3.4 : "⢿",
	4.0 : "⡇", 4.1 : "⡏", 4.2 : "⡟", 4.3 : "⡿", 4.4 : "⣿"
	}
	graph_down_small = graph_down.copy()
	graph_down_small[0.0] = "\033[1C"
	meter: str = "■"
	up: str = "↑"
	down: str = "↓"
	left: str = "←"
	right: str = "→"
	enter: str = "↲"
	ok: str = f'{Color.fg("#30ff50")}√{Color.fg("#cc")}'
	fail: str = f'{Color.fg("#ff3050")}!{Color.fg("#cc")}'
class Graph:
	'''Class for creating and adding to graphs
	* __str__ : returns graph as a string
	* add(value: int) : adds a value to graph and returns it as a string
	* __call__ : same as add
	'''
	out: str                        # current rendered graph string
	width: int
	height: int
	graphs: Dict[bool, List[str]]   # double buffer: two alternating renderings keyed by bool
	colors: List[str]               # color escape per graph row (or per value when height == 1)
	invert: bool                    # True draws the graph hanging from the top
	max_value: int                  # ceiling for value scaling, 0 disables scaling
	color_max_value: int
	offset: int                     # added to values before scaling against max_value
	current: bool                   # which of the two buffers is active
	last: int                       # last value added; becomes the left half of the next braille cell
	symbol: Dict[float, str]        # braille lookup table (Symbol.graph_up/down[_small])
	def __init__(self, width: int, height: int, color: Union[List[str], Color, None], data: List[int], invert: bool = False, max_value: int = 0, offset: int = 0, color_max_value: Union[int, None] = None):
		self.graphs: Dict[bool, List[str]] = {False : [], True : []}
		self.current: bool = True
		self.width = width
		self.height = height
		self.invert = invert
		self.offset = offset
		if not data: data = [0]
		if max_value:
			self.max_value = max_value
			data = [ min(100, (v + offset) * 100 // (max_value + offset)) for v in data ] #* Convert values to percentage values of max_value with max_value as ceiling
		else:
			self.max_value = 0
		if color_max_value:
			self.color_max_value = color_max_value
		else:
			self.color_max_value = self.max_value
		#* Scale gradient indexing when the color ceiling differs from the value ceiling
		if self.color_max_value and self.max_value:
			color_scale = int(100.0 * self.max_value / self.color_max_value)
		else:
			color_scale = 100
		self.colors: List[str] = []
		if isinstance(color, list) and height > 1:
			for i in range(1, height + 1): self.colors.insert(0, color[min(100, i * color_scale // height)]) #* Calculate colors of graph
			if invert: self.colors.reverse()
		elif isinstance(color, Color) and height > 1:
			self.colors = [ f'{color}' for _ in range(height) ]
		else:
			if isinstance(color, list): self.colors = color
			elif isinstance(color, Color): self.colors = [ f'{color}' for _ in range(101) ]
		if self.height == 1:
			self.symbol = Symbol.graph_down_small if invert else Symbol.graph_up_small
		else:
			self.symbol = Symbol.graph_down if invert else Symbol.graph_up
		#* Each braille cell holds two values, so the graph fits width * 2 data points
		value_width: int = ceil(len(data) / 2)
		filler: str = ""
		if value_width > width: #* If the size of given data set is bigger then width of graph, shrink data set
			data = data[-(width*2):]
			value_width = ceil(len(data) / 2)
		elif value_width < width: #* If the size of given data set is smaller then width of graph, fill graph with whitespace
			filler = self.symbol[0.0] * (width - value_width)
		if len(data) % 2: data.insert(0, 0)
		for _ in range(height):
			for b in [True, False]:
				self.graphs[b].append(filler)
		self._create(data, new=True)
	def _create(self, data: List[int], new: bool = False):
		'''Render data into the buffers; new=True (re)builds both alternating buffers from scratch'''
		h_high: int
		h_low: int
		value: Dict[str, int] = { "left" : 0, "right" : 0 }
		val: int
		side: str
		#* Create the graph
		for h in range(self.height):
			#* Percentage band covered by this graph row
			h_high = round(100 * (self.height - h) / self.height) if self.height > 1 else 100
			h_low = round(100 * (self.height - (h + 1)) / self.height) if self.height > 1 else 0
			for v in range(len(data)):
				if new: self.current = bool(v % 2) #* Switch between True and False graphs
				if new and v == 0: self.last = 0
				#* Each braille cell encodes the previous value (left) and current value (right) as 0-4 dot levels
				for val, side in [self.last, "left"], [data[v], "right"]: # type: ignore
					if val >= h_high:
						value[side] = 4
					elif val <= h_low:
						value[side] = 0
					else:
						if self.height == 1: value[side] = round(val * 4 / 100 + 0.5)
						else: value[side] = round((val - h_low) * 4 / (h_high - h_low) + 0.1)
				if new: self.last = data[v]
				self.graphs[self.current][h] += self.symbol[float(value["left"] + value["right"] / 10)]
		if data: self.last = data[-1]
		self.out = ""
		if self.height == 1:
			self.out += f'{"" if not self.colors else self.colors[self.last]}{self.graphs[self.current][0]}'
		elif self.height > 1:
			for h in range(self.height):
				if h > 0: self.out += f'{Mv.d(1)}{Mv.l(self.width)}'
				self.out += f'{"" if not self.colors else self.colors[h]}{self.graphs[self.current][h if not self.invert else (self.height - 1) - h]}'
		if self.colors: self.out += f'{Term.fg}'
	def __call__(self, value: Union[int, None] = None) -> str:
		'''Add a value, scrolling the graph one half-cell left; non-int value returns current string unchanged'''
		if not isinstance(value, int): return self.out
		self.current = not self.current
		#* Drop the leftmost cell (or filler escape) from the now-active buffer before appending
		if self.height == 1:
			if self.graphs[self.current][0].startswith(self.symbol[0.0]):
				self.graphs[self.current][0] = self.graphs[self.current][0].replace(self.symbol[0.0], "", 1)
			else:
				self.graphs[self.current][0] = self.graphs[self.current][0][1:]
		else:
			for n in range(self.height):
				self.graphs[self.current][n] = self.graphs[self.current][n][1:]
		if self.max_value: value = (value + self.offset) * 100 // (self.max_value + self.offset) if value < self.max_value else 100
		self._create([value])
		return self.out
	def add(self, value: Union[int, None] = None) -> str:
		'''Alias for __call__'''
		return self.__call__(value)
	def __str__(self):
		return self.out
	def __repr__(self):
		return repr(self.out)
class Graphs:
	'''Holds all graphs and lists of graphs for dynamically created graphs'''
	cpu: Dict[str, Graph] = {}                             # "up"/"down" halves of the main cpu graph
	cores: List[Graph] = [NotImplemented] * THREADS        # one mini graph per logical core
	temps: List[Graph] = [NotImplemented] * (THREADS + 1)  # temperature graphs; index 0 is the overall cpu temp
	net: Dict[str, Graph] = {}
	detailed_cpu: Graph = NotImplemented
	detailed_mem: Graph = NotImplemented
	pid_cpu: Dict[int, Graph] = {}                         # per-process cpu graphs keyed by pid
class Meter:
	'''Creates a percentage meter
	__init__(value, width, theme, gradient_name) to create new meter
	__call__(value) to set value and return meter as a string
	__str__ returns last set meter as a string
	'''
	out: str                    # last rendered meter string
	color_gradient: List[str]   # 101-entry gradient used to color the filled part
	color_inactive: Color       # color applied to the unfilled remainder
	gradient_name: str
	width: int
	invert: bool                # reverse the gradient direction
	saved: Dict[int, str]       # cache of rendered meters keyed by value
	def __init__(self, value: int, width: int, gradient_name: str, invert: bool = False):
		"""Create a meter of the given width using the named THEME gradient."""
		self.gradient_name = gradient_name
		self.color_gradient = THEME.gradient[gradient_name]
		self.color_inactive = THEME.meter_bg
		self.width = width
		self.saved = {}
		self.invert = invert
		self.out = self._create(value)
	def __call__(self, value: Union[int, None]) -> str:
		"""Set a new value (clamped to 0-100) and return the rendered meter; non-int returns the last render."""
		if not isinstance(value, int): return self.out
		if value > 100: value = 100
		elif value < 0: value = 0  #* Fix: below-range values were clamped to 100 (the maximum) instead of 0
		if value in self.saved:
			self.out = self.saved[value]
		else:
			self.out = self._create(value)
		return self.out
	def __str__(self) -> str:
		return self.out
	def __repr__(self):
		return repr(self.out)
	def _create(self, value: int) -> str:
		"""Render the meter string for a value in 0-100 and store it in the cache."""
		if value > 100: value = 100
		elif value < 0: value = 0  #* Fix: clamp to the minimum, not the maximum
		out: str = ""
		for i in range(1, self.width + 1):
			if value >= round(i * 100 / self.width):
				out += f'{self.color_gradient[round(i * 100 / self.width) if not self.invert else round(100 - (i * 100 / self.width))]}{Symbol.meter}'
			else:
				#* First unfilled position: emit the whole remainder in the inactive color and stop
				out += self.color_inactive(Symbol.meter * (self.width + 1 - i))
				break
		else:
			out += f'{Term.fg}'
		if not value in self.saved:
			self.saved[value] = out
		return out
class Meters:
	'''Holds all meters (and mini graphs) used by the UI boxes'''
	cpu: Meter
	battery: Meter
	mem: Dict[str, Union[Meter, Graph]] = {}        # memory meters/graphs keyed by stat name
	swap: Dict[str, Union[Meter, Graph]] = {}       # swap meters/graphs keyed by stat name
	disks_used: Dict[str, Meter] = {}               # per-disk used-space meters
	disks_free: Dict[str, Meter] = {}               # per-disk free-space meters
class Box:
	'''Box class with all needed attributes for create_box() function'''
	name: str
	height_p: int    # height as percent of terminal height
	width_p: int     # width as percent of terminal width
	x: int
	y: int
	width: int
	height: int
	#* View modes resolved from config / command line argument (ARG_MODE overrides CONFIG)
	proc_mode: bool = True if (CONFIG.view_mode == "proc" and not ARG_MODE) or ARG_MODE == "proc" else False
	stat_mode: bool = True if (CONFIG.view_mode == "stat" and not ARG_MODE) or ARG_MODE == "stat" else False
	out: str
	bg: str
	_b_cpu_h: int    # calculated cpu box height, used by other boxes to stack below it
	_b_mem_h: int    # calculated mem box height
	redraw_all: bool
	buffers: List[str] = []   # draw-buffer names registered by subclasses
	clock_on: bool = False
	clock: str = ""           # last rendered clock string, used to skip redundant redraws
	resized: bool = False
	@classmethod
	def calc_sizes(cls):
		'''Calculate sizes of boxes'''
		for sub in cls.__subclasses__():
			sub._calc_size() # type: ignore
			sub.resized = True # type: ignore
	@classmethod
	def draw_update_ms(cls, now: bool = True):
		'''Draw the "+ NNNNms -" update interval control in the cpu box title line and register its click areas'''
		update_string: str = f'{CONFIG.update_ms}ms'
		xpos: int = CpuBox.x + CpuBox.width - len(update_string) - 15
		if not "+" in Key.mouse:
			Key.mouse["+"] = [[xpos + 7 + i, CpuBox.y] for i in range(3)]
			Key.mouse["-"] = [[CpuBox.x + CpuBox.width - 4 + i, CpuBox.y] for i in range(3)]
		Draw.buffer("update_ms!" if now and not Menu.active else "update_ms",
			f'{Mv.to(CpuBox.y, xpos)}{THEME.cpu_box(Symbol.h_line * 7, Symbol.title_left)}{Fx.b}{THEME.hi_fg("+")} ',
			f'{THEME.title(update_string)} {THEME.hi_fg("-")}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}', only_save=Menu.active, once=True)
		if now and not Menu.active:
			Draw.clear("update_ms")
			if CONFIG.show_battery and hasattr(psutil, "sensors_battery") and psutil.sensors_battery():
				#* Battery status shares the title line; force a cpu box redraw to keep it aligned
				CpuBox.redraw = True
				CpuBox._draw_fg()
				Draw.out("cpu")
	@classmethod
	def draw_clock(cls, force: bool = False):
		'''Draw the clock in the cpu box title line; skipped when disabled or unchanged unless force=True'''
		if force: pass
		elif not cls.clock_on or Term.resized or strftime(CONFIG.draw_clock) == cls.clock: return
		cls.clock = strftime(CONFIG.draw_clock)
		clock_len = len(cls.clock[:(CpuBox.width-58)])
		now: bool = False if Menu.active else not force
		Draw.buffer("clock", (f'{Mv.to(CpuBox.y, ((CpuBox.width-2)//2)-(clock_len//2)-3)}{Fx.ub}{THEME.cpu_box}{Symbol.h_line * 4}'
			f'{Symbol.title_left}{Fx.b}{THEME.title(cls.clock[:clock_len])}{Fx.ub}{THEME.cpu_box}{Symbol.title_right}{Symbol.h_line * 4}{Term.fg}'),
			z=1, now=now, once=not force, only_save=Menu.active)
	@classmethod
	def draw_bg(cls, now: bool = True):
		'''Draw all boxes outlines and titles'''
		Draw.buffer("bg", "".join(sub._draw_bg() for sub in cls.__subclasses__()), now=now, z=1000, only_save=Menu.active, once=True) # type: ignore
		cls.draw_update_ms(now=now)
		if CONFIG.draw_clock: cls.draw_clock(force=True)
class SubBox:
	'''Holds dimensions for an inner box drawn inside a parent Box (e.g. the per-core grid in the cpu box)'''
	box_x: int = 0
	box_y: int = 0
	box_width: int = 0
	box_height: int = 0
	box_columns: int = 0   # number of grid columns inside the inner box
	column_size: int = 0   # layout density selector, larger means wider columns
class CpuBox(Box, SubBox):
	'''Top box: total cpu graphs, per-core stats grid, temperatures, load average, uptime and battery'''
	name = "cpu"
	x = 1
	y = 1
	height_p = 32    # percent of terminal height
	width_p = 100    # percent of terminal width
	resized: bool = True
	redraw: bool = False
	buffer: str = "cpu"
	battery_percent: int = 1000   # 1000 acts as a "not yet read" sentinel
	battery_secs: int = 0
	old_battery_pos = 0           # previous title-line position of the battery display
	clock_block: bool = True
	Box.buffers.append(buffer)
	@classmethod
	def _calc_size(cls):
		'''Calculate the box dimensions and the inner per-core grid layout from terminal size'''
		cpu = CpuCollector
		height_p: int
		if cls.proc_mode: height_p = 20
		else: height_p = cls.height_p
		cls.width = round(Term.width * cls.width_p / 100)
		cls.height = round(Term.height * height_p / 100)
		if cls.height < 8: cls.height = 8
		Box._b_cpu_h = cls.height
		#THREADS = 64
		cls.box_columns = ceil((THREADS + 1) / (cls.height - 5))
		#* Pick the widest per-core layout (column_size 2, 1 or 0) that still fits beside the cpu graph
		if cls.box_columns * (20 + 13 if cpu.got_sensors else 21) < cls.width - (cls.width // 3):
			cls.column_size = 2
			cls.box_width = (20 + 13 if cpu.got_sensors else 21) * cls.box_columns - ((cls.box_columns - 1) * 1)
		elif cls.box_columns * (15 + 6 if cpu.got_sensors else 15) < cls.width - (cls.width // 3):
			cls.column_size = 1
			cls.box_width = (15 + 6 if cpu.got_sensors else 15) * cls.box_columns - ((cls.box_columns - 1) * 1)
		elif cls.box_columns * (8 + 6 if cpu.got_sensors else 8) < cls.width - (cls.width // 3):
			cls.column_size = 0
		else:
			cls.box_columns = (cls.width - cls.width // 3) // (8 + 6 if cpu.got_sensors else 8); cls.column_size = 0
		if cls.column_size == 0: cls.box_width = (8 + 6 if cpu.got_sensors else 8) * cls.box_columns + 1
		cls.box_height = ceil(THREADS / cls.box_columns) + 4
		if cls.box_height > cls.height - 2: cls.box_height = cls.height - 2
		#* Anchor the inner box to the right edge, centered vertically
		cls.box_x = (cls.width - 1) - cls.box_width
		cls.box_y = cls.y + ceil((cls.height - 2) / 2) - ceil(cls.box_height / 2) + 1
	@classmethod
	def _draw_bg(cls) -> str:
		'''Draw the box outline, the "Menu" title button and the inner core-grid outline'''
		if not "M" in Key.mouse:
			Key.mouse["M"] = [[cls.x + 10 + i, cls.y] for i in range(6)]
		return (f'{create_box(box=cls, line_color=THEME.cpu_box)}'
		f'{Mv.to(cls.y, cls.x + 10)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("M")}{THEME.title("enu")}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}'
		f'{create_box(x=cls.box_x, y=cls.box_y, width=cls.box_width, height=cls.box_height, line_color=THEME.div_line, fill=False, title=CPU_NAME[:cls.box_width - 14] if not CONFIG.custom_cpu_name else CONFIG.custom_cpu_name[:cls.box_width - 14])}')
	@classmethod
	def _draw_fg(cls):
		'''Render graphs, meters, per-core rows, load average, uptime and battery into the cpu buffer'''
		cpu = CpuCollector
		if cpu.redraw: cls.redraw = True
		out: str = ""
		out_misc: str = ""
		lavg: str = ""
		x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
		bx, by, bw, bh = cls.box_x + 1, cls.box_y + 1, cls.box_width - 2, cls.box_height - 2
		hh: int = ceil(h / 2)   # split point for the mirrored up/down cpu graphs
		if cls.resized or cls.redraw:
			#* Rebuild graphs, meters and the "mode:" title button on resize or forced redraw
			if not "m" in Key.mouse:
				Key.mouse["m"] = [[cls.x + 16 + i, cls.y] for i in range(12)]
			out_misc += f'{Mv.to(cls.y, cls.x + 16)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("m")}{THEME.title}ode:{ARG_MODE or CONFIG.view_mode}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}'
			Graphs.cpu["up"] = Graph(w - bw - 3, hh, THEME.gradient["cpu"], cpu.cpu_usage[0])
			Graphs.cpu["down"] = Graph(w - bw - 3, h - hh, THEME.gradient["cpu"], cpu.cpu_usage[0], invert=True)
			Meters.cpu = Meter(cpu.cpu_usage[0][-1], bw - (21 if cpu.got_sensors else 9), "cpu")
			if cls.column_size > 0:
				for n in range(THREADS):
					Graphs.cores[n] = Graph(5 * cls.column_size, 1, None, cpu.cpu_usage[n + 1])
			if cpu.got_sensors:
				Graphs.temps[0] = Graph(5, 1, None, cpu.cpu_temp[0], max_value=cpu.cpu_temp_crit, offset=-23)
				if cls.column_size > 1:
					for n in range(1, THREADS + 1):
						Graphs.temps[n] = Graph(5, 1, None, cpu.cpu_temp[n], max_value=cpu.cpu_temp_crit, offset=-23)
			Draw.buffer("cpu_misc", out_misc, only_save=True)
		if CONFIG.show_battery and hasattr(psutil, "sensors_battery") and psutil.sensors_battery() and (ceil(psutil.sensors_battery().percent) != cls.battery_percent or psutil.sensors_battery().secsleft != cls.battery_secs or cls.resized or cls.redraw):
			cls.battery_percent = ceil(psutil.sensors_battery().percent)
			if psutil.sensors_battery().secsleft > 0:
				cls.battery_secs = psutil.sensors_battery().secsleft
				battery_time = f' {cls.battery_secs // 3600:02}:{(cls.battery_secs % 3600) // 60:02}'
			else:
				cls.battery_secs = 0
				battery_time = ""
			if not hasattr(Meters, "battery") or cls.resized:
				Meters.battery = Meter(cls.battery_percent, 10, "cpu", invert=True)
			if psutil.sensors_battery().power_plugged:
				battery_symbol: str = "▲" if cls.battery_percent < 100 else "■"
			else:
				battery_symbol = "▼"
			battery_pos = cls.width - len(f'{CONFIG.update_ms}') - 17 - (11 if cls.width >= 100 else 0) - len(battery_time) - len(f'{cls.battery_percent}')
			if battery_pos != cls.old_battery_pos and cls.old_battery_pos > 0 and not cls.resized:
				#* Erase the old battery segment of the title line when its position changed
				out += f'{Mv.to(y-1, cls.old_battery_pos)}{THEME.cpu_box(Symbol.h_line*(15 if cls.width >= 100 else 5))}'
			cls.old_battery_pos = battery_pos
			out += (f'{Mv.to(y-1, battery_pos)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.title}BAT{battery_symbol} {cls.battery_percent}%'+
				("" if cls.width < 100 else f' {Fx.ub}{Meters.battery(cls.battery_percent)}{Fx.b}') +
				f'{THEME.title}{battery_time}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}')
		cx = cy = cc = 0   # cursor column offset, row offset and current grid column inside the core box
		ccw = (bw + 1) // cls.box_columns   # width of one grid column
		if cpu.cpu_freq:
			freq: str = f'{cpu.cpu_freq} Mhz' if cpu.cpu_freq < 1000 else f'{float(cpu.cpu_freq / 1000):.1f} GHz'
			out += f'{Mv.to(by - 1, bx + bw - 9)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title(freq)}{Fx.ub}{THEME.div_line(Symbol.title_right)}'
		out += (f'{Mv.to(y, x)}{Graphs.cpu["up"](None if cls.resized else cpu.cpu_usage[0][-1])}{Mv.to(y + hh, x)}{Graphs.cpu["down"](None if cls.resized else cpu.cpu_usage[0][-1])}'
			f'{THEME.main_fg}{Mv.to(by + cy, bx + cx)}{Fx.b}{"CPU "}{Fx.ub}{Meters.cpu(cpu.cpu_usage[0][-1])}'
			f'{THEME.gradient["cpu"][cpu.cpu_usage[0][-1]]}{cpu.cpu_usage[0][-1]:>4}{THEME.main_fg}%')
		if cpu.got_sensors:
			out += (f'{THEME.inactive_fg} ⡀⡀⡀⡀⡀{Mv.l(5)}{THEME.gradient["temp"][min_max(cpu.cpu_temp[0][-1], 0, cpu.cpu_temp_crit) * 100 // cpu.cpu_temp_crit]}{Graphs.temps[0](None if cls.resized else cpu.cpu_temp[0][-1])}'
				f'{cpu.cpu_temp[0][-1]:>4}{THEME.main_fg}°C')
		cy += 1
		#* One row per logical core, wrapping into the next grid column when a column is full
		for n in range(1, THREADS + 1):
			out += f'{THEME.main_fg}{Mv.to(by + cy, bx + cx)}{Fx.b + "C" + Fx.ub if THREADS < 100 else ""}{str(n):<{2 if cls.column_size == 0 else 3}}'
			if cls.column_size > 0:
				out += f'{THEME.inactive_fg}{"⡀" * (5 * cls.column_size)}{Mv.l(5 * cls.column_size)}{THEME.gradient["cpu"][cpu.cpu_usage[n][-1]]}{Graphs.cores[n-1](None if cls.resized else cpu.cpu_usage[n][-1])}'
			else:
				out += f'{THEME.gradient["cpu"][cpu.cpu_usage[n][-1]]}'
			out += f'{cpu.cpu_usage[n][-1]:>{3 if cls.column_size < 2 else 4}}{THEME.main_fg}%'
			if cpu.got_sensors:
				if cls.column_size > 1:
					out += f'{THEME.inactive_fg} ⡀⡀⡀⡀⡀{Mv.l(5)}{THEME.gradient["temp"][100 if cpu.cpu_temp[n][-1] >= cpu.cpu_temp_crit else (cpu.cpu_temp[n][-1] * 100 // cpu.cpu_temp_crit)]}{Graphs.temps[n](None if cls.resized else cpu.cpu_temp[n][-1])}'
				else:
					out += f'{THEME.gradient["temp"][100 if cpu.cpu_temp[n][-1] >= cpu.cpu_temp_crit else (cpu.cpu_temp[n][-1] * 100 // cpu.cpu_temp_crit)]}'
				out += f'{cpu.cpu_temp[n][-1]:>4}{THEME.main_fg}°C'
			out += f'{THEME.div_line(Symbol.v_line)}'
			cy += 1
			if cy == bh:
				cc += 1; cy = 1; cx = ccw * cc
				if cc == cls.box_columns: break
		if cy < bh - 1: cy = bh - 1
		if cy < bh and cc < cls.box_columns:
			#* Pick the load average format that fits the remaining column width
			if cls.column_size == 2 and cpu.got_sensors:
				lavg = f' Load AVG: {" ".join(str(l) for l in cpu.load_avg):^19.19}'
			elif cls.column_size == 2 or (cls.column_size == 1 and cpu.got_sensors):
				lavg = f'LAV: {" ".join(str(l) for l in cpu.load_avg):^14.14}'
			elif cls.column_size == 1 or (cls.column_size == 0 and cpu.got_sensors):
				lavg = f'L {" ".join(str(round(l, 1)) for l in cpu.load_avg):^11.11}'
			else:
				lavg = f'{" ".join(str(round(l, 1)) for l in cpu.load_avg[:2]):^7.7}'
			out += f'{Mv.to(by + cy, bx + cx)}{THEME.main_fg}{lavg}{THEME.div_line(Symbol.v_line)}'
		out += f'{Mv.to(y + h - 1, x + 1)}{THEME.graph_text}up {cpu.uptime}'
		Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
		cls.resized = cls.redraw = cls.clock_block = False
class MemBox(Box):
	'''Box showing memory and swap usage, with an optional disks pane on the right.'''
	name = "mem"
	#* Percentages of the terminal this box occupies in the default layout
	height_p = 38
	width_p = 45
	x = 1
	y = 1
	#* Meter/graph widths and detail level, recalculated in _calc_size()
	mem_meter: int = 0
	mem_size: int = 0
	disk_meter: int = 0
	divider: int = 0
	mem_width: int = 0
	disks_width: int = 0
	graph_height: int
	resized: bool = True
	redraw: bool = False
	buffer: str = "mem"
	swap_on: bool = CONFIG.show_swap
	Box.buffers.append(buffer)
	#* Value categories shown for memory and swap respectively
	mem_names: List[str] = ["used", "available", "cached", "free"]
	swap_names: List[str] = ["used", "free"]
	@classmethod
	def _calc_size(cls):
		'''Calculate box geometry plus the widths of the mem/disk meters and
		the per-item graph height, based on terminal size and config options.'''
		width_p: int; height_p: int
		if cls.stat_mode:
			width_p, height_p = 100, cls.height_p
		else:
			width_p, height_p = cls.width_p, cls.height_p
		cls.width = round(Term.width * width_p / 100)
		cls.height = round(Term.height * height_p / 100) + 1
		Box._b_mem_h = cls.height
		#* Placed directly below the cpu box
		cls.y = Box._b_cpu_h + 1
		if CONFIG.show_disks:
			#* Split the box roughly in half between the mem and disks panes
			cls.mem_width = ceil((cls.width - 3) / 2)
			cls.disks_width = cls.width - cls.mem_width - 3
			if cls.mem_width + cls.disks_width < cls.width - 2: cls.mem_width += 1
			cls.divider = cls.x + cls.mem_width
		else:
			cls.mem_width = cls.width - 1
		#* One item needs 6 rows when swap is shown inline, else 4
		item_height: int = 6 if cls.swap_on and not CONFIG.swap_disk else 4
		#* mem_size selects one of three detail levels depending on available space
		if cls.height - (3 if cls.swap_on and not CONFIG.swap_disk else 2) > 2 * item_height: cls.mem_size = 3
		elif cls.mem_width > 25: cls.mem_size = 2
		else: cls.mem_size = 1
		cls.mem_meter = cls.width - (cls.disks_width if CONFIG.show_disks else 0) - (9 if cls.mem_size > 2 else 20)
		if cls.mem_size == 1: cls.mem_meter += 6
		if cls.mem_meter < 1: cls.mem_meter = 0
		if CONFIG.mem_graphs:
			#* Height of each mem graph, minimum 1 row
			cls.graph_height = round(((cls.height - (2 if cls.swap_on and not CONFIG.swap_disk else 1)) - (2 if cls.mem_size == 3 else 1) * item_height) / item_height)
			if cls.graph_height == 0: cls.graph_height = 1
			if cls.graph_height > 1: cls.mem_meter += 6
		else:
			cls.graph_height = 0
		if CONFIG.show_disks:
			cls.disk_meter = cls.width - cls.mem_width - 23
			if cls.disks_width < 25:
				cls.disk_meter += 10
			if cls.disk_meter < 1: cls.disk_meter = 0
@classmethod
def _draw_bg(cls) -> str:
if cls.proc_mode: return ""
out: str = ""
out += f'{create_box(box=cls, line_color=THEME.mem_box)}'
if CONFIG.show_disks:
out += (f'{Mv.to(cls.y, cls.divider + 2)}{THEME.mem_box(Symbol.title_left)}{Fx.b}{THEME.title("disks")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}'
f'{Mv.to(cls.y, cls.divider)}{THEME.mem_box(Symbol.div_up)}'
f'{Mv.to(cls.y + cls.height - 1, cls.divider)}{THEME.mem_box(Symbol.div_down)}{THEME.div_line}'
f'{"".join(f"{Mv.to(cls.y + i, cls.divider)}{Symbol.v_line}" for i in range(1, cls.height - 1))}')
return out
	@classmethod
	def _draw_fg(cls):
		'''Draw the dynamic contents: mem/swap meters or graphs and disk usage bars.
		Rebuilds meters and the static overlays only when resized or a redraw is flagged.'''
		if cls.proc_mode: return
		mem = MemCollector
		if mem.redraw: cls.redraw = True
		out: str = ""
		out_misc: str = ""
		gbg: str = ""
		gmv: str = ""
		gli: str = ""
		x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
		if cls.resized or cls.redraw:
			cls._calc_size()
			out_misc += cls._draw_bg()
			#* Recreate all meters/graphs on resize or forced redraw
			Meters.mem = {}
			Meters.swap = {}
			Meters.disks_used = {}
			Meters.disks_free = {}
			if cls.mem_meter > 0:
				for name in cls.mem_names:
					if CONFIG.mem_graphs:
						Meters.mem[name] = Graph(cls.mem_meter, cls.graph_height, THEME.gradient[name], mem.vlist[name])
					else:
						Meters.mem[name] = Meter(mem.percent[name], cls.mem_meter, name)
				if cls.swap_on:
					for name in cls.swap_names:
						if CONFIG.mem_graphs and not CONFIG.swap_disk:
							Meters.swap[name] = Graph(cls.mem_meter, cls.graph_height, THEME.gradient[name], mem.swap_vlist[name])
						elif CONFIG.swap_disk and CONFIG.show_disks:
							#* Swap shown as a pseudo-disk entry instead of inline meters
							Meters.disks_used["__swap"] = Meter(mem.swap_percent["used"], cls.disk_meter, "used")
							if len(mem.disks) * 3 <= h + 1:
								Meters.disks_free["__swap"] = Meter(mem.swap_percent["free"], cls.disk_meter, "free")
							break
						else:
							Meters.swap[name] = Meter(mem.swap_percent[name], cls.mem_meter, name)
			if cls.disk_meter > 0:
				for n, name in enumerate(mem.disks.keys()):
					if n * 2 > h: break
					Meters.disks_used[name] = Meter(mem.disks[name]["used_percent"], cls.disk_meter, "used")
					if len(mem.disks) * 3 <= h + 1:
						Meters.disks_free[name] = Meter(mem.disks[name]["free_percent"], cls.disk_meter, "free")
			#* Clickable "graph" and "swap" toggle buttons in the box border
			if not "g" in Key.mouse:
				Key.mouse["g"] = [[x + cls.mem_width - 8 + i, y-1] for i in range(5)]
			out_misc += (f'{Mv.to(y-1, x + cls.mem_width - 9)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.mem_graphs else ""}'
				f'{THEME.hi_fg("g")}{THEME.title("raph")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
			if CONFIG.show_disks:
				if not "s" in Key.mouse:
					Key.mouse["s"] = [[x + w - 6 + i, y-1] for i in range(4)]
				out_misc += (f'{Mv.to(y-1, x + w - 7)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.swap_disk else ""}'
				f'{THEME.hi_fg("s")}{THEME.title("wap")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
			Draw.buffer("mem_misc", out_misc, only_save=True)
		#* Mem
		cx = 1; cy = 1
		out += f'{Mv.to(y, x+1)}{THEME.title}{Fx.b}Total:{mem.string["total"]:>{cls.mem_width - 9}}{Fx.ub}{THEME.main_fg}'
		if cls.graph_height > 0:
			gli = f'{Mv.l(2)}{THEME.mem_box(Symbol.title_right)}{THEME.div_line}{Symbol.h_line * (cls.mem_width - 1)}{"" if CONFIG.show_disks else THEME.mem_box}{Symbol.title_left}{Mv.l(cls.mem_width - 1)}{THEME.title}'
		if cls.graph_height >= 2:
			gbg = f'{Mv.l(1)}'
			gmv = f'{Mv.l(cls.mem_width - 2)}{Mv.u(cls.graph_height - 1)}'
		big_mem: bool = True if cls.mem_width > 21 else False
		for name in cls.mem_names:
			if cls.mem_size > 2:
				out += (f'{Mv.to(y+cy, x+cx)}{gli}{name.capitalize()[:None if big_mem else 5]+":":<{1 if big_mem else 6.6}}{Mv.to(y+cy, x+cx + cls.mem_width - 3 - (len(mem.string[name])))}{Fx.trans(mem.string[name])}'
						f'{Mv.to(y+cy+1, x+cx)}{gbg}{Meters.mem[name](None if cls.resized else mem.percent[name])}{gmv}{str(mem.percent[name])+"%":>4}')
				cy += 2 if not cls.graph_height else cls.graph_height + 1
			else:
				out += f'{Mv.to(y+cy, x+cx)}{name.capitalize():{5.5 if cls.mem_size > 1 else 1.1}} {gbg}{Meters.mem[name](None if cls.resized else mem.percent[name])}{mem.string[name][:None if cls.mem_size > 1 else -2]:>{9 if cls.mem_size > 1 else 7}}'
				cy += 1 if not cls.graph_height else cls.graph_height
		#* Swap
		if cls.swap_on and CONFIG.show_swap and not CONFIG.swap_disk:
			if h - cy > 5:
				if cls.graph_height > 0: out += f'{Mv.to(y+cy, x+cx)}{gli}'
				cy += 1
			out += f'{Mv.to(y+cy, x+cx)}{THEME.title}{Fx.b}Swap:{mem.swap_string["total"]:>{cls.mem_width - 8}}{Fx.ub}{THEME.main_fg}'
			cy += 1
			for name in cls.swap_names:
				if cls.mem_size > 2:
					out += (f'{Mv.to(y+cy, x+cx)}{gli}{name.capitalize()[:None if big_mem else 5]+":":<{1 if big_mem else 6.6}}{Mv.to(y+cy, x+cx + cls.mem_width - 3 - (len(mem.swap_string[name])))}{Fx.trans(mem.swap_string[name])}'
							f'{Mv.to(y+cy+1, x+cx)}{gbg}{Meters.swap[name](None if cls.resized else mem.swap_percent[name])}{gmv}{str(mem.swap_percent[name])+"%":>4}')
					cy += 2 if not cls.graph_height else cls.graph_height + 1
				else:
					out += f'{Mv.to(y+cy, x+cx)}{name.capitalize():{5.5 if cls.mem_size > 1 else 1.1}} {gbg}{Meters.swap[name](None if cls.resized else mem.swap_percent[name])}{mem.swap_string[name][:None if cls.mem_size > 1 else -2]:>{9 if cls.mem_size > 1 else 7}}'; cy += 1 if not cls.graph_height else cls.graph_height
		if cls.graph_height > 0 and not cy == h: out += f'{Mv.to(y+cy, x+cx)}{gli}'
		#* Disks
		if CONFIG.show_disks:
			cx = x + cls.mem_width - 1; cy = 0
			big_disk: bool = True if cls.disks_width >= 25 else False
			gli = f'{Mv.l(2)}{THEME.div_line}{Symbol.title_right}{Symbol.h_line * cls.disks_width}{THEME.mem_box}{Symbol.title_left}{Mv.l(cls.disks_width - 1)}'
			for name, item in mem.disks.items():
				if cy > h - 2: break
				out += Fx.trans(f'{Mv.to(y+cy, x+cx)}{gli}{THEME.title}{Fx.b}{item["name"]:{cls.disks_width - 2}.12}{Mv.to(y+cy, x + cx + cls.disks_width - 11)}{item["total"][:None if big_disk else -2]:>9}')
				out += f'{Mv.to(y+cy, x + cx + (cls.disks_width // 2) - (len(item["io"]) // 2) - 2)}{Fx.ub}{THEME.main_fg}{item["io"]}{Fx.ub}{THEME.main_fg}{Mv.to(y+cy+1, x+cx)}'
				out += f'Used:{str(item["used_percent"]) + "%":>4} ' if big_disk else "U "
				out += f'{Meters.disks_used[name]}{item["used"][:None if big_disk else -2]:>{9 if big_disk else 7}}'
				cy += 2
				#* Free line only when all disks fit with three rows each
				if len(mem.disks) * 3 <= h + 1:
					if cy > h - 1: break
					out += Mv.to(y+cy, x+cx)
					out += f'Free:{str(item["free_percent"]) + "%":>4} ' if big_disk else f'{"F "}'
					out += f'{Meters.disks_free[name]}{item["free"][:None if big_disk else -2]:>{9 if big_disk else 7}}'
					cy += 1
					if len(mem.disks) * 4 <= h + 1: cy += 1
		Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
		cls.resized = cls.redraw = False
class NetBox(Box, SubBox):
	'''Box showing network interface activity: download/upload graphs and a stats sub-box.'''
	name = "net"
	#* Percentages of the terminal this box occupies in the default layout
	height_p = 30
	width_p = 45
	x = 1
	y = 1
	resized: bool = True
	redraw: bool = True
	#* Heights of the download (top) and upload (bottom) graphs, set in _calc_size()
	graph_height: Dict[str, int] = {}
	symbols: Dict[str, str] = {"download" : "▼", "upload" : "▲"}
	buffer: str = "net"
	Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
width_p: int
if cls.stat_mode:
width_p = 100
else:
width_p = cls.width_p
cls.width = round(Term.width * width_p / 100)
cls.height = Term.height - Box._b_cpu_h - Box._b_mem_h
cls.y = Term.height - cls.height + 1
cls.box_width = 27 if cls.width > 45 else 19
cls.box_height = 9 if cls.height > 10 else cls.height - 2
cls.box_x = cls.width - cls.box_width - 1
cls.box_y = cls.y + ((cls.height - 2) // 2) - cls.box_height // 2 + 1
cls.graph_height["download"] = round((cls.height - 2) / 2)
cls.graph_height["upload"] = cls.height - 2 - cls.graph_height["download"]
cls.redraw = True
@classmethod
def _draw_bg(cls) -> str:
if cls.proc_mode: return ""
return f'{create_box(box=cls, line_color=THEME.net_box)}\
{create_box(x=cls.box_x, y=cls.box_y, width=cls.box_width, height=cls.box_height, line_color=THEME.div_line, fill=False, title="Download", title2="Upload")}'
	@classmethod
	def _draw_fg(cls):
		'''Draw the download/upload graphs and the speed/top/total stats for the active NIC.
		Rebuilds the border buttons (zero/auto/sync/nic switcher) only on resize or redraw.'''
		if cls.proc_mode: return
		net = NetCollector
		if net.redraw: cls.redraw = True
		if not net.nic: return
		out: str = ""
		out_misc: str = ""
		x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
		bx, by, bw, bh = cls.box_x + 1, cls.box_y + 1, cls.box_width - 2, cls.box_height - 2
		#* True when counters have been zeroed by the user ("z" toggle)
		reset: bool = bool(net.stats[net.nic]["download"]["offset"])
		if cls.resized or cls.redraw:
			out_misc += cls._draw_bg()
			#* Register clickable regions for zero, nic prev/next, auto and sync buttons
			if not "b" in Key.mouse:
				Key.mouse["b"] = [[x+w - len(net.nic[:10]) - 9 + i, y-1] for i in range(4)]
				Key.mouse["n"] = [[x+w - 5 + i, y-1] for i in range(4)]
				Key.mouse["z"] = [[x+w - len(net.nic[:10]) - 14 + i, y-1] for i in range(4)]
			out_misc += (f'{Mv.to(y-1, x+w - 25)}{THEME.net_box}{Symbol.h_line * (10 - len(net.nic[:10]))}{Symbol.title_left}{Fx.b if reset else ""}{THEME.hi_fg("z")}{THEME.title("ero")}'
				f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}'
				f'{THEME.net_box}{Symbol.title_left}{Fx.b}{THEME.hi_fg("<b")} {THEME.title(net.nic[:10])} {THEME.hi_fg("n>")}{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
			if w - len(net.nic[:10]) - 20 > 6:
				if not "a" in Key.mouse: Key.mouse["a"] = [[x+w - 20 - len(net.nic[:10]) + i, y-1] for i in range(4)]
				out_misc += (f'{Mv.to(y-1, x+w - 21 - len(net.nic[:10]))}{THEME.net_box(Symbol.title_left)}{Fx.b if net.auto_min else ""}{THEME.hi_fg("a")}{THEME.title("uto")}'
					f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
			if w - len(net.nic[:10]) - 20 > 13:
				if not "y" in Key.mouse: Key.mouse["y"] = [[x+w - 26 - len(net.nic[:10]) + i, y-1] for i in range(4)]
				out_misc += (f'{Mv.to(y-1, x+w - 27 - len(net.nic[:10]))}{THEME.net_box(Symbol.title_left)}{Fx.b if CONFIG.net_sync else ""}{THEME.title("s")}{THEME.hi_fg("y")}{THEME.title("nc")}'
					f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
			Draw.buffer("net_misc", out_misc, only_save=True)
		cy = 0
		for direction in ["download", "upload"]:
			strings = net.strings[net.nic][direction]
			stats = net.stats[net.nic][direction]
			if cls.redraw: stats["redraw"] = True
			if stats["redraw"] or cls.resized:
				#* Upload graph is drawn inverted (grows downward from the middle)
				Graphs.net[direction] = Graph(w - bw - 3, cls.graph_height[direction], THEME.gradient[direction], stats["speed"], max_value=net.sync_top if CONFIG.net_sync else stats["graph_top"],
					invert=False if direction == "download" else True, color_max_value=net.net_min.get(direction) if CONFIG.net_color_fixed else None)
			out += f'{Mv.to(y if direction == "download" else y + cls.graph_height["download"], x)}{Graphs.net[direction](None if stats["redraw"] else stats["speed"][-1])}'
			out += (f'{Mv.to(by+cy, bx)}{THEME.main_fg}{cls.symbols[direction]} {strings["byte_ps"]:<10.10}' +
				("" if bw < 20 else f'{Mv.to(by+cy, bx+bw - 12)}{"(" + strings["bit_ps"] + ")":>12.12}'))
			cy += 1 if bh != 3 else 2
			#* Top speed and total transferred, shown only when the sub-box is tall enough
			if bh >= 6:
				out += f'{Mv.to(by+cy, bx)}{cls.symbols[direction]} {"Top:"}{Mv.to(by+cy, bx+bw - 12)}{"(" + strings["top"] + ")":>12.12}'
				cy += 1
			if bh >= 4:
				out += f'{Mv.to(by+cy, bx)}{cls.symbols[direction]} {"Total:"}{Mv.to(by+cy, bx+bw - 10)}{strings["total"]:>10.10}'
				if bh > 2 and bh % 2: cy += 2
				else: cy += 1
			stats["redraw"] = False
		out += (f'{Mv.to(y, x)}{THEME.graph_text(net.sync_string if CONFIG.net_sync else net.strings[net.nic]["download"]["graph_top"])}'
			f'{Mv.to(y+h-1, x)}{THEME.graph_text(net.sync_string if CONFIG.net_sync else net.strings[net.nic]["upload"]["graph_top"])}')
		Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
		cls.redraw = cls.resized = False
class ProcBox(Box):
	'''Box showing the process list with selection, filtering, sorting and an
	optional detailed view for a single process.'''
	name = "proc"
	#* Percentages of the terminal this box occupies in the default layout
	height_p = 68
	width_p = 55
	x = 1
	y = 1
	#* current_y/current_h differ from y/height when the detailed view is open
	current_y: int = 0
	current_h: int = 0
	#* Selection state: max visible rows, selected row index (0 = none) and its pid
	select_max: int = 0
	selected: int = 0
	selected_pid: int = 0
	last_selection: int = 0
	filtering: bool = False
	moved: bool = False
	#* Index of the first process shown (scroll offset, 1-based)
	start: int = 1
	count: int = 0
	s_len: int = 0
	#* Detailed single-process view state and geometry
	detailed: bool = False
	detailed_x: int = 0
	detailed_y: int = 0
	detailed_width: int = 0
	detailed_height: int = 8
	resized: bool = True
	redraw: bool = True
	buffer: str = "proc"
	#* Consecutive low-cpu update count per pid, used to expire the mini cpu graphs
	pid_counter: Dict[int, int] = {}
	Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
width_p: int; height_p: int
if cls.proc_mode:
width_p, height_p = 100, 80
else:
width_p, height_p = cls.width_p, cls.height_p
cls.width = round(Term.width * width_p / 100)
cls.height = round(Term.height * height_p / 100)
if cls.height + Box._b_cpu_h > Term.height: cls.height = Term.height - Box._b_cpu_h
cls.x = Term.width - cls.width + 1
cls.y = Box._b_cpu_h + 1
cls.current_y = cls.y
cls.current_h = cls.height
cls.select_max = cls.height - 3
cls.redraw = True
cls.resized = True
@classmethod
def _draw_bg(cls) -> str:
if cls.stat_mode: return ""
return create_box(box=cls, line_color=THEME.proc_box)
	@classmethod
	def selector(cls, key: str, mouse_pos: Tuple[int, int] = (0, 0)):
		'''Update the selection/scroll position from a key press or mouse event,
		then trigger a redraw-only collection if the position actually changed.
		selected == 0 means no row is selected; start is the 1-based scroll offset.'''
		old: Tuple[int, int] = (cls.start, cls.selected)
		new_sel: int
		if key == "up":
			if cls.selected == 1 and cls.start > 1:
				cls.start -= 1
			elif cls.selected == 1:
				cls.selected = 0
			elif cls.selected > 1:
				cls.selected -= 1
		elif key == "down":
			#* Leaving the detailed view restores the previous selection
			if cls.selected == 0 and ProcCollector.detailed and cls.last_selection:
				cls.selected = cls.last_selection
				cls.last_selection = 0
			if cls.selected == cls.select_max and cls.start < ProcCollector.num_procs - cls.select_max + 1:
				cls.start += 1
			elif cls.selected < cls.select_max:
				cls.selected += 1
		elif key == "mouse_scroll_up" and cls.start > 1:
			cls.start -= 5
		elif key == "mouse_scroll_down" and cls.start < ProcCollector.num_procs - cls.select_max + 1:
			cls.start += 5
		elif key == "page_up" and cls.start > 1:
			cls.start -= cls.select_max
		elif key == "page_down" and cls.start < ProcCollector.num_procs - cls.select_max + 1:
			cls.start += cls.select_max
		elif key == "home":
			if cls.start > 1: cls.start = 1
			elif cls.selected > 0: cls.selected = 0
		elif key == "end":
			if cls.start < ProcCollector.num_procs - cls.select_max + 1: cls.start = ProcCollector.num_procs - cls.select_max + 1
			elif cls.selected < cls.select_max: cls.selected = cls.select_max
		elif key == "mouse_click":
			#* Click in the scrollbar column jumps the scroll position
			if mouse_pos[0] > cls.x + cls.width - 4 and mouse_pos[1] > cls.current_y + 1 and mouse_pos[1] < cls.current_y + 1 + cls.select_max + 1:
				if mouse_pos[1] == cls.current_y + 2:
					cls.start = 1
				elif mouse_pos[1] == cls.current_y + 1 + cls.select_max:
					cls.start = ProcCollector.num_procs - cls.select_max + 1
				else:
					cls.start = round((mouse_pos[1] - cls.current_y) * ((ProcCollector.num_procs - cls.select_max - 2) / (cls.select_max - 2)))
			else:
				#* Click on a row selects it; a second click on the same row acts as enter
				new_sel = mouse_pos[1] - cls.current_y - 1 if mouse_pos[1] >= cls.current_y - 1 else 0
				if new_sel > 0 and new_sel == cls.selected:
					Key.list.insert(0, "enter")
					return
				elif new_sel > 0 and new_sel != cls.selected:
					if cls.last_selection: cls.last_selection = 0
					cls.selected = new_sel
		elif key == "mouse_unselect":
			cls.selected = 0
		#* Clamp scroll offset and selection to the valid range
		if cls.start > ProcCollector.num_procs - cls.select_max + 1 and ProcCollector.num_procs > cls.select_max: cls.start = ProcCollector.num_procs - cls.select_max + 1
		elif cls.start > ProcCollector.num_procs: cls.start = ProcCollector.num_procs
		if cls.start < 1: cls.start = 1
		if cls.selected > ProcCollector.num_procs and ProcCollector.num_procs < cls.select_max: cls.selected = ProcCollector.num_procs
		elif cls.selected > cls.select_max: cls.selected = cls.select_max
		if cls.selected < 0: cls.selected = 0
		if old != (cls.start, cls.selected):
			cls.moved = True
			Collector.collect(ProcCollector, proc_interrupt=True, redraw=True, only_draw=True)
@classmethod
def _draw_fg(cls):
if cls.stat_mode: return
proc = ProcCollector
if proc.proc_interrupt: return
if proc.redraw: cls.redraw = True
out: str = ""
out_misc: str = ""
n: int = 0
x, y, w, h = cls.x + 1, cls.current_y + 1, cls.width - 2, cls.current_h - 2
prog_len: int; arg_len: int; val: int; c_color: str; m_color: str; t_color: str; sort_pos: int; tree_len: int; is_selected: bool; calc: int
dgx: int; dgw: int; dx: int; dw: int; dy: int
l_count: int = 0
scroll_pos: int = 0
killed: bool = True
indent: str = ""
offset: int = 0
tr_show: bool = True
usr_show: bool = True
vals: List[str]
g_color: str = ""
s_len: int = 0
if proc.search_filter: s_len = len(proc.search_filter[:10])
loc_string: str = f'{cls.start + cls.selected - 1}/{proc.num_procs}'
end: str = ""
if proc.detailed:
dgx, dgw = x, w // 3
dw = w - dgw - 1
if dw > 120:
dw = 120
dgw = w - 121
dx = x + dgw + 2
dy = cls.y + 1
if w > 67:
arg_len = w - 53 - (1 if proc.num_procs > cls.select_max else 0)
prog_len = 15
else:
arg_len = 0
prog_len = w - 38 - (1 if proc.num_procs > cls.select_max else 0)
if prog_len < 15:
tr_show = False
prog_len += 5
if prog_len < 12:
usr_show = False
prog_len += 9
if CONFIG.proc_tree:
tree_len = arg_len + prog_len + 6
arg_len = 0
#* Buttons and titles only redrawn if needed
if cls.resized or cls.redraw:
s_len += len(CONFIG.proc_sorting)
if cls.resized or s_len != cls.s_len or proc.detailed:
cls.s_len = s_len
for k in ["e", "r", "c", "t", "k", "i", "enter", "left", " ", "f", "delete"]:
if k in Key.mouse: del Key.mouse[k]
if proc.detailed:
killed = proc.details["killed"]
main = THEME.main_fg if cls.selected == 0 and not killed else THEME.inactive_fg
hi = THEME.hi_fg if cls.selected == 0 and not killed else THEME.inactive_fg
title = THEME.title if cls.selected == 0 and not killed else THEME.inactive_fg
if cls.current_y != cls.y + 8 or cls.resized or Graphs.detailed_cpu is NotImplemented:
cls.current_y = cls.y + 8
cls.current_h = cls.height - 8
for i in range(7): out_misc += f'{Mv.to(dy+i, x)}{" " * w}'
out_misc += (f'{Mv.to(dy+7, x-1)}{THEME.proc_box}{Symbol.title_right}{Symbol.h_line*w}{Symbol.title_left}'
f'{Mv.to(dy+7, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(cls.name)}{Fx.ub}{THEME.proc_box(Symbol.title_right)}{THEME.div_line}')
for i in range(7):
out_misc += f'{Mv.to(dy + i, dgx + dgw + 1)}{Symbol.v_line}'
out_misc += (f'{Mv.to(dy-1, x-1)}{THEME.proc_box}{Symbol.left_up}{Symbol.h_line*w}{Symbol.right_up}'
f'{Mv.to(dy-1, dgx + dgw + 1)}{Symbol.div_up}'
f'{Mv.to(dy-1, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(str(proc.details["pid"]))}{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(proc.details["name"][:(dgw - 11)])}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if cls.selected == 0:
Key.mouse["enter"] = [[dx+dw-10 + i, dy-1] for i in range(7)]
if cls.selected == 0 and not killed:
Key.mouse["t"] = [[dx+2 + i, dy-1] for i in range(9)]
out_misc += (f'{Mv.to(dy-1, dx+dw - 11)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{title if cls.selected > 0 else THEME.title}close{Fx.ub} {main if cls.selected > 0 else THEME.main_fg}{Symbol.enter}{THEME.proc_box(Symbol.title_right)}'
f'{Mv.to(dy-1, dx+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}t{title}erminate{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if dw > 28:
if cls.selected == 0 and not killed and not "k" in Key.mouse: Key.mouse["k"] = [[dx + 13 + i, dy-1] for i in range(4)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}k{title}ill{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if dw > 39:
if cls.selected == 0 and not killed and not "i" in Key.mouse: Key.mouse["i"] = [[dx + 19 + i, dy-1] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}i{title}nterrupt{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if Graphs.detailed_cpu is NotImplemented or cls.resized:
Graphs.detailed_cpu = Graph(dgw+1, 7, THEME.gradient["cpu"], proc.details_cpu)
Graphs.detailed_mem = Graph(dw // 3, 1, None, proc.details_mem)
cls.select_max = cls.height - 11
y = cls.y + 9
h = cls.height - 10
else:
if cls.current_y != cls.y or cls.resized:
cls.current_y = cls.y
cls.current_h = cls.height
y, h = cls.y + 1, cls.height - 2
out_misc += (f'{Mv.to(y-1, x-1)}{THEME.proc_box}{Symbol.left_up}{Symbol.h_line*w}{Symbol.right_up}'
f'{Mv.to(y-1, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(cls.name)}{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
f'{Mv.to(y+7, x-1)}{THEME.proc_box(Symbol.v_line)}{Mv.r(w)}{THEME.proc_box(Symbol.v_line)}')
cls.select_max = cls.height - 3
sort_pos = x + w - len(CONFIG.proc_sorting) - 7
if not "left" in Key.mouse:
Key.mouse["left"] = [[sort_pos + i, y-1] for i in range(3)]
Key.mouse["right"] = [[sort_pos + len(CONFIG.proc_sorting) + 3 + i, y-1] for i in range(3)]
out_misc += (f'{Mv.to(y-1, x + 8)}{THEME.proc_box(Symbol.h_line * (w - 9))}' +
("" if not proc.detailed else f"{Mv.to(dy+7, dgx + dgw + 1)}{THEME.proc_box(Symbol.div_down)}") +
f'{Mv.to(y-1, sort_pos)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("<")} {THEME.title(CONFIG.proc_sorting)} '
f'{THEME.hi_fg(">")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 29 + s_len:
if not "e" in Key.mouse: Key.mouse["e"] = [[sort_pos - 5 + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, sort_pos - 6)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_tree else ""}'
f'{THEME.title("tre")}{THEME.hi_fg("e")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 37 + s_len:
if not "r" in Key.mouse: Key.mouse["r"] = [[sort_pos - 14 + i, y-1] for i in range(7)]
out_misc += (f'{Mv.to(y-1, sort_pos - 15)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_reversed else ""}'
f'{THEME.hi_fg("r")}{THEME.title("everse")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 47 + s_len:
if not "c" in Key.mouse: Key.mouse["c"] = [[sort_pos - 24 + i, y-1] for i in range(8)]
out_misc += (f'{Mv.to(y-1, sort_pos - 25)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_per_core else ""}'
f'{THEME.title("per-")}{THEME.hi_fg("c")}{THEME.title("ore")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if not "f" in Key.mouse or cls.resized: Key.mouse["f"] = [[x+5 + i, y-1] for i in range(6 if not proc.search_filter else 2 + len(proc.search_filter[-10:]))]
if proc.search_filter:
if not "delete" in Key.mouse: Key.mouse["delete"] = [[x+11 + len(proc.search_filter[-10:]) + i, y-1] for i in range(3)]
elif "delete" in Key.mouse:
del Key.mouse["delete"]
out_misc += (f'{Mv.to(y-1, x + 7)}{THEME.proc_box(Symbol.title_left)}{Fx.b if cls.filtering or proc.search_filter else ""}{THEME.hi_fg("f")}{THEME.title}' +
("ilter" if not proc.search_filter and not cls.filtering else f' {proc.search_filter[-(10 if w < 83 else w - 74):]}{(Fx.bl + "█" + Fx.ubl) if cls.filtering else THEME.hi_fg(" del")}') +
f'{THEME.proc_box(Symbol.title_right)}')
main = THEME.inactive_fg if cls.selected == 0 else THEME.main_fg
hi = THEME.inactive_fg if cls.selected == 0 else THEME.hi_fg
title = THEME.inactive_fg if cls.selected == 0 else THEME.title
out_misc += (f'{Mv.to(y+h, x + 1)}{THEME.proc_box}{Symbol.h_line*(w-4)}'
f'{Mv.to(y+h, x+1)}{THEME.proc_box(Symbol.title_left)}{main}{Symbol.up} {Fx.b}{THEME.main_fg("select")} {Fx.ub}'
f'{THEME.inactive_fg if cls.selected == cls.select_max else THEME.main_fg}{Symbol.down}{THEME.proc_box(Symbol.title_right)}'
f'{THEME.proc_box(Symbol.title_left)}{title}{Fx.b}info {Fx.ub}{main}{Symbol.enter}{THEME.proc_box(Symbol.title_right)}')
if not "enter" in Key.mouse: Key.mouse["enter"] = [[x + 14 + i, y+h] for i in range(6)]
if w - len(loc_string) > 34:
if not "t" in Key.mouse: Key.mouse["t"] = [[x + 22 + i, y+h] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}t{title}erminate{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if w - len(loc_string) > 40:
if not "k" in Key.mouse: Key.mouse["k"] = [[x + 33 + i, y+h] for i in range(4)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}k{title}ill{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if w - len(loc_string) > 51:
if not "i" in Key.mouse: Key.mouse["i"] = [[x + 39 + i, y+h] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}i{title}nterrupt{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if CONFIG.proc_tree and w - len(loc_string) > 65:
if not " " in Key.mouse: Key.mouse[" "] = [[x + 50 + i, y+h] for i in range(12)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}spc {title}collapse{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
#* Processes labels
selected: str = CONFIG.proc_sorting
label: str
if selected == "memory": selected = "mem"
if selected == "threads" and not CONFIG.proc_tree and not arg_len: selected = "tr"
if CONFIG.proc_tree:
label = (f'{THEME.title}{Fx.b}{Mv.to(y, x)}{" Tree:":<{tree_len-2}}' + (f'{"Threads: ":<9}' if tr_show else " "*4) + (f'{"User:":<9}' if usr_show else "") + f'Mem%{"Cpu%":>11}{Fx.ub}{THEME.main_fg} ' +
(" " if proc.num_procs > cls.select_max else ""))
if selected in ["pid", "program", "arguments"]: selected = "tree"
else:
label = (f'{THEME.title}{Fx.b}{Mv.to(y, x)}{"Pid:":>7} {"Program:" if prog_len > 8 else "Prg:":<{prog_len}}' + (f'{"Arguments:":<{arg_len-4}}' if arg_len else "") +
((f'{"Threads:":<9}' if arg_len else f'{"Tr:":^5}') if tr_show else "") + (f'{"User:":<9}' if usr_show else "") + f'Mem%{"Cpu%":>11}{Fx.ub}{THEME.main_fg} ' +
(" " if proc.num_procs > cls.select_max else ""))
if selected == "program" and prog_len <= 8: selected = "prg"
selected = selected.split(" ")[0].capitalize()
if CONFIG.proc_mem_bytes: label = label.replace("Mem%", "MemB")
label = label.replace(selected, f'{Fx.u}{selected}{Fx.uu}')
out_misc += label
Draw.buffer("proc_misc", out_misc, only_save=True)
#* Detailed box draw
if proc.detailed:
if proc.details["status"] == psutil.STATUS_RUNNING: stat_color = Fx.b
elif proc.details["status"] in [psutil.STATUS_DEAD, psutil.STATUS_STOPPED, psutil.STATUS_ZOMBIE]: stat_color = THEME.inactive_fg
else: stat_color = ""
expand = proc.expand
iw = (dw - 3) // (4 + expand)
iw2 = iw - 1
out += (f'{Mv.to(dy, dgx)}{Graphs.detailed_cpu(None if cls.moved or proc.details["killed"] else proc.details_cpu[-1])}'
f'{Mv.to(dy, dgx)}{THEME.title}{Fx.b}{0 if proc.details["killed"] else proc.details["cpu_percent"]}%{Mv.r(1)}{"" if SYSTEM == "MacOS" else (("C" if dgw < 20 else "Core") + str(proc.details["cpu_num"]))}')
for i, l in enumerate(["C", "P", "U"]):
out += f'{Mv.to(dy+2+i, dgx)}{l}'
for i, l in enumerate(["C", "M", "D"]):
out += f'{Mv.to(dy+4+i, dx+1)}{l}'
out += (f'{Mv.to(dy, dx+1)} {"Status:":^{iw}.{iw2}}{"Elapsed:":^{iw}.{iw2}}' +
(f'{"Parent:":^{iw}.{iw2}}' if dw > 28 else "") + (f'{"User:":^{iw}.{iw2}}' if dw > 38 else "") +
(f'{"Threads:":^{iw}.{iw2}}' if expand > 0 else "") + (f'{"Nice:":^{iw}.{iw2}}' if expand > 1 else "") +
(f'{"IO Read:":^{iw}.{iw2}}' if expand > 2 else "") + (f'{"IO Write:":^{iw}.{iw2}}' if expand > 3 else "") +
(f'{"TTY:":^{iw}.{iw2}}' if expand > 4 else "") +
f'{Mv.to(dy+1, dx+1)}{Fx.ub}{THEME.main_fg}{stat_color}{proc.details["status"]:^{iw}.{iw2}}{Fx.ub}{THEME.main_fg}{proc.details["uptime"]:^{iw}.{iw2}} ' +
(f'{proc.details["parent_name"]:^{iw}.{iw2}}' if dw > 28 else "") + (f'{proc.details["username"]:^{iw}.{iw2}}' if dw > 38 else "") +
(f'{proc.details["threads"]:^{iw}.{iw2}}' if expand > 0 else "") + (f'{proc.details["nice"]:^{iw}.{iw2}}' if expand > 1 else "") +
(f'{proc.details["io_read"]:^{iw}.{iw2}}' if expand > 2 else "") + (f'{proc.details["io_write"]:^{iw}.{iw2}}' if expand > 3 else "") +
(f'{proc.details["terminal"][-(iw2):]:^{iw}.{iw2}}' if expand > 4 else "") +
f'{Mv.to(dy+3, dx)}{THEME.title}{Fx.b}{("Memory: " if dw > 42 else "M:") + str(round(proc.details["memory_percent"], 1)) + "%":>{dw//3-1}}{Fx.ub} {THEME.inactive_fg}{"⡀"*(dw//3)}'
f'{Mv.l(dw//3)}{THEME.proc_misc}{Graphs.detailed_mem(None if cls.moved else proc.details_mem[-1])} '
f'{THEME.title}{Fx.b}{proc.details["memory_bytes"]:.{dw//3 - 2}}{THEME.main_fg}{Fx.ub}')
cy = dy + (4 if len(proc.details["cmdline"]) > dw - 5 else 5)
for i in range(ceil(len(proc.details["cmdline"]) / (dw - 5))):
out += f'{Mv.to(cy+i, dx + 3)}{proc.details["cmdline"][((dw-5)*i):][:(dw-5)]:{"^" if i == 0 else "<"}{dw-5}}'
if i == 2: break
#* Checking for selection out of bounds
if cls.start > proc.num_procs - cls.select_max + 1 and proc.num_procs > cls.select_max: cls.start = proc.num_procs - cls.select_max + 1
elif cls.start > proc.num_procs: cls.start = proc.num_procs
if cls.start < 1: cls.start = 1
if cls.selected > proc.num_procs and proc.num_procs < cls.select_max: cls.selected = proc.num_procs
elif cls.selected > cls.select_max: cls.selected = cls.select_max
if cls.selected < 0: cls.selected = 0
#* Start iteration over all processes and info
cy = 1
for n, (pid, items) in enumerate(proc.processes.items(), start=1):
if n < cls.start: continue
l_count += 1
if l_count == cls.selected:
is_selected = True
cls.selected_pid = pid
else: is_selected = False
indent, name, cmd, threads, username, mem, mem_b, cpu = [items.get(v, d) for v, d in [("indent", ""), ("name", ""), ("cmd", ""), ("threads", 0), ("username", "?"), ("mem", 0.0), ("mem_b", 0), ("cpu", 0.0)]]
if CONFIG.proc_tree:
arg_len = 0
offset = tree_len - len(f'{indent}{pid}')
if offset < 1: offset = 0
indent = f'{indent:.{tree_len - len(str(pid))}}'
if offset - len(name) > 12:
cmd = cmd.split(" ")[0].split("/")[-1]
if not cmd.startswith(name):
offset = len(name)
arg_len = tree_len - len(f'{indent}{pid} {name} ') + 2
cmd = f'({cmd[:(arg_len-4)]})'
else:
offset = prog_len - 1
if cpu > 1.0 or pid in Graphs.pid_cpu:
if pid not in Graphs.pid_cpu:
Graphs.pid_cpu[pid] = Graph(5, 1, None, [0])
cls.pid_counter[pid] = 0
elif cpu < 1.0:
cls.pid_counter[pid] += 1
if cls.pid_counter[pid] > 10:
del cls.pid_counter[pid], Graphs.pid_cpu[pid]
else:
cls.pid_counter[pid] = 0
end = f'{THEME.main_fg}{Fx.ub}' if CONFIG.proc_colors else Fx.ub
if cls.selected > cy: calc = cls.selected - cy
elif cls.selected > 0 and cls.selected <= cy: calc = cy - cls.selected
else: calc = cy
if CONFIG.proc_colors and not is_selected:
vals = []
for v in [int(cpu), int(mem), int(threads // 3)]:
if CONFIG.proc_gradient:
val = ((v if v <= 100 else 100) + 100) - calc * 100 // cls.select_max
vals += [f'{THEME.gradient["proc_color" if val < 100 else "process"][val if val < 100 else val - 100]}']
else:
vals += [f'{THEME.gradient["process"][v if v <= 100 else 100]}']
c_color, m_color, t_color = vals
else:
c_color = m_color = t_color = Fx.b
if CONFIG.proc_gradient and not is_selected:
g_color = f'{THEME.gradient["proc"][calc * 100 // cls.select_max]}'
if is_selected:
c_color = m_color = t_color = g_color = end = ""
out += f'{THEME.selected_bg}{THEME.selected_fg}{Fx.b}'
#* Creates one line for a process with all gathered information
out += (f'{Mv.to(y+cy, x)}{g_color}{indent}{pid:>{(1 if CONFIG.proc_tree else 7)}} ' +
f'{c_color}{name:<{offset}.{offset}} {end}' +
(f'{g_color}{cmd:<{arg_len}.{arg_len-1}}' if arg_len else "") +
(t_color + (f'{threads:>4} ' if threads < 1000 else "999> ") + end if tr_show else "") +
(g_color + (f'{username:<9.9}' if len(username) < 10 else f'{username[:8]:<8}+') if usr_show else "") +
m_color + ((f'{mem:>4.1f}' if mem < 100 else f'{mem:>4.0f} ') if not CONFIG.proc_mem_bytes else f'{floating_humanizer(mem_b, short=True):>4.4}') + end +
f' {THEME.inactive_fg}{"⡀"*5}{THEME.main_fg}{g_color}{c_color}' + (f' {cpu:>4.1f} ' if cpu < 100 else f'{cpu:>5.0f} ') + end +
(" " if proc.num_procs > cls.select_max else ""))
#* Draw small cpu graph for process if cpu usage was above 1% in the last 10 updates
if pid in Graphs.pid_cpu:
out += f'{Mv.to(y+cy, x + w - (12 if proc.num_procs > cls.select_max else 11))}{c_color if CONFIG.proc_colors else THEME.proc_misc}{Graphs.pid_cpu[pid](None if cls.moved else round(cpu))}{THEME.main_fg}'
if is_selected: out += f'{Fx.ub}{Term.fg}{Term.bg}{Mv.to(y+cy, x + w - 1)}{" " if proc.num_procs > cls.select_max else ""}'
cy += 1
if cy == h: break
if cy < h:
for i in range(h-cy):
out += f'{Mv.to(y+cy+i, x)}{" " * w}'
#* Draw scrollbar if needed
if proc.num_procs > cls.select_max:
if cls.resized:
Key.mouse["mouse_scroll_up"] = [[x+w-2+i, y] for i in range(3)]
Key.mouse["mouse_scroll_down"] = [[x+w-2+i, y+h-1] for i in range(3)]
scroll_pos = round(cls.start * (cls.select_max - 2) / (proc.num_procs - (cls.select_max - 2)))
if scroll_pos < 0 or cls.start == 1: scroll_pos = 0
elif scroll_pos > h - 3 or cls.start >= proc.num_procs - cls.select_max: scroll_pos = h - 3
out += (f'{Mv.to(y, x+w-1)}{Fx.b}{THEME.main_fg}↑{Mv.to(y+h-1, x+w-1)}↓{Fx.ub}'
f'{Mv.to(y+1+scroll_pos, x+w-1)}█')
elif "scroll_up" in Key.mouse:
del Key.mouse["scroll_up"], Key.mouse["scroll_down"]
#* Draw current selection and number of processes
out += (f'{Mv.to(y+h, x + w - 3 - len(loc_string))}{THEME.proc_box}{Symbol.title_left}{THEME.title}'
f'{Fx.b}{loc_string}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
#* Clean up dead processes graphs and counters
cls.count += 1
if cls.count == 100:
cls.count == 0
for p in list(cls.pid_counter):
if not psutil.pid_exists(p):
del cls.pid_counter[p], Graphs.pid_cpu[p]
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.redraw = cls.resized = cls.moved = False
class Collector:
	'''Data collector master class
	* .start(): Starts collector thread
	* .stop(): Stops collector thread
	* .collect(*collectors: Collector, draw_now: bool = True, interrupt: bool = False): queues up collectors to run
	Subclasses implement _collect() (gather data) and _draw() (render into their box buffer);
	collect() queues work which the background _runner() thread executes.'''
	stopping: bool = False				# Set by stop() to make _runner() exit its loop
	started: bool = False				# True while the collector thread is running
	draw_now: bool = False				# If True, _runner() outputs to the terminal after a pass
	redraw: bool = False				# Request for subclasses to fully redraw their box
	only_draw: bool = False				# Skip _collect() and only re-run _draw() for queued collectors
	thread: threading.Thread
	collect_run = threading.Event()		# Signals _runner() that the queue is ready to be processed
	collect_idle = threading.Event()	# Set while _runner() waits for work; collect() waits on it
	collect_idle.set()
	collect_done = threading.Event()	# Set when a full collect+draw pass has finished
	collect_queue: List = []
	collect_interrupt: bool = False		# Abort the current queue early (e.g. on user input)
	proc_interrupt: bool = False		# Abort signal specific to the process collector
	use_draw_list: bool = False			# If True, only buffers gathered this pass are drawn
	@classmethod
	def start(cls):
		# Launch the background collection thread.
		cls.stopping = False
		cls.thread = threading.Thread(target=cls._runner, args=())
		cls.thread.start()
		cls.started = True
	@classmethod
	def stop(cls):
		# Signal _runner() to exit and join the thread; events are set so any waiters unblock.
		if cls.started and cls.thread.is_alive():
			cls.stopping = True
			cls.started = False
			cls.collect_queue = []
			cls.collect_idle.set()
			cls.collect_done.set()
			try:
				cls.thread.join()
			except:
				pass
	@classmethod
	def _runner(cls):
		'''This is meant to run in its own thread, collecting and drawing when collect_run is set'''
		draw_buffers: List[str] = []
		debugged: bool = False
		try:
			while not cls.stopping:
				if CONFIG.draw_clock: Box.draw_clock()
				# Poll with a timeout so the clock keeps updating and "stopping" is honored.
				cls.collect_run.wait(0.1)
				if not cls.collect_run.is_set():
					continue
				draw_buffers = []
				cls.collect_interrupt = False
				cls.collect_run.clear()
				cls.collect_idle.clear()
				cls.collect_done.clear()
				if DEBUG and not debugged: TimeIt.start("Collect and draw")
				while cls.collect_queue:
					collector = cls.collect_queue.pop()
					if not cls.only_draw:
						collector._collect()
					collector._draw()
					if cls.use_draw_list: draw_buffers.append(collector.buffer)
					if cls.collect_interrupt: break
				if DEBUG and not debugged: TimeIt.stop("Collect and draw"); debugged = True
				if cls.draw_now and not Menu.active and not cls.collect_interrupt:
					if cls.use_draw_list: Draw.out(*draw_buffers)
					else: Draw.out()
				cls.collect_idle.set()
				cls.collect_done.set()
		except Exception as e:
			errlog.exception(f'Data collection thread failed with exception: {e}')
			# Unblock anyone waiting on the events before quitting from the thread.
			cls.collect_idle.set()
			cls.collect_done.set()
			clean_quit(1, thread=True)
	@classmethod
	def collect(cls, *collectors, draw_now: bool = True, interrupt: bool = False, proc_interrupt: bool = False, redraw: bool = False, only_draw: bool = False):
		'''Setup collect queue for _runner; with no collectors given, all Collector subclasses are queued.'''
		cls.collect_interrupt = interrupt
		cls.proc_interrupt = proc_interrupt
		# Wait for any in-progress pass to finish before replacing the queue.
		cls.collect_idle.wait()
		cls.collect_interrupt = False
		cls.proc_interrupt = False
		cls.use_draw_list = False
		cls.draw_now = draw_now
		cls.redraw = redraw
		cls.only_draw = only_draw
		if collectors:
			cls.collect_queue = [*collectors]
			cls.use_draw_list = True
		else:
			# Default: queue every Collector subclass defined in the file.
			cls.collect_queue = list(cls.__subclasses__())
		cls.collect_run.set()
class CpuCollector(Collector):
	'''Collects cpu usage for cpu and cores, cpu frequency, load_avg, uptime and cpu temps'''
	cpu_usage: List[List[int]] = []		# [0] = total cpu, [1..THREADS] = per logical core history
	cpu_temp: List[List[int]] = []		# Same layout as cpu_usage but for temperatures
	cpu_temp_high: int = 0				# "High" temp threshold, detected from sensors
	cpu_temp_crit: int = 0				# "Critical" temp threshold, detected from sensors
	for _ in range(THREADS + 1):
		cpu_usage.append([])
		cpu_temp.append([])
	freq_error: bool = False			# Ensures the cpu frequency error is only logged once
	cpu_freq: int = 0
	load_avg: List[float] = []
	uptime: str = ""
	buffer: str = CpuBox.buffer
	sensor_method: str = ""				# "psutil", "osx-cpu-temp"/"+coretemp" or "vcgencmd"
	got_sensors: bool = False
	@classmethod
	def get_sensors(cls):
		'''Check if we can get cpu temps and return method of getting temps'''
		cls.sensor_method = ""
		if SYSTEM == "MacOS":
			#* MacOS: rely on external osx-cpu-temp and optional coretemp binaries
			try:
				if which("osx-cpu-temp") and subprocess.check_output("osx-cpu-temp", text=True).rstrip().endswith("°C"):
					cls.sensor_method = "osx-cpu-temp"
				if which("coretemp") and subprocess.check_output("coretemp", text=True).split()[0].strip().replace("-", "").isdigit():
					cls.sensor_method += "+coretemp"
			except: pass
		elif hasattr(psutil, "sensors_temperatures"):
			#* Prefer psutil sensors when a cpu-ish sensor name or entry label is present
			try:
				temps = psutil.sensors_temperatures()
				if temps:
					for name, entries in temps.items():
						if name.lower().startswith("cpu"):
							cls.sensor_method = "psutil"
							break
						for entry in entries:
							if entry.label.startswith(("Package", "Core 0", "Tdie", "CPU")):
								cls.sensor_method = "psutil"
								break
			except: pass
		if not cls.sensor_method and SYSTEM == "Linux":
			#* Fallback for Raspberry Pi and similar boards
			try:
				if which("vcgencmd") and subprocess.check_output(["vcgencmd", "measure_temp"], text=True).strip().endswith("'C"):
					cls.sensor_method = "vcgencmd"
			except: pass
		cls.got_sensors = True if cls.sensor_method else False
	@classmethod
	def _collect(cls):
		cls.cpu_usage[0].append(round(psutil.cpu_percent(percpu=False)))
		#* Fix: cap the total-cpu history like the per-core ones, otherwise it grows without bound
		if len(cls.cpu_usage[0]) > Term.width * 2:
			del cls.cpu_usage[0][0]
		for n, thread in enumerate(psutil.cpu_percent(percpu=True), start=1):
			cls.cpu_usage[n].append(round(thread))
			if len(cls.cpu_usage[n]) > Term.width * 2:
				del cls.cpu_usage[n][0]
		try:
			if hasattr(psutil.cpu_freq(), "current"):
				cls.cpu_freq = round(psutil.cpu_freq().current)
		except Exception as e:
			#* Only log the first frequency failure to avoid spamming the error log
			if not cls.freq_error:
				cls.freq_error = True
				errlog.error("Exception while getting cpu frequency!")
				errlog.exception(f'{e}')
			else:
				pass
		cls.load_avg = [round(lavg, 2) for lavg in os.getloadavg()]
		cls.uptime = str(timedelta(seconds=round(time()-psutil.boot_time(),0)))[:-3]
		if CONFIG.check_temp and cls.got_sensors:
			cls._collect_temps()
	@classmethod
	def _collect_temps(cls):
		'''Collect temperatures with the method chosen by get_sensors() and spread
		package/core readings over all THREADS + 1 entries of cpu_temp.'''
		temp: int
		cores: List[int] = []
		cpu_type: str = ""
		if cls.sensor_method == "psutil":
			try:
				for name, entries in psutil.sensors_temperatures().items():
					for entry in entries:
						#* Package/Tdie entries give the overall cpu temp (and high/crit limits)
						if entry.label.startswith(("Package", "Tdie")) and hasattr(entry, "current") and round(entry.current) > 0:
							cpu_type = "intel" if entry.label.startswith("Package") else "ryzen"
							if not cls.cpu_temp_high:
								if hasattr(entry, "high") and entry.high: cls.cpu_temp_high = round(entry.high)
								else: cls.cpu_temp_high = 80
								if hasattr(entry, "critical") and entry.critical: cls.cpu_temp_crit = round(entry.critical)
								else: cls.cpu_temp_crit = 95
							temp = round(entry.current)
						#* Core/Tccd/CPU entries give the per-core temps
						elif (entry.label.startswith(("Core", "Tccd", "CPU")) or (name.lower().startswith("cpu") and not entry.label)) and hasattr(entry, "current") and round(entry.current) > 0:
							if not cpu_type:
								cpu_type = "other"
								if not cls.cpu_temp_high:
									if hasattr(entry, "high") and entry.high: cls.cpu_temp_high = round(entry.high)
									else: cls.cpu_temp_high = 60 if name == "cpu_thermal" else 80
									if hasattr(entry, "critical") and entry.critical: cls.cpu_temp_crit = round(entry.critical)
									else: cls.cpu_temp_crit = 80 if name == "cpu_thermal" else 95
								temp = round(entry.current)
							cores.append(round(entry.current))
				if len(cores) < THREADS:
					#* Fewer sensor readings than logical cores: duplicate readings over the extra entries
					if cpu_type == "intel" or (cpu_type == "other" and len(cores) == THREADS // 2):
						cls.cpu_temp[0].append(temp)
						for n, t in enumerate(cores, start=1):
							try:
								cls.cpu_temp[n].append(t)
								cls.cpu_temp[THREADS // 2 + n].append(t)
							except IndexError:
								break
					elif cpu_type == "ryzen" or cpu_type == "other":
						cls.cpu_temp[0].append(temp)
						if len(cores) < 1: cores.append(temp)
						z = 1
						for t in cores:
							try:
								for i in range(THREADS // len(cores)):
									cls.cpu_temp[z + i].append(t)
								z += i
							except IndexError:
								break
					if cls.cpu_temp[0]:
						#* Pad any core list that fell behind so all histories stay the same length
						for n in range(1, len(cls.cpu_temp)):
							if len(cls.cpu_temp[n]) != len(cls.cpu_temp[n-1]):
								cls.cpu_temp[n] = cls.cpu_temp[n//2].copy()
				else:
					cores.insert(0, temp)
					for n, t in enumerate(cores):
						try:
							cls.cpu_temp[n].append(t)
						except IndexError:
							break
			except Exception as e:
				#* Disable temperature collection on any unexpected sensor failure
				errlog.exception(f'{e}')
				cls.got_sensors = False
				#CONFIG.check_temp = False
				CpuBox._calc_size()
		else:
			try:
				if cls.sensor_method.startswith("osx-cpu-temp") or cls.sensor_method.startswith("+coretemp"):
					if cls.sensor_method.startswith("+coretemp"):
						temp = max(0, round(float(subprocess.check_output(["coretemp", "-c 0", "-r 3"], text=True).split()[0].strip())))
					else:
						temp = max(0, round(float(subprocess.check_output("osx-cpu-temp", text=True).strip()[:-2])))
					if cls.sensor_method.endswith("+coretemp"):
						cores = [max(0, round(float(x))) for x in subprocess.check_output(["coretemp", "-r 3"], text=True).split()]
						if len(cores) < THREADS:
							cls.cpu_temp[0].append(temp)
							for n, t in enumerate(cores, start=1):
								try:
									cls.cpu_temp[n].append(t)
									cls.cpu_temp[THREADS // 2 + n].append(t)
								except IndexError:
									break
						else:
							cores.insert(0, temp)
							for n, t in enumerate(cores):
								try:
									cls.cpu_temp[n].append(t)
								except IndexError:
									break
					if not cls.cpu_temp_high:
						cls.cpu_temp_high = 85
						cls.cpu_temp_crit = 100
				elif cls.sensor_method == "vcgencmd":
					temp = max(0, round(float(subprocess.check_output(["vcgencmd", "measure_temp"], text=True).strip()[5:-2])))
					if not cls.cpu_temp_high:
						cls.cpu_temp_high = 60
						cls.cpu_temp_crit = 80
			except Exception as e:
				errlog.exception(f'{e}')
				cls.got_sensors = False
				#CONFIG.check_temp = False
				CpuBox._calc_size()
			else:
				#* No per-core readings available: mirror the package temp to every entry
				if not cores:
					for n in range(THREADS + 1):
						cls.cpu_temp[n].append(temp)
		#* Keep only the last 5 temperature samples
		if len(cls.cpu_temp[0]) > 5:
			for n in range(len(cls.cpu_temp)):
				del cls.cpu_temp[n][0]
	@classmethod
	def _draw(cls):
		CpuBox._draw_fg()
class MemCollector(Collector):
	'''Collects memory and disks information'''
	values: Dict[str, int] = {}			# Memory values in bytes: total, free, available, used, cached
	vlist: Dict[str, List[int]] = {}	# Percent history per key for the optional mem graphs
	percent: Dict[str, int] = {}
	string: Dict[str, str] = {}			# Humanized strings of the values
	swap_values: Dict[str, int] = {}
	swap_vlist: Dict[str, List[int]] = {}
	swap_percent: Dict[str, int] = {}
	swap_string: Dict[str, str] = {}
	disks: Dict[str, Dict]
	disk_hist: Dict[str, Tuple] = {}	# Previous (read_bytes, write_bytes) per device for io speed
	timestamp: float = time()			# Time of last collection, used for bytes/second calculation
	io_error: bool = False				# Ensures the disk io error is only logged once
	old_disks: List[str] = []			# Disk names from the previous pass, to detect changes
	excludes: List[str] = ["squashfs"]	# Filesystem types never shown
	if SYSTEM == "BSD": excludes += ["devfs", "tmpfs", "procfs", "linprocfs", "gvfs", "fusefs"]
	buffer: str = MemBox.buffer
	@classmethod
	def _collect(cls):
		#* Collect memory
		mem = psutil.virtual_memory()
		if hasattr(mem, "cached"):
			cls.values["cached"] = mem.cached
		else:
			cls.values["cached"] = mem.active
		cls.values["total"], cls.values["free"], cls.values["available"] = mem.total, mem.free, mem.available
		cls.values["used"] = cls.values["total"] - cls.values["available"]
		for key, value in cls.values.items():
			cls.string[key] = floating_humanizer(value)
			if key == "total": continue
			cls.percent[key] = round(value * 100 / cls.values["total"])
			if CONFIG.mem_graphs:
				if not key in cls.vlist: cls.vlist[key] = []
				cls.vlist[key].append(cls.percent[key])
				if len(cls.vlist[key]) > MemBox.width: del cls.vlist[key][0]
		#* Collect swap
		if CONFIG.show_swap or CONFIG.swap_disk:
			swap = psutil.swap_memory()
			cls.swap_values["total"], cls.swap_values["free"] = swap.total, swap.free
			cls.swap_values["used"] = cls.swap_values["total"] - cls.swap_values["free"]
			if swap.total:
				if not MemBox.swap_on:
					MemBox.redraw = True
					MemBox.swap_on = True
				for key, value in cls.swap_values.items():
					cls.swap_string[key] = floating_humanizer(value)
					if key == "total": continue
					cls.swap_percent[key] = round(value * 100 / cls.swap_values["total"])
					if CONFIG.mem_graphs:
						if not key in cls.swap_vlist: cls.swap_vlist[key] = []
						cls.swap_vlist[key].append(cls.swap_percent[key])
						if len(cls.swap_vlist[key]) > MemBox.width: del cls.swap_vlist[key][0]
			else:
				#* No swap present: hide the swap meters
				if MemBox.swap_on:
					MemBox.redraw = True
					MemBox.swap_on = False
		else:
			if MemBox.swap_on:
				MemBox.redraw = True
				MemBox.swap_on = False
		if not CONFIG.show_disks: return
		#* Collect disks usage
		disk_read: int = 0
		disk_write: int = 0
		dev_name: str
		disk_name: str
		filtering: Tuple = ()
		filter_exclude: bool = False
		io_string: str
		u_percent: int
		disk_list: List[str] = []
		cls.disks = {}
		if CONFIG.disks_filter:
			#* "exclude=a,b" hides the listed disks, otherwise only the listed disks are shown
			if CONFIG.disks_filter.startswith("exclude="):
				filter_exclude = True
				filtering = tuple(v.strip() for v in CONFIG.disks_filter.replace("exclude=", "").strip().split(","))
			else:
				filtering = tuple(v.strip() for v in CONFIG.disks_filter.strip().split(","))
		try:
			io_counters = psutil.disk_io_counters(perdisk=True if SYSTEM == "Linux" else False, nowrap=True)
		except ValueError as e:
			if not cls.io_error:
				cls.io_error = True
				errlog.error(f'Non fatal error during disk io collection!')
				if psutil.version_info[0] < 5 or (psutil.version_info[0] == 5 and psutil.version_info[1] < 7):
					errlog.error(f'Caused by outdated psutil version.')
				errlog.exception(f'{e}')
			io_counters = None
		for disk in psutil.disk_partitions():
			disk_io = None
			io_string = ""
			disk_name = disk.mountpoint.rsplit('/', 1)[-1] if not disk.mountpoint == "/" else "root"
			while disk_name in disk_list: disk_name += "_"
			disk_list += [disk_name]
			if cls.excludes and disk.fstype in cls.excludes:
				continue
			if filtering and ((not filter_exclude and not disk_name.endswith(filtering)) or (filter_exclude and disk_name.endswith(filtering))):
				continue
			#elif filtering and disk_name.endswith(filtering)
			if SYSTEM == "MacOS" and disk.mountpoint == "/private/var/vm":
				continue
			try:
				disk_u = psutil.disk_usage(disk.mountpoint)
			except:
				#* Fix: skip mountpoints we can't stat; "pass" left disk_u unbound on the first
				#* failure (NameError below) and reused the previous disk's stats on later ones
				continue
			u_percent = round(disk_u.percent)
			cls.disks[disk.device] = { "name" : disk_name, "used_percent" : u_percent, "free_percent" : 100 - u_percent }
			for name in ["total", "used", "free"]:
				cls.disks[disk.device][name] = floating_humanizer(getattr(disk_u, name, 0))
			#* Collect disk io
			if io_counters:
				try:
					if SYSTEM == "Linux":
						dev_name = os.path.realpath(disk.device).rsplit('/', 1)[-1]
						if dev_name.startswith("md"):
							try:
								dev_name = dev_name[:dev_name.index("p")]
							except:
								pass
						disk_io = io_counters[dev_name]
					elif disk.mountpoint == "/":
						disk_io = io_counters
					else:
						raise Exception
					#* First pass for a device raises KeyError on disk_hist and falls through to 0
					disk_read = round((disk_io.read_bytes - cls.disk_hist[disk.device][0]) / (time() - cls.timestamp))
					disk_write = round((disk_io.write_bytes - cls.disk_hist[disk.device][1]) / (time() - cls.timestamp))
				except:
					disk_read = disk_write = 0
			else:
				disk_read = disk_write = 0
			if disk_io:
				cls.disk_hist[disk.device] = (disk_io.read_bytes, disk_io.write_bytes)
				if MemBox.disks_width > 30:
					if disk_read > 0:
						io_string += f'▲{floating_humanizer(disk_read, short=True)} '
					if disk_write > 0:
						io_string += f'▼{floating_humanizer(disk_write, short=True)}'
				elif disk_read + disk_write > 0:
					io_string += f'▼▲{floating_humanizer(disk_read + disk_write, short=True)}'
			cls.disks[disk.device]["io"] = io_string
		if CONFIG.swap_disk and MemBox.swap_on:
			#* Show swap as a pseudo-disk, placed right after the first (root) disk
			cls.disks["__swap"] = { "name" : "swap", "used_percent" : cls.swap_percent["used"], "free_percent" : cls.swap_percent["free"], "io" : "" }
			for name in ["total", "used", "free"]:
				cls.disks["__swap"][name] = cls.swap_string[name]
			if len(cls.disks) > 2:
				try:
					new = { list(cls.disks)[0] : cls.disks.pop(list(cls.disks)[0])}
					new["__swap"] = cls.disks.pop("__swap")
					new.update(cls.disks)
					cls.disks = new
				except:
					pass
		if disk_list != cls.old_disks:
			MemBox.redraw = True
			cls.old_disks = disk_list.copy()
		cls.timestamp = time()
	@classmethod
	def _draw(cls):
		MemBox._draw_fg()
class NetCollector(Collector):
	'''Collects network stats'''
	buffer: str = NetBox.buffer
	nics: List[str] = []				# Available (up) network interfaces, sorted by throughput
	nic_i: int = 0						# Index of the currently selected interface in nics
	nic: str = ""
	new_nic: str = ""					# Interface to switch to on the next _collect()
	nic_error: bool = False				# Ensures the interface listing error is only logged once
	reset: bool = False					# Request to reset the total counters via the offset mechanism
	graph_raise: Dict[str, int] = {"download" : 5, "upload" : 5}
	graph_lower: Dict[str, int] = {"download" : 5, "upload" : 5}
	#min_top: int = 10<<10
	#* Stats structure = stats[network device][download, upload][total, last, top, graph_top, offset, speed, redraw, graph_raise, graph_low] = int, List[int], bool
	stats: Dict[str, Dict[str, Dict[str, Any]]] = {}
	#* Strings structure strings[network device][download, upload][total, byte_ps, bit_ps, top, graph_top] = str
	strings: Dict[str, Dict[str, Dict[str, str]]] = {}
	switched: bool = False				# True when the user picked another interface with switch()
	timestamp: float = time()			# Time of last collection, for bytes/second calculation
	net_min: Dict[str, int] = {"download" : -1, "upload" : -1}	# Fixed graph scale from config; -1 = not yet read
	auto_min: bool = CONFIG.net_auto	# Automatically rescale the graphs to recent speeds
	sync_top: int = 0					# Shared graph scale when CONFIG.net_sync is on
	sync_string: str = ""
	@classmethod
	def _get_nics(cls):
		'''Get a list of all network devices sorted by highest throughput'''
		cls.nic_i = 0
		cls.nic = ""
		try:
			io_all = psutil.net_io_counters(pernic=True)
		except Exception as e:
			if not cls.nic_error:
				cls.nic_error = True
				errlog.exception(f'{e}')
		if not io_all: return
		up_stat = psutil.net_if_stats()
		# Sort by total traffic (recv + sent) and keep only interfaces that are up.
		for nic in sorted(io_all.keys(), key=lambda nic: (getattr(io_all[nic], "bytes_recv", 0) + getattr(io_all[nic], "bytes_sent", 0)), reverse=True):
			if nic not in up_stat or not up_stat[nic].isup:
				continue
			cls.nics.append(nic)
		if not cls.nics: cls.nics = [""]
		cls.nic = cls.nics[cls.nic_i]
	@classmethod
	def switch(cls, key: str):
		# Cycle to the next ("n") or previous interface and trigger a redraw.
		if len(cls.nics) < 2: return
		cls.nic_i += +1 if key == "n" else -1
		if cls.nic_i >= len(cls.nics): cls.nic_i = 0
		elif cls.nic_i < 0: cls.nic_i = len(cls.nics) - 1
		cls.new_nic = cls.nics[cls.nic_i]
		cls.switched = True
		Collector.collect(NetCollector, redraw=True)
	@classmethod
	def _collect(cls):
		speed: int
		stat: Dict
		up_stat = psutil.net_if_stats()
		if cls.switched:
			cls.nic = cls.new_nic
			cls.switched = False
		if not cls.nic or cls.nic not in up_stat or not up_stat[cls.nic].isup:
			# Current interface gone or down: rescan.
			cls._get_nics()
			if not cls.nic: return
		try:
			io_all = psutil.net_io_counters(pernic=True)[cls.nic]
		except KeyError:
			pass
			# NOTE(review): the "pass" above is dead code; the bail-out below still runs.
			return
		if not cls.nic in cls.stats:
			# First sample for this interface: initialize stats and display strings.
			cls.stats[cls.nic] = {}
			cls.strings[cls.nic] = { "download" : {}, "upload" : {}}
			for direction, value in ["download", io_all.bytes_recv], ["upload", io_all.bytes_sent]:
				cls.stats[cls.nic][direction] = { "total" : value, "last" : value, "top" : 0, "graph_top" : 0, "offset" : 0, "speed" : [], "redraw" : True, "graph_raise" : 0, "graph_lower" : 7 }
				for v in ["total", "byte_ps", "bit_ps", "top", "graph_top"]:
					cls.strings[cls.nic][direction][v] = ""
		cls.stats[cls.nic]["download"]["total"] = io_all.bytes_recv
		cls.stats[cls.nic]["upload"]["total"] = io_all.bytes_sent
		for direction in ["download", "upload"]:
			stat = cls.stats[cls.nic][direction]
			strings = cls.strings[cls.nic][direction]
			#* Calculate current speed
			stat["speed"].append(round((stat["total"] - stat["last"]) / (time() - cls.timestamp)))
			stat["last"] = stat["total"]
			speed = stat["speed"][-1]
			if cls.net_min[direction] == -1:
				# Read the fixed graph scale from config on first use.
				cls.net_min[direction] = units_to_bytes(getattr(CONFIG, "net_" + direction))
				stat["graph_top"] = cls.net_min[direction]
				stat["graph_lower"] = 7
				if not cls.auto_min:
					stat["redraw"] = True
					strings["graph_top"] = floating_humanizer(stat["graph_top"], short=True)
			if stat["offset"] and stat["offset"] > stat["total"]:
				# Counter went backwards (interface reset): re-arm the offset.
				cls.reset = True
			if cls.reset:
				# Toggle between zeroing the displayed totals and restoring the raw counters.
				if not stat["offset"]:
					stat["offset"] = stat["total"]
				else:
					stat["offset"] = 0
				if direction == "upload":
					cls.reset = False
					NetBox.redraw = True
			if len(stat["speed"]) > NetBox.width * 2:
				del stat["speed"][0]
			strings["total"] = floating_humanizer(stat["total"] - stat["offset"])
			strings["byte_ps"] = floating_humanizer(stat["speed"][-1], per_second=True)
			strings["bit_ps"] = floating_humanizer(stat["speed"][-1], bit=True, per_second=True)
			if speed > stat["top"] or not stat["top"]:
				stat["top"] = speed
				strings["top"] = floating_humanizer(stat["top"], bit=True, per_second=True)
			if cls.auto_min:
				# Auto-scaling: raise the graph ceiling after 5 samples above it,
				# lower it after 5 samples below a tenth of it.
				if speed > stat["graph_top"]:
					stat["graph_raise"] += 1
					if stat["graph_lower"] > 0: stat["graph_lower"] -= 1
				elif speed < stat["graph_top"] // 10:
					stat["graph_lower"] += 1
					if stat["graph_raise"] > 0: stat["graph_raise"] -= 1
				if stat["graph_raise"] >= 5 or stat["graph_lower"] >= 5:
					if stat["graph_raise"] >= 5:
						stat["graph_top"] = round(max(stat["speed"][-5:]) / 0.8)
					elif stat["graph_lower"] >= 5:
						stat["graph_top"] = max(10 << 10, max(stat["speed"][-5:]) * 3)
					stat["graph_raise"] = 0
					stat["graph_lower"] = 0
					stat["redraw"] = True
					strings["graph_top"] = floating_humanizer(stat["graph_top"], short=True)
		cls.timestamp = time()
		if CONFIG.net_sync:
			# Keep both graphs on the same scale: use the larger of the two ceilings.
			c_max: int = max(cls.stats[cls.nic]["download"]["graph_top"], cls.stats[cls.nic]["upload"]["graph_top"])
			if c_max != cls.sync_top:
				cls.sync_top = c_max
				cls.sync_string = floating_humanizer(cls.sync_top, short=True)
				NetBox.redraw = True
	@classmethod
	def _draw(cls):
		NetBox._draw_fg()
class ProcCollector(Collector):
	'''Collects process stats'''
	buffer: str = ProcBox.buffer
	search_filter: str = ""				# Comma separated substrings to filter the process list
	processes: Dict = {}				# pid -> display values for the process box
	num_procs: int = 0
	det_cpu: float = 0.0				# Cpu percent of the detailed process found during filtering
	detailed: bool = False				# True while the detailed single-process view is open
	detailed_pid: Union[int, None] = None
	details: Dict[str, Any] = {}		# Extended attributes for the detailed process
	details_cpu: List[int] = []			# Cpu history for the detailed process mini graph
	details_mem: List[int] = []			# Scaled memory history for the detailed process mini graph
	expand: int = 0						# How many extra detail columns fit in the current width
	collapsed: Dict = {}				# pid -> collapsed state in tree view
	tree_counter: int = 0				# Passes since last cleanup of dead pids in "collapsed"
	p_values: List[str] = ["pid", "name", "cmdline", "num_threads", "username", "memory_percent", "cpu_percent", "cpu_times", "create_time"]
	#* Precompiled sort key expressions, evaluated per process with eval();
	#* fields that failed to collect come back as the ad_value 0.0, hence the "== 0.0" guards.
	sort_expr: Dict = {}
	sort_expr["pid"] = compile("p.info['pid']", "str", "eval")
	sort_expr["program"] = compile("'' if p.info['name'] == 0.0 else p.info['name']", "str", "eval")
	# NOTE(review): str(...) here makes join iterate the characters of the list's repr,
	# not the arguments themselves — looks unintended; verify before changing sort behavior.
	sort_expr["arguments"] = compile("' '.join(str(p.info['cmdline'])) or ('' if p.info['name'] == 0.0 else p.info['name'])", "str", "eval")
	sort_expr["threads"] = compile("0 if p.info['num_threads'] == 0.0 else p.info['num_threads']", "str", "eval")
	sort_expr["user"] = compile("'' if p.info['username'] == 0.0 else p.info['username']", "str", "eval")
	sort_expr["memory"] = compile("p.info['memory_percent']", "str", "eval")
	sort_expr["cpu lazy"] = compile("(sum(p.info['cpu_times'][:2] if not p.info['cpu_times'] == 0.0 else [0.0, 0.0]) * 1000 / (time() - p.info['create_time']))", "str", "eval")
	sort_expr["cpu responsive"] = compile("(p.info['cpu_percent'] if CONFIG.proc_per_core else (p.info['cpu_percent'] / THREADS))", "str", "eval")
	@classmethod
	def _collect(cls):
		'''List all processes with pid, name, arguments, threads, username, memory percent and cpu percent'''
		if Box.stat_mode: return
		out: Dict = {}
		cls.det_cpu = 0.0
		sorting: str = CONFIG.proc_sorting
		reverse: bool = not CONFIG.proc_reversed
		proc_per_cpu: bool = CONFIG.proc_per_core
		search: str = cls.search_filter
		# err (0.0) is passed to process_iter() as ad_value: fields psutil could not read come back as 0.0.
		err: float = 0.0
		n: int = 0
		if CONFIG.proc_tree and sorting == "arguments":
			# Tree view builds its own cmdline column; fall back to sorting by program name.
			sorting = "program"
		sort_cmd = cls.sort_expr[sorting]
		if CONFIG.proc_tree:
			cls._tree(sort_cmd=sort_cmd, reverse=reverse, proc_per_cpu=proc_per_cpu, search=search)
		else:
			for p in sorted(psutil.process_iter(cls.p_values + (["memory_info"] if CONFIG.proc_mem_bytes else []), err), key=lambda p: eval(sort_cmd), reverse=reverse):
				if cls.collect_interrupt or cls.proc_interrupt:
					return
				if p.info["name"] == "idle" or p.info["name"] == err or p.info["pid"] == err:
					continue
				if p.info["cmdline"] == err:
					p.info["cmdline"] = ""
				if p.info["username"] == err:
					p.info["username"] = ""
				if p.info["num_threads"] == err:
					p.info["num_threads"] = 0
				if search:
					if cls.detailed and p.info["pid"] == cls.detailed_pid:
						cls.det_cpu = p.info["cpu_percent"]
					# for/else ladder: keep the process if ANY search term matches ANY field.
					for value in [ p.info["name"], " ".join(p.info["cmdline"]), str(p.info["pid"]), p.info["username"] ]:
						for s in search.split(","):
							if s.strip() in value:
								break
						else: continue
						break
					else: continue
				cpu = p.info["cpu_percent"] if proc_per_cpu else round(p.info["cpu_percent"] / THREADS, 2)
				mem = p.info["memory_percent"]
				if CONFIG.proc_mem_bytes and hasattr(p.info["memory_info"], "rss"):
					mem_b = p.info["memory_info"].rss
				else:
					mem_b = 0
				cmd = " ".join(p.info["cmdline"]) or "[" + p.info["name"] + "]"
				out[p.info["pid"]] = {
					"name" : p.info["name"],
					"cmd" : cmd,
					"threads" : p.info["num_threads"],
					"username" : p.info["username"],
					"mem" : mem,
					"mem_b" : mem_b,
					"cpu" : cpu }
				n += 1
			cls.num_procs = n
			cls.processes = out.copy()
		if cls.detailed:
			# Number of extra detail columns that fit beside the graphs, capped at 5.
			cls.expand = ((ProcBox.width - 2) - ((ProcBox.width - 2) // 3) - 40) // 10
			if cls.expand > 5: cls.expand = 5
		if cls.detailed and not cls.details.get("killed", False):
			#* Collect extended stats for the process selected in the detailed view
			try:
				c_pid = cls.detailed_pid
				det = psutil.Process(c_pid)
			except (psutil.NoSuchProcess, psutil.ZombieProcess):
				cls.details["killed"] = True
				cls.details["status"] = psutil.STATUS_DEAD
				ProcBox.redraw = True
			else:
				attrs: List[str] = ["status", "memory_info", "create_time"]
				if not SYSTEM == "MacOS": attrs.extend(["cpu_num"])
				if cls.expand:
					attrs.extend(["nice", "terminal"])
					if not SYSTEM == "MacOS": attrs.extend(["io_counters"])
				if not c_pid in cls.processes: attrs.extend(["pid", "name", "cmdline", "num_threads", "username", "memory_percent"])
				cls.details = det.as_dict(attrs=attrs, ad_value="")
				if det.parent() != None: cls.details["parent_name"] = det.parent().name()
				else: cls.details["parent_name"] = ""
				cls.details["pid"] = c_pid
				if c_pid in cls.processes:
					# Reuse values already collected in the main pass.
					cls.details["name"] = cls.processes[c_pid]["name"]
					cls.details["cmdline"] = cls.processes[c_pid]["cmd"]
					cls.details["threads"] = f'{cls.processes[c_pid]["threads"]}'
					cls.details["username"] = cls.processes[c_pid]["username"]
					cls.details["memory_percent"] = cls.processes[c_pid]["mem"]
					cls.details["cpu_percent"] = round(cls.processes[c_pid]["cpu"] * (1 if CONFIG.proc_per_core else THREADS))
				else:
					cls.details["cmdline"] = " ".join(cls.details["cmdline"]) or "[" + cls.details["name"] + "]"
					cls.details["threads"] = f'{cls.details["num_threads"]}'
					cls.details["cpu_percent"] = round(cls.det_cpu)
				cls.details["killed"] = False
				if SYSTEM == "MacOS":
					cls.details["cpu_num"] = -1
					cls.details["io_counters"] = ""
				if hasattr(cls.details["memory_info"], "rss"): cls.details["memory_bytes"] = floating_humanizer(cls.details["memory_info"].rss) # type: ignore
				else: cls.details["memory_bytes"] = "? Bytes"
				if isinstance(cls.details["create_time"], float):
					uptime = timedelta(seconds=round(time()-cls.details["create_time"],0))
					if uptime.days > 0: cls.details["uptime"] = f'{uptime.days}d {str(uptime).split(",")[1][:-3].strip()}'
					else: cls.details["uptime"] = f'{uptime}'
				else: cls.details["uptime"] = "??:??:??"
				if cls.expand:
					if cls.expand > 1 : cls.details["nice"] = f'{cls.details["nice"]}'
					if SYSTEM == "BSD":
						# BSD io_counters report counts instead of bytes.
						if cls.expand > 2:
							if hasattr(cls.details["io_counters"], "read_count"): cls.details["io_read"] = f'{cls.details["io_counters"].read_count}'
							else: cls.details["io_read"] = "?"
						if cls.expand > 3:
							if hasattr(cls.details["io_counters"], "write_count"): cls.details["io_write"] = f'{cls.details["io_counters"].write_count}'
							else: cls.details["io_write"] = "?"
					else:
						if cls.expand > 2:
							if hasattr(cls.details["io_counters"], "read_bytes"): cls.details["io_read"] = floating_humanizer(cls.details["io_counters"].read_bytes)
							else: cls.details["io_read"] = "?"
						if cls.expand > 3:
							if hasattr(cls.details["io_counters"], "write_bytes"): cls.details["io_write"] = floating_humanizer(cls.details["io_counters"].write_bytes)
							else: cls.details["io_write"] = "?"
					if cls.expand > 4 : cls.details["terminal"] = f'{cls.details["terminal"]}'.replace("/dev/", "")
				cls.details_cpu.append(cls.details["cpu_percent"])
				# Memory percent is boosted non-linearly so small values stay visible in the mini graph.
				mem = cls.details["memory_percent"]
				if mem > 80: mem = round(mem)
				elif mem > 60: mem = round(mem * 1.2)
				elif mem > 30: mem = round(mem * 1.5)
				elif mem > 10: mem = round(mem * 2)
				elif mem > 5: mem = round(mem * 10)
				else: mem = round(mem * 20)
				cls.details_mem.append(mem)
				if len(cls.details_cpu) > ProcBox.width: del cls.details_cpu[0]
				if len(cls.details_mem) > ProcBox.width: del cls.details_mem[0]
	@classmethod
	def _tree(cls, sort_cmd, reverse: bool, proc_per_cpu: bool, search: str):
		'''List all processes in a tree view with pid, name, threads, username, memory percent and cpu percent'''
		out: Dict = {}
		err: float = 0.0
		det_cpu: float = 0.0
		infolist: Dict = {}
		cls.tree_counter += 1
		tree = defaultdict(list)
		n: int = 0
		# Build a ppid -> [child pids] mapping; infolist caches each pid's collected fields.
		for p in sorted(psutil.process_iter(cls.p_values + (["memory_info"] if CONFIG.proc_mem_bytes else []), err), key=lambda p: eval(sort_cmd), reverse=reverse):
			if cls.collect_interrupt: return
			try:
				tree[p.ppid()].append(p.pid)
			except (psutil.NoSuchProcess, psutil.ZombieProcess):
				pass
			else:
				infolist[p.pid] = p.info
				n += 1
		if 0 in tree and 0 in tree[0]:
			# Avoid pid 0 being listed as its own child.
			tree[0].remove(0)
		def create_tree(pid: int, tree: defaultdict, indent: str = "", inindent: str = " ", found: bool = False, depth: int = 0, collapse_to: Union[None, int] = None):
			# Recursive depth-first walk; collapsed subtrees are summed into collapse_to's row.
			nonlocal infolist, proc_per_cpu, search, out, det_cpu
			name: str; threads: int; username: str; mem: float; cpu: float; collapse: bool = False
			cont: bool = True
			getinfo: Dict = {}
			if cls.collect_interrupt: return
			try:
				name = psutil.Process(pid).name()
				if name == "idle": return
			except psutil.Error:
				pass
				# NOTE(review): the "pass" above is dead code; cont/name are still reset below.
				cont = False
				name = ""
			if pid in infolist:
				getinfo = infolist[pid]
			if search and not found:
				if cls.detailed and pid == cls.detailed_pid:
					det_cpu = getinfo["cpu_percent"]
				if "username" in getinfo and isinstance(getinfo["username"], float): getinfo["username"] = ""
				if "cmdline" in getinfo and isinstance(getinfo["cmdline"], float): getinfo["cmdline"] = ""
				# Same for/else search ladder as in _collect(): any term matching any field keeps the pid.
				for value in [ name, str(pid), getinfo.get("username", ""), " ".join(getinfo.get("cmdline", "")) ]:
					for s in search.split(","):
						if s.strip() in value:
							found = True
							break
					else: continue
					break
				else: cont = False
			if cont:
				if getinfo:
					if getinfo["num_threads"] == err: threads = 0
					else: threads = getinfo["num_threads"]
					if getinfo["username"] == err: username = ""
					else: username = getinfo["username"]
					cpu = getinfo["cpu_percent"] if proc_per_cpu else round(getinfo["cpu_percent"] / THREADS, 2)
					mem = getinfo["memory_percent"]
					if getinfo["cmdline"] == err: cmd = ""
					else: cmd = " ".join(getinfo["cmdline"]) or "[" + getinfo["name"] + "]"
					if CONFIG.proc_mem_bytes and hasattr(getinfo["memory_info"], "rss"):
						mem_b = getinfo["memory_info"].rss
					else:
						mem_b = 0
				else:
					threads = mem_b = 0
					username = ""
					mem = cpu = 0.0
				if pid in cls.collapsed:
					collapse = cls.collapsed[pid]
				else:
					# New pid: collapse by default when deeper than the configured tree depth.
					collapse = True if depth > CONFIG.tree_depth else False
					cls.collapsed[pid] = collapse
				if collapse_to and not search:
					# Inside a collapsed subtree: fold this process into the collapsed row's totals.
					out[collapse_to]["threads"] += threads
					out[collapse_to]["mem"] += mem
					out[collapse_to]["mem_b"] += mem_b
					out[collapse_to]["cpu"] += cpu
				else:
					if pid in tree and len(tree[pid]) > 0:
						# Replace the branch glyph with a [+]/[-] toggle for nodes with children.
						sign: str = "+" if collapse else "-"
						inindent = inindent.replace(" ├─ ", "[" + sign + "]─").replace(" └─ ", "[" + sign + "]─")
					out[pid] = {
						"indent" : inindent,
						"name": name,
						"cmd" : cmd,
						"threads" : threads,
						"username" : username,
						"mem" : mem,
						"mem_b" : mem_b,
						"cpu" : cpu,
						"depth" : depth,
						}
				if search: collapse = False
				elif collapse and not collapse_to:
					collapse_to = pid
				if pid not in tree:
					return
				children = tree[pid][:-1]
				for child in children:
					create_tree(child, tree, indent + " │ ", indent + " ├─ ", found=found, depth=depth+1, collapse_to=collapse_to)
				create_tree(tree[pid][-1], tree, indent + "  ", indent + " └─ ", depth=depth+1, collapse_to=collapse_to)
		create_tree(min(tree), tree)
		cls.det_cpu = det_cpu
		if cls.collect_interrupt: return
		if cls.tree_counter >= 100:
			# Periodically drop collapsed-state entries for processes that no longer exist.
			cls.tree_counter = 0
			for pid in list(cls.collapsed):
				if not psutil.pid_exists(pid):
					del cls.collapsed[pid]
		cls.num_procs = len(out)
		cls.processes = out.copy()
	@classmethod
	def sorting(cls, key: str):
		# Cycle the sorting column left/right, wrapping around the configured options.
		index: int = CONFIG.sorting_options.index(CONFIG.proc_sorting) + (1 if key == "right" else -1)
		if index >= len(CONFIG.sorting_options): index = 0
		elif index < 0: index = len(CONFIG.sorting_options) - 1
		CONFIG.proc_sorting = CONFIG.sorting_options[index]
		if "left" in Key.mouse: del Key.mouse["left"]
		Collector.collect(ProcCollector, interrupt=True, redraw=True)
	@classmethod
	def _draw(cls):
		ProcBox._draw_fg()
class Menu:
	'''Holds all menus: the main menu, the help window and the options editor.

	All state is class level; the menus are modal loops that draw over a dimmed
	snapshot of the current screen (cls.background) and return when closed.
	'''
	active: bool = False
	close: bool = False
	resized: bool = True
	menus: Dict[str, Dict[str, str]] = {}
	menu_length: Dict[str, int] = {}
	background: str = ""
	#* Pre-render every menu entry in both "normal" and "selected" colors once at class creation
	for name, menu in MENUS.items():
		menu_length[name] = len(menu["normal"][0])
		menus[name] = {}
		for sel in ["normal", "selected"]:
			menus[name][sel] = ""
			for i in range(len(menu[sel])):
				menus[name][sel] += Fx.trans(f'{Color.fg(MENU_COLORS[sel][i])}{menu[sel][i]}')
				if i < len(menu[sel]) - 1: menus[name][sel] += f'{Mv.d(1)}{Mv.l(len(menu[sel][i]))}'
	@classmethod
	def main(cls):
		'''Draw the main menu over a dimmed screen snapshot and run its input loop.'''
		out: str = ""
		banner: str = ""
		redraw: bool = True
		key: str = ""
		mx: int = 0
		my: int = 0
		skip: bool = False
		mouse_over: bool = False
		mouse_items: Dict[str, Dict[str, int]] = {}
		cls.active = True
		cls.resized = True
		menu_names: List[str] = list(cls.menus.keys())
		menu_index: int = 0
		menu_current: str = menu_names[0]
		cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
		while not cls.close:
			key = ""
			if cls.resized:
				banner = (f'{Banner.draw(Term.height // 2 - 10, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
					f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
				if UpdateChecker.version != VERSION:
					banner += f'{Mv.to(Term.height, 1)}{Fx.b}{THEME.title}New release {UpdateChecker.version} available at https://github.com/aristocratos/bpytop{Fx.ub}{Term.fg}'
				cy = 0
				#* Remember each entry's screen rectangle for mouse hit testing
				for name, menu in cls.menus.items():
					ypos = Term.height // 2 - 2 + cy
					xpos = Term.width // 2 - (cls.menu_length[name] // 2)
					mouse_items[name] = { "x1" : xpos, "x2" : xpos + cls.menu_length[name] - 1, "y1" : ypos, "y2" : ypos + 2 }
					cy += 3
				redraw = True
				cls.resized = False
			if redraw:
				out = ""
				for name, menu in cls.menus.items():
					out += f'{Mv.to(mouse_items[name]["y1"], mouse_items[name]["x1"])}{menu["selected" if name == menu_current else "normal"]}'
			if skip and redraw:
				Draw.now(out)
			elif not skip:
				Draw.now(f'{cls.background}{banner}{out}')
			skip = redraw = False
			if Key.input_wait(Timer.left(), mouse=True):
				if Key.mouse_moved():
					mx, my = Key.get_mouse()
					for name, pos in mouse_items.items():
						if mx >= pos["x1"] and mx <= pos["x2"] and my >= pos["y1"] and my <= pos["y2"]:
							mouse_over = True
							if name != menu_current:
								menu_current = name
								menu_index = menu_names.index(name)
								redraw = True
							break
					else:
						mouse_over = False
				else:
					key = Key.get()
				if key == "mouse_click" and not mouse_over:
					key = "M"
				if key == "q":
					clean_quit()
				elif key in ["escape", "M"]:
					cls.close = True
					break
				elif key in ["up", "mouse_scroll_up", "shift_tab"]:
					menu_index -= 1
					if menu_index < 0: menu_index = len(menu_names) - 1
					menu_current = menu_names[menu_index]
					redraw = True
				elif key in ["down", "mouse_scroll_down", "tab"]:
					menu_index += 1
					if menu_index > len(menu_names) - 1: menu_index = 0
					menu_current = menu_names[menu_index]
					redraw = True
				elif key == "enter" or (key == "mouse_click" and mouse_over):
					if menu_current == "quit":
						clean_quit()
					elif menu_current == "options":
						cls.options()
						cls.resized = True
					elif menu_current == "help":
						cls.help()
						cls.resized = True
			#* Keep updating the collectors in the background so the dimmed UI stays live
			if Timer.not_zero() and not cls.resized:
				skip = True
			else:
				Collector.collect()
				Collector.collect_done.wait(1)
				if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
				Timer.stamp()
		Draw.now(f'{Draw.saved_buffer()}')
		cls.background = ""
		cls.active = False
		cls.close = False
	@classmethod
	def help(cls):
		'''Draw the paged help window listing all key bindings.'''
		out: str = ""
		out_misc : str = ""
		redraw: bool = True
		key: str = ""
		skip: bool = False
		main_active: bool = True if cls.active else False
		cls.active = True
		cls.resized = True
		if not cls.background:
			cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
		#* Keys starting with "_" are spacer/footer rows, not bindings
		help_items: Dict[str, str] = {
			"(Mouse 1)" : "Clicks buttons and selects in process list.",
			"Selected (Mouse 1)" : "Show detailed information for selected process.",
			"(Mouse scroll)" : "Scrolls any scrollable list/text under cursor.",
			"(Esc, shift+m)" : "Toggles main menu.",
			"(m)" : "Change current view mode, order full->proc->stat.",
			"(F2, o)" : "Shows options.",
			"(F1, h)" : "Shows this window.",
			"(ctrl+z)" : "Sleep program and put in background.",
			"(ctrl+c, q)" : "Quits program.",
			"(+) / (-)" : "Add/Subtract 100ms to/from update timer.",
			"(Up) (Down)" : "Select in process list.",
			"(Enter)" : "Show detailed information for selected process.",
			"(Spacebar)" : "Expand/collapse the selected process in tree view.",
			"(Pg Up) (Pg Down)" : "Jump 1 page in process list.",
			"(Home) (End)" : "Jump to first or last page in process list.",
			"(Left) (Right)" : "Select previous/next sorting column.",
			"(b) (n)" : "Select previous/next network device.",
			"(z)" : "Toggle totals reset for current network device",
			"(a)" : "Toggle auto scaling for the network graphs.",
			"(y)" : "Toggle synced scaling mode for network graphs.",
			"(f)" : "Input a string to filter processes with.",
			"(c)" : "Toggle per-core cpu usage of processes.",
			"(r)" : "Reverse sorting order in processes box.",
			"(e)" : "Toggle processes tree view.",
			"(delete)" : "Clear any entered filter.",
			"Selected (T, t)" : "Terminate selected process with SIGTERM - 15.",
			"Selected (K, k)" : "Kill selected process with SIGKILL - 9.",
			"Selected (I, i)" : "Interrupt selected process with SIGINT - 2.",
			"_1" : " ",
			"_2" : "For bug reporting and project updates, visit:",
			"_3" : "https://github.com/aristocratos/bpytop",
		}
		while not cls.close:
			key = ""
			if cls.resized:
				y = 8 if Term.height < len(help_items) + 10 else Term.height // 2 - len(help_items) // 2 + 4
				out_misc = (f'{Banner.draw(y-7, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
					f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
				x = Term.width//2-36
				h, w = Term.height-2-y, 72
				if len(help_items) > h:
					pages = ceil(len(help_items) / h)
				else:
					h = len(help_items)
					pages = 0
				page = 1
				out_misc += create_box(x, y, w, h+3, "help", line_color=THEME.div_line)
				redraw = True
				cls.resized = False
			if redraw:
				out = ""
				cy = 0
				if pages:
					out += (f'{Mv.to(y, x+56)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title("pg")}{Fx.ub}{THEME.main_fg(Symbol.up)} {Fx.b}{THEME.title}{page}/{pages} '
						f'pg{Fx.ub}{THEME.main_fg(Symbol.down)}{THEME.div_line(Symbol.title_right)}')
				out += f'{Mv.to(y+1, x+1)}{THEME.title}{Fx.b}{"Keys:":^20}Description:{THEME.main_fg}'
				for n, (keys, desc) in enumerate(help_items.items()):
					if pages and n < (page - 1) * h: continue
					out += f'{Mv.to(y+2+cy, x+1)}{Fx.b}{("" if keys.startswith("_") else keys):^20.20}{Fx.ub}{desc:50.50}'
					cy += 1
					if cy == h: break
				if cy < h:
					for i in range(h-cy):
						out += f'{Mv.to(y+2+cy+i, x+1)}{" " * (w-2)}'
			if skip and redraw:
				Draw.now(out)
			elif not skip:
				Draw.now(f'{cls.background}{out_misc}{out}')
			skip = redraw = False
			if Key.input_wait(Timer.left()):
				key = Key.get()
				if key == "mouse_click":
					mx, my = Key.get_mouse()
					if mx >= x and mx < x + w and my >= y and my < y + h + 3:
						#* Clicks on the page indicator arrows page up/down; anywhere else inside is ignored
						if pages and my == y and mx > x + 56 and mx < x + 61:
							key = "up"
						elif pages and my == y and mx > x + 63 and mx < x + 68:
							key = "down"
					else:
						key = "escape"
				if key == "q":
					clean_quit()
				elif key in ["escape", "M", "enter", "backspace", "h", "f1"]:
					cls.close = True
					break
				elif key in ["up", "mouse_scroll_up", "page_up"] and pages:
					page -= 1
					if page < 1: page = pages
					redraw = True
				elif key in ["down", "mouse_scroll_down", "page_down"] and pages:
					page += 1
					if page > pages: page = 1
					redraw = True
			if Timer.not_zero() and not cls.resized:
				skip = True
			else:
				Collector.collect()
				Collector.collect_done.wait(1)
				if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
				Timer.stamp()
		#* When opened from the main menu, keep cls.close False so the main loop continues
		if main_active:
			cls.close = False
			return
		Draw.now(f'{Draw.saved_buffer()}')
		cls.background = ""
		cls.active = False
		cls.close = False
	@classmethod
	def options(cls):
		'''Draw the options menu and handle viewing and editing of all config values.'''
		out: str = ""
		out_misc : str = ""
		redraw: bool = True
		key: str = ""
		skip: bool = False
		main_active: bool = True if cls.active else False
		cls.active = True
		cls.resized = True
		d_quote: str
		inputting: bool = False
		input_val: str = ""
		global ARG_MODE
		Theme.refresh()
		if not cls.background:
			cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
		#* Option name -> description lines shown in the side "description" box
		option_items: Dict[str, List[str]] = {
			"color_theme" : [
				'Set color theme.',
				'',
				'Choose from all theme files in',
				'"/usr/[local/]share/bpytop/themes" and',
				'"~/.config/bpytop/themes".',
				'',
				'"Default" for builtin default theme.',
				'User themes are prefixed by a plus sign "+".',
				'',
				'For theme updates see:',
				'https://github.com/aristocratos/bpytop'],
			"theme_background" : [
				'If the theme set background should be shown.',
				'',
				'Set to False if you want terminal background',
				'transparency.'],
			"view_mode" : [
				'Set bpytop view mode.',
				'',
				'"full" for everything shown.',
				'"proc" for cpu stats and processes.',
				'"stat" for cpu, mem, disks and net stats shown.'],
			"update_ms" : [
				'Update time in milliseconds.',
				'',
				'Recommended 2000 ms or above for better sample',
				'times for graphs.',
				'',
				'Min value: 100 ms',
				'Max value: 86400000 ms = 24 hours.'],
			"proc_sorting" : [
				'Processes sorting option.',
				'',
				'Possible values: "pid", "program", "arguments",',
				'"threads", "user", "memory", "cpu lazy" and',
				'"cpu responsive".',
				'',
				'"cpu lazy" updates top process over time,',
				'"cpu responsive" updates top process directly.'],
			"proc_reversed" : [
				'Reverse processes sorting order.',
				'',
				'True or False.'],
			"proc_tree" : [
				'Processes tree view.',
				'',
				'Set true to show processes grouped by parents,',
				'with lines drawn between parent and child',
				'process.'],
			"tree_depth" : [
				'Process tree auto collapse depth.',
				'',
				'Sets the depth were the tree view will auto',
				'collapse processes at.'],
			"proc_colors" : [
				'Enable colors in process view.',
				'',
				'Uses the cpu graph gradient colors.'],
			"proc_gradient" : [
				'Enable process view gradient fade.',
				'',
				'Fades from top or current selection.',
				'Max fade value is equal to current themes',
				'"inactive_fg" color value.'],
			"proc_per_core" : [
				'Process usage per core.',
				'',
				'If process cpu usage should be of the core',
				'it\'s running on or usage of the total',
				'available cpu power.',
				'',
				'If true and process is multithreaded',
				'cpu usage can reach over 100%.'],
			"proc_mem_bytes" : [
				'Show memory as bytes in process list.',
				' ',
				'True or False.'],
			"check_temp" : [
				'Enable cpu temperature reporting.',
				'',
				'True or False.'],
			"draw_clock" : [
				'Draw a clock at top of screen.',
				'',
				'Formatting according to strftime, empty',
				'string to disable.',
				'',
				'Examples:',
				'"%X" locale HH:MM:SS',
				'"%H" 24h hour, "%I" 12h hour',
				'"%M" minute, "%S" second',
				'"%d" day, "%m" month, "%y" year'],
			"background_update" : [
				'Update main ui when menus are showing.',
				'',
				'True or False.',
				'',
				'Set this to false if the menus is flickering',
				'too much for a comfortable experience.'],
			"custom_cpu_name" : [
				'Custom cpu model name in cpu percentage box.',
				'',
				'Empty string to disable.'],
			"disks_filter" : [
				'Optional filter for shown disks.',
				'',
				'Should be last folder in path of a mountpoint,',
				'"root" replaces "/", separate multiple values',
				'with a comma.',
				'Begin line with "exclude=" to change to exclude',
				'filter.',
				'Otherwise defaults to "most include" filter.',
				'',
				'Example: disks_filter="exclude=boot, home"'],
			"mem_graphs" : [
				'Show graphs for memory values.',
				'',
				'True or False.'],
			"show_swap" : [
				'If swap memory should be shown in memory box.',
				'',
				'True or False.'],
			"swap_disk" : [
				'Show swap as a disk.',
				'',
				'Ignores show_swap value above.',
				'Inserts itself after first disk.'],
			"show_disks" : [
				'Split memory box to also show disks.',
				'',
				'True or False.'],
			"net_download" : [
				'Fixed network graph download value.',
				'',
				'Default "10M" = 10 MibiBytes.',
				'Possible units:',
				'"K" (KiB), "M" (MiB), "G" (GiB).',
				'',
				'Append "bit" for bits instead of bytes,',
				'i.e "100Mbit"',
				'',
				'Can be toggled with auto button.'],
			"net_upload" : [
				'Fixed network graph upload value.',
				'',
				'Default "10M" = 10 MibiBytes.',
				'Possible units:',
				'"K" (KiB), "M" (MiB), "G" (GiB).',
				'',
				'Append "bit" for bits instead of bytes,',
				'i.e "100Mbit"',
				'',
				'Can be toggled with auto button.'],
			"net_auto" : [
				'Start in network graphs auto rescaling mode.',
				'',
				'Ignores any values set above at start and',
				'rescales down to 10KibiBytes at the lowest.',
				'',
				'True or False.'],
			"net_sync" : [
				'Network scale sync.',
				'',
				'Syncs the scaling for download and upload to',
				'whichever currently has the highest scale.',
				'',
				'True or False.'],
			"net_color_fixed" : [
				'Set network graphs color gradient to fixed.',
				'',
				'If True the network graphs color is based',
				'on the total bandwidth usage instead of',
				'the current autoscaling.',
				'',
				'The bandwidth usage is based on the',
				'"net_download" and "net_upload" values set',
				'above.'],
			"show_battery" : [
				'Show battery stats.',
				'',
				'Show battery stats in the top right corner',
				'if a battery is present.'],
			"show_init" : [
				'Show init screen at startup.',
				'',
				'The init screen is purely cosmetic and',
				'slows down start to show status messages.'],
			"update_check" : [
				'Check for updates at start.',
				'',
				'Checks for latest version from:',
				'https://github.com/aristocratos/bpytop'],
			"log_level" : [
				'Set loglevel for error.log',
				'',
				'Levels are: "ERROR" "WARNING" "INFO" "DEBUG".',
				'The level set includes all lower levels,',
				'i.e. "DEBUG" will show all logging info.']
			}
		option_len: int = len(option_items) * 2
		sorting_i: int = CONFIG.sorting_options.index(CONFIG.proc_sorting)
		loglevel_i: int = CONFIG.log_levels.index(CONFIG.log_level)
		view_mode_i: int = CONFIG.view_modes.index(CONFIG.view_mode)
		color_i: int
		while not cls.close:
			key = ""
			if cls.resized:
				y = 9 if Term.height < option_len + 10 else Term.height // 2 - option_len // 2 + 4
				out_misc = (f'{Banner.draw(y-7, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
					f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
				x = Term.width//2-38
				x2 = x + 27
				h, w, w2 = Term.height-2-y, 26, 50
				h -= h % 2
				color_i = list(Theme.themes).index(THEME.current)
				if option_len > h:
					pages = ceil(option_len / h)
				else:
					h = option_len
					pages = 0
				page = 1
				selected_int = 0
				out_misc += create_box(x, y, w, h+2, "options", line_color=THEME.div_line)
				redraw = True
				cls.resized = False
			if redraw:
				out = ""
				cy = 0
				selected = list(option_items)[selected_int]
				if pages:
					out += (f'{Mv.to(y+h+1, x+11)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title("pg")}{Fx.ub}{THEME.main_fg(Symbol.up)} {Fx.b}{THEME.title}{page}/{pages} '
						f'pg{Fx.ub}{THEME.main_fg(Symbol.down)}{THEME.div_line(Symbol.title_right)}')
				#* Each option occupies two rows: name (with counter) and current value
				for n, opt in enumerate(option_items):
					if pages and n < (page - 1) * ceil(h / 2): continue
					value = getattr(CONFIG, opt)
					t_color = f'{THEME.selected_bg}{THEME.selected_fg}' if opt == selected else f'{THEME.title}'
					v_color = "" if opt == selected else f'{THEME.title}'
					d_quote = '"' if isinstance(value, str) else ""
					if opt == "color_theme":
						counter = f' {color_i + 1}/{len(Theme.themes)}'
					elif opt == "proc_sorting":
						counter = f' {sorting_i + 1}/{len(CONFIG.sorting_options)}'
					elif opt == "log_level":
						counter = f' {loglevel_i + 1}/{len(CONFIG.log_levels)}'
					elif opt == "view_mode":
						counter = f' {view_mode_i + 1}/{len(CONFIG.view_modes)}'
					else:
						counter = ""
					out += f'{Mv.to(y+1+cy, x+1)}{t_color}{Fx.b}{opt.replace("_", " ").capitalize() + counter:^24.24}{Fx.ub}{Mv.to(y+2+cy, x+1)}{v_color}'
					if opt == selected:
						if isinstance(value, bool) or opt in ["color_theme", "proc_sorting", "log_level", "view_mode"]:
							out += f'{t_color} {Symbol.left}{v_color}{d_quote + str(value) + d_quote:^20.20}{t_color}{Symbol.right} '
						elif inputting:
							out += f'{str(input_val)[-17:] + Fx.bl + "█" + Fx.ubl + "" + Symbol.enter:^33.33}'
						else:
							out += ((f'{t_color} {Symbol.left}{v_color}' if type(value) is int else " ") +
								f'{str(value) + " " + Symbol.enter:^20.20}' + (f'{t_color}{Symbol.right} ' if type(value) is int else " "))
					else:
						out += f'{d_quote + str(value) + d_quote:^24.24}'
					out += f'{Term.bg}'
					if opt == selected:
						h2 = len(option_items[opt]) + 2
						y2 = y + (selected_int * 2) - ((page-1) * h)
						if y2 + h2 > Term.height: y2 = Term.height - h2
						out += f'{create_box(x2, y2, w2, h2, "description", line_color=THEME.div_line)}{THEME.main_fg}'
						for n, desc in enumerate(option_items[opt]):
							out += f'{Mv.to(y2+1+n, x2+2)}{desc:.48}'
					cy += 2
					if cy >= h: break
				if cy < h:
					for i in range(h-cy):
						out += f'{Mv.to(y+1+cy+i, x+1)}{" " * (w-2)}'
			if not skip or redraw:
				Draw.now(f'{cls.background}{out_misc}{out}')
			skip = redraw = False
			if Key.input_wait(Timer.left()):
				key = Key.get()
				redraw = True
				has_sel = False
				if key == "mouse_click" and not inputting:
					mx, my = Key.get_mouse()
					if mx > x and mx < x + w and my > y and my < y + h + 2:
						mouse_sel = ceil((my - y) / 2) - 1 + ceil((page-1) * (h / 2))
						if pages and my == y+h+1 and mx > x+11 and mx < x+16:
							key = "page_up"
						elif pages and my == y+h+1 and mx > x+19 and mx < x+24:
							key = "page_down"
						elif my == y+h+1:
							pass
						elif mouse_sel == selected_int:
							if mx < x + 6:
								key = "left"
							elif mx > x + 19:
								key = "right"
							else:
								key = "enter"
						elif mouse_sel < len(option_items):
							selected_int = mouse_sel
							has_sel = True
					else:
						key = "escape"
				if inputting:
					#* Text/number entry mode for options that take a typed value
					if key in ["escape", "mouse_click"]:
						inputting = False
					elif key == "enter":
						inputting = False
						if str(getattr(CONFIG, selected)) != input_val:
							if selected == "update_ms":
								if not input_val or int(input_val) < 100:
									CONFIG.update_ms = 100
								elif int(input_val) > 86399900:
									CONFIG.update_ms = 86399900
								else:
									CONFIG.update_ms = int(input_val)
							elif selected == "tree_depth":
								if not input_val or int(input_val) < 0:
									CONFIG.tree_depth = 0
								else:
									CONFIG.tree_depth = int(input_val)
								ProcCollector.collapsed = {}
							elif isinstance(getattr(CONFIG, selected), str):
								setattr(CONFIG, selected, input_val)
								if selected.startswith("net_"):
									NetCollector.net_min = {"download" : -1, "upload" : -1}
								elif selected == "draw_clock":
									Box.clock_on = True if len(CONFIG.draw_clock) > 0 else False
									if not Box.clock_on: Draw.clear("clock", saved=True)
							Term.refresh(force=True)
							cls.resized = False
					elif key == "backspace" and len(input_val) > 0:
						input_val = input_val[:-1]
					elif key == "delete":
						input_val = ""
					elif isinstance(getattr(CONFIG, selected), str) and len(key) == 1:
						input_val += key
					elif isinstance(getattr(CONFIG, selected), int) and key.isdigit():
						input_val += key
				elif key == "q":
					clean_quit()
				elif key in ["escape", "o", "M", "f2"]:
					cls.close = True
					break
				elif key == "enter" and selected in ["update_ms", "disks_filter", "custom_cpu_name", "net_download", "net_upload", "draw_clock", "tree_depth"]:
					inputting = True
					input_val = str(getattr(CONFIG, selected))
				elif key == "left" and selected == "update_ms" and CONFIG.update_ms - 100 >= 100:
					CONFIG.update_ms -= 100
					Box.draw_update_ms()
				elif key == "right" and selected == "update_ms" and CONFIG.update_ms + 100 <= 86399900:
					CONFIG.update_ms += 100
					Box.draw_update_ms()
				elif key == "left" and selected == "tree_depth" and CONFIG.tree_depth > 0:
					CONFIG.tree_depth -= 1
					ProcCollector.collapsed = {}
				elif key == "right" and selected == "tree_depth":
					CONFIG.tree_depth += 1
					ProcCollector.collapsed = {}
				elif key in ["left", "right"] and isinstance(getattr(CONFIG, selected), bool):
					setattr(CONFIG, selected, not getattr(CONFIG, selected))
					if selected == "check_temp":
						if CONFIG.check_temp:
							CpuCollector.get_sensors()
						else:
							CpuCollector.sensor_method = ""
							CpuCollector.got_sensors = False
					if selected in ["net_auto", "net_color_fixed", "net_sync"]:
						if selected == "net_auto": NetCollector.auto_min = CONFIG.net_auto
						NetBox.redraw = True
					if selected == "theme_background":
						Term.bg = THEME.main_bg if CONFIG.theme_background else "\033[49m"
						Draw.now(Term.bg)
					Term.refresh(force=True)
					cls.resized = False
				elif key in ["left", "right"] and selected == "color_theme" and len(Theme.themes) > 1:
					if key == "left":
						color_i -= 1
						if color_i < 0: color_i = len(Theme.themes) - 1
					elif key == "right":
						color_i += 1
						if color_i > len(Theme.themes) - 1: color_i = 0
					CONFIG.color_theme = list(Theme.themes)[color_i]
					THEME(CONFIG.color_theme)
					Term.refresh(force=True)
					Timer.finish()
				elif key in ["left", "right"] and selected == "proc_sorting":
					ProcCollector.sorting(key)
				elif key in ["left", "right"] and selected == "log_level":
					if key == "left":
						loglevel_i -= 1
						if loglevel_i < 0: loglevel_i = len(CONFIG.log_levels) - 1
					elif key == "right":
						loglevel_i += 1
						if loglevel_i > len(CONFIG.log_levels) - 1: loglevel_i = 0
					CONFIG.log_level = CONFIG.log_levels[loglevel_i]
					errlog.setLevel(getattr(logging, CONFIG.log_level))
					errlog.info(f'Loglevel set to {CONFIG.log_level}')
				elif key in ["left", "right"] and selected == "view_mode":
					if key == "left":
						view_mode_i -= 1
						if view_mode_i < 0: view_mode_i = len(CONFIG.view_modes) - 1
					elif key == "right":
						view_mode_i += 1
						if view_mode_i > len(CONFIG.view_modes) - 1: view_mode_i = 0
					CONFIG.view_mode = CONFIG.view_modes[view_mode_i]
					Box.proc_mode = True if CONFIG.view_mode == "proc" else False
					Box.stat_mode = True if CONFIG.view_mode == "stat" else False
					if ARG_MODE:
						ARG_MODE = ""
						Draw.clear(saved=True)
					Term.refresh(force=True)
					cls.resized = False
				elif key == "up":
					selected_int -= 1
					if selected_int < 0: selected_int = len(option_items) - 1
					page = floor(selected_int * 2 / h) + 1
				elif key == "down":
					selected_int += 1
					if selected_int > len(option_items) - 1: selected_int = 0
					page = floor(selected_int * 2 / h) + 1
				elif key in ["mouse_scroll_up", "page_up"] and pages:
					page -= 1
					if page < 1: page = pages
					selected_int = (page-1) * ceil(h / 2)
				elif key in ["mouse_scroll_down", "page_down"] and pages:
					page += 1
					if page > pages: page = 1
					selected_int = (page-1) * ceil(h / 2)
				elif has_sel:
					pass
				else:
					redraw = False
			if Timer.not_zero() and not cls.resized:
				skip = True
			else:
				Collector.collect()
				Collector.collect_done.wait(1)
				if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
				Timer.stamp()
		#* When opened from the main menu, keep cls.close False so the main loop continues
		if main_active:
			cls.close = False
			return
		Draw.now(f'{Draw.saved_buffer()}')
		cls.background = ""
		cls.active = False
		cls.close = False
class Timer:
	'''Paces the main loop: tracks how much of the current update interval remains.'''
	timestamp: float
	# When True the next not_zero() call reports the interval as expired once
	return_zero = False
	@classmethod
	def stamp(cls):
		'''Mark the start of a new update interval.'''
		cls.timestamp = time()
	@classmethod
	def not_zero(cls) -> bool:
		'''Return True while the current interval still has time remaining.'''
		if cls.return_zero:
			cls.return_zero = False
			return False
		deadline = cls.timestamp + (CONFIG.update_ms / 1000)
		return time() < deadline
	@classmethod
	def left(cls) -> float:
		'''Seconds remaining of the current interval (negative when expired).'''
		return cls.timestamp + (CONFIG.update_ms / 1000) - time()
	@classmethod
	def finish(cls):
		'''Force the interval to expire immediately and wake the input wait.'''
		cls.return_zero = True
		cls.timestamp = time() - (CONFIG.update_ms / 1000)
		Key.break_wait()
class UpdateChecker:
	'''Fetches the latest bpytop version string from github in a background thread
	and sends a desktop notification if it differs from the running VERSION.'''
	version: str = VERSION
	thread: threading.Thread
	@classmethod
	def run(cls):
		'''Start the version check without blocking the caller.'''
		checker = threading.Thread(target=cls._checker)
		cls.thread = checker
		checker.start()
	@classmethod
	def _checker(cls):
		'''Scrape the VERSION constant from the master branch source.'''
		try:
			with urllib.request.urlopen("https://github.com/aristocratos/bpytop/raw/master/bpytop.py", timeout=5) as source: # type: ignore
				for raw in source:
					text = raw.decode("utf-8")
					if text.startswith("VERSION: str ="):
						cls.version = text[(text.index("=")+1):].strip('" \n')
						break
		except Exception as e:
			errlog.exception(f'{e}')
		else:
			#* Only notify when a notifier is available and the versions differ
			if cls.version != VERSION and which("notify-send"):
				try:
					subprocess.run(["notify-send", "-u", "normal", "BpyTop Update!",
						f'New version of BpyTop available!\nCurrent version: {VERSION}\nNew version: {cls.version}\nDownload at github.com/aristocratos/bpytop',
						"-i", "update-notifier", "-t", "10000"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
				except Exception as e:
					errlog.exception(f'{e}')
class Init:
	'''Draws the startup screen: banner, per-step progress markers and an
	animated grayscale graph background while init tasks run.'''
	running: bool = True
	initbg_colors: List[str] = []  # 102-step grayscale gradient for the background graphs
	initbg_data: List[int]
	initbg_up: Graph
	initbg_down: Graph
	resized = False
	@classmethod
	def start(cls):
		'''Create the init screen draw buffers and the two background graphs.'''
		Draw.buffer("init", z=1)
		Draw.buffer("initbg", z=10)
		# Two entries per gray level so the gradient is long enough for the graphs
		for i in range(51):
			for _ in range(2): cls.initbg_colors.append(Color.fg(i, i, i))
		Draw.buffer("banner", (f'{Banner.draw(Term.height // 2 - 10, center=True)}{Mv.d(1)}{Mv.l(11)}{Colors.black_bg}{Colors.default}'
			f'{Fx.b}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}{Color.fg("#50")}'), z=2)
		# Seven progress rows, one per init step, each labeled with a percentage
		for _i in range(7):
			perc = f'{str(round((_i + 1) * 14 + 2)) + "%":>5}'
			Draw.buffer("+banner", f'{Mv.to(Term.height // 2 - 2 + _i, Term.width // 2 - 28)}{Fx.trans(perc)}{Symbol.v_line}')
		Draw.out("banner")
		# Save cursor position where the first step's ok/fail marker will be written
		Draw.buffer("+init!", f'{Color.fg("#cc")}{Fx.b}{Mv.to(Term.height // 2 - 2, Term.width // 2 - 21)}{Mv.save}')
		cls.initbg_data = [randint(0, 100) for _ in range(Term.width * 2)]
		cls.initbg_up = Graph(Term.width, Term.height // 2, cls.initbg_colors, cls.initbg_data, invert=True)
		cls.initbg_down = Graph(Term.width, Term.height // 2, cls.initbg_colors, cls.initbg_data, invert=False)
	@classmethod
	def success(cls):
		'''Mark the current init step as ok and move the marker cursor down one row.'''
		if not CONFIG.show_init or cls.resized: return
		cls.draw_bg(5)
		Draw.buffer("+init!", f'{Mv.restore}{Symbol.ok}\n{Mv.r(Term.width // 2 - 22)}{Mv.save}')
	@staticmethod
	def fail(err):
		'''Mark the current init step as failed, log the error and quit with code 1.'''
		if CONFIG.show_init:
			Draw.buffer("+init!", f'{Mv.restore}{Symbol.fail}')
			sleep(2)
		errlog.exception(f'{err}')
		clean_quit(1, errmsg=f'Error during init! See {CONFIG_DIR}/error.log for more information.')
	@classmethod
	def draw_bg(cls, times: int = 5):
		'''Animate the background graphs for *times* frames (~50ms per frame).'''
		for _ in range(times):
			sleep(0.05)
			x = randint(0, 100)
			Draw.buffer("initbg", f'{Fx.ub}{Mv.to(0, 0)}{cls.initbg_up(x)}{Mv.to(Term.height // 2, 0)}{cls.initbg_down(x)}')
			Draw.out("initbg", "banner", "init")
	@classmethod
	def done(cls):
		'''Run the closing animation, clear the init buffers and free the graphs.'''
		cls.running = False
		if not CONFIG.show_init: return
		if cls.resized:
			Draw.now(Term.clear)
		else:
			cls.draw_bg(10)
		Draw.clear("initbg", "banner", "init", saved=True)
		if cls.resized: return
		del cls.initbg_up, cls.initbg_down, cls.initbg_data, cls.initbg_colors
#? Functions ------------------------------------------------------------------------------------->
def get_cpu_name() -> str:
	'''Fetch a suitable CPU identifier from the CPU model name string.

	Runs the platform specific command for the current SYSTEM, extracts the
	model line when needed and strips vendor noise like "(R)", "(TM)" and
	"Intel " to shorten the display name. Returns "" if detection fails.
	'''
	name: str = ""
	nlist: List = []
	command: str = ""
	cmd_out: str = ""
	rem_line: str = ""
	if SYSTEM == "Linux":
		command = "cat /proc/cpuinfo"
		rem_line = "model name"
	elif SYSTEM == "MacOS":
		command = "sysctl -n machdep.cpu.brand_string"
	elif SYSTEM == "BSD":
		command = "sysctl hw.model"
		rem_line = "hw.model"
	try:
		cmd_out = subprocess.check_output("LANG=C " + command, shell=True, universal_newlines=True)
	except (subprocess.SubprocessError, OSError):
		#* Detection is best effort: fall through with empty output.
		#* Was a bare "except:" which also swallowed SystemExit/KeyboardInterrupt.
		pass
	if rem_line:
		for line in cmd_out.split("\n"):
			if rem_line in line:
				name = re.sub(".*" + rem_line + ".*:", "", line, 1).lstrip()
	else:
		name = cmd_out
	nlist = name.split(" ")
	try:
		#* Heuristics for common vendor naming schemes; keep the full name on mismatch
		if "Xeon" in name and "CPU" in name:
			name = nlist[nlist.index("CPU")+(-1 if name.endswith("CPU") else 1)]
		elif "Ryzen" in name:
			name = " ".join(nlist[nlist.index("Ryzen"):nlist.index("Ryzen")+3])
		elif "Duo" in name and "@" in name:
			name = " ".join(nlist[:nlist.index("@")])
		elif "CPU" in name and not nlist[0] == "CPU":
			name = nlist[nlist.index("CPU")-1]
	except (ValueError, IndexError):
		pass
	name = " ".join(name.split())
	return name.replace("Processor ", "").replace("CPU ", "").replace("(R)", "").replace("(TM)", "").replace("Intel ", "")
def create_box(x: int = 0, y: int = 0, width: int = 0, height: int = 0, title: str = "", title2: str = "", line_color: Color = None, title_color: Color = None, fill: bool = True, box = None) -> str:
	'''Create a box from a box object or by given arguments'''
	frame: str = f'{Term.fg}{Term.bg}'
	if not line_color: line_color = THEME.div_line
	if not title_color: title_color = THEME.title
	#* A box object overrides the positional geometry and the first title
	if box:
		x, y = box.x, box.y
		width, height = box.width, box.height
		title = box.name
	top_row: int = y
	bottom_row: int = y + height - 1
	frame += f'{line_color}'
	#* Top and bottom horizontal edges
	for row in (top_row, bottom_row):
		frame += f'{Mv.to(row, x)}{Symbol.h_line * (width - 1)}'
	#* Vertical edges, filling the interior with spaces when requested
	for row in range(top_row + 1, bottom_row):
		frame += f'{Mv.to(row, x)}{Symbol.v_line}{" " * (width-2) if fill else Mv.r(width-2)}{Symbol.v_line}'
	#* Corners
	frame += (f'{Mv.to(y, x)}{Symbol.left_up}'
		f'{Mv.to(y, x + width - 1)}{Symbol.right_up}'
		f'{Mv.to(y + height - 1, x)}{Symbol.left_down}'
		f'{Mv.to(y + height - 1, x + width - 1)}{Symbol.right_down}')
	#* Optional titles on the top and bottom edge
	if title:
		frame += f'{Mv.to(y, x + 2)}{Symbol.title_left}{title_color}{Fx.b}{title}{Fx.ub}{line_color}{Symbol.title_right}'
	if title2:
		frame += f'{Mv.to(bottom_row, x + 2)}{Symbol.title_left}{title_color}{Fx.b}{title2}{Fx.ub}{line_color}{Symbol.title_right}'
	#* Leave the cursor at the box interior's top left corner
	return f'{frame}{Term.fg}{Mv.to(y + 1, x + 1)}'
def now_sleeping(signum, frame):
	"""Reset terminal settings and stop background input read before putting to sleep"""
	# NOTE(review): (signum, frame) is the signal-handler contract; presumably
	# registered for SIGTSTP (ctrl+z) — confirm at the registration site.
	Key.stop()
	Collector.stop()
	Draw.now(Term.clear, Term.normal_screen, Term.show_cursor, Term.mouse_off, Term.mouse_direct_off, Term.title())
	Term.echo(True)
	# SIGSTOP cannot be caught or ignored: this actually suspends the process
	os.kill(os.getpid(), signal.SIGSTOP)
def now_awake(signum, frame):
	"""Set terminal settings and restart background input read"""
	# NOTE(review): presumably the SIGCONT handler, restoring the UI after a
	# suspend triggered by now_sleeping() — confirm at the registration site.
	Draw.now(Term.alt_screen, Term.clear, Term.hide_cursor, Term.mouse_on, Term.title("BpyTOP"))
	Term.echo(False)
	Key.start()
	# Recalculate layout and restart collection in case the terminal was resized while asleep
	Term.refresh()
	Box.calc_sizes()
	Box.draw_bg()
	Collector.start()
def quit_sigint(signum, frame):
	"""SIGINT redirection to clean_quit()"""
	# (signum, frame) arguments are required by the signal handler contract but unused
	clean_quit()
def clean_quit(errcode: int = 0, errmsg: str = "", thread: bool = False):
	"""Stop background input read, save current config and reset terminal settings before quitting"""
	global THREAD_ERROR
	# Called from a worker thread: record the error and redirect the actual
	# shutdown to the main thread via interrupt_main()
	if thread:
		THREAD_ERROR = errcode
		interrupt_main()
		return
	if THREAD_ERROR: errcode = THREAD_ERROR
	Key.stop()
	Collector.stop()
	# Only persist the config on a clean exit so a crash cannot corrupt it
	if not errcode: CONFIG.save_config()
	Draw.now(Term.clear, Term.normal_screen, Term.show_cursor, Term.mouse_off, Term.mouse_direct_off, Term.title())
	Term.echo(True)
	if errcode == 0:
		errlog.info(f'Exiting. Runtime {timedelta(seconds=round(time() - SELF_START, 0))} \n')
	else:
		errlog.warning(f'Exiting with errorcode ({errcode}). Runtime {timedelta(seconds=round(time() - SELF_START, 0))} \n')
		if not errmsg: errmsg = f'Bpytop exited with errorcode ({errcode}). See {CONFIG_DIR}/error.log for more information!'
	if errmsg: print(errmsg)
	raise SystemExit(errcode)
def floating_humanizer(value: Union[float, int], bit: bool = False, per_second: bool = False, start: int = 0, short: bool = False) -> str:
	'''Scales up in steps of 1024 to highest possible unit and returns string with unit suffixed
	* bit=True or defaults to bytes
	* start=int to set 1024 multiplier starting unit
	* short=True always returns 0 decimals and shortens unit to 1 character
	'''
	out: str = ""
	mult: int = 8 if bit else 1
	selector: int = start
	unit: Tuple[str, ...] = UNITS["bit"] if bit else UNITS["byte"]
	# Work on value * 100 as an integer so two decimal digits survive the bit shifts
	if isinstance(value, float): value = round(value * 100 * mult)
	elif value > 0: value *= 100 * mult
	else: value = 0
	# Shift down by 1024 per unit step until the (scaled) value fits in 5 digits
	while len(f'{value}') > 5 and value >= 102400:
		value >>= 10
		if value < 100:
			out = f'{value}'
			break
		selector += 1
	else:
		# No break above: place the decimal point based on the digit count
		# (the trailing two digits are the scaled-in decimals)
		if len(f'{value}') < 5 and len(f'{value}') >= 2 and selector > 0:
			decimals = 5 - len(f'{value}')
			out = f'{value}'[:-2] + "." + f'{value}'[-decimals:]
		elif len(f'{value}') >= 2:
			out = f'{value}'[:-2]
		else:
			out = f'{value}'
	if short:
		out = out.split(".")[0]
		# More than 3 digits in short mode: bump to the next unit using the leading digit
		if len(out) > 3:
			out = f'{int(out[0]) + 1}'
			selector += 1
	out += f'{"" if short else " "}{unit[selector][0] if short else unit[selector]}'
	if per_second: out += "ps" if bit else "/s"
	return out
def units_to_bytes(value: str) -> int:
	'''Parse a human readable size string into a byte count.

	Accepts an optional trailing "s", an optional "bit"/"byte" word ("bit"
	divides the result by 8) and an optional binary multiplier suffix
	"K"/"M"/"G" (case insensitive), e.g. "10M" -> 10485760, "100Mbit" ->
	12582912, "1.5K" -> 1536. Returns 0 for empty or unparsable input.
	'''
	if not value: return 0
	out: int = 0
	mult: int = 0
	bit: bool = False
	value_i: int = 0
	units: Dict[str, int] = {"k" : 1, "m" : 2, "g" : 3}
	try:
		if value.lower().endswith("s"):
			value = value[:-1]
		if value.lower().endswith("bit"):
			bit = True
			value = value[:-3]
		elif value.lower().endswith("byte"):
			value = value[:-4]
		#* Guard against inputs like "bit" or "s" that strip down to an empty
		#* string; unguarded value[-1] raised an uncaught IndexError before
		if value and value[-1].lower() in units:
			mult = units[value[-1].lower()]
			value = value[:-1]
		if "." in value and value.replace(".", "").isdigit():
			if mult > 0:
				#* Consume one multiplier step here so the fractional part is kept
				value_i = round(float(value) * 1024)
				mult -= 1
			else:
				value_i = round(float(value))
		elif value.isdigit():
			value_i = int(value)
		if bit: value_i = round(value_i / 8)
		out = int(value_i) << (10 * mult)
	except (ValueError, IndexError):
		out = 0
	return out
def min_max(value: int, min_value: int = 0, max_value: int = 100) -> int:
	'''Clamp *value* into the inclusive range [min_value, max_value].'''
	capped = min(value, max_value)
	return max(min_value, capped)
def process_keys():
    '''Drain the keyboard/mouse input queue and dispatch each event.

    Handles process-box filtering input first, then global hotkeys that toggle
    config flags, switch views, send signals to the selected process, or move
    the process-box selection. Side effects only; returns nothing.
    '''
    mouse_pos: Tuple[int, int] = (0, 0)
    filtered: bool = False
    global ARG_MODE
    while Key.has_key():
        key = Key.get()
        # Mouse events are only meaningful inside the process box; clicks
        # outside it unselect, scrolls outside it are dropped.
        if key in ["mouse_scroll_up", "mouse_scroll_down", "mouse_click"]:
            mouse_pos = Key.get_mouse()
            if mouse_pos[0] >= ProcBox.x and mouse_pos[1] >= ProcBox.current_y + 1 and mouse_pos[1] < ProcBox.current_y + ProcBox.current_h - 1:
                pass
            elif key == "mouse_click":
                key = "mouse_unselect"
            else:
                key = "_null"
        # While the filter prompt is active, keystrokes edit the search filter
        # instead of triggering hotkeys.
        if ProcBox.filtering:
            if key in ["enter", "mouse_click", "mouse_unselect"]:
                ProcBox.filtering = False
                Collector.collect(ProcCollector, redraw=True, only_draw=True)
                continue
            elif key in ["escape", "delete"]:
                ProcCollector.search_filter = ""
                ProcBox.filtering = False
            elif len(key) == 1:
                ProcCollector.search_filter += key
            elif key == "backspace" and len(ProcCollector.search_filter) > 0:
                ProcCollector.search_filter = ProcCollector.search_filter[:-1]
            else:
                continue
            Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
            # Throttle successive filter refreshes slightly.
            if filtered: Collector.collect_done.wait(0.1)
            filtered = True
            continue
        # Global hotkey dispatch.
        if key == "_null":
            continue
        elif key == "q":
            clean_quit()
        elif key == "+" and CONFIG.update_ms + 100 <= 86399900:
            CONFIG.update_ms += 100
            Box.draw_update_ms()
        elif key == "-" and CONFIG.update_ms - 100 >= 100:
            CONFIG.update_ms -= 100
            Box.draw_update_ms()
        elif key in ["b", "n"]:
            NetCollector.switch(key)
        elif key in ["M", "escape"]:
            Menu.main()
        elif key in ["o", "f2"]:
            Menu.options()
        elif key in ["h", "f1"]:
            Menu.help()
        elif key == "z":
            NetCollector.reset = not NetCollector.reset
            Collector.collect(NetCollector, redraw=True)
        elif key == "y":
            CONFIG.net_sync = not CONFIG.net_sync
            Collector.collect(NetCollector, redraw=True)
        elif key == "a":
            NetCollector.auto_min = not NetCollector.auto_min
            NetCollector.net_min = {"download" : -1, "upload" : -1}
            Collector.collect(NetCollector, redraw=True)
        elif key in ["left", "right"]:
            ProcCollector.sorting(key)
        elif key == " " and CONFIG.proc_tree and ProcBox.selected > 0:
            # Space toggles collapse of the selected branch in tree view.
            if ProcBox.selected_pid in ProcCollector.collapsed:
                ProcCollector.collapsed[ProcBox.selected_pid] = not ProcCollector.collapsed[ProcBox.selected_pid]
            Collector.collect(ProcCollector, interrupt=True, redraw=True)
        elif key == "e":
            CONFIG.proc_tree = not CONFIG.proc_tree
            Collector.collect(ProcCollector, interrupt=True, redraw=True)
        elif key == "r":
            CONFIG.proc_reversed = not CONFIG.proc_reversed
            Collector.collect(ProcCollector, interrupt=True, redraw=True)
        elif key == "c":
            CONFIG.proc_per_core = not CONFIG.proc_per_core
            Collector.collect(ProcCollector, interrupt=True, redraw=True)
        elif key == "g":
            CONFIG.mem_graphs = not CONFIG.mem_graphs
            Collector.collect(MemCollector, interrupt=True, redraw=True)
        elif key == "s":
            CONFIG.swap_disk = not CONFIG.swap_disk
            Collector.collect(MemCollector, interrupt=True, redraw=True)
        elif key == "f":
            ProcBox.filtering = True
            if not ProcCollector.search_filter: ProcBox.start = 0
            Collector.collect(ProcCollector, redraw=True, only_draw=True)
        elif key == "m":
            # Cycle through the configured view modes (or leave ARG_MODE if set).
            if ARG_MODE:
                ARG_MODE = ""
            elif CONFIG.view_modes.index(CONFIG.view_mode) + 1 > len(CONFIG.view_modes) - 1:
                CONFIG.view_mode = CONFIG.view_modes[0]
            else:
                CONFIG.view_mode = CONFIG.view_modes[(CONFIG.view_modes.index(CONFIG.view_mode) + 1)]
            Box.proc_mode = True if CONFIG.view_mode == "proc" else False
            Box.stat_mode = True if CONFIG.view_mode == "stat" else False
            Draw.clear(saved=True)
            Term.refresh(force=True)
        elif key.lower() in ["t", "k", "i"] and (ProcBox.selected > 0 or ProcCollector.detailed):
            # t/k/i send SIGTERM/SIGKILL/SIGINT to the selected (or detailed) process.
            pid: int = ProcBox.selected_pid if ProcBox.selected > 0 else ProcCollector.detailed_pid # type: ignore
            if psutil.pid_exists(pid):
                if key.lower() == "t": sig = signal.SIGTERM
                elif key.lower() == "k": sig = signal.SIGKILL
                elif key.lower() == "i": sig = signal.SIGINT
                try:
                    os.kill(pid, sig)
                except Exception as e:
                    errlog.error(f'Exception when sending signal {sig} to pid {pid}')
                    errlog.exception(f'{e}')
        elif key == "delete" and ProcCollector.search_filter:
            ProcCollector.search_filter = ""
            Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
        elif key == "enter":
            # Enter toggles the detailed view for the selected process.
            if ProcBox.selected > 0 and ProcCollector.detailed_pid != ProcBox.selected_pid and psutil.pid_exists(ProcBox.selected_pid):
                ProcCollector.detailed = True
                ProcBox.last_selection = ProcBox.selected
                ProcBox.selected = 0
                ProcCollector.detailed_pid = ProcBox.selected_pid
                ProcBox.resized = True
            elif ProcCollector.detailed:
                ProcBox.selected = ProcBox.last_selection
                ProcBox.last_selection = 0
                ProcCollector.detailed = False
                ProcCollector.detailed_pid = None
                ProcBox.resized = True
            else:
                continue
            # Reset cached detail data/graphs before the next collection pass.
            ProcCollector.details = {}
            ProcCollector.details_cpu = []
            ProcCollector.details_mem = []
            Graphs.detailed_cpu = NotImplemented
            Graphs.detailed_mem = NotImplemented
            Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
        elif key in ["up", "down", "mouse_scroll_up", "mouse_scroll_down", "page_up", "page_down", "home", "end", "mouse_click", "mouse_unselect"]:
            ProcBox.selector(key, mouse_pos)
#? Pre main -------------------------------------------------------------------------------------->
# CPU model name, resolved once at import time.
CPU_NAME: str = get_cpu_name()
# Active color theme; assigned in main() after the config has been read.
THEME: Theme
def main():
    '''Program entry: set up the terminal, run the init sequence (theme, boxes,
    signal handlers, input/collector threads), then enter the main event loop
    until quit. Each init step reports success/failure via Init.'''
    global THEME
    Term.width = os.get_terminal_size().columns
    Term.height = os.get_terminal_size().lines
    #? Init -------------------------------------------------------------------------------------->
    if DEBUG: TimeIt.start("Init")
    #? Switch to alternate screen, clear screen, hide cursor, enable mouse reporting and disable input echo
    Draw.now(Term.alt_screen, Term.clear, Term.hide_cursor, Term.mouse_on, Term.title("BpyTOP"))
    Term.echo(False)
    Term.refresh(force=True)
    #? Start a thread checking for updates while running init
    if CONFIG.update_check: UpdateChecker.run()
    #? Draw banner and init status
    if CONFIG.show_init and not Init.resized:
        Init.start()
    #? Load theme
    if CONFIG.show_init:
        Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Loading theme and creating colors... ")}{Mv.save}')
    try:
        THEME = Theme(CONFIG.color_theme)
    except Exception as e:
        Init.fail(e)
    else:
        Init.success()
    #? Setup boxes
    if CONFIG.show_init:
        Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Doing some maths and drawing... ")}{Mv.save}')
    try:
        if CONFIG.check_temp: CpuCollector.get_sensors()
        Box.calc_sizes()
        Box.draw_bg(now=False)
    except Exception as e:
        Init.fail(e)
    else:
        Init.success()
    #? Setup signal handlers for SIGSTP, SIGCONT, SIGINT and SIGWINCH
    if CONFIG.show_init:
        Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Setting up signal handlers... ")}{Mv.save}')
    try:
        signal.signal(signal.SIGTSTP, now_sleeping) #* Ctrl-Z
        signal.signal(signal.SIGCONT, now_awake) #* Resume
        signal.signal(signal.SIGINT, quit_sigint) #* Ctrl-C
        signal.signal(signal.SIGWINCH, Term.refresh) #* Terminal resized
    except Exception as e:
        Init.fail(e)
    else:
        Init.success()
    #? Start a separate thread for reading keyboard input
    if CONFIG.show_init:
        Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Starting input reader thread... ")}{Mv.save}')
    try:
        Key.start()
    except Exception as e:
        Init.fail(e)
    else:
        Init.success()
    #? Start a separate thread for data collection and drawing
    if CONFIG.show_init:
        Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Starting data collection and drawer thread... ")}{Mv.save}')
    try:
        Collector.start()
    except Exception as e:
        Init.fail(e)
    else:
        Init.success()
    #? Collect data and draw to buffer
    if CONFIG.show_init:
        Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Collecting data and drawing... ")}{Mv.save}')
    try:
        Collector.collect(draw_now=False)
        pass
    except Exception as e:
        Init.fail(e)
    else:
        Init.success()
    #? Draw to screen
    if CONFIG.show_init:
        Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Finishing up... ")}{Mv.save}')
    try:
        Collector.collect_done.wait()
    except Exception as e:
        Init.fail(e)
    else:
        Init.success()
    Init.done()
    Term.refresh()
    Draw.out(clear=True)
    if CONFIG.draw_clock:
        Box.clock_on = True
    if DEBUG: TimeIt.stop("Init")
    #? Main loop ------------------------------------------------------------------------------------->
    def run():
        # Infinite loop: refresh, then service input until the update timer
        # expires, then collect fresh data.
        while not False:
            Term.refresh()
            Timer.stamp()
            while Timer.not_zero():
                if Key.input_wait(Timer.left()):
                    process_keys()
            Collector.collect()
    #? Start main loop
    try:
        run()
    except Exception as e:
        errlog.exception(f'{e}')
        clean_quit(1)
    else:
        #? Quit cleanly even if false starts being true...
        clean_quit()
# Standard script entry point.
if __name__ == "__main__":
    main()
| 37.335889 | 541 | 0.632835 |
aceaa9fae5b3cdcdcd5675e23544af10703bf652 | 50,170 | py | Python | bin/ranking.py | cskyan/bionlpsota | e950f2cfbce9d34547614e4962cbdb768f90a7f5 | [
"Apache-2.0"
] | null | null | null | bin/ranking.py | cskyan/bionlpsota | e950f2cfbce9d34547614e4962cbdb768f90a7f5 | [
"Apache-2.0"
] | null | null | null | bin/ranking.py | cskyan/bionlpsota | e950f2cfbce9d34547614e4962cbdb768f90a7f5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding=utf-8 -*-
###########################################################################
# Copyright (C) 2013-2021 by Caspar. All rights reserved.
# File Name: .py
# Author: Shankai Yan
# E-mail: dr.skyan@gmail.com
# Created Time: 2021-03-29 17:19:30
###########################################################################
#
import os, sys, ast, time, copy, random, operator, pickle, string, logging, itertools
from collections import OrderedDict
from optparse import OptionParser
from tqdm import tqdm
import numpy as np
import pandas as pd
import torch
from torch.utils.data.sampler import WeightedRandomSampler
from sklearn import metrics
from transformers import get_linear_schedule_with_warmup
from bionlp.nlp import enrich_txt_by_w2v
from bionlp.util import io, system
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
PAR_DIR = os.path.abspath(os.path.join(FILE_DIR, os.path.pardir))
sys.path.insert(0, PAR_DIR)
from modules.embedding import EmbeddingHead
from modules.ranking import SiameseRankTransformer
from util.dataset import EmbeddingPairDataset
# Path to the project-level YAML configuration file.
CONFIG_FILE = os.path.join(PAR_DIR, 'etc', 'config.yaml')
# Root directory where task datasets are expected to live.
DATA_PATH = os.path.join(os.path.expanduser('~'), 'data', 'bionlp')
# Separator token used when joining/splitting multi-value fields.
SC=';;'
NUM_TRIM = 0
# Populated by the option parser at startup; cfgr is the config reader callable.
opts, args = {}, []
cfgr = None
def siamese_rank(dev_id=None):
    '''Predict candidate labels from all available ones using pre-trained model and rank the predictions using siamese network, assuming that each label has a gold standard sample'''
    print('### Siamese Rank Mode ###')
    # Temporarily switch to the "<task>_siamese" variant for training/eval,
    # restored to the original task name near the end of this function.
    orig_task = opts.task
    opts.task = opts.task + '_siamese'
    # Prepare model related meta data
    mdl_name = opts.model.split('_')[0].lower().replace(' ', '_')
    common_cfg = cfgr('validate', 'common')
    pr = io.param_reader(os.path.join(PAR_DIR, 'etc', '%s.yaml' % common_cfg.setdefault('mdl_cfg', 'mdlcfg')))
    # NOTE(review): 'function' is not a Python 3 builtin — `type(v) is not function`
    # will raise NameError when this line executes; likely meant
    # types.FunctionType (or `not callable(v)`). Confirm and fix.
    config_kwargs = dict([(k, v) for k, v in opts.__dict__.items() if not k.startswith('_') and k not in set(['dataset', 'model', 'template']) and v is not None and type(v) is not function])
    config = Configurable(opts.task, mdl_name, common_cfg=common_cfg, wsdir=PAR_DIR, sc=SC, **config_kwargs)
    params = pr('LM', config.lm_params) if mdl_name != 'none' else {}
    use_gpu = dev_id is not None
    encode_func = config.encode_func
    tokenizer = config.tknzr.from_pretrained(params['pretrained_vocab_path'] if 'pretrained_vocab_path' in params else config.lm_mdl_name) if config.tknzr else None
    task_type = config.task_type
    spcl_tkns = config.lm_tknz_extra_char if config.lm_tknz_extra_char else ['_@_', ' _$_', ' _#_']
    special_tkns = (['start_tknids', 'clf_tknids', 'delim_tknids'], spcl_tkns[:3]) if task_type in ['entlmnt', 'sentsim'] else (['start_tknids', 'clf_tknids'], spcl_tkns[:2])
    special_tknids = _adjust_encoder(mdl_name, tokenizer, config, special_tkns[1], ret_list=True)
    special_tknids_args = dict(zip(special_tkns[0], special_tknids))
    task_trsfm_kwargs = dict(list(zip(special_tkns[0], special_tknids))+[('model',opts.model), ('sentsim_func', opts.sentsim_func), ('seqlen',opts.maxlen)])
    # Prepare task related meta data
    task_path, task_dstype, task_cols, task_trsfm, task_extparms = opts.input if opts.input and os.path.isdir(os.path.join(DATA_PATH, opts.input)) else config.task_path, config.task_ds, config.task_col, config.task_trsfm, config.task_ext_params
    trsfms = (task_trsfm[0] if len(task_trsfm) > 0 else [])
    trsfms_kwargs = ([] if hasattr(config, 'embed_type') and config.embed_type else ([{'seqlen':opts.maxlen, 'xpad_val':task_extparms.setdefault('xpad_val', 0), 'ypad_val':task_extparms.setdefault('ypad_val', None)}] if config.task_type=='nmt' else [{'seqlen':opts.maxlen, 'trimlbs':task_extparms.setdefault('trimlbs', False), 'required_special_tkns':['start_tknids', 'clf_tknids', 'delim_tknids'] if task_type in ['entlmnt', 'sentsim'] and (task_extparms.setdefault('sentsim_func', None) is None or not mdl_name.startswith('bert')) else ['start_tknids', 'clf_tknids'], 'special_tkns':special_tknids_args}, task_trsfm_kwargs, {'seqlen':opts.maxlen, 'xpad_val':task_extparms.setdefault('xpad_val', 0), 'ypad_val':task_extparms.setdefault('ypad_val', None)}])) + (task_trsfm[1] if len(task_trsfm) >= 2 else [{}] * len(task_trsfm[0]))
    ds_kwargs = {'sampw':opts.sample_weights, 'sampfrac':opts.sampfrac}
    ds_kwargs.update({'ynormfunc':task_extparms.setdefault('ynormfunc', None)})
    ext_params = dict([(k, getattr(opts, k)) if hasattr(opts, k) else (k, v) for k, v in config.clf_ext_params.items()])
    if hasattr(config, 'embed_type') and config.embed_type: ext_params['embed_type'] = config.embed_type
    task_params = dict([(k, getattr(opts, k)) if hasattr(opts, k) and getattr(opts, k) is not None else (k, v) for k, v in task_extparms.setdefault('mdlcfg', {}).items()])
    print('Classifier hyper-parameters: %s' % ext_params)
    print('Classifier task-related parameters: %s' % task_params)
    if (opts.resume):
        # Load model
        clf, prv_optimizer, resume, chckpnt = load_model(opts.resume)
        if opts.refresh:
            print('Refreshing and saving the model with newest code...')
            try:
                save_model(clf, prv_optimizer, '%s_%s.pth' % (opts.task, opts.model))
            except Exception as e:
                print(e)
        prv_task_params = copy.deepcopy(clf.task_params)
        # Update parameters
        clf.update_params(task_params=task_params, **ext_params)
        clf.to_siamese()
        if (use_gpu): clf = _handle_model(clf, dev_id=dev_id, distrb=opts.distrb)
        optmzr_cls = config.optmzr if config.optmzr else (torch.optim.Adam, {}, None)
        optimizer = optmzr_cls[0](clf.parameters(), lr=opts.lr, weight_decay=opts.wdecay, **optmzr_cls[1]) if opts.optim == 'adam' else torch.optim.SGD(clf.parameters(), lr=opts.lr, momentum=0.9)
        if len(resume) > 0 and prv_optimizer: optimizer.load_state_dict(prv_optimizer.state_dict())
        # NOTE(review): train_ds is only created in the data-preparation section
        # below — referencing it here raises NameError; the training_steps /
        # scheduler setup likely needs to move after the datasets are built.
        training_steps = int(len(train_ds) / opts.bsize) if hasattr(train_ds, '__len__') else opts.trainsteps
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=opts.wrmprop, num_training_steps=training_steps) if not opts.noschdlr and len(optmzr_cls) > 2 and optmzr_cls[2] and optmzr_cls[2] == 'linwarm' else None
        print((optimizer, scheduler))
    else:
        # Build model
        lm_model = gen_mdl(mdl_name, pretrained=True if type(opts.pretrained) is str and opts.pretrained.lower() == 'true' else opts.pretrained, use_gpu=use_gpu, distrb=opts.distrb, dev_id=dev_id) if mdl_name != 'none' else None
        clf = gen_clf(opts.model, opts.encoder, lm_model=lm_model, constraints=opts.cnstrnts.split(',') if opts.cnstrnts else [], task_type=task_type, mlt_trnsfmr=True if task_type in ['entlmnt', 'sentsim'] and task_params.setdefault('sentsim_func', None) is not None else False, task_params=task_params, use_gpu=use_gpu, distrb=opts.distrb, dev_id=dev_id, **ext_params)
        clf.to_siamese()
        optmzr_cls = config.optmzr if config.optmzr else (torch.optim.Adam, {}, None)
        optimizer = optmzr_cls[0](clf.parameters(), lr=opts.lr, weight_decay=opts.wdecay, **optmzr_cls[1]) if opts.optim == 'adam' else torch.optim.SGD(clf.parameters(), lr=opts.lr, momentum=0.9)
        # NOTE(review): same train_ds use-before-definition issue as in the
        # resume branch above.
        training_steps = int(len(train_ds) / opts.bsize) if hasattr(train_ds, '__len__') else opts.trainsteps
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=opts.wrmprop, num_training_steps=training_steps) if not opts.noschdlr and len(optmzr_cls) > 2 and optmzr_cls[2] and optmzr_cls[2] == 'linwarm' else None
        print((optimizer, scheduler))
    # Prepare data
    print('Dataset path: %s' % os.path.join(DATA_PATH, task_path))
    train_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'train_siamese.%s' % opts.fmt), task_cols['X'], task_cols['y'], config.encode_func, tokenizer=tokenizer, sep='\t', index_col=task_cols['index'], binlb=task_extparms['binlb'] if 'binlb' in task_extparms else None, transforms=trsfms, transforms_kwargs=trsfms_kwargs, mltl=task_extparms.setdefault('mltl', False), **ds_kwargs)
    if mdl_name.startswith('bert'): train_ds = MaskedLMIterDataset(train_ds) if isinstance(train_ds, BaseIterDataset) else MaskedLMDataset(train_ds)
    lb_trsfm = [x['get_lb'] for x in task_trsfm[1] if 'get_lb' in x]
    # Optional class weighting for imbalanced label distributions.
    if (not opts.weight_class or task_type == 'sentsim'):
        class_count = None
    elif len(lb_trsfm) > 0:
        lb_df = train_ds.df[task_cols['y']].apply(lb_trsfm[0])
        class_count = np.array([[1 if lb in y else 0 for lb in train_ds.binlb.keys()] for y in lb_df]).sum(axis=0)
    else:
        lb_df = train_ds.df[task_cols['y']]
        binlb = task_extparms['binlb'] if 'binlb' in task_extparms and type(task_extparms['binlb']) is not str else train_ds.binlb
        class_count = lb_df.value_counts()[binlb.keys()].values
    if (class_count is None):
        class_weights = None
        sampler = None
    else:
        class_weights = torch.Tensor(1.0 / class_count)
        class_weights /= class_weights.sum()
        sampler = WeightedRandomSampler(weights=class_weights, num_samples=opts.bsize, replacement=True)
        if type(dev_id) is list: class_weights = class_weights.repeat(len(dev_id))
    train_loader = DataLoader(train_ds, batch_size=opts.bsize, shuffle=False, sampler=None, num_workers=opts.np, drop_last=opts.droplast)
    dev_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'dev_siamese.%s' % opts.fmt), task_cols['X'], task_cols['y'], config.encode_func, tokenizer=tokenizer, sep='\t', index_col=task_cols['index'], binlb=task_extparms['binlb'] if 'binlb' in task_extparms and type(task_extparms['binlb']) is not str else train_ds.binlb, transforms=trsfms, transforms_kwargs=trsfms_kwargs, mltl=task_extparms.setdefault('mltl', False), **ds_kwargs)
    if mdl_name.startswith('bert'): dev_ds = MaskedLMIterDataset(dev_ds) if isinstance(dev_ds, BaseIterDataset) else MaskedLMDataset(dev_ds)
    dev_loader = DataLoader(dev_ds, batch_size=opts.bsize, shuffle=False, num_workers=opts.np)
    test_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'test_siamese.%s' % opts.fmt), task_cols['X'], task_cols['y'], config.encode_func, tokenizer=tokenizer, sep='\t', index_col=task_cols['index'], binlb=task_extparms['binlb'] if 'binlb' in task_extparms and type(task_extparms['binlb']) is not str else train_ds.binlb, transforms=trsfms, transforms_kwargs=trsfms_kwargs, mltl=task_extparms.setdefault('mltl', False), **ds_kwargs)
    if mdl_name.startswith('bert'): test_ds = MaskedLMIterDataset(test_ds) if isinstance(test_ds, BaseIterDataset) else MaskedLMDataset(test_ds)
    test_loader = DataLoader(test_ds, batch_size=opts.bsize, shuffle=False, num_workers=opts.np)
    # Training on doc/sent-pair datasets
    train(clf, optimizer, train_loader, special_tknids_args, scheduler=scheduler, pad_val=(task_extparms.setdefault('xpad_val', 0), train_ds.binlb[task_extparms.setdefault('ypad_val', 0)]) if task_type=='nmt' else task_extparms.setdefault('xpad_val', 0), weights=class_weights, lmcoef=opts.lmcoef, clipmaxn=opts.clipmaxn, epochs=opts.epochs, earlystop=opts.earlystop, earlystop_delta=opts.es_delta, earlystop_patience=opts.es_patience, task_type=task_type, task_name=opts.task, mdl_name=opts.model, use_gpu=use_gpu, devq=dev_id, resume=resume if opts.resume else {})
    # Evaluating on the doc/sent-pair dev and test sets
    eval(clf, dev_loader, dev_ds.binlbr, special_tknids_args, pad_val=(task_extparms.setdefault('xpad_val', 0), train_ds.binlb[task_extparms.setdefault('ypad_val', 0)]) if task_type=='nmt' else task_extparms.setdefault('xpad_val', 0), task_type=task_type, task_name=opts.task, ds_name='dev_siamese', mdl_name=opts.model, use_gpu=use_gpu, ignored_label=task_extparms.setdefault('ignored_label', None))
    eval(clf, test_loader, test_ds.binlbr, special_tknids_args, pad_val=(task_extparms.setdefault('xpad_val', 0), train_ds.binlb[task_extparms.setdefault('ypad_val', 0)]) if task_type=='nmt' else task_extparms.setdefault('xpad_val', 0), task_type=task_type, task_name=opts.task, ds_name='test_siamese', mdl_name=opts.model, use_gpu=use_gpu, ignored_label=task_extparms.setdefault('ignored_label', None))
    # Adjust the model
    clf_trnsfmr = SiameseRankTransformer(clf)
    clf_trnsfmr.merge_siamese(tokenizer=tokenizer, encode_func=config.encode_func, trnsfm=[trsfms, {}, trsfms_kwargs], special_tknids_args=special_tknids_args, pad_val=task_extparms.setdefault('xpad_val', 0), topk=128, lbnotes='../lbnotes.csv')
    # Recover the original task
    opts.task = orig_task
    # NOTE(review): prv_task_params is only assigned in the opts.resume branch
    # above — this line raises NameError when training from scratch.
    clf.task_params = prv_task_params
    # Prepare model related meta data
    task_type = config.task_type
    # Prepare task related meta data
    task_path, task_dstype, task_cols, task_trsfm, task_extparms = opts.input if opts.input and os.path.isdir(os.path.join(DATA_PATH, opts.input)) else config.task_path, config.task_ds, config.task_col, config.task_trsfm, config.task_ext_params
    trsfms = (task_trsfm[0] if len(task_trsfm) > 0 else [])
    trsfms_kwargs = ([] if hasattr(config, 'embed_type') and config.embed_type else ([{'seqlen':opts.maxlen, 'xpad_val':task_extparms.setdefault('xpad_val', 0), 'ypad_val':task_extparms.setdefault('ypad_val', None)}] if config.task_type=='nmt' else [{'seqlen':opts.maxlen, 'trimlbs':task_extparms.setdefault('trimlbs', False), 'required_special_tkns':['start_tknids', 'clf_tknids', 'delim_tknids'] if task_type in ['entlmnt', 'sentsim'] and (task_extparms.setdefault('sentsim_func', None) is None or not mdl_name.startswith('bert')) else ['start_tknids', 'clf_tknids'], 'special_tkns':special_tknids_args}, task_trsfm_kwargs, {'seqlen':opts.maxlen, 'xpad_val':task_extparms.setdefault('xpad_val', 0), 'ypad_val':task_extparms.setdefault('ypad_val', None)}])) + (task_trsfm[1] if len(task_trsfm) >= 2 else [{}] * len(task_trsfm[0]))
    # Prepare dev and test sets
    del ds_kwargs['ynormfunc']
    dev_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'dev.%s' % opts.fmt), task_cols['X'], task_cols['y'], config.encode_func, tokenizer=tokenizer, sep='\t', index_col=task_cols['index'], binlb=task_extparms['binlb'] if 'binlb' in task_extparms and type(task_extparms['binlb']) is not str else clf.binlb, transforms=trsfms, transforms_kwargs=trsfms_kwargs, mltl=task_extparms.setdefault('mltl', False), **ds_kwargs)
    if mdl_name.startswith('bert'): dev_ds = MaskedLMIterDataset(train_ds) if isinstance(train_ds, BaseIterDataset) else MaskedLMDataset(dev_ds)
    dev_loader = DataLoader(dev_ds, batch_size=opts.bsize, shuffle=False, num_workers=opts.np)
    test_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'test.%s' % opts.fmt), task_cols['X'], task_cols['y'], config.encode_func, tokenizer=tokenizer, sep='\t', index_col=task_cols['index'], binlb=task_extparms['binlb'] if 'binlb' in task_extparms and type(task_extparms['binlb']) is not str else clf.binlb, transforms=trsfms, transforms_kwargs=trsfms_kwargs, mltl=task_extparms.setdefault('mltl', False), **ds_kwargs)
    if mdl_name.startswith('bert'): test_ds = MaskedLMIterDataset(train_ds) if isinstance(train_ds, BaseIterDataset) else MaskedLMDataset(test_ds)
    test_loader = DataLoader(test_ds, batch_size=opts.bsize, shuffle=False, num_workers=opts.np)
    # Evaluation
    eval(clf, dev_loader, dev_ds.binlbr, special_tknids_args, pad_val=task_extparms.setdefault('xpad_val', 0), task_type=task_type, task_name=opts.task, ds_name='dev', mdl_name=opts.model, use_gpu=use_gpu, ignored_label=task_extparms.setdefault('ignored_label', None))
    if opts.traindev: train(clf, optimizer, dev_loader, special_tknids_args, scheduler=scheduler, pad_val=(task_extparms.setdefault('xpad_val', 0), train_ds.binlb[task_extparms.setdefault('ypad_val', 0)]) if task_type=='nmt' else task_extparms.setdefault('xpad_val', 0), weights=class_weights, lmcoef=opts.lmcoef, clipmaxn=opts.clipmaxn, epochs=opts.epochs, earlystop=opts.earlystop, earlystop_delta=opts.es_delta, earlystop_patience=opts.es_patience, task_type=task_type, task_name=opts.task, mdl_name=opts.model, use_gpu=use_gpu, devq=dev_id)
    eval(clf, test_loader, test_ds.binlbr, special_tknids_args, pad_val=task_extparms.setdefault('xpad_val', 0), task_type=task_type, task_name=opts.task, ds_name='test', mdl_name=opts.model, use_gpu=use_gpu, ignored_label=task_extparms.setdefault('ignored_label', None))
def simsearch_smsrerank(dev_id=None):
print('### Search Siamese Re-rank Mode ###')
orig_task = opts.task
opts.task = opts.task + '_simsearch'
# Prepare model related meta data
mdl_name = opts.model.split('_')[0].lower().replace(' ', '_')
common_cfg = cfgr('validate', 'common')
pr = io.param_reader(os.path.join(PAR_DIR, 'etc', '%s.yaml' % common_cfg.setdefault('mdl_cfg', 'mdlcfg')))
config_kwargs = dict([(k, v) for k, v in opts.__dict__.items() if not k.startswith('_') and k not in set(['dataset', 'model', 'template']) and v is not None and type(v) is not function])
config = Configurable(opts.task, mdl_name, common_cfg=common_cfg, wsdir=PAR_DIR, sc=SC, **config_kwargs)
params = pr('LM', config.lm_params) if mdl_name != 'none' else {}
use_gpu = dev_id is not None
encode_func = config.encode_func
tokenizer = config.tknzr.from_pretrained(params['pretrained_vocab_path'] if 'pretrained_vocab_path' in params else config.lm_mdl_name) if config.tknzr else None
task_type = 'sentsim' #TASK_TYPE_MAP[opts.task]
spcl_tkns = config.lm_tknz_extra_char if config.lm_tknz_extra_char else ['_@_', ' _$_', ' _#_']
special_tkns = (['start_tknids', 'clf_tknids', 'delim_tknids'], spcl_tkns[:3]) if task_type in ['entlmnt', 'sentsim'] else (['start_tknids', 'clf_tknids'], spcl_tkns[:2])
special_tknids = _adjust_encoder(mdl_name, tokenizer, config, special_tkns[1], ret_list=True)
special_tknids_args = dict(zip(special_tkns[0], special_tknids))
task_trsfm_kwargs = dict(list(zip(special_tkns[0], special_tknids))+[('model',opts.model), ('sentsim_func', opts.sentsim_func), ('seqlen',opts.maxlen)])
# Prepare task related meta data
task_path, task_dstype, task_cols, task_trsfm, task_extparms = opts.input if opts.input and os.path.isdir(os.path.join(DATA_PATH, opts.input)) else config.task_path, config.task_ds, config.task_col, config.task_trsfm, config.task_ext_params
trsfms = (task_trsfm[0] if len(task_trsfm) > 0 else [])
trsfms_kwargs = ([] if hasattr(config, 'embed_type') and config.embed_type else ([{'seqlen':opts.maxlen, 'xpad_val':task_extparms.setdefault('xpad_val', 0), 'ypad_val':task_extparms.setdefault('ypad_val', None)}] if config.task_type=='nmt' else [{'seqlen':opts.maxlen, 'trimlbs':task_extparms.setdefault('trimlbs', False), 'required_special_tkns':['start_tknids', 'clf_tknids', 'delim_tknids'] if task_type in ['entlmnt', 'sentsim'] and (task_extparms.setdefault('sentsim_func', None) is None or not mdl_name.startswith('bert')) else ['start_tknids', 'clf_tknids'], 'special_tkns':special_tknids_args}, task_trsfm_kwargs, {'seqlen':opts.maxlen, 'xpad_val':task_extparms.setdefault('xpad_val', 0), 'ypad_val':task_extparms.setdefault('ypad_val', None)}])) + (task_trsfm[1] if len(task_trsfm) >= 2 else [{}] * len(task_trsfm[0]))
ds_kwargs = {'sampw':opts.sample_weights, 'sampfrac':opts.sampfrac}
ds_kwargs.update({'ynormfunc':task_extparms.setdefault('ynormfunc', None)})
task_params = dict([(k, getattr(opts, k)) if hasattr(opts, k) and getattr(opts, k) is not None else (k, v) for k, v in task_extparms.setdefault('mdlcfg', {}).items()])
# Load model
clf, prv_optimizer, resume, chckpnt = load_model(opts.resume)
if opts.refresh:
print('Refreshing and saving the model with newest code...')
try:
save_model(clf, prv_optimizer, '%s_%s.pth' % (opts.task, opts.model))
except Exception as e:
print(e)
prv_task_params = copy.deepcopy(clf.task_params)
# Update parameters
clf.update_params(task_params=task_params)
clf.mlt_trnsfmr = False
if (use_gpu): clf = _handle_model(clf, dev_id=dev_id, distrb=opts.distrb)
model = config.shell(clf, tokenizer=tokenizer, encode_func=encode_func, transforms=trsfms, transforms_kwargs=trsfms_kwargs, special_tknids_args=special_tknids_args, pad_val=task_extparms.setdefault('xpad_val', 0))
# Prepare dev and test sets
print('Dataset path: %s' % os.path.join(DATA_PATH, task_path))
del ds_kwargs['ynormfunc']
dev_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'dev.%s' % opts.fmt), task_cols['X'], task_cols['y'], config.encode_func, tokenizer=tokenizer, sep='\t', index_col=task_cols['index'], binlb=task_extparms['binlb'] if 'binlb' in task_extparms and type(task_extparms['binlb']) is not str else clf.binlb, transforms=trsfms, transforms_kwargs=trsfms_kwargs, mltl=task_extparms.setdefault('mltl', False), **ds_kwargs)
if mdl_name.startswith('bert'): dev_ds = MaskedLMIterDataset(dev_ds) if isinstance(dev_ds, BaseIterDataset) else MaskedLMDataset(dev_ds)
dev_loader = DataLoader(dev_ds, batch_size=opts.bsize, shuffle=False, num_workers=opts.np)
test_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'test.%s' % opts.fmt), task_cols['X'], task_cols['y'], config.encode_func, tokenizer=tokenizer, sep='\t', index_col=task_cols['index'], binlb=task_extparms['binlb'] if 'binlb' in task_extparms and type(task_extparms['binlb']) is not str else clf.binlb, transforms=trsfms, transforms_kwargs=trsfms_kwargs, mltl=task_extparms.setdefault('mltl', False), **ds_kwargs)
if mdl_name.startswith('bert'): test_ds = MaskedLMIterDataset(test_ds) if isinstance(test_ds, BaseIterDataset) else MaskedLMDataset(test_ds)
test_loader = DataLoader(test_ds, batch_size=opts.bsize, shuffle=False, num_workers=opts.np)
lbnotes_fname, lb_embd_fname, lb_embd_raw_fname = 'lbnotes.csv', 'onto_embd.pkl', 'onto_embd_raw.pkl'
if os.path.exists(os.path.join(os.path.pardir, lb_embd_fname)):
with open(os.path.join(os.path.pardir, lb_embd_fname), 'rb') as fd:
lbembd = pickle.load(fd)
else:
lbnotes = pd.read_csv(os.path.join(os.path.pardir, lbnotes_fname), sep='\t', index_col='id')
lb_embd_raw_fname = './onto_embd_raw.pkl'
if os.path.exists(os.path.join(os.path.pardir, lb_embd_raw_fname)):
with open(os.path.join(os.path.pardir, lb_embd_raw_fname), 'rb') as fd:
lbembd_raw = pickle.load(fd)
else:
lb_corpus = lbnotes['text'].tolist()
lbembd_raw = model(lb_corpus, embedding_mode=True, bsize=opts.bsize).numpy()
if opts.do_tfidf:
from sklearn.feature_extraction.text import TfidfVectorizer
lb_vctrz = TfidfVectorizer()
lb_X = lb_vctrz.fit_transform(lb_corpus)
lbembd_raw = np.concatenate((lbembd_raw, lb_X), axis=1)
if opts.do_bm25:
from gensim.summarization.bm25 import get_bm25_weights
lb_bm25_X = np.array(get_bm25_weights(lb_corpus, n_jobs=opts.np))
lbembd_raw = np.concatenate((lbembd_raw, lb_bm25_X), axis=1)
with open(lb_embd_raw_fname, 'wb') as fd:
pickle.dump(lbembd_raw, fd)
lbnotes['embedding'] = [x for x in lbembd_raw]
lbembd = {}
# One label may has multiple notes
for gid, grp in lbnotes['embedding'].groupby('id'):
lbembd[gid] = grp.mean(axis=0) # averaged embedding
with open(lb_embd_fname, 'wb') as fd:
pickle.dump(lbembd, fd)
# Build label embedding index
lb_embd_idx_fname = 'onto_embd_idx.pkl'
if os.path.exists(os.path.join(os.path.pardir, lb_embd_idx_fname)):
with open(os.path.join(os.path.pardir, lb_embd_idx_fname), 'rb') as fd:
lbembd_idx, binlb, binlbr = pickle.load(fd)
else:
import faiss
binlb = dict([(k, i) for i, k in enumerate(lbembd.keys())])
binlbr = dict([(v, k) for k, v in binlb.items()])
dimension = next(x for x in lbembd.values()).shape[0]
lbembd_idx = faiss.IndexFlatL2(dimension)
lbembd_idx.add(np.stack(lbembd.values()))
# with open(lb_embd_idx_fname, 'wb') as fd:
# pickle.dump((lbembd_idx, binlb, binlbr), fd)
lbembd_idx = faiss.index_cpu_to_all_gpus(lbembd_idx)
import scipy.spatial.distance as spdist
embd_clf = EmbeddingHead(clf)
for prefix, df in zip(['dev', 'test'], [dev_ds.df, test_ds.df]):
# Calculate the doc/sentence embeddings
clf_h_cache_fname = '%s_clf_h.pkl' % prefix
if os.path.exists(os.path.join(os.path.pardir, clf_h_cache_fname)):
with open(os.path.join(os.path.pardir, clf_h_cache_fname), 'rb') as fd:
clf_h = pickle.load(fd)
else:
txt_corpus = df['text'].tolist()
clf_h = model(txt_corpus, embedding_mode=True, bsize=opts.bsize).numpy()
if opts.do_tfidf:
from sklearn.feature_extraction.text import TfidfVectorizer
txt_vctrz = TfidfVectorizer()
txt_X = txt_vctrz.fit_transform(txt_corpus)
clf_h = np.concatenate((clf_h, txt_X), axis=1)
if opts.do_bm25:
from gensim.summarization.bm25 import get_bm25_weights
txt_bm25_X = np.array(get_bm25_weights(txt_corpus, n_jobs=opts.np))
clf_h = np.concatenate((clf_h, txt_bm25_X), axis=1)
with open(clf_h_cache_fname, 'wb') as fd:
pickle.dump(clf_h, fd)
# Search the topk similar labels
D, I = lbembd_idx.search(clf_h, opts.topk)
cand_preds = [[binlbr[idx] for idx in indices] for indices in I]
cand_lbs = list(set(itertools.chain.from_iterable(cand_preds)))
cand_lbs_idx = dict([(lb, i) for i, lb in enumerate(cand_lbs)])
cand_embds = np.stack([lbembd[lb] for lb in cand_lbs])
pair_indices = [(i, cand_lbs_idx[lb]) for i, j in zip(range(len(clf_h)), range(len(cand_preds))) for lb in cand_preds[j]]
ds = EmbeddingPairDataset(clf_h, cand_embds, pair_indices)
ds_loader = DataLoader(ds, batch_size=opts.bsize, shuffle=False, sampler=None, num_workers=opts.np, drop_last=False)
preds = []
for step, batch in enumerate(tqdm(ds_loader, desc='[Totally %i pairs] Predicting pairs of embeddings' % len(ds))):
embd_pairs = batch[0].to('cuda') if use_gpu else batch[0]
embd_pairs = [embd_pairs[:,x,:] for x in [0,1]]
logits = embd_clf(embd_pairs)
prob = torch.sigmoid(logits).data.view(-1)
pred = (prob > (embd_clf.thrshld if opts.do_thrshld else opts.pthrshld)).int()
preds.extend(pred.view(-1).detach().cpu().tolist())
orig_idx, pred_lb = zip(*[(pidx[0], cand_lbs[pidx[1]]) for pidx, pred_val in zip(pair_indices, preds) if pred_val > 0])
pred_df = pd.DataFrame([(df.index[gid], ';'.join(grp['label'].tolist())) for gid, grp in pd.DataFrame({'index':orig_idx, 'label':pred_lb}).groupby('index')], columns=['id', 'preds']).set_index('id')
filled_df = df.merge(pred_df, how='left', left_index=True, right_index=True)
filled_df.to_csv('%s_preds.csv' % prefix, sep='\t')
# Calculate the relations between the doc/sentence embedding and the true label embeddings
angular_sim_list, correl_list, plot_data = [[] for x in range(3)]
for i in range(df.shape[0]):
angular_sims, correls = [], []
lbs = df.iloc[i]['labels'].split(';')
for lb in lbs:
if lb in lbembd:
angular_sim, correlation = 1 - np.arccos(1 - spdist.cosine(clf_h[i], lbembd[lb])) / np.pi, 1 - spdist.correlation(clf_h[i], lbembd[lb])
_, _ = angular_sims.append('%.2f' % angular_sim), correls.append('%.2f' % correlation)
plot_data.append((1, angular_sim, correlation))
else:
_, _ = angular_sims.append('N/A'), correls.append('N/A')
_, _ = angular_sim_list.append(';'.join(angular_sims)), correl_list.append(';'.join(correls))
neg_lbs = list(set(lbembd.keys()) - set(lbs))
neg_idx = np.random.choice(range(len(neg_lbs)), len(lbs))
for neg_lb in [neg_lbs[idx] for idx in neg_idx]:
angular_sim, correlation = 1 - np.arccos(1 - spdist.cosine(clf_h[i], lbembd[neg_lb])) / np.pi, 1 - spdist.correlation(clf_h[i], lbembd[neg_lb])
plot_data.append((0, angular_sim, correlation))
df['angular_sim'], df['correlation'] = angular_sim_list, correl_list
df.to_csv('%s_embd.csv' % prefix, sep='\t')
with open('%s_plot_data.pkl' % prefix, 'wb') as fd:
pickle.dump(plot_data, fd)
# Dictionary-based similarity search: vectorize every term of a terminology
# dictionary, index the term embeddings, then label each dev/test document
# with the labels of its top-k nearest terms and report example-based P/R/F1.
# NOTE(review): depends on module-level state defined elsewhere in this file
# (opts, Configurable, PAR_DIR, SC, DATA_PATH, _txt2vec, enrich_txt_by_w2v).
# dev_id: GPU device id; appears unused in this mode -- presumably kept for
# signature symmetry with the sibling modes, confirm.
def simsearch(dev_id=None):
print('### Similarity Search Mode ###')
# Forward every plain, non-None CLI option (minus a few handled explicitly)
# into the task configuration.
config_kwargs = dict([(k, v) for k, v in opts.__dict__.items() if not k.startswith('_') and k not in set(['dataset', 'model', 'template']) and v is not None and type(v) is not function])
config = Configurable(opts.task, opts.model, wsdir=PAR_DIR, sc=SC, **config_kwargs)
dict_fname, term_embd_fname = opts.corpus if opts.corpus else 'dict_df.csv', 'term_embd.pkl'
# Reuse cached term embeddings when present; otherwise vectorize the
# dictionary and cache the result for later runs.
if os.path.exists(os.path.join(os.path.pardir, term_embd_fname)):
with open(os.path.join(os.path.pardir, term_embd_fname), 'rb') as fd:
term_embd, term_labels, txt_vctrz, char_vctrz, ftmdl = pickle.load(fd)
else:
dict_df = pd.read_csv(os.path.join(os.path.pardir, dict_fname), sep='\t').set_index('id')
# Flatten the dictionary table into (label, text, column-name) triples,
# skipping empty cells.
term_labels, term_texts, term_types = zip(*[(idx, v, k) for idx, row in dict_df.iterrows() for k, v in row.items() if v is not np.nan])
print('Corpus size from file %s: %i' % (dict_fname, dict_df.shape[0]))
txt2vec_output = _txt2vec(term_texts, config, None, use_tfidf=opts.cfg.setdefault('use_tfidf', False), ftdecomp=None, saved_path=os.path.pardir, prefix='dict', **opts.cfg.setdefault('txt2vec_kwargs', {}))
term_embd, txt_vctrz, char_vctrz, ftmdl = txt2vec_output[:4]
feature_weights = txt2vec_output[4:]
with open(term_embd_fname, 'wb') as fd:
pickle.dump((term_embd, term_labels, txt_vctrz, char_vctrz, ftmdl), fd, protocol=4)
# Build dictionary embedding index
dict_embd_idx_fname = 'dict_embd_idx.pkl'
if os.path.exists(os.path.join(os.path.pardir, dict_embd_idx_fname)):
with open(os.path.join(os.path.pardir, dict_embd_idx_fname), 'rb') as fd:
dict_embd_idx = pickle.load(fd)
else:
import faiss
dimension = term_embd.shape[1]
print('Building faiss index with dimension %i' % dimension)
# dict_embd_idx = faiss.IndexFlatL2(dimension)
from bionlp.util.math import VectorDB
# dict_embd_idx = VectorDB(metric=lambda x, y: np.sqrt(np.sum(np.square(x*y-y)))/np.sqrt(np.sum(np.square(y))), feature_weights=feature_weights)
# Custom (non-faiss) vector index with a normalized L1-style metric over
# the masked difference x*y-y.
dict_embd_idx = VectorDB(metric=lambda x, y: np.sum(np.abs(x*y-y))/np.sum(y), feature_weights=feature_weights)
dict_embd_idx.add(term_embd.astype('float32'))
# with open(dict_embd_idx_fname, 'wb') as fd:
# pickle.dump((dict_embd_idx), fd)
# dict_embd_idx = faiss.index_cpu_to_all_gpus(dict_embd_idx)
dev_df, test_df = pd.read_csv(os.path.join(DATA_PATH, opts.input, 'dev.%s' % opts.fmt), sep='\t', index_col='id'), pd.read_csv(os.path.join(DATA_PATH, opts.input, 'test.%s' % opts.fmt), sep='\t')
w2v_cache = None
for prefix, df in zip(['dev', 'test'], [dev_df, test_df]):
txt_corpus = df['text'].tolist()
# Expand each document with word2vec neighbours before vectorizing; the
# cache is threaded through both splits to avoid recomputing lookups.
enriched_texts, w2v_cache = enrich_txt_by_w2v(txt_corpus, w2v_model=opts.cfg.setdefault('w2v_model', None), w2v_cache=w2v_cache, topk=opts.cfg.setdefault('txt2vec_kwargs', {}).setdefault('w2v_topk', 10))
clf_h, _, _, _ = _txt2vec(enriched_texts, config, None, txt_vctrz=txt_vctrz, char_vctrz=char_vctrz, use_tfidf=opts.cfg.setdefault('use_tfidf', False), ftdecomp=None, saved_path=os.path.pardir, prefix=prefix, **opts.cfg.setdefault('txt2vec_kwargs', {}))
# Search the topk similar labels
print('Searching dataset %s with size: %s...' % (prefix, str(clf_h.shape)))
clf_h = clf_h.astype('float32')
# Truncate or zero-pad the document vectors to the index dimensionality.
# NOTE(review): `dimension` is only bound on the index-building branch
# above; when the cached index is loaded this line would raise NameError
# -- confirm whether the cache path is ever exercised.
D, I = dict_embd_idx.search(clf_h[:,:dimension] if clf_h.shape[1] >= dimension else np.hstack((clf_h, np.zeros((clf_h.shape[0], dimension-clf_h.shape[1]), dtype=clf_h.dtype))), opts.topk, n_jobs=opts.np)
cand_preds = [[term_labels[idx] for idx in idxs] for idxs in I]
cand_lbs = [sorted(set(lbs)) for lbs in cand_preds]
df['preds'] = [';'.join(lbs) for lbs in cand_lbs]
df.to_csv('%s_preds.csv' % prefix, sep='\t')
# Evaluation
from sklearn.preprocessing import MultiLabelBinarizer
from bionlp.util import math as imath
# Empty / whitespace-only label strings become empty label sets.
true_lbs = [lbs_str.split(';') if type(lbs_str) is str and not lbs_str.isspace() else [] for lbs_str in df['labels']]
mlb = MultiLabelBinarizer()
mlb = mlb.fit(true_lbs + cand_lbs)
lbs = mlb.transform(true_lbs)
pred_lbs = mlb.transform(cand_lbs)
print('exmp-precision: %.3f' % imath.exmp_precision(lbs, pred_lbs) + '\texmp-recall: %.3f' % imath.exmp_recall(lbs, pred_lbs) + '\texmp-f1-score: %.3f' % imath.exmp_fscore(lbs, pred_lbs) + '\n')
# Similarity search using the neural model's sentence embeddings: embed a
# reference corpus, index it with faiss, then label each dev/test document
# with the union of labels of its top-k nearest corpus documents.
# NOTE(review): `orig_task` is saved but `opts.task` is never restored before
# returning -- confirm whether callers rely on the mutated value.
# dev_id: GPU device id (or None for CPU).
def simsearch_sentembd(dev_id=None):
print('### Similarity Search Mode using sentence embedding ###')
orig_task = opts.task
opts.task = opts.task + '_simsearch'
# Prepare model related meta data
mdl_name = opts.model.split('_')[0].lower().replace(' ', '_')
common_cfg = cfgr('validate', 'common')
pr = io.param_reader(os.path.join(PAR_DIR, 'etc', '%s.yaml' % common_cfg.setdefault('mdl_cfg', 'mdlcfg')))
config_kwargs = dict([(k, v) for k, v in opts.__dict__.items() if not k.startswith('_') and k not in set(['dataset', 'model', 'template']) and v is not None and type(v) is not function])
config = Configurable(opts.task, mdl_name, common_cfg=common_cfg, wsdir=PAR_DIR, sc=SC, **config_kwargs)
params = pr('LM', config.lm_params) if mdl_name != 'none' else {}
use_gpu = dev_id is not None
encode_func = config.encode_func
tokenizer = config.tknzr.from_pretrained(params['pretrained_vocab_path'] if 'pretrained_vocab_path' in params else config.lm_mdl_name) if config.tknzr else None
task_type = 'sentsim' #TASK_TYPE_MAP[opts.task]
spcl_tkns = config.lm_tknz_extra_char if config.lm_tknz_extra_char else ['_@_', ' _$_', ' _#_']
special_tkns = (['start_tknids', 'clf_tknids', 'delim_tknids'], spcl_tkns[:3]) if task_type in ['entlmnt', 'sentsim'] else (['start_tknids', 'clf_tknids'], spcl_tkns[:2])
special_tknids = _adjust_encoder(mdl_name, tokenizer, config, special_tkns[1], ret_list=True)
special_tknids_args = dict(zip(special_tkns[0], special_tknids))
task_trsfm_kwargs = dict(list(zip(special_tkns[0], special_tknids))+[('model',opts.model), ('sentsim_func', opts.sentsim_func), ('seqlen',opts.maxlen)])
# Prepare task related meta data
task_path, task_dstype, task_cols, task_trsfm, task_extparms = opts.input if opts.input and os.path.isdir(os.path.join(DATA_PATH, opts.input)) else config.task_path, config.task_ds, config.task_col, config.task_trsfm, config.task_ext_params
trsfms = (task_trsfm[0] if len(task_trsfm) > 0 else [])
trsfms_kwargs = ([] if hasattr(config, 'embed_type') and config.embed_type else ([{'seqlen':opts.maxlen, 'xpad_val':task_extparms.setdefault('xpad_val', 0), 'ypad_val':task_extparms.setdefault('ypad_val', None)}] if config.task_type=='nmt' else [{'seqlen':opts.maxlen, 'trimlbs':task_extparms.setdefault('trimlbs', False), 'required_special_tkns':['start_tknids', 'clf_tknids', 'delim_tknids'] if task_type in ['entlmnt', 'sentsim'] and (task_extparms.setdefault('sentsim_func', None) is None or not mdl_name.startswith('bert')) else ['start_tknids', 'clf_tknids'], 'special_tkns':special_tknids_args}, task_trsfm_kwargs, {'seqlen':opts.maxlen, 'xpad_val':task_extparms.setdefault('xpad_val', 0), 'ypad_val':task_extparms.setdefault('ypad_val', None)}])) + (task_trsfm[1] if len(task_trsfm) >= 2 else [{}] * len(task_trsfm[0]))
ds_kwargs = {'sampw':opts.sample_weights, 'sampfrac':opts.sampfrac}
ds_kwargs.update({'ynormfunc':task_extparms.setdefault('ynormfunc', None)})
task_params = dict([(k, getattr(opts, k)) if hasattr(opts, k) and getattr(opts, k) is not None else (k, v) for k, v in task_extparms.setdefault('mdlcfg', {}).items()])
# Load model
clf, prv_optimizer, resume, chckpnt = load_model(opts.resume)
if opts.refresh:
print('Refreshing and saving the model with newest code...')
try:
save_model(clf, prv_optimizer, '%s_%s.pth' % (opts.task, opts.model))
except Exception as e:
print(e)
prv_task_params = copy.deepcopy(clf.task_params)
# Update parameters
clf.update_params(task_params=task_params, sample_weights=False)
clf.mlt_trnsfmr = False
if (use_gpu): clf = _handle_model(clf, dev_id=dev_id, distrb=opts.distrb)
model = config.shell(clf, tokenizer=tokenizer, encode_func=encode_func, transforms=trsfms, transforms_kwargs=trsfms_kwargs, special_tknids_args=special_tknids_args, pad_val=task_extparms.setdefault('xpad_val', 0))
# Prepare dev and test sets
print('Dataset path: %s' % os.path.join(DATA_PATH, task_path))
del ds_kwargs['ynormfunc']
dev_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'dev.%s' % opts.fmt), task_cols['X'], task_cols['y'], config.encode_func, tokenizer=tokenizer, sep='\t', index_col=task_cols['index'], binlb=task_extparms['binlb'] if 'binlb' in task_extparms and type(task_extparms['binlb']) is not str else clf.binlb, transforms=trsfms, transforms_kwargs=trsfms_kwargs, mltl=task_extparms.setdefault('mltl', False), **ds_kwargs)
if mdl_name.startswith('bert'): dev_ds = MaskedLMIterDataset(dev_ds) if isinstance(dev_ds, BaseIterDataset) else MaskedLMDataset(dev_ds)
dev_loader = DataLoader(dev_ds, batch_size=opts.bsize, shuffle=False, num_workers=opts.np)
test_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'test.%s' % opts.fmt), task_cols['X'], task_cols['y'], config.encode_func, tokenizer=tokenizer, sep='\t', index_col=task_cols['index'], binlb=task_extparms['binlb'] if 'binlb' in task_extparms and type(task_extparms['binlb']) is not str else clf.binlb, transforms=trsfms, transforms_kwargs=trsfms_kwargs, mltl=task_extparms.setdefault('mltl', False), **ds_kwargs)
if mdl_name.startswith('bert'): test_ds = MaskedLMIterDataset(test_ds) if isinstance(test_ds, BaseIterDataset) else MaskedLMDataset(test_ds)
test_loader = DataLoader(test_ds, batch_size=opts.bsize, shuffle=False, num_workers=opts.np)
# Embed the reference corpus (cached when the pickle already exists).
corpus_fnames, corpus_embd_fname, corpus_embd_raw_fname = opts.corpus.split(SC) if opts.corpus else ['corpus_df.csv'], 'corpus_embd.pkl', 'corpus_embd_raw.pkl'
if os.path.exists(os.path.join(os.path.pardir, corpus_embd_fname)):
with open(os.path.join(os.path.pardir, corpus_embd_fname), 'rb') as fd:
corpus_embd, labels, txt_vctrz, char_vctrz, ftmdl = pickle.load(fd)
else:
corpus_df = pd.concat([pd.read_csv(os.path.join(os.path.pardir, fname), sep='\t') for fname in corpus_fnames], axis=0, ignore_index=True).set_index('id')
labels = corpus_df['labels']
corpus_texts = corpus_df['text'].tolist()
corpus_embd_raw_fname = './corpus_embd_raw.pkl'
print('Corpus size from file(s) %s: %i' % (corpus_fnames, corpus_df.shape[0]))
if os.path.exists(os.path.join(os.path.pardir, corpus_embd_raw_fname)):
with open(os.path.join(os.path.pardir, corpus_embd_raw_fname), 'rb') as fd:
corpus_embd_raw = pickle.load(fd)
else:
corpus_embd_raw = model(corpus_texts, embedding_mode=True, bsize=opts.bsize).numpy()
with open(corpus_embd_raw_fname, 'wb') as fd:
pickle.dump(corpus_embd_raw, fd)
# Optionally fuse the neural embeddings with tf-idf features and reduce
# dimensionality (PCA by default).
corpus_embd_raw, txt_vctrz, char_vctrz, ftmdl = _txt2vec(corpus_texts, config, corpus_embd_raw if opts.cfg.setdefault('clf_h', True) else None, use_tfidf=opts.cfg.setdefault('use_tfidf', True), ftdecomp=opts.cfg.setdefault('ftdecomp', 'pca'), n_components=opts.cfg.setdefault('n_components', 768), saved_path=os.path.pardir, prefix='corpus', **opts.cfg.setdefault('txt2vec_kwargs', {}))
corpus_df['embedding'] = [np.array(x).reshape((-1,)) for x in corpus_embd_raw]
corpus_embd = {}
# One label may have multiple notes
for gid, grp in corpus_df['embedding'].groupby('id'):
corpus_embd[gid] = grp.mean(axis=0) # averaged embedding
with open(corpus_embd_fname, 'wb') as fd:
pickle.dump((corpus_embd, labels, txt_vctrz, char_vctrz, ftmdl), fd)
# Build corpus embedding index
corpus_embd_idx_fname = 'corpus_embd_idx.pkl'
if os.path.exists(os.path.join(os.path.pardir, corpus_embd_idx_fname)):
with open(os.path.join(os.path.pardir, corpus_embd_idx_fname), 'rb') as fd:
corpus_embd_idx, indices, rindices = pickle.load(fd)
else:
import faiss
indices = dict([(k, i) for i, k in enumerate(corpus_embd.keys())])
rindices = dict([(v, k) for k, v in indices.items()])
dimension = next(x for x in corpus_embd.values()).shape[0]
print('Building faiss index with dimension %i' % dimension)
corpus_embd_idx = faiss.IndexFlatL2(dimension)
corpus_embd_idx.add(np.stack(corpus_embd.values()).astype('float32'))
# with open(corpus_embd_idx_fname, 'wb') as fd:
# pickle.dump((corpus_embd_idx, indices, rindices), fd)
# corpus_embd_idx = faiss.index_cpu_to_all_gpus(corpus_embd_idx)
# NOTE(review): `embd_clf` is constructed but never used below -- confirm
# whether it is leftover from the embedding-pair classification path.
embd_clf = EmbeddingHead(clf)
for prefix, df in zip(['dev', 'test'], [dev_ds.df, test_ds.df]):
txt_corpus = df['text'].tolist()
# Calculate the doc/sentence embeddings
clf_h_cache_fname = '%s_clf_h.pkl' % prefix
if os.path.exists(os.path.join(os.path.pardir, clf_h_cache_fname)):
with open(os.path.join(os.path.pardir, clf_h_cache_fname), 'rb') as fd:
clf_h = pickle.load(fd)
else:
clf_h = model(txt_corpus, embedding_mode=True, bsize=opts.bsize).numpy()
with open(clf_h_cache_fname, 'wb') as fd:
pickle.dump(clf_h, fd)
clf_h, _, _, _ = _txt2vec(txt_corpus, config, clf_h if opts.cfg.setdefault('clf_h', True) else None, txt_vctrz=txt_vctrz, char_vctrz=char_vctrz, use_tfidf=opts.cfg.setdefault('use_tfidf', True), ftdecomp=opts.cfg.setdefault('ftdecomp', 'pca'), ftmdl=ftmdl, n_components=opts.cfg.setdefault('n_components', 768), saved_path=os.path.pardir, prefix=prefix, **opts.cfg.setdefault('txt2vec_kwargs', {}))
# Search the topk similar labels
print('Searching dataset %s with size: %s...' % (prefix, str(clf_h.shape)))
clf_h = clf_h.astype('float32')
# Truncate or zero-pad the document vectors to the index dimensionality.
# NOTE(review): `dimension` is unbound when the cached index was loaded
# above (NameError risk) -- confirm.
D, I = corpus_embd_idx.search(clf_h[:,:dimension] if clf_h.shape[1] >= dimension else np.hstack((clf_h, np.zeros((clf_h.shape[0], dimension-clf_h.shape[1]), dtype=clf_h.dtype))), opts.topk)
cand_preds = [[labels.iloc[idx].split(';') for idx in idxs] for idxs in I]
cand_lbs = [sorted(set(itertools.chain.from_iterable(lbs))) for lbs in cand_preds]
df['preds'] = [';'.join(lbs) for lbs in cand_lbs]
df.to_csv('%s_preds.csv' % prefix, sep='\t')
# Evaluation
from sklearn.preprocessing import MultiLabelBinarizer
from bionlp.util import math as imath
true_lbs = [lbs_str.split(';') if type(lbs_str) is str and not lbs_str.isspace() else [] for lbs_str in df['labels']]
mlb = MultiLabelBinarizer()
mlb = mlb.fit(true_lbs + cand_lbs)
lbs = mlb.transform(true_lbs)
pred_lbs = mlb.transform(cand_lbs)
print('exmp-precision: %.3f' % imath.exmp_precision(lbs, pred_lbs) + '\texmp-recall: %.3f' % imath.exmp_recall(lbs, pred_lbs) + '\texmp-f1-score: %.3f' % imath.exmp_fscore(lbs, pred_lbs) + '\n')
# Re-ranking mode: reload a trained classifier as an entailment-style model
# and evaluate it on the dev/test splits of the "<task>_rerank" task.
# NOTE(review): `orig_task` is saved but `opts.task` is never restored before
# returning -- confirm whether callers rely on the mutated value.
# dev_id: GPU device id (or None for CPU).
def rerank(dev_id=None):
print('### Re-rank Mode ###')
orig_task = opts.task
opts.task = opts.task + '_rerank'
# Prepare model related meta data
mdl_name = opts.model.split('_')[0].lower().replace(' ', '_')
common_cfg = cfgr('validate', 'common')
pr = io.param_reader(os.path.join(PAR_DIR, 'etc', '%s.yaml' % common_cfg.setdefault('mdl_cfg', 'mdlcfg')))
config_kwargs = dict([(k, v) for k, v in opts.__dict__.items() if not k.startswith('_') and k not in set(['dataset', 'model', 'template']) and v is not None and type(v) is not function])
config = Configurable(opts.task, mdl_name, common_cfg=common_cfg, wsdir=PAR_DIR, sc=SC, **config_kwargs)
params = pr('LM', config.lm_params) if mdl_name != 'none' else {}
use_gpu = dev_id is not None
encode_func = config.encode_func
tokenizer = config.tknzr.from_pretrained(params['pretrained_vocab_path'] if 'pretrained_vocab_path' in params else config.lm_mdl_name) if config.tknzr else None
task_type = 'entlmnt' #TASK_TYPE_MAP[opts.task]
spcl_tkns = config.lm_tknz_extra_char if config.lm_tknz_extra_char else ['_@_', ' _$_', ' _#_']
special_tkns = (['start_tknids', 'clf_tknids', 'delim_tknids'], spcl_tkns[:3]) if task_type in ['entlmnt', 'sentsim'] else (['start_tknids', 'clf_tknids'], spcl_tkns[:2])
special_tknids = _adjust_encoder(mdl_name, tokenizer, config, special_tkns[1], ret_list=True)
special_tknids_args = dict(zip(special_tkns[0], special_tknids))
task_trsfm_kwargs = dict(list(zip(special_tkns[0], special_tknids))+[('model',opts.model), ('sentsim_func', opts.sentsim_func), ('seqlen',opts.maxlen)])
# Prepare task related meta data
task_path, task_dstype, task_cols, task_trsfm, task_extparms = opts.input if opts.input and os.path.isdir(os.path.join(DATA_PATH, opts.input)) else config.task_path, config.task_ds, config.task_col, config.task_trsfm, config.task_ext_params
trsfms = (task_trsfm[0] if len(task_trsfm) > 0 else [])
trsfms_kwargs = ([] if hasattr(config, 'embed_type') and config.embed_type else ([{'seqlen':opts.maxlen, 'xpad_val':task_extparms.setdefault('xpad_val', 0), 'ypad_val':task_extparms.setdefault('ypad_val', None)}] if config.task_type=='nmt' else [{'seqlen':opts.maxlen, 'trimlbs':task_extparms.setdefault('trimlbs', False), 'required_special_tkns':['start_tknids', 'clf_tknids', 'delim_tknids'] if task_type in ['entlmnt', 'sentsim'] and (task_extparms.setdefault('sentsim_func', None) is None or not mdl_name.startswith('bert')) else ['start_tknids', 'clf_tknids'], 'special_tkns':special_tknids_args}, task_trsfm_kwargs, {'seqlen':opts.maxlen, 'xpad_val':task_extparms.setdefault('xpad_val', 0), 'ypad_val':task_extparms.setdefault('ypad_val', None)}])) + (task_trsfm[1] if len(task_trsfm) >= 2 else [{}] * len(task_trsfm[0]))
ds_kwargs = {'ynormfunc':task_extparms.setdefault('ynormfunc', None)}
ds_kwargs.update(dict((k, task_extparms[k]) for k in ['origlb', 'locallb', 'lbtxt', 'neglbs', 'reflb', 'sent_mode'] if k in task_extparms))
if task_dstype in [OntoDataset, OntoIterDataset]:
ds_kwargs['onto_fpath'] = opts.onto if opts.onto else task_extparms.setdefault('onto_fpath', 'onto.csv')
ds_kwargs['onto_col'] = task_cols['ontoid']
task_params = dict([(k, getattr(opts, k)) if hasattr(opts, k) and getattr(opts, k) is not None else (k, v) for k, v in task_extparms.setdefault('mdlcfg', {}).items()])
# Load model
clf, prv_optimizer, resume, chckpnt = load_model(opts.resume)
if opts.refresh:
print('Refreshing and saving the model with newest code...')
try:
save_model(clf, prv_optimizer, '%s_%s.pth' % (opts.task, opts.model))
except Exception as e:
print(e)
prv_task_params = copy.deepcopy(clf.task_params)
# Update parameters
clf.update_params(task_params=task_params, sample_weights=False)
if (use_gpu): clf = _handle_model(clf, dev_id=dev_id, distrb=opts.distrb)
# Prepare dev and test sets
print('Dataset path: %s' % os.path.join(DATA_PATH, task_path))
del ds_kwargs['ynormfunc']
dev_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'dev.%s' % opts.fmt), task_cols['X'], task_cols['y'], config.encode_func, tokenizer=tokenizer, sep='\t', index_col=task_cols['index'], binlb=task_extparms['binlb'] if 'binlb' in task_extparms else clf.binlb, transforms=trsfms, transforms_kwargs=trsfms_kwargs, **ds_kwargs)
if mdl_name.startswith('bert'): dev_ds = MaskedLMIterDataset(dev_ds) if isinstance(dev_ds, BaseIterDataset) else MaskedLMDataset(dev_ds)
dev_loader = DataLoader(dev_ds, batch_size=opts.bsize, shuffle=False, num_workers=opts.np)
test_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'test.%s' % opts.fmt), task_cols['X'], task_cols['y'], config.encode_func, tokenizer=tokenizer, sep='\t', index_col=task_cols['index'], binlb=task_extparms['binlb'] if 'binlb' in task_extparms else clf.binlb, transforms=trsfms, transforms_kwargs=trsfms_kwargs, **ds_kwargs)
if mdl_name.startswith('bert'): test_ds = MaskedLMIterDataset(test_ds) if isinstance(test_ds, BaseIterDataset) else MaskedLMDataset(test_ds)
test_loader = DataLoader(test_ds, batch_size=opts.bsize, shuffle=False, num_workers=opts.np)
# Evaluation
# NOTE(review): the nmt branch of pad_val references `train_ds`, which is
# never defined in this function; it is dead code here because task_type is
# hard-coded to 'entlmnt' above, but it would raise NameError if enabled.
eval(clf, dev_loader, dev_ds.binlbr, special_tknids_args, pad_val=(task_extparms.setdefault('xpad_val', 0), train_ds.binlb[task_extparms.setdefault('ypad_val', 0)]) if task_type=='nmt' else task_extparms.setdefault('xpad_val', 0), task_type=task_type, task_name=opts.task, ds_name='dev', mdl_name=opts.model, use_gpu=use_gpu, devq=dev_id, distrb=opts.distrb, ignored_label=task_extparms.setdefault('ignored_label', None))
eval(clf, test_loader, test_ds.binlbr, special_tknids_args, pad_val=(task_extparms.setdefault('xpad_val', 0), train_ds.binlb[task_extparms.setdefault('ypad_val', 0)]) if task_type=='nmt' else task_extparms.setdefault('xpad_val', 0), task_type=task_type, task_name=opts.task, ds_name='test', mdl_name=opts.model, use_gpu=use_gpu, devq=dev_id, distrb=opts.distrb, ignored_label=task_extparms.setdefault('ignored_label', None))
| 83.75626 | 831 | 0.702631 |
aceaaa301e0f5137144428719ab2f922870939d8 | 1,912 | py | Python | tests/gis_tests/gis_migrations/migrations/0001_initial.py | gauravbose/digital-menu | 63ceb4fe8fb69f65bd645a254cf82887d5011c33 | [
"BSD-3-Clause"
] | 3 | 2020-05-30T17:08:51.000Z | 2021-12-14T02:55:19.000Z | tests/gis_tests/gis_migrations/migrations/0001_initial.py | gauravbose/digital-menu | 63ceb4fe8fb69f65bd645a254cf82887d5011c33 | [
"BSD-3-Clause"
] | 1 | 2021-03-24T12:21:05.000Z | 2021-03-24T12:31:52.000Z | tests/gis_tests/gis_migrations/migrations/0001_initial.py | gauravbose/digital-menu | 63ceb4fe8fb69f65bd645a254cf82887d5011c33 | [
"BSD-3-Clause"
] | 4 | 2016-07-31T14:29:15.000Z | 2021-10-19T03:32:44.000Z | import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """
    Initial migration used for the GIS-specific migration tests.
    """
    operations = [
        # Geographic area delimited by a multi-polygon, with a unique name.
        migrations.CreateModel(
            name='Neighborhood',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=100, unique=True)),
                ('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Point-located household; the geography=True field exercises the
        # geography column type on backends that support it.
        migrations.CreateModel(
            name='Household',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('neighborhood', models.ForeignKey(to='gis_migrations.Neighborhood', to_field='id', null=True)),
                ('address', models.CharField(max_length=100)),
                ('zip_code', models.IntegerField(null=True, blank=True)),
                ('geom', django.contrib.gis.db.models.fields.PointField(srid=4326, geography=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Non-spatial model, used as a plain FK target below.
        migrations.CreateModel(
            name='Family',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=100, unique=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Added after creation so the FK shows up as a separate operation.
        migrations.AddField(
            model_name='household',
            name='family',
            field=models.ForeignKey(blank=True, to='gis_migrations.Family', null=True),
            preserve_default=True,
        ),
    ]
| 37.490196 | 114 | 0.54864 |
aceaaa75e4d4e124c14dc7db07edd77f16bae1b4 | 4,382 | py | Python | tests/conftest.py | whiletruelearn/sagemaker-python-sdk | c63bf62fd8f6610408c051f5320a52d5b05f1300 | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | whiletruelearn/sagemaker-python-sdk | c63bf62fd8f6610408c051f5320a52d5b05f1300 | [
"Apache-2.0"
] | 2 | 2018-04-09T17:53:10.000Z | 2018-04-09T17:53:38.000Z | tests/conftest.py | winstonaws/sagemaker-python-sdk | eb685407ca5ef6139cf2a5a7d9381faa1beac7bf | [
"Apache-2.0"
] | null | null | null | # Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import json
import boto3
import pytest
from botocore.config import Config
from sagemaker import Session
from sagemaker.local import LocalSession
from sagemaker.chainer.defaults import CHAINER_VERSION
from sagemaker.pytorch.defaults import PYTORCH_VERSION
from sagemaker.mxnet.defaults import MXNET_VERSION
from sagemaker.tensorflow.defaults import TF_VERSION
DEFAULT_REGION = 'us-west-2'
def pytest_addoption(parser):
    """Register the custom command-line options used by the test session."""
    # JSON-encoded client/session configuration blobs; absent by default.
    for json_option in ('--sagemaker-client-config',
                        '--sagemaker-runtime-config',
                        '--boto-config'):
        parser.addoption(json_option, action='store', default=None)
    # Framework version overrides, defaulting to the SDK's pinned versions.
    parser.addoption('--tf-full-version', action='store', default=TF_VERSION)
    parser.addoption('--mxnet-full-version', action='store', default=MXNET_VERSION)
    parser.addoption('--chainer-full-version', action='store', default=CHAINER_VERSION)
    parser.addoption('--pytorch-full-version', action='store', default=PYTORCH_VERSION)
@pytest.fixture(scope='session')
def sagemaker_client_config(request):
    """Decode the --sagemaker-client-config JSON option (empty dict if unset)."""
    raw = request.config.getoption('--sagemaker-client-config')
    if raw:
        return json.loads(raw)
    return dict()
@pytest.fixture(scope='session')
def sagemaker_runtime_config(request):
    """Decode the --sagemaker-runtime-config JSON option (None if unset)."""
    raw = request.config.getoption('--sagemaker-runtime-config')
    if raw:
        return json.loads(raw)
    return None
@pytest.fixture(scope='session')
def boto_config(request):
    """Decode the --boto-config JSON option (None if unset)."""
    raw = request.config.getoption('--boto-config')
    if raw:
        return json.loads(raw)
    return None
@pytest.fixture(scope='session')
def sagemaker_session(sagemaker_client_config, sagemaker_runtime_config, boto_config):
    """Build a sagemaker.Session wired with the configured boto3 clients."""
    if boto_config:
        boto_session = boto3.Session(**boto_config)
    else:
        boto_session = boto3.Session(region_name=DEFAULT_REGION)
    # Retry transient SageMaker API errors up to 10 times unless the caller
    # supplied its own botocore Config.
    sagemaker_client_config.setdefault('config', Config(retries=dict(max_attempts=10)))
    sagemaker_client = (boto_session.client('sagemaker', **sagemaker_client_config)
                        if sagemaker_client_config else None)
    runtime_client = (boto_session.client('sagemaker-runtime', **sagemaker_runtime_config)
                      if sagemaker_runtime_config else None)
    return Session(
        boto_session=boto_session,
        sagemaker_client=sagemaker_client,
        sagemaker_runtime_client=runtime_client,
    )
@pytest.fixture(scope='session')
def sagemaker_local_session(boto_config):
    """Build a LocalSession from --boto-config, defaulting to DEFAULT_REGION."""
    boto_session = (boto3.Session(**boto_config) if boto_config
                    else boto3.Session(region_name=DEFAULT_REGION))
    return LocalSession(boto_session=boto_session)
@pytest.fixture(scope='module', params=['1.4', '1.4.1', '1.5', '1.5.0', '1.6', '1.6.0',
                                        '1.7', '1.7.0', '1.8', '1.8.0', '1.9', '1.9.0',
                                        '1.10', '1.10.0'])
def tf_version(request):
    # Parametrized fixture: runs each test once per supported TF version string.
    return request.param
@pytest.fixture(scope='module', params=['0.12', '0.12.1', '1.0', '1.0.0', '1.1', '1.1.0', '1.2', '1.2.1'])
def mxnet_version(request):
    # Parametrized fixture: one run per supported MXNet version string.
    return request.param
@pytest.fixture(scope='module', params=["0.4", "0.4.0"])
def pytorch_version(request):
    # Parametrized fixture: one run per supported PyTorch version string.
    return request.param
@pytest.fixture(scope='module', params=['4.0', '4.0.0', '4.1', '4.1.0'])
def chainer_version(request):
    # Parametrized fixture: one run per supported Chainer version string.
    return request.param
@pytest.fixture(scope='module')
def tf_full_version(request):
    # Full TF version from the CLI (defaults to the SDK's pinned TF_VERSION).
    return request.config.getoption('--tf-full-version')
@pytest.fixture(scope='module')
def mxnet_full_version(request):
    # Full MXNet version from the CLI (defaults to the pinned MXNET_VERSION).
    return request.config.getoption('--mxnet-full-version')
@pytest.fixture(scope='module')
def pytorch_full_version(request):
    # Full PyTorch version from the CLI (defaults to the pinned PYTORCH_VERSION).
    return request.config.getoption('--pytorch-full-version')
@pytest.fixture(scope='module')
def chainer_full_version(request):
    # Full Chainer version from the CLI (defaults to the pinned CHAINER_VERSION).
    return request.config.getoption('--chainer-full-version')
| 35.918033 | 119 | 0.724327 |
aceaab65bfdbb751484b86d790e5402d44e3bbbe | 10,862 | py | Python | gamestonk_terminal/economy/wsj_model.py | clairvoyant/GamestonkTerminal | 7b40cfe61b32782e36f5de8a08d075532a08c294 | [
"MIT"
] | 1 | 2021-12-04T13:21:40.000Z | 2021-12-04T13:21:40.000Z | gamestonk_terminal/economy/wsj_model.py | clairvoyant/GamestonkTerminal | 7b40cfe61b32782e36f5de8a08d075532a08c294 | [
"MIT"
] | null | null | null | gamestonk_terminal/economy/wsj_model.py | clairvoyant/GamestonkTerminal | 7b40cfe61b32782e36f5de8a08d075532a08c294 | [
"MIT"
] | null | null | null | """WSJ model"""
__docformat__ = "numpy"
import pandas as pd
import requests
def us_indices() -> pd.DataFrame:
    """Get the top US indices

    Returns
    -------
    indices: pd.DataFrame
        Dataframe containing name, price, net change and percent change
    """
    response = requests.get(
        "https://www.wsj.com/market-data/stocks?id=%7B%22application%22%3A%22WSJ%22%2C%22instruments%22%3A%5B%7B"
        "%22symbol%22%3A%22INDEX%2FUS%2F%2FDJIA%22%2C%22name%22%3A%22DJIA%22%7D%2C%7B%22symbol%22%3A%22INDEX%2FUS%2F"
        "%2FCOMP%22%2C%22name%22%3A%22Nasdaq%20Composite%22%7D%2C%7B%22symbol%22%3A%22INDEX%2FUS%2F%2FSPX%22%2C%22name"
        "%22%3A%22S%26P%20500%22%7D%2C%7B%22symbol%22%3A%22INDEX%2FUS%2F%2FDWCF%22%2C%22name%22%3A%22DJ%20Total%20Stock"
        "%20Market%22%7D%2C%7B%22symbol%22%3A%22INDEX%2FUS%2F%2FRUT%22%2C%22name%22%3A%22Russell%202000%22%7D%2C%7B"
        "%22symbol%22%3A%22INDEX%2FUS%2F%2FNYA%22%2C%22name%22%3A%22NYSE%20Composite%22%7D%2C%7B%22symbol%22%3A%22INDEX"
        "%2FUS%2F%2FB400%22%2C%22name%22%3A%22Barron%27s%20400%22%7D%2C%7B%22symbol%22%3A%22INDEX%2FUS%2F%2FVIX%22%2C%22"
        "name%22%3A%22CBOE%20Volatility%22%7D%2C%7B%22symbol%22%3A%22FUTURE%2FUS%2F%2FDJIA%20FUTURES%22%2C%22name%22%3A%"
        "22DJIA%20Futures%22%7D%2C%7B%22symbol%22%3A%22FUTURE%2FUS%2F%2FS%26P%20500%20FUTURES%22%2C%22name%22%3A%22S%26P"
        "%20500%20Futures%22%7D%5D%7D&type=mdc_quotes",
        headers={"User-Agent": "Mozilla/5.0"},
    )
    data = response.json()

    # One row per instrument, in the order returned by the endpoint.
    rows = [
        (
            instrument["formattedName"],
            instrument["lastPrice"],
            instrument["priceChange"],
            instrument["percentChange"],
        )
        for instrument in data["data"]["instruments"]
    ]
    return pd.DataFrame(rows, columns=[" ", "Price", "Chg", "%Chg"])
def market_overview() -> pd.DataFrame:
    """Fetch the WSJ market-overview instruments (indices, oil, gold, FX).

    Returns
    -------
    pd.DataFrame
        One row per instrument with columns: name, price, net change and
        percent change.
    """
    payload = requests.get(
        "https://www.wsj.com/market-data?id=%7B%22application%22%3A%22WSJ%22%2C%22instruments%22%3A%5B%7B%22symbol%22"
        "%3A%22INDEX%2FUS%2F%2FDJIA%22%2C%22name%22%3A%22DJIA%22%7D%2C%7B%22symbol%22%3A%22INDEX%2FUS%2F%2FSPX%22%2C%22"
        "name%22%3A%22S%26P%20500%22%7D%2C%7B%22symbol%22%3A%22INDEX%2FUS%2F%2FCOMP%22%2C%22name%22%3A%22Nasdaq%20"
        "Composite%22%7D%2C%7B%22symbol%22%3A%22INDEX%2FJP%2F%2FNIK%22%2C%22name%22%3A%22Japan%3A%20Nikkei%20225%22%7D%"
        "2C%7B%22symbol%22%3A%22INDEX%2FUK%2F%2FUKX%22%2C%22name%22%3A%22UK%3A%20FTSE%20100%22%7D%2C%7B%22symbol%22%3A%"
        "22FUTURE%2FUS%2F%2FCRUDE%20OIL%20-%20ELECTRONIC%22%2C%22name%22%3A%22Crude%20Oil%20Futures%22%7D%2C%7B%22symbol"
        "%22%3A%22FUTURE%2FUS%2F%2FGOLD%22%2C%22name%22%3A%22Gold%20Futures%22%7D%2C%7B%22symbol%22%3A%22CURRENCY%2FUS%2"
        "F%2FUSDJPY%22%2C%22name%22%3A%22Yen%22%7D%2C%7B%22symbol%22%3A%22CURRENCY%2FUS%2F%2FEURUSD%22%2C%22name%22%3A%"
        "22Euro%22%7D%5D%7D&type=mdc_quotes",
        headers={"User-Agent": "Mozilla/5.0"},
    ).json()
    rows = [
        (inst["formattedName"], inst["lastPrice"],
         inst["priceChange"], inst["percentChange"])
        for inst in payload["data"]["instruments"]
    ]
    return pd.DataFrame(rows, columns=[" ", "Price", "Chg", "%Chg"])
def top_commodities() -> pd.DataFrame:
    """Fetch quotes for the top commodity futures from the WSJ API.

    Returns
    -------
    pd.DataFrame
        One row per commodity with columns: name, price, net change and
        percent change.
    """
    payload = requests.get(
        "https://www.wsj.com/market-data/commodities?id=%7B%22application%22%3A%22WSJ%22%2C%22instruments%22%3A%5B%7"
        "B%22symbol%22%3A%22FUTURE%2FUS%2F%2FCRUDE%20OIL%20-%20ELECTRONIC%22%2C%22name%22%3A%22Crude%20Oil%20Futures"
        "%22%7D%2C%7B%22symbol%22%3A%22FUTURE%2FUK%2F%2FBRENT%20CRUDE%22%2C%22name%22%3A%22Brent%20Crude%20Futures%22"
        "%7D%2C%7B%22symbol%22%3A%22FUTURE%2FUS%2F%2FGOLD%22%2C%22name%22%3A%22Gold%20Futures%22%7D%2C%7B%22symbol%22%"
        "3A%22FUTURE%2FUS%2F%2FSILVER%22%2C%22name%22%3A%22Silver%20Futures%22%7D%2C%7B%22symbol%22%3A%22FUTURE%2FUS%2F"
        "%2FNATURAL%20GAS%22%2C%22name%22%3A%22Natural%20Gas%20Futures%22%7D%2C%7B%22symbol%22%3A%22FUTURE%2FUS%2F%2"
        "FUNLEADED%20GASOLINE%22%2C%22name%22%3A%22Unleaded%20Gasoline%20Futures%22%7D%2C%7B%22symbol%22%3A%22FUTURE%"
        "2FUS%2F%2FCOPPER%22%2C%22name%22%3A%22Copper%20Futures%22%7D%2C%7B%22symbol%22%3A%22FUTURE%2FUS%2F%2FCORN%22%2"
        "C%22name%22%3A%22Corn%20Futures%22%7D%2C%7B%22symbol%22%3A%22FUTURE%2FUS%2F%2FWHEAT%22%2C%22name%22%3A%22Wheat"
        "%20Futures%22%7D%2C%7B%22symbol%22%3A%22INDEX%2FXX%2F%2FBCOM%22%7D%5D%7D&type=mdc_quotes",
        headers={"User-Agent": "Mozilla/5.0"},
    ).json()
    rows = [
        (inst["formattedName"], inst["lastPrice"],
         inst["priceChange"], inst["percentChange"])
        for inst in payload["data"]["instruments"]
    ]
    return pd.DataFrame(rows, columns=[" ", "Price", "Chg", "%Chg"])
def us_bonds() -> pd.DataFrame:
    """Fetch quotes for US Treasury bonds/notes/bills from the WSJ API.

    Returns
    -------
    pd.DataFrame
        One row per instrument with columns: name, coupon rate, yield and
        change in yield.
    """
    payload = requests.get(
        "https://www.wsj.com/market-data?id=%7B%22application%22%3A%22WSJ%22%2C%22instruments%22%3A%5B"
        "%7B%22symbol%22%3A%22BOND%2FBX%2F%2FTMUBMUSD30Y%22%2C%22name%22%3A%2230-Year%20Bond%22%7D%2C%7"
        "B%22symbol%22%3A%22BOND%2FBX%2F%2FTMUBMUSD10Y%22%2C%22name%22%3A%2210-Year%20Note%22%7D%2C%7B%2"
        "2symbol%22%3A%22BOND%2FBX%2F%2FTMUBMUSD07Y%22%2C%22name%22%3A%227-Year%20Note%22%7D%2C%7B%22sym"
        "bol%22%3A%22BOND%2FBX%2F%2FTMUBMUSD05Y%22%2C%22name%22%3A%225-Year%20Note%22%7D%2C%7B%22symbol"
        "%22%3A%22BOND%2FBX%2F%2FTMUBMUSD03Y%22%2C%22name%22%3A%223-Year%20Note%22%7D%2C%7B%22symbol%22%"
        "3A%22BOND%2FBX%2F%2FTMUBMUSD02Y%22%2C%22name%22%3A%222-Year%20Note%22%7D%2C%7B%22symbol%22%3A%"
        "22BOND%2FBX%2F%2FTMUBMUSD01Y%22%2C%22name%22%3A%221-Year%20Bill%22%7D%2C%7B%22symbol%22%3A%22"
        "BOND%2FBX%2F%2FTMUBMUSD06M%22%2C%22name%22%3A%226-Month%20Bill%22%7D%2C%7B%22symbol%22%3A%22BON"
        "D%2FBX%2F%2FTMUBMUSD03M%22%2C%22name%22%3A%223-Month%20Bill%22%7D%2C%7B%22symbol%22%3A%22BOND%"
        "2FBX%2F%2FTMUBMUSD01M%22%2C%22name%22%3A%221-Month%20Bill%22%7D%5D%7D&type=mdc_quotes",
        headers={"User-Agent": "Mozilla/5.0"},
    ).json()
    # Bond fields live in a nested "bond" object, unlike the equity endpoints.
    rows = [
        (inst["formattedName"], inst["bond"]["couponRate"],
         inst["bond"]["yield"], inst["bond"]["yieldChange"])
        for inst in payload["data"]["instruments"]
    ]
    return pd.DataFrame(
        rows, columns=[" ", "Rate (%)", "Yld (%)", "Yld Chg (%)"])
def global_bonds() -> pd.DataFrame:
    """Fetch 10-year government bond quotes for major economies from WSJ.

    Returns
    -------
    pd.DataFrame
        One row per country with columns: name, coupon rate, yield and
        change in yield.
    """
    payload = requests.get(
        "https://www.wsj.com/market-data?id=%7B%22application%22%3A%22WSJ%22%2C%22bonds%22%3A%5"
        "B%7B%22symbol%22%3A%22TMUBMUSD10Y%22%2C%22name%22%3A%22U.S.%2010%20Year%22%7D%2C%7B%22symbol"
        "%22%3A%22TMBMKDE-10Y%22%2C%22name%22%3A%22Germany%2010%20Year%22%7D%2C%7B%22symbol%22%3A%22TMB"
        "MKGB-10Y%22%2C%22name%22%3A%22U.K.%2010%20Year%22%7D%2C%7B%22symbol%22%3A%22TMBMKJP-10Y%22%2C%"
        "22name%22%3A%22Japan%2010%20Year%22%7D%2C%7B%22symbol%22%3A%22TMBMKAU-10Y%22%2C%22name%22%3A%2"
        "2Australia%2010%20Year%22%7D%2C%7B%22symbol%22%3A%22AMBMKRM-10Y%22%2C%22name%22%3A%22China%2010"
        "%20Year%22%7D%5D%7D&type=mdc_governmentbonds",
        headers={"User-Agent": "Mozilla/5.0"},
    ).json()
    # This endpoint uses flat field names (djLegalName, yieldPercent, ...).
    rows = [
        (inst["djLegalName"], inst["couponPercent"],
         inst["yieldPercent"], inst["yieldChange"])
        for inst in payload["data"]["instruments"]
    ]
    return pd.DataFrame(
        rows, columns=[" ", "Rate (%)", "Yld (%)", "Yld Chg (%)"])
def global_currencies() -> pd.DataFrame:
    """Fetch quotes for the major currency pairs (plus BTC) from the WSJ API.

    Returns
    -------
    pd.DataFrame
        One row per pair with columns: name, price, net change and percent
        change.
    """
    payload = requests.get(
        "https://www.wsj.com/market-data?id=%7B%22application%22%3A%22WSJ%22%2C%22instruments%22%3A%5"
        "B%7B%22symbol%22%3A%22CURRENCY%2FUS%2F%2FEURUSD%22%2C%22name%22%3A%22Euro%20(EUR%2FUSD)%22%7D%"
        "2C%7B%22symbol%22%3A%22CURRENCY%2FUS%2F%2FUSDJPY%22%2C%22name%22%3A%22Japanese%20Yen%20(USD%2F"
        "JPY)%22%7D%2C%7B%22symbol%22%3A%22CURRENCY%2FUS%2F%2FGBPUSD%22%2C%22name%22%3A%22U.K.%20Poun"
        "d%20(GBP%2FUSD)%22%7D%2C%7B%22symbol%22%3A%22CURRENCY%2FUS%2F%2FUSDCHF%22%2C%22name%22%3A%22Sw"
        "iss%20Franc%20(USD%2FCHF)%22%7D%2C%7B%22symbol%22%3A%22CURRENCY%2FUS%2F%2FUSDCNY%22%2C%22name%2"
        "2%3A%22Chinese%20Yuan%20(USD%2FCNY)%22%7D%2C%7B%22symbol%22%3A%22CURRENCY%2FUS%2F%2FUSDCAD%22%2C"
        "%22name%22%3A%22Canadian%20%24%20(USD%2FCAD)%22%7D%2C%7B%22symbol%22%3A%22CURRENCY%2FUS%2F%2F"
        "USDMXN%22%2C%22name%22%3A%22Mexican%20Peso%20(USD%2FMXN)%22%7D%2C%7B%22symbol%22%3A%22CRYPTO"
        "CURRENCY%2FUS%2F%2FBTCUSD%22%2C%22name%22%3A%22Bitcoin%20(BTC%2FUSD)%22%7D%2C%7B%22symbol%22%3A"
        "%22INDEX%2FXX%2F%2FBUXX%22%2C%22name%22%3A%22WSJ%20Dollar%20Index%22%7D%2C%7B%22symbol%22%3A%2"
        "2INDEX%2FUS%2F%2FDXY%22%2C%22name%22%3A%22U.S.%20Dollar%20Index%22%7D%5D%7D&type=mdc_quotes",
        headers={"User-Agent": "Mozilla/5.0"},
    ).json()
    rows = [
        (inst["formattedName"], inst["lastPrice"],
         inst["priceChange"], inst["percentChange"])
        for inst in payload["data"]["instruments"]
    ]
    return pd.DataFrame(rows, columns=[" ", "Last", "Chng", "%Chng"])
| 49.598174 | 121 | 0.665992 |
aceaacc2504ec440b3163eb2e5b868f84806a933 | 1,286 | py | Python | api/users/serializers.py | min1995/CSW-capstone-team-1 | b17ced9f55ea85eebf3df510dfc747d75bff0a19 | [
"MIT"
] | null | null | null | api/users/serializers.py | min1995/CSW-capstone-team-1 | b17ced9f55ea85eebf3df510dfc747d75bff0a19 | [
"MIT"
] | null | null | null | api/users/serializers.py | min1995/CSW-capstone-team-1 | b17ced9f55ea85eebf3df510dfc747d75bff0a19 | [
"MIT"
] | 2 | 2020-04-13T11:24:51.000Z | 2020-04-13T11:24:51.000Z | from django.contrib.auth import get_user_model
from rest_framework.serializers import (
ModelSerializer,
SerializerMethodField,
ImageField
)
from beers.models import Beer
from beers.serializers import BeerSerializer
from beershops.models import BeerShop
from beershops.serializers import BeershopSerializer
from .models import BeerBearCustomer, BeershopOwner, User
from coupons.models import Coupon
class UserSerializer(ModelSerializer):
    """Serialize a ``User`` with nested lists of their favorite beers and
    beershops (read-only, resolved via SerializerMethodField)."""

    favorite_beer_list = SerializerMethodField()
    favorite_beershop_list = SerializerMethodField()

    class Meta:
        model = User
        fields = ('pk', 'qr', 'favorite_beer_list', 'favorite_beershop_list')

    def get_favorite_beer_list(self, obj):
        # Expand the m2m relation into full beer representations.
        return BeerSerializer(obj.favorite_beer_list.all(), many=True).data

    def get_favorite_beershop_list(self, obj):
        # Expand the m2m relation into full beershop representations.
        return BeershopSerializer(obj.favorite_beershop_list.all(), many=True).data
class BeerBearCustomerSerializer(ModelSerializer):
    """Expose every field of ``BeerBearCustomer``."""

    class Meta:
        model = BeerBearCustomer
        fields = '__all__'
class CouponSerializer(ModelSerializer):
    """Expose every field of ``Coupon``."""

    class Meta:
        model = Coupon
        fields = '__all__'
aceaad55a6c19bc1698822fbe1709a74736c64ad | 14,372 | py | Python | data.py | jczestochowska/emb2emb | 92f05dcbff529c264ec7ff786a3e82b3a3e9f42f | [
"MIT"
] | null | null | null | data.py | jczestochowska/emb2emb | 92f05dcbff529c264ec7ff786a3e82b3a3e9f42f | [
"MIT"
] | 1 | 2021-11-30T12:52:43.000Z | 2021-12-01T14:48:36.000Z | data.py | jczestochowska/emb2emb | 92f05dcbff529c264ec7ff786a3e82b3a3e9f42f | [
"MIT"
] | 1 | 2021-11-24T14:02:00.000Z | 2021-11-24T14:02:00.000Z | import csv
import os
from random import randint
from os.path import join
from sari.SARI import SARIsent
import torch
from rouge import Rouge
from nltk.translate.bleu_score import corpus_bleu, SmoothingFunction
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import numpy as np
import pandas as pd
def read_all(path):
    """Read *path* and return all of its lines with surrounding whitespace
    stripped."""
    with open(path, 'r') as handle:
        return [line.strip() for line in handle]
def read_file(path, params):
    """Read *path*, keeping only the first ``params.data_fraction`` share of
    its lines (rounded down)."""
    examples = read_all(path)
    keep = int(params.data_fraction * len(examples))
    return examples[:keep]
def get_data(params):
    """Load the (train, dev, test) splits and pick the evaluation function
    matching ``params.dataset_path``.

    Side effects on *params*: sets ``run_id`` (random), ``current_epoch`` (0)
    and, depending on the dataset, loads a HuggingFace binary classifier into
    ``params.binary_classifier`` / ``params.binary_tokenizer``.

    Returns a tuple ``((train, dev, test), evaluate_fn)``.
    """
    if "wiki" in params.dataset_path:
        params.run_id = randint(0, 999999999)
        # load the binary classifier
        if params.binary_classifier_path == "no_eval":
            params.binary_classifier = -1  # do not eval binary accuracy
        elif params.binary_classifier_path is not None:
            params.binary_tokenizer = AutoTokenizer.from_pretrained(
                params.binary_classifier_path)
            params.binary_classifier = AutoModelForSequenceClassification.from_pretrained(
                params.binary_classifier_path)
        else:
            params.binary_classifier = None
        params.current_epoch = 0
        return _get_data_pairs(params), evaluate_wiki
    elif "yelp" in params.dataset_path:
        params.run_id = randint(0, 999999999)
        # load the binary classifier
        # NOTE(review): unlike the wiki branch, "no_eval" is not special-cased
        # here; any truthy path is loaded as a model.
        if params.binary_classifier_path:
            params.binary_tokenizer = AutoTokenizer.from_pretrained(
                params.binary_classifier_path)
            params.binary_classifier = AutoModelForSequenceClassification.from_pretrained(
                params.binary_classifier_path)
        else:
            params.binary_classifier = None
        params.current_epoch = 0
        return _get_data_pairs(params), evaluate_yelp
    elif 'gigaword' in params.dataset_path:
        params.run_id = randint(0, 999999999)
        # load the perplexity regressor
        # NOTE(review): this assigns -1 to the *path* attribute, not to a
        # regressor object — presumably the sentinel mirrors the wiki branch;
        # confirm against evaluate_gigaword's expectations.
        if params.perplexity_regressor_path == "no_eval":
            params.perplexity_regressor_path = -1
        params.current_epoch = 0
        return _get_data_pairs(params), evaluate_gigaword
    else:
        raise ValueError("Don't know dataset " + str(params.dataset_path))
def evaluate_yelp(model, mode="valid", params=None, predictions=None):
    """Evaluate style transfer on Yelp: self-BLEU against the inputs plus
    binary style-classifier accuracy.

    ``params.invert_style`` flips which style file (s1/s2) serves as both
    input and reference. Returns ``(self_bleu + b_acc, self_bleu, b_acc)``.
    """
    # compute bleu with input
    if mode == "valid":
        data = "data/yelp/s1.dev" if not params.invert_style else "data/yelp/s2.dev"
    elif mode == "test":
        data = "data/yelp/s1.test" if not params.invert_style else "data/yelp/s2.test"
    # Input and reference are the same file: self-BLEU measures content
    # preservation of the transferred sentences.
    inputs = data
    ref = data
    inputs = read_file(inputs, params)
    ref = read_file(ref, params)
    # corpus_bleu expects a list of references per hypothesis.
    ref = [[r] for r in ref]
    self_bleu, predictions = evaluate_bleu(model, inputs, ref, params.batch_size,
                                           0 if not params.print_outputs else params.max_prints,
                                           return_predictions=True, predictions=predictions)
    b_acc = eval_binary_accuracy(model, predictions, mode, params)
    _save_to_csv(params, self_bleu=self_bleu, b_acc=b_acc)
    params.current_epoch = params.current_epoch + 1
    return self_bleu + b_acc, self_bleu, b_acc
def _save_to_csv(params, b_acc=None, sari=None, bleu=None, self_bleu=None):
    """Append this epoch's metric values (plus run id/epoch) as one row of
    the configured results CSV."""
    scores = {
        "run_id": params.run_id,
        "epoch": params.current_epoch,
        "bleu": bleu,
        "sari": sari,
        "self-bleu": self_bleu,
        "b-acc": b_acc,
    }
    write_to_csv(scores, params)
def write_to_csv(score, opt,
                 escaped_keys=("binary_classifier", "binary_tokenizer",
                               "latent_binary_classifier")):
    """Append the scores and the full configuration as one row of
    ``opt.output_file`` (semicolon-separated).

    On first write (empty file) a header line is emitted: all ``opt``
    attribute names followed by all ``score`` keys. On every call the current
    header is re-read so the row matches the existing column order.

    Parameters
    ----------
    score : dict
        Metric name -> value; values are written with ``str()``.
    opt : argparse.Namespace-like
        Configuration object; must have ``output_file`` and a ``__dict__``.
    escaped_keys : tuple of str
        Config keys whose values are replaced by an empty string (they hold
        live model objects that are meaningless in a CSV).

    Fixes vs. the previous version: ``escaped_keys`` is an immutable tuple
    instead of a mutable default list; files are opened with context
    managers; and the escaped-key check is now applied to the *last* column
    too (previously an escaped key in last position leaked its raw value).
    """
    def clean_str(s):
        # Newlines inside a value would break the one-row-per-line format.
        return s.replace("\n", "")

    # Opening in append mode creates the file if needed; write the header
    # only when the file is still empty.
    with open(opt.output_file, 'a') as f:
        if os.stat(opt.output_file).st_size == 0:
            header = list(opt.__dict__.keys()) + list(score.keys())
            f.write(";".join(header) + "\n")

    # Re-read the header so columns line up with whatever run wrote it first.
    with open(opt.output_file, 'r') as f:
        column_names = next(csv.reader(f, delimiter=";"))

    values = []
    for key in column_names:
        if key in opt.__dict__:
            if key in escaped_keys:
                values.append("")
            else:
                values.append(clean_str(str(opt.__dict__[key])))
        else:
            values.append(str(score[key]))

    with open(opt.output_file, 'a') as f:
        f.write(";".join(values) + "\n")
def eval_binary_accuracy(model, predictions, mode="valid", params=None):
    """Fraction of *predictions* classified as the target style.

    Two paths: if ``params.binary_classifier`` is a HuggingFace model, the
    generated *texts* are classified; otherwise a latent-space classifier
    attached to ``model.loss_fn`` scores the embeddings. The sentinel value
    ``-1`` (set by get_data for "no_eval") short-circuits to 0.0.
    """
    # Target label 1 unless the style direction is inverted.
    target = 0 if params.invert_style else 1
    if params.binary_classifier is not None:
        if params.binary_classifier == -1:
            return 0.
        total_count = len(predictions)
        tokenizer = params.binary_tokenizer
        # NOTE(review): this rebinds the `model` parameter to the classifier,
        # shadowing the emb2emb model passed in — intentional here since the
        # text path never uses the original model again.
        model = params.binary_classifier
        model.eval()
        correct = 0.
        for stidx in range(0, len(predictions), params.batch_size):
            # prepare batch
            predictions_batch = predictions[stidx:(stidx + params.batch_size)]
            predictions_batch = tokenizer.batch_encode_plus(
                predictions_batch, return_tensors="pt", pad_to_max_length=True)
            # returns logits, hidden_states
            predictions_batch = model(**predictions_batch)
            predictions_batch = predictions_batch[0]  # get logits
            predictions_batch = torch.softmax(predictions_batch, dim=1)
            # Probability assigned to the target class.
            predictions_batch = predictions_batch[:, target]
            b_acc = (predictions_batch > 0.5).sum()
            correct = correct + b_acc.item()
        return correct / float(total_count)
    else:
        model.eval()
        binary_classifier = model.loss_fn.classifier
        batch_size = params.batch_size
        # Latent path: count embeddings classified as the "fake" class 0,
        # ignoring the text-path `target` computed above.
        target = 0  # we want to generate from the "fake distribution" labeled "0"
        correct = 0
        for stidx in range(0, len(predictions), batch_size):
            # prepare batch
            Sx_batch = predictions[stidx:stidx + batch_size]
            # model forward
            clf_predictions = model.compute_emb2emb(Sx_batch)[0]
            clf_predictions = torch.sigmoid(binary_classifier(clf_predictions))
            if target == 1:
                b_acc = (clf_predictions > 0.5).sum()
            elif target == 0:
                b_acc = (clf_predictions < 0.5).sum()
            correct = correct + b_acc.item()
        return correct / float(len(predictions))
def bleu_tokenize(s):
    """Whitespace-tokenize *s* for BLEU scoring."""
    tokens = s.split()
    return tokens
def evaluate_bleu(model, input_sentences, reference_sentences, batch_size, max_prints, return_predictions=False,
                  predictions=None):
    """Compute smoothed corpus BLEU of the model's outputs vs. *reference_sentences*.

    If *predictions* is supplied, they are scored directly; otherwise the
    model is run over *input_sentences*. When *return_predictions* is True,
    returns ``(score, predictions)``; otherwise just the score.
    """
    model.eval()
    if predictions is None:
        hypotheses = _get_predictions(
            model, input_sentences, reference_sentences, batch_size, max_prints)
    else:
        hypotheses = predictions
    # corpus_bleu wants references as list[list[list[str]]] and hypotheses
    # as list[list[str]], i.e. everything pre-tokenized.
    tokenized_refs = [[bleu_tokenize(ref) for ref in refs]
                      for refs in reference_sentences]
    tokenized_hyps = [bleu_tokenize(hyp) for hyp in hypotheses]
    score = corpus_bleu(tokenized_refs, tokenized_hyps,
                        smoothing_function=SmoothingFunction().method1)
    if return_predictions:
        return score, hypotheses
    return score
def _get_predictions(model, input_sentences, reference_sentences, batch_size, max_prints):
    """Run the model over *input_sentences* in batches and return the decoded
    output strings, printing up to *max_prints* example triples."""
    model.eval()
    pred_outputs = []
    for i, stidx in enumerate(range(0, len(input_sentences), batch_size)):
        if i % 10 == 0:
            print("Eval progress:", float(stidx) / len(input_sentences))
        # prepare batch
        Sx_batch = input_sentences[stidx:stidx + batch_size]
        # NOTE(review): `[0]` takes only the FIRST reference of the batch
        # slice, not a batch of references — presumably the model only uses
        # Sy for length/teacher hints; confirm against model.forward.
        Sy_batch = reference_sentences[stidx:stidx + batch_size][0]
        # model forward (no grads needed at eval time)
        with torch.no_grad():
            pred_outputs.extend(model(Sx_batch, Sy_batch))
    for i in range(min(len(input_sentences), max_prints)):
        pretty_print_prediction(
            input_sentences[i], reference_sentences[i][0], pred_outputs[i])
    return pred_outputs
def evaluate_gigaword(model, dataset, params=None):
    """Evaluate summarization on Gigaword by batch-averaged ROUGE-L.

    Returns a dict of averaged rouge-l components (f/p/r).
    """
    inputs = dataset['Sx']
    refs = dataset['Sy']
    model.eval()
    pred_outputs = ['' for _ in range(len(inputs))]
    rouges = []
    for i, stidx in enumerate(range(0, len(inputs), params.batch_size)):
        if i % 10 == 0:
            print("Eval progress:", float(stidx) / len(inputs))
        # prepare batch
        Sx_batch = inputs[stidx:stidx + params.batch_size]
        # model forward
        with torch.no_grad():
            other_args = {'next_x_batch': []} if params.emb2emb_additive_noise else {}
            # Note: inputs are passed as both source and target here.
            pred_outputs = model(Sx_batch, Sx_batch, **other_args)
        ground_truth = refs[stidx:stidx + params.batch_size]
        # Rouge raises ValueError on empty/degenerate hypotheses; such
        # batches are skipped (logged) rather than failing the whole eval.
        try:
            rouges.append(calculate_rouge_metrics(pred_outputs, ground_truth)['rouge-l'])
        except ValueError as e:
            print(e)
            print("======= Exception for batch rouge calculation ========")
            print(pred_outputs, ground_truth)
        pred_outputs = pred_outputs[-params.max_prints:]
        num_prints = min(params.max_prints, len(Sx_batch))
        for i in range(num_prints, 0, -1):
            pretty_print_prediction(inputs[-i], refs[-i], pred_outputs[-i])
    df = pd.DataFrame(rouges)
    # NOTE(review): weights every batch by batch_size, so a short final batch
    # and skipped batches make this an approximation of the true mean —
    # confirm this is intended.
    return dict((df.sum() * params.batch_size) / len(inputs))
def calculate_rouge_metrics(model_outputs, reference,
                            metrics=("rouge-1", "rouge-2", "rouge-3", "rouge-l"),
                            avg=True):
    """Compute ROUGE scores of *model_outputs* against *reference*.

    Parameters
    ----------
    model_outputs, reference : list of str
        Hypotheses and their aligned references.
    metrics : sequence of str
        ROUGE variants to compute. The default is now an immutable tuple
        (a mutable default list is a Python foot-gun); it is converted to a
        list for the Rouge constructor, so list arguments still work.
    avg : bool
        If True, return corpus-averaged scores; otherwise per-pair scores.
    """
    rouge = Rouge(metrics=list(metrics))
    return rouge.get_scores(ignore_empty=True, hyps=model_outputs, refs=reference, avg=avg)
def evaluate_wiki(model, mode="valid", params=None):
    """Evaluate text simplification on WikiLarge: SARI (primary), binary
    classifier accuracy, BLEU vs. the Turker references and optional
    self-BLEU vs. the inputs. Returns ``(sari, sari, b_acc)``.
    """
    sari, predictions = evaluate_sari(model, mode, params)
    b_acc = eval_binary_accuracy(model, predictions, mode, params)
    reference_sentences, norm_sentences, _ = _load_wikilarge_references(mode)
    # BLEU against the 8 human references; predictions are reused so the
    # model only runs once.
    bleu = evaluate_bleu(model, norm_sentences, reference_sentences, params.batch_size,
                         max_prints=0, return_predictions=False, predictions=predictions)
    if params.eval_self_bleu:
        # Self-BLEU: overlap with the (complex) inputs themselves.
        self_bleu = evaluate_bleu(model, norm_sentences, [
            [n] for n in norm_sentences], params.batch_size, max_prints=0, return_predictions=False,
            predictions=predictions)
    else:
        self_bleu = -1.
    _save_to_csv(params, b_acc=b_acc, sari=sari,
                 bleu=bleu, self_bleu=self_bleu)
    params.current_epoch = params.current_epoch + 1
    return sari, sari, b_acc
def _load_wikilarge_references(mode):
    """Load the WikiLarge *mode* split: normal sentences, simplified
    sentences and, per sentence, the list of 8 Turker references.

    Returns ``(reference_sentences, norm_sentences, simp_sentences)`` where
    ``reference_sentences[i]`` is the list of 8 references for sentence i.
    """
    if mode == "valid":
        base_path = "./data/simplification/valid/"
    elif mode == "test":
        base_path = "./data/simplification/test/"
    norm_sentences = read_all(join(base_path, "norm"))
    simp_sentences = read_all(join(base_path, "simp"))
    # One file per Turker annotator; transpose to per-sentence lists.
    per_turker = [read_all(join(base_path, "turk" + str(i))) for i in range(8)]
    reference_sentences = [
        [per_turker[j][i] for j in range(8)]
        for i in range(len(per_turker[0]))
    ]
    return reference_sentences, norm_sentences, simp_sentences
def evaluate_sari(model, mode="valid", params=None):
    """Run the model over the WikiLarge *mode* split and compute mean SARI.

    Also prints the copy-baseline (SARI of the unchanged inputs) for
    comparison. Returns ``(sari_score, predicted_sentences)``.
    """
    batch_size = params.batch_size
    model.eval()
    reference_sentences, norm_sentences, simp_sentences = _load_wikilarge_references(
        mode)
    pred_simple_sentences = []
    for stidx in range(0, len(norm_sentences), batch_size):
        # prepare batch
        Sx_batch = norm_sentences[stidx:stidx + batch_size]
        Sy_batch = simp_sentences[stidx:stidx + batch_size]
        # model forward (no grads at eval time)
        with torch.no_grad():
            pred_simple_sentences.extend(model(Sx_batch, Sy_batch))
    # Baseline: score the inputs against the references without any model.
    copy_baseline = _calc_sari(
        norm_sentences, norm_sentences, reference_sentences, params)
    obtained_scores = _calc_sari(
        norm_sentences, pred_simple_sentences, reference_sentences, params)
    print("Text Simplification Copy-Baseline:", copy_baseline)
    return obtained_scores, pred_simple_sentences
def _calc_sari(norm_sentences, pred_simple_sentences, reference_sentences, params):
    """Mean sentence-level SARI over the corpus, printing the first
    ``params.max_prints`` examples when ``params.print_outputs`` is set."""
    scores = []
    for idx, (norm, simp, refs) in enumerate(
            zip(norm_sentences, pred_simple_sentences, reference_sentences)):
        scores.append(SARIsent(norm, simp, refs))
        if params.print_outputs and idx < params.max_prints:
            pretty_print_prediction(norm, refs[0], simp)
    return np.array(scores).mean()
def _get_data_pairs(params):
    """Load the train/dev/test splits of a parallel dataset.

    The dataset directory is expected to contain 's1.<split>' (input) and
    's2.<split>' (output) files for split in {train, dev, test}, one text
    per line. When ``params.invert_style`` is set, s2 becomes the input and
    s1 the output.

    Returns ``(train, dev, test)``, each a dict with keys 'Sx' and 'Sy'.
    """
    data_dict = {}
    for split in ("train", "dev", "test"):
        s1 = read_file(join(params.dataset_path, "s1." + split), params)
        s2 = read_file(join(params.dataset_path, "s2." + split), params)
        if params.invert_style:
            s1, s2 = s2, s1
        data_dict[split] = {"Sx": s1, "Sy": s2}
    return data_dict["train"], data_dict["dev"], data_dict["test"]
def pretty_print_prediction(input_text, gold_output, predicted_output):
    """Print an input/prediction/gold triple, preceded by blank lines so
    consecutive examples are visually separated."""
    report = ("\n\n\n\n"
              "Input:  {inp}\n"
              "Output:  {pred}\n"
              "Gold:  {gold}")
    print(report.format(inp=input_text, pred=predicted_output, gold=gold_output))
aceaaf9d9622bcf433b5960423708495bd24a018 | 14,446 | py | Python | tests/commands/test_changelog_command.py | edvm/commitizen | d42f744c3dd96aac568272f9d84e8000a22fc9ff | [
"MIT"
] | null | null | null | tests/commands/test_changelog_command.py | edvm/commitizen | d42f744c3dd96aac568272f9d84e8000a22fc9ff | [
"MIT"
] | null | null | null | tests/commands/test_changelog_command.py | edvm/commitizen | d42f744c3dd96aac568272f9d84e8000a22fc9ff | [
"MIT"
] | null | null | null | import sys
from datetime import date
import pytest
from commitizen import cli, git
from commitizen.commands.changelog import Changelog
from commitizen.exceptions import (
DryRunExit,
NoCommitsFoundError,
NoRevisionError,
NotAGitProjectError,
)
from tests.utils import create_file_and_commit
@pytest.mark.usefixtures("tmp_commitizen_project")
def test_changelog_on_empty_project(mocker):
    """A repository with no commits must abort with NoCommitsFoundError."""
    testargs = ["cz", "changelog", "--dry-run"]
    mocker.patch.object(sys, "argv", testargs)
    with pytest.raises(NoCommitsFoundError) as excinfo:
        cli.main()
    assert "No commits found" in str(excinfo)
@pytest.mark.usefixtures("tmp_commitizen_project")
def test_changelog_from_version_zero_point_two(mocker, capsys):
    """--start-rev restricts the changelog to commits after the given tag."""
    create_file_and_commit("feat: new file")
    create_file_and_commit("refactor: not in changelog")
    # create tag
    testargs = ["cz", "bump", "--yes"]
    mocker.patch.object(sys, "argv", testargs)
    cli.main()
    capsys.readouterr()
    create_file_and_commit("feat: after 0.2.0")
    create_file_and_commit("feat: after 0.2")
    testargs = ["cz", "changelog", "--start-rev", "0.2.0", "--dry-run"]
    mocker.patch.object(sys, "argv", testargs)
    with pytest.raises(DryRunExit):
        cli.main()
    out, _ = capsys.readouterr()
    assert out == "## Unreleased\n\n### Feat\n\n- after 0.2\n- after 0.2.0\n\n"
@pytest.mark.usefixtures("tmp_commitizen_project")
def test_changelog_with_different_cz(mocker, capsys):
    """The changelog honors a non-default commit convention (cz_jira)."""
    create_file_and_commit("JRA-34 #comment corrected indent issue")
    create_file_and_commit("JRA-35 #time 1w 2d 4h 30m Total work logged")
    testargs = ["cz", "-n", "cz_jira", "changelog", "--dry-run"]
    mocker.patch.object(sys, "argv", testargs)
    with pytest.raises(DryRunExit):
        cli.main()
    out, _ = capsys.readouterr()
    assert (
        out
        == "## Unreleased\n\n\n- JRA-35 #time 1w 2d 4h 30m Total work logged\n- JRA-34 #comment corrected indent issue\n\n"
    )
@pytest.mark.usefixtures("tmp_commitizen_project")
def test_changelog_from_start(mocker, capsys, changelog_path):
    """A plain `cz changelog` writes the full history; merge commits are skipped."""
    create_file_and_commit("feat: new file")
    create_file_and_commit("refactor: is in changelog")
    create_file_and_commit("Merge into master")
    testargs = ["cz", "changelog"]
    mocker.patch.object(sys, "argv", testargs)
    cli.main()
    with open(changelog_path, "r") as f:
        out = f.read()
    assert (
        out
        == "## Unreleased\n\n### Refactor\n\n- is in changelog\n\n### Feat\n\n- new file\n"
    )
@pytest.mark.usefixtures("tmp_commitizen_project")
def test_changelog_replacing_unreleased_using_incremental(
    mocker, capsys, changelog_path
):
    """--incremental moves the former Unreleased section under the bumped
    version and starts a fresh Unreleased section for new commits."""
    create_file_and_commit("feat: add new output")
    create_file_and_commit("fix: output glitch")
    create_file_and_commit("Merge into master")
    testargs = ["cz", "changelog"]
    mocker.patch.object(sys, "argv", testargs)
    cli.main()
    testargs = ["cz", "bump", "--yes"]
    mocker.patch.object(sys, "argv", testargs)
    cli.main()
    create_file_and_commit("fix: mama gotta work")
    create_file_and_commit("feat: add more stuff")
    create_file_and_commit("Merge into master")
    testargs = ["cz", "changelog", "--incremental"]
    mocker.patch.object(sys, "argv", testargs)
    cli.main()
    with open(changelog_path, "r") as f:
        out = f.read()
    today = date.today().isoformat()
    assert (
        out
        == f"## Unreleased\n\n### Feat\n\n- add more stuff\n\n### Fix\n\n- mama gotta work\n\n## 0.2.0 ({today})\n\n### Fix\n\n- output glitch\n\n### Feat\n\n- add new output\n"
    )
@pytest.mark.usefixtures("tmp_commitizen_project")
def test_changelog_is_persisted_using_incremental(mocker, capsys, changelog_path):
    """--incremental keeps manually added text already present in the file."""
    create_file_and_commit("feat: add new output")
    create_file_and_commit("fix: output glitch")
    create_file_and_commit("Merge into master")
    testargs = ["cz", "bump", "--yes"]
    mocker.patch.object(sys, "argv", testargs)
    cli.main()
    testargs = ["cz", "changelog"]
    mocker.patch.object(sys, "argv", testargs)
    cli.main()
    # Simulate a user editing the changelog by hand.
    with open(changelog_path, "a") as f:
        f.write("\nnote: this should be persisted using increment\n")
    create_file_and_commit("fix: mama gotta work")
    create_file_and_commit("feat: add more stuff")
    create_file_and_commit("Merge into master")
    testargs = ["cz", "changelog", "--incremental"]
    mocker.patch.object(sys, "argv", testargs)
    cli.main()
    with open(changelog_path, "r") as f:
        out = f.read()
    today = date.today().isoformat()
    assert (
        out
        == f"## Unreleased\n\n### Feat\n\n- add more stuff\n\n### Fix\n\n- mama gotta work\n\n## 0.2.0 ({today})\n\n### Fix\n\n- output glitch\n\n### Feat\n\n- add new output\n\nnote: this should be persisted using increment\n"
    )
@pytest.mark.usefixtures("tmp_commitizen_project")
def test_changelog_incremental_angular_sample(mocker, capsys, changelog_path):
    """--incremental prepends new sections to an Angular-style changelog
    without altering the pre-existing release notes."""
    with open(changelog_path, "w") as f:
        f.write(
            "# [10.0.0-next.3](https://github.com/angular/angular/compare/10.0.0-next.2...10.0.0-next.3) (2020-04-22)\n"
            "\n"
            "### Bug Fixes"
            "\n"
            "* **common:** format day-periods that cross midnight ([#36611](https://github.com/angular/angular/issues/36611)) ([c6e5fc4](https://github.com/angular/angular/commit/c6e5fc4)), closes [#36566](https://github.com/angular/angular/issues/36566)\n"
        )
    create_file_and_commit("irrelevant commit")
    git.tag("10.0.0-next.3")
    create_file_and_commit("feat: add new output")
    create_file_and_commit("fix: output glitch")
    create_file_and_commit("fix: mama gotta work")
    create_file_and_commit("feat: add more stuff")
    create_file_and_commit("Merge into master")
    testargs = ["cz", "changelog", "--incremental"]
    mocker.patch.object(sys, "argv", testargs)
    cli.main()
    with open(changelog_path, "r") as f:
        out = f.read()
    assert (
        out
        == "## Unreleased\n\n### Feat\n\n- add more stuff\n- add new output\n\n### Fix\n\n- mama gotta work\n- output glitch\n\n# [10.0.0-next.3](https://github.com/angular/angular/compare/10.0.0-next.2...10.0.0-next.3) (2020-04-22)\n\n### Bug Fixes\n* **common:** format day-periods that cross midnight ([#36611](https://github.com/angular/angular/issues/36611)) ([c6e5fc4](https://github.com/angular/angular/commit/c6e5fc4)), closes [#36566](https://github.com/angular/angular/issues/36566)\n"
    )
KEEP_A_CHANGELOG = """# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [1.0.0] - 2017-06-20
### Added
- New visual identity by [@tylerfortune8](https://github.com/tylerfortune8).
- Version navigation.
### Changed
- Start using "changelog" over "change log" since it's the common usage.
### Removed
- Section about "changelog" vs "CHANGELOG".
## [0.3.0] - 2015-12-03
### Added
- RU translation from [@aishek](https://github.com/aishek).
"""
@pytest.mark.usefixtures("tmp_commitizen_project")
def test_changelog_incremental_keep_a_changelog_sample(mocker, capsys, changelog_path):
    """--incremental prepends new sections to a Keep-a-Changelog file,
    preserving its header and the existing release sections."""
    with open(changelog_path, "w") as f:
        f.write(KEEP_A_CHANGELOG)
    create_file_and_commit("irrelevant commit")
    git.tag("1.0.0")
    create_file_and_commit("feat: add new output")
    create_file_and_commit("fix: output glitch")
    create_file_and_commit("fix: mama gotta work")
    create_file_and_commit("feat: add more stuff")
    create_file_and_commit("Merge into master")
    testargs = ["cz", "changelog", "--incremental"]
    mocker.patch.object(sys, "argv", testargs)
    cli.main()
    with open(changelog_path, "r") as f:
        out = f.read()
    assert (
        out
        == """# Changelog\nAll notable changes to this project will be documented in this file.\n\nThe format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),\nand this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).\n\n## Unreleased\n\n### Feat\n\n- add more stuff\n- add new output\n\n### Fix\n\n- mama gotta work\n- output glitch\n\n## [1.0.0] - 2017-06-20\n### Added\n- New visual identity by [@tylerfortune8](https://github.com/tylerfortune8).\n- Version navigation.\n\n### Changed\n- Start using "changelog" over "change log" since it\'s the common usage.\n\n### Removed\n- Section about "changelog" vs "CHANGELOG".\n\n## [0.3.0] - 2015-12-03\n### Added\n- RU translation from [@aishek](https://github.com/aishek).\n"""
    )
@pytest.mark.usefixtures("tmp_commitizen_project")
def test_changelog_hook(mocker, config):
    """The commit-rule's changelog_hook is invoked with the full and the
    partial changelog contents."""
    changelog_hook_mock = mocker.Mock()
    changelog_hook_mock.return_value = "cool changelog hook"
    create_file_and_commit("feat: new file")
    create_file_and_commit("refactor: is in changelog")
    create_file_and_commit("Merge into master")
    changelog = Changelog(
        config, {"unreleased_version": None, "incremental": True, "dry_run": False}
    )
    mocker.patch.object(changelog.cz, "changelog_hook", changelog_hook_mock)
    changelog()
    full_changelog = (
        "## Unreleased\n\n### Refactor\n\n- is in changelog\n\n### Feat\n\n- new file\n"
    )
    changelog_hook_mock.assert_called_with(full_changelog, full_changelog)
@pytest.mark.usefixtures("tmp_commitizen_project")
def test_changelog_multiple_incremental_do_not_add_new_lines(
    mocker, capsys, changelog_path
):
    """Test for bug https://github.com/commitizen-tools/commitizen/issues/192

    Repeated --incremental runs must not prepend blank lines to the file.
    """
    create_file_and_commit("feat: add new output")
    testargs = ["cz", "changelog", "--incremental"]
    mocker.patch.object(sys, "argv", testargs)
    cli.main()
    create_file_and_commit("fix: output glitch")
    testargs = ["cz", "changelog", "--incremental"]
    mocker.patch.object(sys, "argv", testargs)
    cli.main()
    create_file_and_commit("fix: mama gotta work")
    testargs = ["cz", "changelog", "--incremental"]
    mocker.patch.object(sys, "argv", testargs)
    cli.main()
    create_file_and_commit("feat: add more stuff")
    testargs = ["cz", "changelog", "--incremental"]
    mocker.patch.object(sys, "argv", testargs)
    cli.main()
    with open(changelog_path, "r") as f:
        out = f.read()
    assert out.startswith("#")
def test_changelog_without_revision(mocker, tmp_commitizen_project):
    """An existing changelog but no git history raises NoRevisionError."""
    changelog_file = tmp_commitizen_project.join("CHANGELOG.md")
    # Pre-seed a changelog that mentions v1.0.0 even though no such tag
    # (or any commit) exists in the repository.
    changelog_file.write(
        """
    # Unreleased
    ## v1.0.0
    """
    )
    # create_file_and_commit("feat: new file")
    testargs = ["cz", "changelog", "--incremental"]
    mocker.patch.object(sys, "argv", testargs)
    with pytest.raises(NoRevisionError):
        cli.main()
def test_changelog_with_different_tag_name_and_changelog_content(
    mocker, tmp_commitizen_project
):
    """A changelog version with no matching git tag raises NoRevisionError."""
    changelog_file = tmp_commitizen_project.join("CHANGELOG.md")
    # The changelog references v1.0.0 ...
    changelog_file.write(
        """
    # Unreleased
    ## v1.0.0
    """
    )
    create_file_and_commit("feat: new file")
    # ... but the only tag in the repo is 2.0.0, so the incremental run
    # cannot locate the revision named in the changelog.
    git.tag("2.0.0")
    # create_file_and_commit("feat: new file")
    testargs = ["cz", "changelog", "--incremental"]
    mocker.patch.object(sys, "argv", testargs)
    with pytest.raises(NoRevisionError):
        cli.main()
def test_changelog_in_non_git_project(tmpdir, config, mocker):
    """Running ``cz changelog`` outside a git repository must fail."""
    mocker.patch.object(sys, "argv", ["cz", "changelog", "--incremental"])
    with tmpdir.as_cwd(), pytest.raises(NotAGitProjectError):
        cli.main()
@pytest.mark.usefixtures("tmp_commitizen_project")
def test_breaking_change_content_v1_beta(mocker, capsys):
    """BREAKING CHANGE in the body (with a footer after it) gets its own
    changelog section."""
    commit_message = (
        "feat(users): email pattern corrected\n\n"
        "BREAKING CHANGE: migrate by renaming user to users\n\n"
        "footer content"
    )
    create_file_and_commit(commit_message)
    testargs = ["cz", "changelog", "--dry-run"]
    mocker.patch.object(sys, "argv", testargs)
    # --dry-run prints the changelog to stdout and exits via DryRunExit.
    with pytest.raises(DryRunExit):
        cli.main()
    out, _ = capsys.readouterr()
    assert out == (
        "## Unreleased\n\n### Feat\n\n- **users**: email pattern corrected\n\n"
        "### BREAKING CHANGE\n\n- migrate by renaming user to users\n\n"
    )
@pytest.mark.usefixtures("tmp_commitizen_project")
def test_breaking_change_content_v1(mocker, capsys):
    """BREAKING CHANGE as the last paragraph of the body also gets its own
    changelog section (same expected output as the beta-footer variant)."""
    commit_message = (
        "feat(users): email pattern corrected\n\n"
        "body content\n\n"
        "BREAKING CHANGE: migrate by renaming user to users"
    )
    create_file_and_commit(commit_message)
    testargs = ["cz", "changelog", "--dry-run"]
    mocker.patch.object(sys, "argv", testargs)
    with pytest.raises(DryRunExit):
        cli.main()
    out, _ = capsys.readouterr()
    assert out == (
        "## Unreleased\n\n### Feat\n\n- **users**: email pattern corrected\n\n"
        "### BREAKING CHANGE\n\n- migrate by renaming user to users\n\n"
    )
@pytest.mark.usefixtures("tmp_commitizen_project")
def test_changelog_config_flag_increment(mocker, changelog_path, config_path):
    """changelog_incremental=true in the config behaves like --incremental."""
    with open(config_path, "a") as cfg:
        cfg.write("changelog_incremental = true\n")
    # Existing content must survive an incremental run.
    with open(changelog_path, "a") as log:
        log.write("\nnote: this should be persisted using increment\n")
    create_file_and_commit("feat: add new output")
    mocker.patch.object(sys, "argv", ["cz", "changelog"])
    cli.main()
    with open(changelog_path, "r") as log:
        contents = log.read()
    assert "this should be persisted using increment" in contents
@pytest.mark.usefixtures("tmp_commitizen_project")
def test_changelog_config_start_rev_option(mocker, capsys, config_path):
    """changelog_start_rev limits output to commits at/after that revision."""
    # create commit and tag 0.2.0 via `cz bump`
    create_file_and_commit("feat: new file")
    testargs = ["cz", "bump", "--yes"]
    mocker.patch.object(sys, "argv", testargs)
    cli.main()
    # Drain bump output so the later capture only sees the changelog.
    capsys.readouterr()
    create_file_and_commit("feat: after 0.2.0")
    create_file_and_commit("feat: after 0.2")
    with open(config_path, "a") as f:
        f.write('changelog_start_rev = "0.2.0"\n')
    testargs = ["cz", "changelog", "--dry-run"]
    mocker.patch.object(sys, "argv", testargs)
    with pytest.raises(DryRunExit):
        cli.main()
    out, _ = capsys.readouterr()
    # Only the two commits made after the 0.2.0 tag appear, newest first.
    assert out == "## Unreleased\n\n### Feat\n\n- after 0.2\n- after 0.2.0\n\n"
| 33.517401 | 777 | 0.669874 |
aceab18408d6683caaf0c5b6809081a55e275bda | 156 | py | Python | livestock/broiler/doctype/temperature/test_temperature.py | jayan13/livestock | 75f4ccb246818d9cd55400d88fefbb36c168c713 | [
"MIT"
] | null | null | null | livestock/broiler/doctype/temperature/test_temperature.py | jayan13/livestock | 75f4ccb246818d9cd55400d88fefbb36c168c713 | [
"MIT"
] | null | null | null | livestock/broiler/doctype/temperature/test_temperature.py | jayan13/livestock | 75f4ccb246818d9cd55400d88fefbb36c168c713 | [
"MIT"
] | null | null | null | # Copyright (c) 2022, alantechnologies and Contributors
# See license.txt
# import frappe
import unittest
class TestTemperature(unittest.TestCase):
    """Placeholder test case for the Temperature doctype; no tests yet."""
    pass
| 17.333333 | 55 | 0.794872 |
aceab25a1bbbed43069680cc39570ba36cba5f51 | 1,263 | py | Python | tests/music_based/test_HS.py | rishavpramanik/mealpy | d4a4d5810f15837764e4ee61517350fef3dc92b3 | [
"MIT"
] | null | null | null | tests/music_based/test_HS.py | rishavpramanik/mealpy | d4a4d5810f15837764e4ee61517350fef3dc92b3 | [
"MIT"
] | null | null | null | tests/music_based/test_HS.py | rishavpramanik/mealpy | d4a4d5810f15837764e4ee61517350fef3dc92b3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Created by "Thieu" at 18:26, 19/03/2022 ----------%
# Email: nguyenthieu2102@gmail.com %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%
from mealpy.music_based import HS
from mealpy.optimizer import Optimizer
import numpy as np
import pytest
@pytest.fixture(scope="module")  # module scope: built once for all tests here
def problem():
    """Build a 5-dimensional sphere-minimization problem definition.

    The returned dict follows mealpy's problem schema: fit_func, lower
    and upper bounds per dimension, minmax direction, and logging target.
    """
    def fitness_function(solution):
        # Sphere function: sum of squared components (global minimum 0 at origin).
        return np.sum(solution ** 2)
    problem = {
        "fit_func": fitness_function,
        "lb": [-10, -10, -10, -10, -10],
        "ub": [10, 10, 10, 10, 10],
        "minmax": "min",
        "log_to": None
    }
    return problem
def test_HS_results(problem):
    """Smoke-test both Harmony Search variants: each must solve the problem
    and return a position vector matching the problem dimensionality."""
    models = [
        HS.OriginalHS(problem, epoch=100, pop_size=50),
        HS.BaseHS(problem, epoch=100, pop_size=50),
    ]
    for model in models:
        best_position, best_fitness = model.solve()
        assert isinstance(model, Optimizer)
        assert isinstance(best_position, np.ndarray)
        assert len(best_position) == len(problem["lb"])
aceab2a3c3ef8f7d02885363608ead40c8016060 | 2,544 | py | Python | ws2122-lspm/Lib/site-packages/pm4py/visualization/align_table/visualizer.py | Malekhy/ws2122-lspm | e4dc8b801d12f862b8ef536a0f125f346f085a00 | [
"MIT"
] | 1 | 2022-01-19T04:02:46.000Z | 2022-01-19T04:02:46.000Z | ws2122-lspm/Lib/site-packages/pm4py/visualization/align_table/visualizer.py | Malekhy/ws2122-lspm | e4dc8b801d12f862b8ef536a0f125f346f085a00 | [
"MIT"
] | 1 | 2021-11-19T07:21:48.000Z | 2021-11-19T07:21:48.000Z | ws2122-lspm/Lib/site-packages/pm4py/visualization/align_table/visualizer.py | Malekhy/ws2122-lspm | e4dc8b801d12f862b8ef536a0f125f346f085a00 | [
"MIT"
] | 1 | 2022-01-14T17:15:38.000Z | 2022-01-14T17:15:38.000Z | '''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from pm4py.visualization.align_table.variants import classic
from pm4py.visualization.common import gview
from pm4py.visualization.common import save as gsave
from pm4py.visualization.common.gview import serialize, serialize_dot
from enum import Enum
from pm4py.util import exec_utils
from typing import Optional, Dict, Any, Union, Tuple
from pm4py.objects.log.obj import EventLog, EventStream
from pm4py.util import typing
import graphviz
class Variants(Enum):
    """Available alignment-table visualization variants; each value is the
    module implementing that variant."""
    CLASSIC = classic
# Variant used when the caller does not pass one explicitly.
DEFAULT_VARIANT = Variants.CLASSIC
def apply(log: EventLog, aligned_traces: typing.ListAlignments, variant=DEFAULT_VARIANT, parameters: Optional[Dict[Any, Any]] = None) -> graphviz.Source:
    """
    Build the alignment-table visualization from the alignments output.

    Parameters
    -------------
    log
        Event log the alignments were computed on
    aligned_traces
        Aligned traces (output of the alignment algorithm)
    variant
        Variant of the algorithm to apply, possible values:
        - Variants.CLASSIC
    parameters
        Parameters of the algorithm

    Returns
    -------------
    gviz
        Graphviz object
    """
    variant_module = exec_utils.get_variant(variant)
    return variant_module.apply(log, aligned_traces, parameters=parameters)
def save(gviz: graphviz.Digraph, output_file_path: str):
    """
    Save the diagram to a file.

    Parameters
    -----------
    gviz
        GraphViz diagram
    output_file_path
        Path where the GraphViz output should be saved
    """
    gsave.save(gviz, output_file_path)
def view(gviz: graphviz.Digraph):
    """
    View the diagram with the system's default viewer.

    Parameters
    -----------
    gviz
        GraphViz diagram
    """
    return gview.view(gviz)
def matplotlib_view(gviz: graphviz.Digraph):
    """
    View the diagram using Matplotlib.

    Parameters
    ---------------
    gviz
        Graphviz diagram
    """
    return gview.matplotlib_view(gviz)
| 26.226804 | 153 | 0.692217 |
aceab2b90fe011a4264a917b03a45b4fbccf70aa | 4,127 | py | Python | source/applications/system/models.py | whitegreyblack/PyWin | 78f3637b4c03c11d7f6ef15b20a1acf699d4be24 | [
"MIT"
] | null | null | null | source/applications/system/models.py | whitegreyblack/PyWin | 78f3637b4c03c11d7f6ef15b20a1acf699d4be24 | [
"MIT"
] | null | null | null | source/applications/system/models.py | whitegreyblack/PyWin | 78f3637b4c03c11d7f6ef15b20a1acf699d4be24 | [
"MIT"
] | null | null | null | import curses
import random
import source.utils as utils
from source.applications.application import Application
from source.window import (DisplayWindow, PromptWindow, ScrollableWindow,
Window, WindowProperty)
from source.window.scrollable import (keypress_up, keypress_down)
"""
Currently we manually build the file system during runtime.
Ex.:
data = {
Folder(x): ...,
...,
}
Eventually we would like to move to a data reader/parser of
a file to keep it simple and easier to modify
Ex.:
Folder 1:
Folder 1.1:
File A
File B
File C
Folder 2:
-- empty --
File D
"""
def parse_system_structure(data):
    """Parse a textual file-system description into a SystemObject tree.

    NOTE(review): not implemented yet; trees are currently built by hand
    at runtime (see the module comment above).
    """
    pass
# At its most basic representation, it's just a basic tree structure with
# unlimited number (not really) of children elements
# Ex. Tree(n1(n4, n5), n2(), n3(n6))
class SystemWindow(ScrollableWindow):
    """Scrollable window that navigates a tree of SystemObject nodes.

    Enter descends into the selected folder; Escape ascends toward the
    parent (or exits the application at the root).
    """

    def keypress_enter(self, *arg, **kwargs):
        """Descend into the selected node when it is a folder."""
        if hasattr(self.data[self.index], 'children'):
            data = self.data[self.index].children
            if data:
                self.data = data
                self.index = 0
            else:
                # Empty folder: draw() will render the "No data" message.
                self.data = None

    def keypress_escape(self, *arg, **kwargs):
        """Ascend out of the current listing, or exit at the root."""
        # NOTE(review): this assigns the parent *object* (not a child
        # list) to self.data, and crashes if self.data is None after
        # entering an empty folder -- confirm intended navigation model.
        data = self.data[self.index].parent
        if data:
            self.data = data
        else:
            exit()

    def draw(self):
        """Render the visible slice of self.data, keeping the selected row
        centered once the list is taller than the window."""
        if not self.showing:
            return
        # Intentionally calls Window.draw (frame only), bypassing
        # ScrollableWindow.draw, since the row rendering is done here.
        Window.draw(self)
        if not self.data:
            self.window.addstr(1, 1, "No data")
            return
        rows_in_view = None
        s, e = 0, self.height
        halfscreen = self.height // 2
        if len(self.data) > self.height:
            if self.index < halfscreen:
                # Near the top: keep the view anchored at the start.
                pass
            elif self.index > len(self.data) - halfscreen - 1:
                # Near the bottom: anchor the view at the end.
                s = len(self.data) - self.height
                e = s + self.height + 1
            else:
                # Middle: center the selection.
                s = self.index - halfscreen
                e = s + self.height
            rows_in_view = self.data[s:e]
        else:
            s = 0
            rows_in_view = self.data
        for i, r in enumerate(rows_in_view):
            if not isinstance(r, str):
                # SystemObjects format themselves; strings are used as-is.
                _, _, r = r.display(1, 1, self.width, self.height, 0)
            l = r[:self.width]
            # Pad to the full window width so old text is overwritten.
            l = f"{l}{' ' * (self.width - len(l))}"
            c = curses.color_pair(1)
            if s + i == self.index:
                # Highlight the selection; dimmer when the window lacks focus.
                if self.focused:
                    c = curses.color_pair(2)
                else:
                    c = curses.color_pair(3)
            self.window.addstr(i + 1, 1, l, c)
class Directory(object):
    """Thin container around a list of system objects."""

    def __init__(self, sysobjs=None):
        # Keep the caller's list when given; otherwise start empty.
        self.sysobjs = sysobjs or []

    def items(self):
        """Return the number of contained system objects."""
        return len(self.sysobjs)
class SystemObject(object):
    """Base node in the in-memory file-system tree (file or folder)."""

    def __init__(self, name: str):
        self.name = name
        # Set by Folder.__init__ when this object is attached as a child.
        self.parent = None

    def __str__(self):
        return self.name

    def display(self, x, y, mx, my, indent):
        """Format this node for rendering.

        Returns ``(x, y, text)`` where text is the indented display name
        suffixed with '/', truncated (marked '~/') or padded for width mx.
        """
        # Fix: removed stray debug print() calls left in this method.
        indentation = " " * indent
        space_remaining = mx - indent
        space_empty = None
        if len(self.name) > space_remaining:
            # Name does not fit: truncate and mark with '~/'.
            space_empty = ""
            display_name = self.name[:space_remaining - 2] + "~/"
        else:
            display_name = self.name + "/"
            # NOTE(review): the '- 1' pads one column past mx; looks like
            # an off-by-one but may be intentional for a border -- confirm.
            space_remaining -= len(display_name) - 1
            space_empty = " " * space_remaining
        return x, y, f"{indentation}{display_name}{space_empty}"
class File(SystemObject):
    """Leaf node of the file-system tree: a file has no children."""

    def __repr__(self):
        return "File({})".format(self.name)
class Folder(SystemObject):
    """Interior node of the file-system tree holding child SystemObjects."""

    def __init__(self, name: str, children: list = None):
        super().__init__(name)
        if not children:
            self.children = []
        else:
            # Attach each child back to this folder before storing the list.
            for child in children:
                child.parent = self
            self.children = children

    def __repr__(self):
        return "Folder({})".format(self.name)
aceab2ffc45326a966a38e1a94ebb71e478bfa75 | 126,670 | py | Python | nova/tests/api/ec2/test_cloud.py | bopopescu/OpenStack-DNRM-Nova | 7354f378398850113ac93b511547ed05218dc770 | [
"Apache-2.0"
] | null | null | null | nova/tests/api/ec2/test_cloud.py | bopopescu/OpenStack-DNRM-Nova | 7354f378398850113ac93b511547ed05218dc770 | [
"Apache-2.0"
] | null | null | null | nova/tests/api/ec2/test_cloud.py | bopopescu/OpenStack-DNRM-Nova | 7354f378398850113ac93b511547ed05218dc770 | [
"Apache-2.0"
] | 1 | 2020-07-24T09:10:21.000Z | 2020-07-24T09:10:21.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import copy
import datetime
import functools
import iso8601
import os
import string
import tempfile
import fixtures
from oslo.config import cfg
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
from nova.api.metadata import password
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.image import s3
from nova.network import api as network_api
from nova.network import neutronv2
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack.compute.contrib import (
test_neutron_security_groups as test_neutron)
from nova.tests import fake_network
from nova.tests import fake_utils
from nova.tests.image import fake
from nova.tests import matchers
from nova import utils
from nova.virt import fake as fake_virt
from nova import volume
CONF = cfg.CONF
# Pull in option definitions this test module reads (e.g. CONF.use_ipv6).
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('default_flavor', 'nova.compute.flavors')
CONF.import_opt('use_ipv6', 'nova.netconf')
LOG = logging.getLogger(__name__)
# Hostname used for fake instances/hosts throughout these tests.
HOST = "testhost"
def get_fake_cache():
    """Build a canned network-info cache: one NIC on a private network with
    two fixed IPs, floating IPs on the first, and (optionally) an IPv6 IP."""

    def _ip(address, fixed=True, floats=None):
        entry = {'address': address,
                 'type': 'fixed' if fixed else 'floating'}
        if fixed and floats:
            entry['floating_ips'] = [_ip(f, fixed=False) for f in floats]
        return entry

    subnets = [{'cidr': '192.168.0.0/24',
                'ips': [_ip('192.168.0.3',
                            floats=['1.2.3.4', '5.6.7.8']),
                        _ip('192.168.0.4')]}]
    info = [{'address': 'aa:bb:cc:dd:ee:ff',
             'id': 1,
             'network': {'bridge': 'br0',
                         'id': 1,
                         'label': 'private',
                         'subnets': subnets}}]
    if CONF.use_ipv6:
        subnets.append({'cidr': 'fe80:b33f::/64',
                        'ips': [_ip('fe80:b33f::a8bb:ccff:fedd:eeff')]})
    return info
def get_instances_with_cached_ips(orig_func, *args, **kwargs):
    """Call orig_func and graft the fake network-info cache onto the result.

    Handles both a single instance dict and a list of them, so tests do
    not need real info-cache DB entries.
    """
    result = orig_func(*args, **kwargs)
    targets = result if isinstance(result, list) else [result]
    for instance in targets:
        instance['info_cache'] = {'network_info': get_fake_cache()}
    return result
class CloudTestCase(test.TestCase):
    def setUp(self):
        """Wire up fake drivers/images/network, start the nova services, and
        build an admin request context for the EC2 cloud controller."""
        super(CloudTestCase, self).setUp()
        self.useFixture(test.SampleNetworks())
        ec2utils.reset_cache()
        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   volume_api_class='nova.tests.fake_volume.API')
        self.useFixture(fixtures.FakeLogger('boto'))
        fake_utils.stub_out_utils_spawn_n(self.stubs)
        def fake_show(meh, context, id):
            # Minimal glance image record the EC2 layer can map to an AMI.
            return {'id': id,
                    'name': 'fake_name',
                    'container_format': 'ami',
                    'status': 'active',
                    'properties': {
                        'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'type': 'machine',
                        'image_state': 'available'}}
        def fake_detail(_self, context, **kwargs):
            # detail() echoes the requested name filter back on the record.
            image = fake_show(None, context, None)
            image['name'] = kwargs.get('filters', {}).get('name')
            return [image]
        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
        fake.stub_out_image_service(self.stubs)
        def dumb(*args, **kwargs):
            pass
        # Silence usage notifications; they are irrelevant to these tests.
        self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
        fake_network.set_stub_network_methods(self.stubs)
        # set up our cloud
        self.cloud = cloud.CloudController()
        self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
        # Short-circuit the conductor service
        self.flags(use_local=True, group='conductor')
        # set up services
        self.conductor = self.start_service('conductor',
                                            manager=CONF.conductor.manager)
        self.compute = self.start_service('compute')
        self.scheduler = self.start_service('scheduler')
        self.network = self.start_service('network')
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)
        self.volume_api = volume.API()
        # NOTE(comstud): Make 'cast' behave like a 'call' which will
        # ensure that operations complete
        self.stubs.Set(rpc, 'cast', rpc.call)
        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.s3_image_create(self.context,
                           'cedef40a-ed67-4d10-800e-17455edce175')
        db.s3_image_create(self.context,
                           '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
    def tearDown(self):
        """Reset the fake volume API and image service between tests."""
        self.volume_api.reset_fake_api(self.context)
        super(CloudTestCase, self).tearDown()
        fake.FakeImageService_reset()
    def fake_get_target(obj, iqn):
        # Stub: every iSCSI lookup resolves to target id 1.
        return 1
    def fake_remove_iscsi_target(obj, tid, lun, vol_id, **kwargs):
        # Stub: target removal is a no-op in these tests.
        pass
    def _stub_instance_get_with_fixed_ips(self, func_name):
        """Wrap a compute-API getter so returned instances carry the fake
        network-info cache (see get_instances_with_cached_ips)."""
        orig_func = getattr(self.cloud.compute_api, func_name)
        def fake_get(*args, **kwargs):
            return get_instances_with_cached_ips(orig_func, *args, **kwargs)
        self.stubs.Set(self.cloud.compute_api, func_name, fake_get)
    def _create_key(self, name):
        """Create a keypair for the test user and return it."""
        # NOTE(vish): create depends on pool, so just call helper directly
        keypair_api = compute_api.KeypairAPI()
        return keypair_api.create_key_pair(self.context, self.context.user_id,
                                           name)
    def test_describe_regions(self):
        """describe_regions returns one region by default and honors the
        region_list config option."""
        # Makes sure describe regions runs without raising an exception.
        result = self.cloud.describe_regions(self.context)
        self.assertEqual(len(result['regionInfo']), 1)
        self.flags(region_list=["one=test_host1", "two=test_host2"])
        result = self.cloud.describe_regions(self.context)
        self.assertEqual(len(result['regionInfo']), 2)
    def test_describe_addresses(self):
        """Full allocate/describe/release cycle for a floating IP."""
        # Makes sure describe addresses runs without raising an exception.
        address = "10.10.10.10"
        db.floating_ip_create(self.context,
                              {'address': address,
                               'pool': 'nova'})
        self.cloud.allocate_address(self.context)
        self.cloud.describe_addresses(self.context)
        self.cloud.release_address(self.context,
                                  public_ip=address)
        db.floating_ip_destroy(self.context, address)
    def test_describe_specific_address(self):
        """describe_addresses filters by the public_ip argument."""
        # Makes sure describe specific address works.
        addresses = ["10.10.10.10", "10.10.10.11"]
        for address in addresses:
            db.floating_ip_create(self.context,
                                  {'address': address,
                                   'pool': 'nova'})
            self.cloud.allocate_address(self.context)
        result = self.cloud.describe_addresses(self.context)
        self.assertEqual(len(result['addressesSet']), 2)
        result = self.cloud.describe_addresses(self.context,
                                               public_ip=['10.10.10.10'])
        self.assertEqual(len(result['addressesSet']), 1)
        for address in addresses:
            self.cloud.release_address(self.context,
                                       public_ip=address)
            db.floating_ip_destroy(self.context, address)
    def test_allocate_address(self):
        """allocate_address hands out the pooled IP, then raises
        NoMoreFloatingIps once the pool is exhausted."""
        address = "10.10.10.10"
        allocate = self.cloud.allocate_address
        db.floating_ip_create(self.context,
                              {'address': address,
                               'pool': 'nova'})
        self.assertEqual(allocate(self.context)['publicIp'], address)
        db.floating_ip_destroy(self.context, address)
        self.assertRaises(exception.NoMoreFloatingIps,
                          allocate,
                          self.context)
    def test_release_address(self):
        """Releasing an allocated floating IP returns EC2's 'true'."""
        address = "10.10.10.10"
        db.floating_ip_create(self.context,
                              {'address': address,
                               'pool': 'nova',
                               'project_id': self.project_id})
        result = self.cloud.release_address(self.context, address)
        self.assertEqual(result.get('return', None), 'true')
    def test_associate_disassociate_address(self):
        """Associate a floating IP with a real instance, then disassociate
        and release it, cleaning up the fixed IP and instance."""
        # Verifies associate runs cleanly without raising an exception.
        address = "10.10.10.10"
        db.floating_ip_create(self.context,
                              {'address': address,
                               'pool': 'nova'})
        self.cloud.allocate_address(self.context)
        # TODO(jkoelker) Probably need to query for instance_type_id and
        #                make sure we get a valid one
        inst = db.instance_create(self.context, {'host': self.compute.host,
                                                 'display_name': HOST,
                                                 'instance_type_id': 1})
        networks = db.network_get_all(self.context)
        for network in networks:
            db.network_update(self.context, network['id'],
                              {'host': self.network.host})
        project_id = self.context.project_id
        nw_info = self.network.allocate_for_instance(self.context,
                                                 instance_id=inst['id'],
                                                 instance_uuid=inst['uuid'],
                                                 host=inst['host'],
                                                 vpn=None,
                                                 rxtx_factor=3,
                                                 project_id=project_id,
                                                 macs=None)
        fixed_ips = nw_info.fixed_ips()
        ec2_id = ec2utils.id_to_ec2_inst_id(inst['uuid'])
        # Stub the IP/instance lookups the EC2 layer performs so the
        # association path does not depend on real network state.
        self.stubs.Set(ec2utils, 'get_ip_info_for_instance',
                       lambda *args: {'fixed_ips': ['10.0.0.1'],
                                      'fixed_ip6s': [],
                                      'floating_ips': []})
        self.stubs.Set(network_api.API, 'get_instance_id_by_floating_address',
                       lambda *args: 1)
        self.cloud.associate_address(self.context,
                                     instance_id=ec2_id,
                                     public_ip=address)
        self.cloud.disassociate_address(self.context,
                                        public_ip=address)
        self.cloud.release_address(self.context,
                                  public_ip=address)
        self.network.deallocate_fixed_ip(self.context, fixed_ips[0]['address'],
                                         inst['host'])
        db.instance_destroy(self.context, inst['uuid'])
        db.floating_ip_destroy(self.context, address)
    def test_disassociate_auto_assigned_address(self):
        """Verifies disassociating auto assigned floating IP
        raises an exception
        """
        address = "10.10.10.10"
        def fake_get(*args, **kwargs):
            pass
        def fake_disassociate_floating_ip(*args, **kwargs):
            raise exception.CannotDisassociateAutoAssignedFloatingIP()
        self.stubs.Set(network_api.API, 'get_instance_id_by_floating_address',
                       lambda *args: 1)
        self.stubs.Set(self.cloud.compute_api, 'get', fake_get)
        self.stubs.Set(network_api.API, 'disassociate_floating_ip',
                       fake_disassociate_floating_ip)
        # The network-layer error must surface as an EC2 API error.
        self.assertRaises(exception.EC2APIError,
                          self.cloud.disassociate_address,
                          self.context, public_ip=address)
    def test_disassociate_unassociated_address(self):
        """Disassociating an IP never tied to an instance raises
        InstanceNotFound."""
        address = "10.10.10.10"
        db.floating_ip_create(self.context,
                              {'address': address,
                               'pool': 'nova'})
        self.cloud.allocate_address(self.context)
        self.cloud.describe_addresses(self.context)
        self.assertRaises(exception.InstanceNotFound,
                          self.cloud.disassociate_address,
                          self.context, public_ip=address)
        db.floating_ip_destroy(self.context, address)
    def test_describe_security_groups(self):
        """describe_security_groups lists all groups and filters by name."""
        # Makes sure describe_security_groups works and filters results.
        sec = db.security_group_create(self.context,
                                       {'project_id': self.context.project_id,
                                        'name': 'test'})
        result = self.cloud.describe_security_groups(self.context)
        # NOTE(vish): should have the default group as well
        self.assertEqual(len(result['securityGroupInfo']), 2)
        result = self.cloud.describe_security_groups(self.context,
                      group_name=[sec['name']])
        self.assertEqual(len(result['securityGroupInfo']), 1)
        self.assertEqual(
                result['securityGroupInfo'][0]['groupName'],
                sec['name'])
        db.security_group_destroy(self.context, sec['id'])
    def test_describe_security_groups_all_tenants(self):
        """The all-tenants filter controls whether other projects' groups
        are included in the listing."""
        # Makes sure describe_security_groups works and filters results.
        sec = db.security_group_create(self.context,
                                       {'project_id': 'foobar',
                                        'name': 'test'})
        def _check_name(result, i, expected):
            self.assertEqual(result['securityGroupInfo'][i]['groupName'],
                             expected)
        # include all tenants
        filter = [{'name': 'all-tenants', 'value': {'1': 1}}]
        result = self.cloud.describe_security_groups(self.context,
                                                     filter=filter)
        self.assertEqual(len(result['securityGroupInfo']), 2)
        _check_name(result, 0, 'default')
        _check_name(result, 1, sec['name'])
        # exclude all tenants
        filter = [{'name': 'all-tenants', 'value': {'1': 0}}]
        result = self.cloud.describe_security_groups(self.context,
                                                     filter=filter)
        self.assertEqual(len(result['securityGroupInfo']), 1)
        _check_name(result, 0, 'default')
        # default all tenants
        result = self.cloud.describe_security_groups(self.context)
        self.assertEqual(len(result['securityGroupInfo']), 1)
        _check_name(result, 0, 'default')
        db.security_group_destroy(self.context, sec['id'])
    def test_describe_security_groups_by_id(self):
        """describe_security_groups filters by numeric group_id, for both a
        custom group and the project's default group."""
        sec = db.security_group_create(self.context,
                                       {'project_id': self.context.project_id,
                                        'name': 'test'})
        result = self.cloud.describe_security_groups(self.context,
                      group_id=[sec['id']])
        self.assertEqual(len(result['securityGroupInfo']), 1)
        self.assertEqual(
                result['securityGroupInfo'][0]['groupName'],
                sec['name'])
        default = db.security_group_get_by_name(self.context,
                                                self.context.project_id,
                                                'default')
        result = self.cloud.describe_security_groups(self.context,
                      group_id=[default['id']])
        self.assertEqual(len(result['securityGroupInfo']), 1)
        self.assertEqual(
                result['securityGroupInfo'][0]['groupName'],
                'default')
        db.security_group_destroy(self.context, sec['id'])
    def test_create_delete_security_group(self):
        """A group can be created with a description and deleted by name."""
        descript = 'test description'
        create = self.cloud.create_security_group
        result = create(self.context, 'testgrp', descript)
        group_descript = result['securityGroupSet'][0]['groupDescription']
        self.assertEqual(descript, group_descript)
        delete = self.cloud.delete_security_group
        self.assertTrue(delete(self.context, 'testgrp'))
    def test_security_group_quota_limit(self):
        """Creating more groups than quota_security_groups fails."""
        self.flags(quota_security_groups=10)
        for i in range(1, CONF.quota_security_groups + 1):
            name = 'test name %i' % i
            descript = 'test description %i' % i
            create = self.cloud.create_security_group
            result = create(self.context, name, descript)
        # 11'th group should fail
        self.assertRaises(exception.EC2APIError,
                          create, self.context, 'foo', 'bar')
    def test_delete_security_group_by_id(self):
        """A group can be deleted by numeric group_id."""
        sec = db.security_group_create(self.context,
                                       {'project_id': self.context.project_id,
                                        'name': 'test'})
        delete = self.cloud.delete_security_group
        self.assertTrue(delete(self.context, group_id=sec['id']))
def test_delete_security_group_with_bad_name(self):
delete = self.cloud.delete_security_group
notfound = exception.SecurityGroupNotFound
self.assertRaises(notfound, delete, self.context, 'badname')
def test_delete_security_group_with_bad_group_id(self):
delete = self.cloud.delete_security_group
notfound = exception.SecurityGroupNotFound
self.assertRaises(notfound, delete, self.context, group_id=999)
def test_delete_security_group_no_params(self):
delete = self.cloud.delete_security_group
self.assertRaises(exception.EC2APIError, delete, self.context)
    def test_authorize_security_group_ingress(self):
        """A simple TCP port rule can be authorized by group name."""
        kwargs = {'project_id': self.context.project_id, 'name': 'test'}
        sec = db.security_group_create(self.context, kwargs)
        authz = self.cloud.authorize_security_group_ingress
        kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
        self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
    def test_authorize_security_group_ingress_ip_permissions_ip_ranges(self):
        """Authorization accepts the EC2 ip_permissions structure carrying
        multiple CIDR ranges."""
        kwargs = {'project_id': self.context.project_id, 'name': 'test'}
        sec = db.security_group_create(self.context, kwargs)
        authz = self.cloud.authorize_security_group_ingress
        kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
                                      'ip_ranges':
                                         {'1': {'cidr_ip': u'0.0.0.0/0'},
                                          '2': {'cidr_ip': u'10.10.10.10/32'}},
                                      'ip_protocol': u'tcp'}]}
        self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
    def test_authorize_security_group_fail_missing_source_group(self):
        """Referencing a source group that does not exist raises
        SecurityGroupNotFound."""
        kwargs = {'project_id': self.context.project_id, 'name': 'test'}
        sec = db.security_group_create(self.context, kwargs)
        authz = self.cloud.authorize_security_group_ingress
        kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
                  'ip_ranges': {'1': {'cidr_ip': u'0.0.0.0/0'},
                                '2': {'cidr_ip': u'10.10.10.10/32'}},
                  'groups': {'1': {'user_id': u'someuser',
                                   'group_name': u'somegroup1'}},
                  'ip_protocol': u'tcp'}]}
        self.assertRaises(exception.SecurityGroupNotFound, authz,
                          self.context, group_name=sec['name'], **kwargs)
    def test_authorize_security_group_ingress_ip_permissions_groups(self):
        """Authorization accepts multiple source groups (possibly owned by
        another user) in one ip_permissions entry."""
        kwargs = {'project_id': self.context.project_id, 'name': 'test'}
        sec = db.security_group_create(self.context,
                                       {'project_id': 'someuser',
                                        'name': 'somegroup1'})
        sec = db.security_group_create(self.context,
                                       {'project_id': 'someuser',
                                        'name': 'othergroup2'})
        sec = db.security_group_create(self.context, kwargs)
        authz = self.cloud.authorize_security_group_ingress
        kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
                  'groups': {'1': {'user_id': u'someuser',
                                   'group_name': u'somegroup1'},
                             '2': {'user_id': u'someuser',
                                   'group_name': u'othergroup2'}},
                  'ip_protocol': u'tcp'}]}
        self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
    def test_describe_security_group_ingress_groups(self):
        """Rules granted to source groups (with and without explicit ports)
        are expanded correctly in describe_security_groups output."""
        kwargs = {'project_id': self.context.project_id, 'name': 'test'}
        sec1 = db.security_group_create(self.context, kwargs)
        sec2 = db.security_group_create(self.context,
                                       {'project_id': 'someuser',
                                        'name': 'somegroup1'})
        sec3 = db.security_group_create(self.context,
                                       {'project_id': 'someuser',
                                        'name': 'othergroup2'})
        authz = self.cloud.authorize_security_group_ingress
        kwargs = {'ip_permissions': [
                  {'groups': {'1': {'user_id': u'someuser',
                                    'group_name': u'somegroup1'}}},
                  {'ip_protocol': 'tcp',
                   'from_port': 80,
                   'to_port': 80,
                   'groups': {'1': {'user_id': u'someuser',
                                    'group_name': u'othergroup2'}}}]}
        self.assertTrue(authz(self.context, group_name=sec1['name'], **kwargs))
        describe = self.cloud.describe_security_groups
        groups = describe(self.context, group_name=['test'])
        self.assertEquals(len(groups['securityGroupInfo']), 1)
        actual_rules = groups['securityGroupInfo'][0]['ipPermissions']
        self.assertEquals(len(actual_rules), 4)
        # A port-less group grant expands to icmp (all) plus full-range
        # tcp and udp rules; the explicit port-80 grant stays as-is.
        expected_rules = [{'fromPort': -1,
                           'groups': [{'groupName': 'somegroup1',
                                       'userId': 'someuser'}],
                           'ipProtocol': 'icmp',
                           'ipRanges': [],
                           'toPort': -1},
                          {'fromPort': 1,
                           'groups': [{'groupName': u'somegroup1',
                                       'userId': u'someuser'}],
                           'ipProtocol': 'tcp',
                           'ipRanges': [],
                           'toPort': 65535},
                          {'fromPort': 1,
                           'groups': [{'groupName': u'somegroup1',
                                       'userId': u'someuser'}],
                           'ipProtocol': 'udp',
                           'ipRanges': [],
                           'toPort': 65535},
                          {'fromPort': 80,
                           'groups': [{'groupName': u'othergroup2',
                                       'userId': u'someuser'}],
                           'ipProtocol': u'tcp',
                           'ipRanges': [],
                           'toPort': 80}]
        for rule in expected_rules:
            self.assertTrue(rule in actual_rules)
        db.security_group_destroy(self.context, sec3['id'])
        db.security_group_destroy(self.context, sec2['id'])
        db.security_group_destroy(self.context, sec1['id'])
    def test_revoke_security_group_ingress(self):
        """A rule authorized by group id can be revoked by group name."""
        kwargs = {'project_id': self.context.project_id, 'name': 'test'}
        sec = db.security_group_create(self.context, kwargs)
        authz = self.cloud.authorize_security_group_ingress
        kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
        authz(self.context, group_id=sec['id'], **kwargs)
        revoke = self.cloud.revoke_security_group_ingress
        self.assertTrue(revoke(self.context, group_name=sec['name'], **kwargs))
    def test_authorize_revoke_security_group_ingress_by_id(self):
        """Authorize and revoke both work when addressing by group id."""
        sec = db.security_group_create(self.context,
                                       {'project_id': self.context.project_id,
                                        'name': 'test'})
        authz = self.cloud.authorize_security_group_ingress
        kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
        authz(self.context, group_id=sec['id'], **kwargs)
        revoke = self.cloud.revoke_security_group_ingress
        self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs))
    def test_authorize_security_group_ingress_missing_protocol_params(self):
        """Authorizing with no protocol/port arguments raises EC2APIError."""
        sec = db.security_group_create(self.context,
                                       {'project_id': self.context.project_id,
                                        'name': 'test'})
        authz = self.cloud.authorize_security_group_ingress
        self.assertRaises(exception.EC2APIError, authz, self.context, 'test')
    def test_authorize_security_group_ingress_missing_group_name_or_id(self):
        """Authorizing without group_name or group_id raises EC2APIError."""
        kwargs = {'project_id': self.context.project_id, 'name': 'test'}
        authz = self.cloud.authorize_security_group_ingress
        self.assertRaises(exception.EC2APIError, authz, self.context, **kwargs)
    def test_authorize_security_group_ingress_already_exists(self):
        """Authorizing the same rule twice raises EC2APIError."""
        kwargs = {'project_id': self.context.project_id, 'name': 'test'}
        sec = db.security_group_create(self.context, kwargs)
        authz = self.cloud.authorize_security_group_ingress
        kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
        authz(self.context, group_name=sec['name'], **kwargs)
        self.assertRaises(exception.EC2APIError, authz, self.context,
                          group_name=sec['name'], **kwargs)
    def test_security_group_ingress_quota_limit(self):
        """Adding more rules than quota_security_group_rules fails."""
        self.flags(quota_security_group_rules=20)
        kwargs = {'project_id': self.context.project_id, 'name': 'test'}
        sec_group = db.security_group_create(self.context, kwargs)
        authz = self.cloud.authorize_security_group_ingress
        for i in range(100, 120):
            kwargs = {'to_port': i, 'from_port': i, 'ip_protocol': 'tcp'}
            authz(self.context, group_id=sec_group['id'], **kwargs)
        kwargs = {'to_port': 121, 'from_port': 121, 'ip_protocol': 'tcp'}
        self.assertRaises(exception.EC2APIError, authz, self.context,
                          group_id=sec_group['id'], **kwargs)
def _test_authorize_security_group_no_ports_with_source_group(self, proto):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
auth_kwargs = {'ip_protocol': proto,
'groups': {'1': {'user_id': self.context.user_id,
'group_name': u'test'}}}
self.assertTrue(authz(self.context, group_name=sec['name'],
**auth_kwargs))
describe = self.cloud.describe_security_groups
groups = describe(self.context, group_name=['test'])
self.assertEquals(len(groups['securityGroupInfo']), 1)
actual_rules = groups['securityGroupInfo'][0]['ipPermissions']
expected_rules = [{'groups': [{'groupName': 'test',
'userId': self.context.user_id}],
'ipProtocol': proto,
'ipRanges': []}]
if proto == 'icmp':
expected_rules[0]['fromPort'] = -1
expected_rules[0]['toPort'] = -1
else:
expected_rules[0]['fromPort'] = 1
expected_rules[0]['toPort'] = 65535
self.assertTrue(expected_rules == actual_rules)
describe = self.cloud.describe_security_groups
groups = describe(self.context, group_name=['test'])
db.security_group_destroy(self.context, sec['id'])
def _test_authorize_security_group_no_ports_no_source_group(self, proto):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
auth_kwargs = {'ip_protocol': proto}
self.assertRaises(exception.EC2APIError, authz, self.context,
group_name=sec['name'], **auth_kwargs)
db.security_group_destroy(self.context, sec['id'])
def test_authorize_security_group_no_ports_icmp(self):
self._test_authorize_security_group_no_ports_with_source_group('icmp')
self._test_authorize_security_group_no_ports_no_source_group('icmp')
def test_authorize_security_group_no_ports_tcp(self):
self._test_authorize_security_group_no_ports_with_source_group('tcp')
self._test_authorize_security_group_no_ports_no_source_group('tcp')
def test_authorize_security_group_no_ports_udp(self):
self._test_authorize_security_group_no_ports_with_source_group('udp')
self._test_authorize_security_group_no_ports_no_source_group('udp')
def test_revoke_security_group_ingress_missing_group_name_or_id(self):
kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
revoke = self.cloud.revoke_security_group_ingress
self.assertRaises(exception.EC2APIError, revoke,
self.context, **kwargs)
def test_delete_security_group_in_use_by_group(self):
group1 = self.cloud.create_security_group(self.context, 'testgrp1',
"test group 1")
group2 = self.cloud.create_security_group(self.context, 'testgrp2',
"test group 2")
kwargs = {'groups': {'1': {'user_id': u'%s' % self.context.user_id,
'group_name': u'testgrp2'}},
}
self.cloud.authorize_security_group_ingress(self.context,
group_name='testgrp1', **kwargs)
group1 = db.security_group_get_by_name(self.context,
self.project_id, 'testgrp1')
get_rules = db.security_group_rule_get_by_security_group
self.assertTrue(get_rules(self.context, group1['id']))
self.cloud.delete_security_group(self.context, 'testgrp2')
self.assertFalse(get_rules(self.context, group1['id']))
def test_delete_security_group_in_use_by_instance(self):
# Ensure that a group can not be deleted if in use by an instance.
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
args = {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active'}
inst = db.instance_create(self.context, args)
args = {'user_id': self.context.user_id,
'project_id': self.context.project_id,
'name': 'testgrp',
'description': 'Test group'}
group = db.security_group_create(self.context, args)
db.instance_add_security_group(self.context, inst['uuid'], group['id'])
self.assertRaises(exception.InvalidGroup,
self.cloud.delete_security_group,
self.context, 'testgrp')
db.instance_destroy(self.context, inst['uuid'])
self.cloud.delete_security_group(self.context, 'testgrp')
def test_describe_availability_zones(self):
# Makes sure describe_availability_zones works and filters results.
service1 = db.service_create(self.context, {'host': 'host1_zones',
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
service2 = db.service_create(self.context, {'host': 'host2_zones',
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
# Aggregate based zones
agg = db.aggregate_create(self.context,
{'name': 'agg1'}, {'availability_zone': 'zone1'})
db.aggregate_host_add(self.context, agg['id'], 'host1_zones')
agg = db.aggregate_create(self.context,
{'name': 'agg2'}, {'availability_zone': 'zone2'})
db.aggregate_host_add(self.context, agg['id'], 'host2_zones')
result = self.cloud.describe_availability_zones(self.context)
self.assertEqual(len(result['availabilityZoneInfo']), 3)
admin_ctxt = context.get_admin_context(read_deleted="no")
result = self.cloud.describe_availability_zones(admin_ctxt,
zone_name='verbose')
self.assertEqual(len(result['availabilityZoneInfo']), 16)
db.service_destroy(self.context, service1['id'])
db.service_destroy(self.context, service2['id'])
def test_describe_availability_zones_verbose(self):
# Makes sure describe_availability_zones works and filters results.
service1 = db.service_create(self.context, {'host': 'host1_zones',
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
service2 = db.service_create(self.context, {'host': 'host2_zones',
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
agg = db.aggregate_create(self.context,
{'name': 'agg1'}, {'availability_zone': 'second_zone'})
db.aggregate_host_add(self.context, agg['id'], 'host2_zones')
admin_ctxt = context.get_admin_context(read_deleted="no")
result = self.cloud.describe_availability_zones(admin_ctxt,
zone_name='verbose')
self.assertEqual(len(result['availabilityZoneInfo']), 15)
db.service_destroy(self.context, service1['id'])
db.service_destroy(self.context, service2['id'])
def assertEqualSorted(self, x, y):
self.assertEqual(sorted(x), sorted(y))
    def test_describe_instances(self):
        """Exercise describe_instances: full listing, filtering by
        instance id, and failure on any invalid id in the filter."""
        # Makes sure describe_instances works and filters results.
        self.flags(use_ipv6=True)
        self._stub_instance_get_with_fixed_ips('get_all')
        self._stub_instance_get_with_fixed_ips('get')
        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
        sys_meta = flavors.save_flavor_info(
            {}, flavors.get_flavor(1))
        # Two instances on different hosts; each host is placed in its
        # own availability zone via an aggregate below.
        inst1 = db.instance_create(self.context, {'reservation_id': 'a',
                                                  'image_ref': image_uuid,
                                                  'instance_type_id': 1,
                                                  'host': 'host1',
                                                  'hostname': 'server-1234',
                                                  'vm_state': 'active',
                                                  'system_metadata': sys_meta})
        inst2 = db.instance_create(self.context, {'reservation_id': 'a',
                                                  'image_ref': image_uuid,
                                                  'instance_type_id': 1,
                                                  'host': 'host2',
                                                  'hostname': 'server-4321',
                                                  'vm_state': 'active',
                                                  'system_metadata': sys_meta})
        comp1 = db.service_create(self.context, {'host': 'host1',
                                                 'topic': "compute"})
        agg = db.aggregate_create(self.context,
                {'name': 'agg1'}, {'availability_zone': 'zone1'})
        db.aggregate_host_add(self.context, agg['id'], 'host1')
        comp2 = db.service_create(self.context, {'host': 'host2',
                                                 'topic': "compute"})
        agg2 = db.aggregate_create(self.context,
                {'name': 'agg2'}, {'availability_zone': 'zone2'})
        db.aggregate_host_add(self.context, agg2['id'], 'host2')
        # Unfiltered: both instances appear in one reservation.
        result = self.cloud.describe_instances(self.context)
        result = result['reservationSet'][0]
        self.assertEqual(len(result['instancesSet']), 2)
        # Now try filtering.
        instance_id = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
        result = self.cloud.describe_instances(self.context,
                                               instance_id=[instance_id])
        result = result['reservationSet'][0]
        self.assertEqual(len(result['instancesSet']), 1)
        instance = result['instancesSet'][0]
        self.assertEqual(instance['instanceId'], instance_id)
        self.assertEqual(instance['placement']['availabilityZone'],
                         'zone2')
        self.assertEqual(instance['publicDnsName'], '1.2.3.4')
        self.assertEqual(instance['ipAddress'], '1.2.3.4')
        self.assertEqual(instance['dnsName'], '1.2.3.4')
        self.assertEqual(instance['tagSet'], [])
        self.assertEqual(instance['privateDnsName'], 'server-4321')
        self.assertEqual(instance['privateIpAddress'], '192.168.0.3')
        self.assertEqual(instance['dnsNameV6'],
                         'fe80:b33f::a8bb:ccff:fedd:eeff')
        # A filter with even one invalid id should cause an exception to be
        # raised
        self.assertRaises(exception.InstanceNotFound,
                          self.cloud.describe_instances, self.context,
                          instance_id=[instance_id, '435679'])
        db.instance_destroy(self.context, inst1['uuid'])
        db.instance_destroy(self.context, inst2['uuid'])
        db.service_destroy(self.context, comp1['id'])
        db.service_destroy(self.context, comp2['id'])
def test_describe_instances_all_invalid(self):
# Makes sure describe_instances works and filters results.
self.flags(use_ipv6=True)
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
instance_id = ec2utils.id_to_ec2_inst_id('435679')
self.assertRaises(exception.InstanceNotFound,
self.cloud.describe_instances, self.context,
instance_id=[instance_id])
def test_describe_instances_with_filters(self):
# Makes sure describe_instances works and filters results.
filters = {'filter': [{'name': 'test',
'value': ['a', 'b']},
{'name': 'another_test',
'value': 'a string'}]}
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
result = self.cloud.describe_instances(self.context, **filters)
self.assertEqual(result, {'reservationSet': []})
    def test_describe_instances_with_tag_filters(self):
        """Verify tag filtering in describe_instances: tag:KEY, tag-key
        and tag-value filters with single values, multiple values, and
        OR-combined filters, checked against full expected payloads."""
        # Makes sure describe_instances works and filters tag results.
        # We need to stub network calls
        self._stub_instance_get_with_fixed_ips('get_all')
        self._stub_instance_get_with_fixed_ips('get')
        # We need to stub out the MQ call - it won't succeed.  We do want
        # to check that the method is called, though
        meta_changes = [None]
        def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
                                          instance_uuid=None):
            # Record the diff the compute API would have sent over MQ.
            meta_changes[0] = diff
        self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
                       fake_change_instance_metadata)
        utc = iso8601.iso8601.Utc()
        # Create some test images
        sys_meta = flavors.save_flavor_info(
            {}, flavors.get_flavor(1))
        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
        inst1_kwargs = {
                'reservation_id': 'a',
                'image_ref': image_uuid,
                'instance_type_id': 1,
                'host': 'host1',
                'vm_state': 'active',
                'launched_at': timeutils.utcnow(),
                'hostname': 'server-1111',
                'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1,
                                                tzinfo=utc),
                'system_metadata': sys_meta
        }
        inst2_kwargs = {
                'reservation_id': 'b',
                'image_ref': image_uuid,
                'instance_type_id': 1,
                'host': 'host2',
                'vm_state': 'active',
                'launched_at': timeutils.utcnow(),
                'hostname': 'server-1112',
                'created_at': datetime.datetime(2012, 5, 1, 1, 1, 2,
                                                tzinfo=utc),
                'system_metadata': sys_meta
        }
        inst1 = db.instance_create(self.context, inst1_kwargs)
        ec2_id1 = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
        inst2 = db.instance_create(self.context, inst2_kwargs)
        ec2_id2 = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
        # Create some tags
        # We get one overlapping pair, one overlapping key, and a
        # disparate pair
        # inst1 : {'foo': 'bar', 'baz': 'wibble', 'bax': 'wobble'}
        # inst2 : {'foo': 'bar', 'baz': 'quux', 'zog': 'bobble'}
        md = {'key': 'foo', 'value': 'bar'}
        self.cloud.create_tags(self.context, resource_id=[ec2_id1, ec2_id2],
                tag=[md])
        md2 = {'key': 'baz', 'value': 'wibble'}
        md3 = {'key': 'bax', 'value': 'wobble'}
        self.cloud.create_tags(self.context, resource_id=[ec2_id1],
                tag=[md2, md3])
        md4 = {'key': 'baz', 'value': 'quux'}
        md5 = {'key': 'zog', 'value': 'bobble'}
        self.cloud.create_tags(self.context, resource_id=[ec2_id2],
                tag=[md4, md5])
        # We should be able to search by:
        # Full expected reservation payloads used by every assertion
        # below; any field drift makes the equality checks fail.
        inst1_ret = {
            'groupSet': None,
            'instancesSet': [{'amiLaunchIndex': None,
                              'dnsName': '1.2.3.4',
                              'dnsNameV6': 'fe80:b33f::a8bb:ccff:fedd:eeff',
                              'imageId': 'ami-00000001',
                              'instanceId': 'i-00000001',
                              'instanceState': {'code': 16,
                                                'name': 'running'},
                              'instanceType': u'm1.medium',
                              'ipAddress': '1.2.3.4',
                              'keyName': 'None (None, host1)',
                              'launchTime':
                                  datetime.datetime(2012, 5, 1, 1, 1, 1,
                                                    tzinfo=utc),
                              'placement': {
                                  'availabilityZone': 'nova'},
                              'privateDnsName': u'server-1111',
                              'privateIpAddress': '192.168.0.3',
                              'productCodesSet': None,
                              'publicDnsName': '1.2.3.4',
                              'rootDeviceName': '/dev/sda1',
                              'rootDeviceType': 'instance-store',
                              'tagSet': [{'key': u'foo',
                                          'value': u'bar'},
                                         {'key': u'baz',
                                          'value': u'wibble'},
                                         {'key': u'bax',
                                          'value': u'wobble'}]}],
            'ownerId': None,
            'reservationId': u'a'}
        inst2_ret = {
            'groupSet': None,
            'instancesSet': [{'amiLaunchIndex': None,
                              'dnsName': '1.2.3.4',
                              'dnsNameV6': 'fe80:b33f::a8bb:ccff:fedd:eeff',
                              'imageId': 'ami-00000001',
                              'instanceId': 'i-00000002',
                              'instanceState': {'code': 16,
                                                'name': 'running'},
                              'instanceType': u'm1.medium',
                              'ipAddress': '1.2.3.4',
                              'keyName': u'None (None, host2)',
                              'launchTime':
                                  datetime.datetime(2012, 5, 1, 1, 1, 2,
                                                    tzinfo=utc),
                              'placement': {
                                  'availabilityZone': 'nova'},
                              'privateDnsName': u'server-1112',
                              'privateIpAddress': '192.168.0.3',
                              'productCodesSet': None,
                              'publicDnsName': '1.2.3.4',
                              'rootDeviceName': '/dev/sda1',
                              'rootDeviceType': 'instance-store',
                              'tagSet': [{'key': u'foo',
                                          'value': u'bar'},
                                         {'key': u'baz',
                                          'value': u'quux'},
                                         {'key': u'zog',
                                          'value': u'bobble'}]}],
            'ownerId': None,
            'reservationId': u'b'}
        # No filter
        result = self.cloud.describe_instances(self.context)
        self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
        # Key search
        # Both should have tags with key 'foo' and value 'bar'
        filters = {'filter': [{'name': 'tag:foo',
                               'value': ['bar']}]}
        result = self.cloud.describe_instances(self.context, **filters)
        self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
        # Both should have tags with key 'foo'
        filters = {'filter': [{'name': 'tag-key',
                               'value': ['foo']}]}
        result = self.cloud.describe_instances(self.context, **filters)
        self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
        # Value search
        # Only inst2 should have tags with key 'baz' and value 'quux'
        filters = {'filter': [{'name': 'tag:baz',
                               'value': ['quux']}]}
        result = self.cloud.describe_instances(self.context, **filters)
        self.assertEqual(result, {'reservationSet': [inst2_ret]})
        # Only inst2 should have tags with value 'quux'
        filters = {'filter': [{'name': 'tag-value',
                               'value': ['quux']}]}
        result = self.cloud.describe_instances(self.context, **filters)
        self.assertEqual(result, {'reservationSet': [inst2_ret]})
        # Multiple values
        # Both should have tags with key 'baz' and values in the set
        # ['quux', 'wibble']
        filters = {'filter': [{'name': 'tag:baz',
                               'value': ['quux', 'wibble']}]}
        result = self.cloud.describe_instances(self.context, **filters)
        self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
        # Both should have tags with key 'baz' or tags with value 'bar'
        filters = {'filter': [{'name': 'tag-key',
                               'value': ['baz']},
                              {'name': 'tag-value',
                               'value': ['bar']}]}
        result = self.cloud.describe_instances(self.context, **filters)
        self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
        # destroy the test instances
        db.instance_destroy(self.context, inst1['uuid'])
        db.instance_destroy(self.context, inst2['uuid'])
def test_describe_instances_sorting(self):
# Makes sure describe_instances works and is sorted as expected.
self.flags(use_ipv6=True)
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
inst_base = {
'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'vm_state': 'active',
'system_metadata': sys_meta,
}
utc = iso8601.iso8601.Utc()
inst1_kwargs = {}
inst1_kwargs.update(inst_base)
inst1_kwargs['host'] = 'host1'
inst1_kwargs['hostname'] = 'server-1111'
inst1_kwargs['created_at'] = datetime.datetime(2012, 5, 1, 1, 1, 1,
tzinfo=utc)
inst1 = db.instance_create(self.context, inst1_kwargs)
inst2_kwargs = {}
inst2_kwargs.update(inst_base)
inst2_kwargs['host'] = 'host2'
inst2_kwargs['hostname'] = 'server-2222'
inst2_kwargs['created_at'] = datetime.datetime(2012, 2, 1, 1, 1, 1,
tzinfo=utc)
inst2 = db.instance_create(self.context, inst2_kwargs)
inst3_kwargs = {}
inst3_kwargs.update(inst_base)
inst3_kwargs['host'] = 'host3'
inst3_kwargs['hostname'] = 'server-3333'
inst3_kwargs['created_at'] = datetime.datetime(2012, 2, 5, 1, 1, 1,
tzinfo=utc)
inst3 = db.instance_create(self.context, inst3_kwargs)
comp1 = db.service_create(self.context, {'host': 'host1',
'topic': "compute"})
comp2 = db.service_create(self.context, {'host': 'host2',
'topic': "compute"})
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]['instancesSet']
self.assertEqual(result[0]['launchTime'], inst2_kwargs['created_at'])
self.assertEqual(result[1]['launchTime'], inst3_kwargs['created_at'])
self.assertEqual(result[2]['launchTime'], inst1_kwargs['created_at'])
db.instance_destroy(self.context, inst1['uuid'])
db.instance_destroy(self.context, inst2['uuid'])
db.instance_destroy(self.context, inst3['uuid'])
db.service_destroy(self.context, comp1['id'])
db.service_destroy(self.context, comp2['id'])
def test_describe_instance_state(self):
# Makes sure describe_instances for instanceState works.
def test_instance_state(expected_code, expected_name,
power_state_, vm_state_, values=None):
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
values = values or {}
values.update({'image_ref': image_uuid, 'instance_type_id': 1,
'power_state': power_state_, 'vm_state': vm_state_,
'system_metadata': sys_meta})
inst = db.instance_create(self.context, values)
instance_id = ec2utils.id_to_ec2_inst_id(inst['uuid'])
result = self.cloud.describe_instances(self.context,
instance_id=[instance_id])
result = result['reservationSet'][0]
result = result['instancesSet'][0]['instanceState']
name = result['name']
code = result['code']
self.assertEqual(code, expected_code)
self.assertEqual(name, expected_name)
db.instance_destroy(self.context, inst['uuid'])
test_instance_state(inst_state.RUNNING_CODE, inst_state.RUNNING,
power_state.RUNNING, vm_states.ACTIVE)
test_instance_state(inst_state.STOPPED_CODE, inst_state.STOPPED,
power_state.NOSTATE, vm_states.STOPPED,
{'shutdown_terminate': False})
def test_describe_instances_no_ipv6(self):
# Makes sure describe_instances w/ no ipv6 works.
self.flags(use_ipv6=False)
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'hostname': 'server-1234',
'vm_state': 'active',
'system_metadata': sys_meta})
comp1 = db.service_create(self.context, {'host': 'host1',
'topic': "compute"})
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 1)
instance = result['instancesSet'][0]
instance_id = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
self.assertEqual(instance['instanceId'], instance_id)
self.assertEqual(instance['publicDnsName'], '1.2.3.4')
self.assertEqual(instance['ipAddress'], '1.2.3.4')
self.assertEqual(instance['dnsName'], '1.2.3.4')
self.assertEqual(instance['privateDnsName'], 'server-1234')
self.assertEqual(instance['privateIpAddress'], '192.168.0.3')
self.assertNotIn('dnsNameV6', instance)
db.instance_destroy(self.context, inst1['uuid'])
db.service_destroy(self.context, comp1['id'])
def test_describe_instances_deleted(self):
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
args1 = {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active',
'system_metadata': sys_meta}
inst1 = db.instance_create(self.context, args1)
args2 = {'reservation_id': 'b',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active',
'system_metadata': sys_meta}
inst2 = db.instance_create(self.context, args2)
db.instance_destroy(self.context, inst1['uuid'])
result = self.cloud.describe_instances(self.context)
self.assertEqual(len(result['reservationSet']), 1)
result1 = result['reservationSet'][0]['instancesSet']
self.assertEqual(result1[0]['instanceId'],
ec2utils.id_to_ec2_inst_id(inst2['uuid']))
def test_describe_instances_with_image_deleted(self):
image_uuid = 'aebef54a-ed67-4d10-912f-14455edce176'
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
args1 = {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active',
'system_metadata': sys_meta}
inst1 = db.instance_create(self.context, args1)
args2 = {'reservation_id': 'b',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active',
'system_metadata': sys_meta}
inst2 = db.instance_create(self.context, args2)
result = self.cloud.describe_instances(self.context)
self.assertEqual(len(result['reservationSet']), 2)
def test_describe_images(self):
describe_images = self.cloud.describe_images
def fake_detail(meh, context, **kwargs):
return [{'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'}}]
def fake_show_none(meh, context, id):
raise exception.ImageNotFound(image_id='bad_image_id')
def fake_detail_none(self, context, **kwargs):
return []
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
# list all
result1 = describe_images(self.context)
result1 = result1['imagesSet'][0]
self.assertEqual(result1['imageId'], 'ami-00000001')
# provided a valid image_id
result2 = describe_images(self.context, ['ami-00000001'])
self.assertEqual(1, len(result2['imagesSet']))
# provide more than 1 valid image_id
result3 = describe_images(self.context, ['ami-00000001',
'ami-00000002'])
self.assertEqual(2, len(result3['imagesSet']))
# provide a non-existing image_id
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show_none)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_none)
self.assertRaises(exception.ImageNotFound, describe_images,
self.context, ['ami-fake'])
def assertDictListUnorderedMatch(self, L1, L2, key):
self.assertEqual(len(L1), len(L2))
for d1 in L1:
self.assertTrue(key in d1)
for d2 in L2:
self.assertTrue(key in d2)
if d1[key] == d2[key]:
self.assertThat(d1, matchers.DictMatches(d2))
    def _setUpImageSet(self, create_volumes_and_snapshots=False):
        """Stub the fake image service with two images.

        image1 is instance-store rooted and carries a rich ``mappings``
        and ``block_device_mapping`` property set; image2 is EBS-rooted.
        When *create_volumes_and_snapshots* is True, the volumes and
        snapshots named in image1's block device mapping are created and
        their ids are returned as ``(volumes, snapshots)``.
        """
        mappings1 = [
            {'device': '/dev/sda1', 'virtual': 'root'},
            {'device': 'sdb0', 'virtual': 'ephemeral0'},
            {'device': 'sdb1', 'virtual': 'ephemeral1'},
            {'device': 'sdb2', 'virtual': 'ephemeral2'},
            {'device': 'sdb3', 'virtual': 'ephemeral3'},
            {'device': 'sdb4', 'virtual': 'ephemeral4'},
            {'device': 'sdc0', 'virtual': 'swap'},
            {'device': 'sdc1', 'virtual': 'swap'},
            {'device': 'sdc2', 'virtual': 'swap'},
            {'device': 'sdc3', 'virtual': 'swap'},
            {'device': 'sdc4', 'virtual': 'swap'}]
        block_device_mapping1 = [
            {'device_name': '/dev/sdb1',
             'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e3'},
            {'device_name': '/dev/sdb2',
             'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e4'},
            {'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'},
            {'device_name': '/dev/sdb4', 'no_device': True},
            {'device_name': '/dev/sdc1',
             'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e5'},
            {'device_name': '/dev/sdc2',
             'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e6'},
            {'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'},
            {'device_name': '/dev/sdc4', 'no_device': True}]
        image1 = {
            'id': 'cedef40a-ed67-4d10-800e-17455edce175',
            'name': 'fake_name',
            'status': 'active',
            'properties': {
                'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                'type': 'machine',
                'image_state': 'available',
                'mappings': mappings1,
                'block_device_mapping': block_device_mapping1,
            }
        }
        mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
        block_device_mapping2 = [{'device_name': '/dev/sdb1',
                'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e7'}]
        image2 = {
            'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
            'name': 'fake_name',
            'status': 'active',
            'properties': {
                'kernel_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                'type': 'machine',
                'root_device_name': '/dev/sdb1',
                'mappings': mappings2,
                'block_device_mapping': block_device_mapping2}}
        def fake_show(meh, context, image_id):
            # Deep copy so callers can safely mutate the returned dict.
            _images = [copy.deepcopy(image1), copy.deepcopy(image2)]
            for i in _images:
                if str(i['id']) == str(image_id):
                    return i
            raise exception.ImageNotFound(image_id=image_id)
        def fake_detail(meh, context, **kwargs):
            return [copy.deepcopy(image1), copy.deepcopy(image2)]
        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
        volumes = []
        snapshots = []
        if create_volumes_and_snapshots:
            for bdm in block_device_mapping1:
                if 'volume_id' in bdm:
                    vol = self._volume_create(bdm['volume_id'])
                    volumes.append(vol['id'])
                if 'snapshot_id' in bdm:
                    snap = self._snapshot_create(bdm['snapshot_id'])
                    snapshots.append(snap['id'])
        return (volumes, snapshots)
def _assertImageSet(self, result, root_device_type, root_device_name):
self.assertEqual(1, len(result['imagesSet']))
result = result['imagesSet'][0]
self.assertTrue('rootDeviceType' in result)
self.assertEqual(result['rootDeviceType'], root_device_type)
self.assertTrue('rootDeviceName' in result)
self.assertEqual(result['rootDeviceName'], root_device_name)
self.assertTrue('blockDeviceMapping' in result)
return result
_expected_root_device_name1 = '/dev/sda1'
# NOTE(yamahata): noDevice doesn't make sense when returning mapping
# It makes sense only when user overriding existing
# mapping.
_expected_bdms1 = [
{'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'},
{'deviceName': '/dev/sdb1', 'ebs': {'snapshotId':
'snap-00000001'}},
{'deviceName': '/dev/sdb2', 'ebs': {'snapshotId':
'vol-00000001'}},
{'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'},
# {'deviceName': '/dev/sdb4', 'noDevice': True},
{'deviceName': '/dev/sdc0', 'virtualName': 'swap'},
{'deviceName': '/dev/sdc1', 'ebs': {'snapshotId':
'snap-00000002'}},
{'deviceName': '/dev/sdc2', 'ebs': {'snapshotId':
'vol-00000002'}},
{'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'},
# {'deviceName': '/dev/sdc4', 'noDevice': True}
]
_expected_root_device_name2 = '/dev/sdb1'
_expected_bdms2 = [{'deviceName': '/dev/sdb1',
'ebs': {'snapshotId': 'snap-00000003'}}]
# NOTE(yamahata):
# InstanceBlockDeviceMappingItemType
# rootDeviceType
# rootDeviceName
# blockDeviceMapping
# deviceName
# virtualName
# ebs
# snapshotId
# volumeSize
# deleteOnTermination
# noDevice
def test_describe_image_mapping(self):
# test for rootDeviceName and blockDeviceMapping.
describe_images = self.cloud.describe_images
self._setUpImageSet()
result = describe_images(self.context, ['ami-00000001'])
result = self._assertImageSet(result, 'instance-store',
self._expected_root_device_name1)
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms1, 'deviceName')
result = describe_images(self.context, ['ami-00000002'])
result = self._assertImageSet(result, 'ebs',
self._expected_root_device_name2)
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms2, 'deviceName')
def test_describe_image_attribute(self):
describe_image_attribute = self.cloud.describe_image_attribute
def fake_show(meh, context, id):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'container_format': 'ami',
'is_public': True}
def fake_detail(self, context, **kwargs):
image = fake_show(None, context, None)
image['name'] = kwargs.get('filters', {}).get('name')
return [image]
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
result = describe_image_attribute(self.context, 'ami-00000001',
'launchPermission')
self.assertEqual([{'group': 'all'}], result['launchPermission'])
result = describe_image_attribute(self.context, 'ami-00000001',
'kernel')
self.assertEqual('aki-00000001', result['kernel']['value'])
result = describe_image_attribute(self.context, 'ami-00000001',
'ramdisk')
self.assertEqual('ari-00000001', result['ramdisk']['value'])
def test_describe_image_attribute_root_device_name(self):
describe_image_attribute = self.cloud.describe_image_attribute
self._setUpImageSet()
result = describe_image_attribute(self.context, 'ami-00000001',
'rootDeviceName')
self.assertEqual(result['rootDeviceName'],
self._expected_root_device_name1)
result = describe_image_attribute(self.context, 'ami-00000002',
'rootDeviceName')
self.assertEqual(result['rootDeviceName'],
self._expected_root_device_name2)
def test_describe_image_attribute_block_device_mapping(self):
describe_image_attribute = self.cloud.describe_image_attribute
self._setUpImageSet()
result = describe_image_attribute(self.context, 'ami-00000001',
'blockDeviceMapping')
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms1, 'deviceName')
result = describe_image_attribute(self.context, 'ami-00000002',
'blockDeviceMapping')
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms2, 'deviceName')
def test_modify_image_attribute(self):
modify_image_attribute = self.cloud.modify_image_attribute
fake_metadata = {
'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'is_public': False}
def fake_show(meh, context, id):
return copy.deepcopy(fake_metadata)
def fake_detail(self, context, **kwargs):
image = fake_show(None, context, None)
image['name'] = kwargs.get('filters', {}).get('name')
return [image]
def fake_update(meh, context, image_id, metadata, data=None):
self.assertEqual(metadata['properties']['kernel_id'],
fake_metadata['properties']['kernel_id'])
self.assertEqual(metadata['properties']['ramdisk_id'],
fake_metadata['properties']['ramdisk_id'])
self.assertTrue(metadata['is_public'])
image = copy.deepcopy(fake_metadata)
image.update(metadata)
return image
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
self.stubs.Set(fake._FakeImageService, 'update', fake_update)
result = modify_image_attribute(self.context, 'ami-00000001',
'launchPermission', 'add',
user_group=['all'])
self.assertTrue(result['is_public'])
def test_register_image(self):
    """Registering an S3 manifest yields the mapped EC2 image id."""
    def fake_create(*args, **kwargs):
        # NOTE(vish): We are mocking s3 so make sure we have converted
        # to ids instead of uuids.
        return {'id': 1,
                'name': 'fake_name',
                'container_format': 'ami',
                'properties': {'kernel_id': 1,
                               'ramdisk_id': 1,
                               'type': 'machine'},
                'is_public': False}
    self.stubs.Set(s3.S3ImageService, 'create', fake_create)
    result = self.cloud.register_image(
        self.context, 'fake_bucket/fake.img.manifest.xml')
    self.assertEqual(result['imageId'], 'ami-00000001')
def test_register_image_empty(self):
    """A missing image_location must raise EC2APIError."""
    self.assertRaises(exception.EC2APIError,
                      self.cloud.register_image,
                      self.context, image_location=None)
def test_register_image_name(self):
    """register_image defaults the name to image_location when omitted.

    The assertion runs inside the stubbed create(): the name handed to
    the image service must equal whatever self.expected_name currently
    holds, so the order of the two calls below matters.
    """
    register_image = self.cloud.register_image

    def fake_create(_self, context, metadata, data=None):
        self.assertEqual(metadata['name'], self.expected_name)
        metadata['id'] = 1
        metadata['container_format'] = 'ami'
        metadata['is_public'] = False
        return metadata
    self.stubs.Set(s3.S3ImageService, 'create', fake_create)
    # name=None -> the location string doubles as the image name.
    self.expected_name = 'fake_bucket/fake.img.manifest.xml'
    result = register_image(self.context,
                            image_location=self.expected_name,
                            name=None)
    # An explicit name wins over the location.
    self.expected_name = 'an image name'
    result = register_image(self.context,
                            image_location='some_location',
                            name=self.expected_name)
def test_format_image(self):
    """_format_image maps an internal image dict to the EC2 shape.

    Three cases are checked: a fully-populated image, a missing
    image_location (falls back to 'None (name)'), and a missing name
    (falls back to the location).
    """
    image = {
        'id': 1,
        'container_format': 'ami',
        'name': 'name',
        'owner': 'someone',
        'properties': {
            'image_location': 'location',
            'kernel_id': 1,
            'ramdisk_id': 1,
            'type': 'machine'},
        'is_public': False}
    expected = {'name': 'name',
                'imageOwnerId': 'someone',
                'isPublic': False,
                'imageId': 'ami-00000001',
                'imageState': None,
                'rootDeviceType': 'instance-store',
                'architecture': None,
                'imageLocation': 'location',
                'kernelId': 'aki-00000001',
                'ramdiskId': 'ari-00000001',
                'rootDeviceName': '/dev/sda1',
                'imageType': 'machine',
                'description': None}
    result = self.cloud._format_image(image)
    self.assertThat(result, matchers.DictMatches(expected))
    # No location: EC2 field becomes "None (<name>)".
    image['properties']['image_location'] = None
    expected['imageLocation'] = 'None (name)'
    result = self.cloud._format_image(image)
    self.assertThat(result, matchers.DictMatches(expected))
    # No name: the location is reused as the name.
    image['name'] = None
    image['properties']['image_location'] = 'location'
    expected['imageLocation'] = 'location'
    expected['name'] = 'location'
    result = self.cloud._format_image(image)
    self.assertThat(result, matchers.DictMatches(expected))
def test_deregister_image(self):
    """Deregister succeeds for a known image; unknown ids raise."""
    def fake_delete(self, context, id):
        return None
    self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
    # valid image
    self.assertTrue(
        self.cloud.deregister_image(self.context, 'ami-00000001'))
    # invalid image
    self.stubs.UnsetAll()

    def fake_detail_empty(self, context, **kwargs):
        return []
    self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_empty)
    self.assertRaises(exception.ImageNotFound,
                      self.cloud.deregister_image,
                      self.context, 'ami-bad001')
def test_deregister_image_wrong_container_type(self):
    """Deregistering a kernel id through the image API raises NotFound."""
    def fake_delete(self, context, id):
        return None
    self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
    self.assertRaises(exception.NotFound,
                      self.cloud.deregister_image,
                      self.context, 'aki-00000001')
def _run_instance(self, **kwargs):
    """Boot one instance and return its EC2 instance id."""
    reservation = self.cloud.run_instances(self.context, **kwargs)
    return reservation['instancesSet'][0]['instanceId']
def test_get_password_data(self):
    """get_password_data returns the password extracted by the driver."""
    instance_id = self._run_instance(image_id='ami-1',
                                     instance_type=CONF.default_flavor,
                                     max_count=1)
    self.stubs.Set(password, 'extract_password', lambda i: 'fakepass')
    output = self.cloud.get_password_data(context=self.context,
                                          instance_id=[instance_id])
    self.assertEquals(output['passwordData'], 'fakepass')
    self.cloud.terminate_instances(self.context, [instance_id])
def test_console_output(self):
    """Console output comes back base64-encoded from the fake driver."""
    instance_id = self._run_instance(image_id='ami-1',
                                     instance_type=CONF.default_flavor,
                                     max_count=1)
    output = self.cloud.get_console_output(context=self.context,
                                           instance_id=[instance_id])
    self.assertEquals(base64.b64decode(output['output']),
                      'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE')
    # TODO(soren): We need this until we can stop polling in the rpc code
    # for unit tests.
    self.cloud.terminate_instances(self.context, [instance_id])
def test_key_generation(self):
    """The generated private key must match the stored public key.

    The private key is written to a temp file and fed to ssh-keygen,
    whose exported public key is compared with the one kept in the db.
    """
    result = self._create_key('test')
    private_key = result['private_key']
    expected = db.key_pair_get(self.context,
                               self.context.user_id,
                               'test')['public_key']
    (fd, fname) = tempfile.mkstemp()
    try:
        os.write(fd, private_key)
    finally:
        # BUG FIX: the descriptor was previously leaked; it must be
        # closed before ssh-keygen reads the file and before unlink
        # (which fails on platforms that lock open files).
        os.close(fd)
    try:
        public_key, err = utils.execute('ssh-keygen', '-e', '-f', fname)
    finally:
        os.unlink(fname)
    # assert key fields are equal
    self.assertEqual(''.join(public_key.split("\n")[2:-2]),
                     expected.split(" ")[1].strip())
def test_describe_key_pairs(self):
    """describe_key_pairs lists every created keypair."""
    self._create_key('test1')
    self._create_key('test2')
    result = self.cloud.describe_key_pairs(self.context)
    keys = result["keySet"]
    # BUG FIX: assertTrue(filter(...)) is vacuously true on Python 3,
    # where filter() returns a (always truthy) lazy object; any() is
    # explicit and correct on both Python 2 and 3.
    self.assertTrue(any(k['keyName'] == 'test1' for k in keys))
    self.assertTrue(any(k['keyName'] == 'test2' for k in keys))
def test_describe_bad_key_pairs(self):
    """Asking for a nonexistent key name raises KeypairNotFound."""
    self.assertRaises(exception.KeypairNotFound,
                      self.cloud.describe_key_pairs,
                      self.context, key_name=['DoesNotExist'])
def test_import_key_pair(self):
    """Importing a public key stores and reports the right fingerprint."""
    pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
    # BUG FIX: the fixture files were never closed -- the original code
    # had 'f.close' without the call parentheses. Context managers
    # guarantee the close.
    with open(os.path.join(pubkey_path, 'dummy.pub'), 'r') as f:
        dummypub = f.readline().rstrip()
    with open(os.path.join(pubkey_path, 'dummy.fingerprint'), 'r') as f:
        dummyfprint = f.readline().rstrip()
    key_name = 'testimportkey'
    public_key_material = base64.b64encode(dummypub)
    result = self.cloud.import_key_pair(self.context,
                                        key_name,
                                        public_key_material)
    self.assertEqual(result['keyName'], key_name)
    self.assertEqual(result['keyFingerprint'], dummyfprint)
    keydata = db.key_pair_get(self.context,
                              self.context.user_id,
                              key_name)
    self.assertEqual(dummypub, keydata['public_key'])
    self.assertEqual(dummyfprint, keydata['fingerprint'])
def test_import_key_pair_quota_limit(self):
    """Importing a key fails once the key-pair quota is zero."""
    self.flags(quota_key_pairs=0)
    pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
    # BUG FIX: 'f.close' (no call parentheses) leaked the file handle;
    # use a context manager instead. The fingerprint file was read but
    # unused here, so it is not opened at all.
    with open(os.path.join(pubkey_path, 'dummy.pub'), 'r') as f:
        dummypub = f.readline().rstrip()
    key_name = 'testimportkey'
    public_key_material = base64.b64encode(dummypub)
    self.assertRaises(exception.EC2APIError,
                      self.cloud.import_key_pair, self.context, key_name,
                      public_key_material)
def test_create_key_pair(self):
    """Key-pair creation accepts sane names and rejects bad ones."""
    valid_names = ('a', 'a' * 255, string.ascii_letters + ' -_')
    invalid_names = ('', 'a' * 256, '*', '/')
    for name in valid_names:
        created = self.cloud.create_key_pair(self.context, name)
        self.assertEqual(created['keyName'], name)
    for name in invalid_names:
        self.assertRaises(exception.InvalidKeypair,
                          self.cloud.create_key_pair,
                          self.context, name)
def test_create_key_pair_quota_limit(self):
    """Creation fails once the key-pair quota is exhausted."""
    self.flags(quota_key_pairs=10)
    for i in range(0, 10):
        name = 'key_%i' % i
        created = self.cloud.create_key_pair(self.context, name)
        self.assertEqual(created['keyName'], name)
    # 11'th group should fail
    self.assertRaises(exception.EC2APIError,
                      self.cloud.create_key_pair,
                      self.context, 'foo')
def test_delete_key_pair(self):
    """Deleting an existing key pair does not raise."""
    self._create_key('test')
    self.cloud.delete_key_pair(self.context, 'test')
def test_run_instances(self):
    """run_instances boots one instance from a stubbed active image.

    Image lookups, usage notifications and rpc casts are all stubbed so
    the boot completes synchronously inside the test.
    """
    kwargs = {'image_id': 'ami-00000001',
              'instance_type': CONF.default_flavor,
              'max_count': 1}
    run_instances = self.cloud.run_instances

    def fake_show(self, context, id):
        return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
                'name': 'fake_name',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine'},
                'container_format': 'ami',
                'status': 'active'}
    self.stubs.Set(fake._FakeImageService, 'show', fake_show)

    def dumb(*args, **kwargs):
        pass
    self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
    # NOTE(comstud): Make 'cast' behave like a 'call' which will
    # ensure that operations complete
    self.stubs.Set(rpc, 'cast', rpc.call)
    result = run_instances(self.context, **kwargs)
    instance = result['instancesSet'][0]
    self.assertEqual(instance['imageId'], 'ami-00000001')
    self.assertEqual(instance['instanceId'], 'i-00000001')
    self.assertEqual(instance['instanceState']['name'], 'running')
    self.assertEqual(instance['instanceType'], 'm1.small')
def test_run_instances_availability_zone(self):
    """The requested availability zone is forwarded to compute_api.create.

    The actual check runs inside fake_create, which asserts the
    'availability_zone' kwarg it receives.
    """
    kwargs = {'image_id': 'ami-00000001',
              'instance_type': CONF.default_flavor,
              'max_count': 1,
              'placement': {'availability_zone': 'fake'},
              }
    run_instances = self.cloud.run_instances

    def fake_show(self, context, id):
        return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
                'name': 'fake_name',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine'},
                'container_format': 'ami',
                'status': 'active'}
    self.stubs.Set(fake._FakeImageService, 'show', fake_show)
    # NOTE(comstud): Make 'cast' behave like a 'call' which will
    # ensure that operations complete
    self.stubs.Set(rpc, 'cast', rpc.call)

    def fake_format(*args, **kwargs):
        pass
    self.stubs.Set(self.cloud, '_format_run_instances', fake_format)

    def fake_create(*args, **kwargs):
        self.assertEqual(kwargs['availability_zone'], 'fake')
        return ({'id': 'fake-instance'}, 'fake-res-id')
    self.stubs.Set(self.cloud.compute_api, 'create', fake_create)
    # NOTE(vish) the assert for this call is in the fake_create method.
    run_instances(self.context, **kwargs)
def test_run_instances_idempotent(self):
    # Ensure subsequent run_instances calls with same client token
    # are idempotent and that ones with different client_token are not
    kwargs = {'image_id': 'ami-00000001',
              'instance_type': CONF.default_flavor,
              'max_count': 1}
    run_instances = self.cloud.run_instances

    def fake_show(self, context, id):
        return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
                'name': 'fake_name',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine'},
                'container_format': 'ami',
                'status': 'active'}
    self.stubs.Set(fake._FakeImageService, 'show', fake_show)

    def dumb(*args, **kwargs):
        pass
    self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
    # NOTE(comstud): Make 'cast' behave like a 'call' which will
    # ensure that operations complete
    self.stubs.Set(rpc, 'cast', rpc.call)
    # First use of a token boots a fresh instance...
    kwargs['client_token'] = 'client-token-1'
    result = run_instances(self.context, **kwargs)
    instance = result['instancesSet'][0]
    self.assertEqual(instance['instanceId'], 'i-00000001')
    # ...a new token boots another...
    kwargs['client_token'] = 'client-token-2'
    result = run_instances(self.context, **kwargs)
    instance = result['instancesSet'][0]
    self.assertEqual(instance['instanceId'], 'i-00000002')
    # ...and reusing either token returns the instance it created.
    kwargs['client_token'] = 'client-token-2'
    result = run_instances(self.context, **kwargs)
    instance = result['instancesSet'][0]
    self.assertEqual(instance['instanceId'], 'i-00000002')
    kwargs['client_token'] = 'client-token-1'
    result = run_instances(self.context, **kwargs)
    instance = result['instancesSet'][0]
    self.assertEqual(instance['instanceId'], 'i-00000001')
    kwargs['client_token'] = 'client-token-3'
    result = run_instances(self.context, **kwargs)
    instance = result['instancesSet'][0]
    self.assertEqual(instance['instanceId'], 'i-00000003')
def test_run_instances_image_state_none(self):
    """An image record without a status must be rejected."""
    def fake_show_no_state(self, context, id):
        return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
                'name': 'fake_name',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine'}, 'container_format': 'ami'}
    self.stubs.UnsetAll()
    self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state)
    self.assertRaises(exception.EC2APIError,
                      self.cloud.run_instances, self.context,
                      image_id='ami-00000001',
                      instance_type=CONF.default_flavor,
                      max_count=1)
def test_run_instances_image_state_invalid(self):
    """An image still in 'decrypting' state cannot be booted."""
    def fake_show_decrypt(self, context, id):
        return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
                'name': 'fake_name',
                'container_format': 'ami',
                'status': 'active',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine', 'image_state': 'decrypting'}}
    self.stubs.UnsetAll()
    self.stubs.Set(fake._FakeImageService, 'show', fake_show_decrypt)
    self.assertRaises(exception.EC2APIError,
                      self.cloud.run_instances, self.context,
                      image_id='ami-00000001',
                      instance_type=CONF.default_flavor,
                      max_count=1)
def test_run_instances_image_status_active(self):
    """run_instances succeeds when the image status is 'active'."""
    kwargs = {'image_id': 'ami-00000001',
              'instance_type': CONF.default_flavor,
              'max_count': 1}
    run_instances = self.cloud.run_instances

    def fake_show_stat_active(self, context, id):
        # BUG FIX: the dict literal previously spelled 'status' twice;
        # the duplicate key silently overwrote the first entry.
        return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
                'name': 'fake_name',
                'container_format': 'ami',
                'status': 'active',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine'}}

    def fake_id_to_glance_id(context, id):
        return 'cedef40a-ed67-4d10-800e-17455edce175'
    self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active)
    self.stubs.Set(ec2utils, 'id_to_glance_id', fake_id_to_glance_id)
    result = run_instances(self.context, **kwargs)
    self.assertEqual(len(result['instancesSet']), 1)
def _restart_compute_service(self, periodic_interval_max=None):
    """restart compute service. NOTE: fake driver forgets all instances."""
    self.compute.kill()
    service_kwargs = {}
    if periodic_interval_max:
        service_kwargs['periodic_interval_max'] = periodic_interval_max
    self.compute = self.start_service('compute', **service_kwargs)
def test_stop_start_instance(self):
    """Stop/start cycles work; terminate reports the stopped state."""
    # enforce periodic tasks run in short time to avoid wait for 60s.
    self._restart_compute_service(periodic_interval_max=0.3)
    instance_id = self._run_instance(image_id='ami-1',
                                     instance_type=CONF.default_flavor,
                                     max_count=1)
    # a running instance can't be started.
    self.assertRaises(exception.InstanceInvalidState,
                      self.cloud.start_instances,
                      self.context, [instance_id])
    self.assertTrue(
        self.cloud.stop_instances(self.context, [instance_id]))
    self.assertTrue(
        self.cloud.start_instances(self.context, [instance_id]))
    self.assertTrue(
        self.cloud.stop_instances(self.context, [instance_id]))
    expected = {'instancesSet': [
                    {'instanceId': 'i-00000001',
                     'previousState': {'code': 80,
                                       'name': 'stopped'},
                     'currentState': {'code': 48,
                                      'name': 'terminated'}}]}
    result = self.cloud.terminate_instances(self.context, [instance_id])
    self.assertEqual(result, expected)
def test_start_instances(self):
    """A stopped instance can be started again."""
    instance_id = self._run_instance(image_id='ami-1',
                                     instance_type=CONF.default_flavor,
                                     max_count=1)
    self.assertTrue(
        self.cloud.stop_instances(self.context, [instance_id]))
    self.assertTrue(
        self.cloud.start_instances(self.context, [instance_id]))
    expected = {'instancesSet': [
                    {'instanceId': 'i-00000001',
                     'previousState': {'code': 16,
                                       'name': 'running'},
                     'currentState': {'code': 48,
                                      'name': 'terminated'}}]}
    result = self.cloud.terminate_instances(self.context, [instance_id])
    self.assertEqual(result, expected)
    self._restart_compute_service()
def test_stop_instances(self):
    """A running instance can be stopped; terminate sees 'stopped'."""
    instance_id = self._run_instance(image_id='ami-1',
                                     instance_type=CONF.default_flavor,
                                     max_count=1)
    self.assertTrue(
        self.cloud.stop_instances(self.context, [instance_id]))
    expected = {'instancesSet': [
                    {'instanceId': 'i-00000001',
                     'previousState': {'code': 80,
                                       'name': 'stopped'},
                     'currentState': {'code': 48,
                                      'name': 'terminated'}}]}
    result = self.cloud.terminate_instances(self.context, [instance_id])
    self.assertEqual(result, expected)
    self._restart_compute_service()
def test_terminate_instances(self):
    """Terminating a running instance reports running -> terminated."""
    instance_id = self._run_instance(image_id='ami-1',
                                     instance_type=CONF.default_flavor,
                                     max_count=1)
    # a running instance can't be started.
    self.assertRaises(exception.InstanceInvalidState,
                      self.cloud.start_instances,
                      self.context, [instance_id])
    expected = {'instancesSet': [
                    {'instanceId': 'i-00000001',
                     'previousState': {'code': 16,
                                       'name': 'running'},
                     'currentState': {'code': 48,
                                      'name': 'terminated'}}]}
    result = self.cloud.terminate_instances(self.context, [instance_id])
    self.assertEqual(result, expected)
    self._restart_compute_service()
def test_terminate_instances_invalid_instance_id(self):
    """Terminating an unknown instance id raises InstanceNotFound."""
    # Boot one instance so the database is non-empty; 'i-2' still
    # does not exist.
    self._run_instance(image_id='ami-1',
                       instance_type=CONF.default_flavor,
                       max_count=1)
    self.assertRaises(exception.InstanceNotFound,
                      self.cloud.terminate_instances,
                      self.context, ['i-2'])
    self._restart_compute_service()
def test_terminate_instances_disable_terminate(self):
    """disable_terminate leaves the instance running; clearing it works."""
    kwargs = {'image_id': 'ami-1',
              'instance_type': CONF.default_flavor,
              'max_count': 1, }
    instance_id = self._run_instance(**kwargs)
    internal_uuid = db.get_instance_uuid_by_ec2_id(self.context,
            ec2utils.ec2_id_to_id(instance_id))
    # Phase 1: with disable_terminate set, terminate is a no-op and the
    # instance stays 'running'.
    instance = db.instance_update(self.context, internal_uuid,
                                  {'disable_terminate': True})
    expected = {'instancesSet': [
                    {'instanceId': 'i-00000001',
                     'previousState': {'code': 16,
                                       'name': 'running'},
                     'currentState': {'code': 16,
                                      'name': 'running'}}]}
    result = self.cloud.terminate_instances(self.context, [instance_id])
    self.assertEqual(result, expected)
    # Phase 2: once the flag is cleared the same call terminates it.
    instance = db.instance_update(self.context, internal_uuid,
                                  {'disable_terminate': False})
    expected = {'instancesSet': [
                    {'instanceId': 'i-00000001',
                     'previousState': {'code': 16,
                                       'name': 'running'},
                     'currentState': {'code': 48,
                                      'name': 'terminated'}}]}
    result = self.cloud.terminate_instances(self.context, [instance_id])
    self.assertEqual(result, expected)
    self._restart_compute_service()
def test_terminate_instances_two_instances(self):
    """Terminating two instances reports each one's previous state."""
    boot_kwargs = dict(image_id='ami-1',
                       instance_type=CONF.default_flavor,
                       max_count=1)
    inst1 = self._run_instance(**boot_kwargs)
    inst2 = self._run_instance(**boot_kwargs)
    # Stop the first so the two report different previous states.
    self.assertTrue(self.cloud.stop_instances(self.context, [inst1]))
    expected = {'instancesSet': [
                    {'instanceId': 'i-00000001',
                     'previousState': {'code': 80,
                                       'name': 'stopped'},
                     'currentState': {'code': 48,
                                      'name': 'terminated'}},
                    {'instanceId': 'i-00000002',
                     'previousState': {'code': 16,
                                       'name': 'running'},
                     'currentState': {'code': 48,
                                      'name': 'terminated'}}]}
    result = self.cloud.terminate_instances(self.context, [inst1, inst2])
    self.assertEqual(result, expected)
    self._restart_compute_service()
def test_reboot_instances(self):
    """A running instance can be rebooted."""
    instance_id = self._run_instance(image_id='ami-1',
                                     instance_type=CONF.default_flavor,
                                     max_count=1)
    # a running instance can't be started.
    self.assertRaises(exception.InstanceInvalidState,
                      self.cloud.start_instances,
                      self.context, [instance_id])
    self.assertTrue(
        self.cloud.reboot_instances(self.context, [instance_id]))
def _volume_create(self, volume_id=None):
    """Create a detached 1 GB test volume, optionally with a fixed id."""
    params = dict(name='test-volume',
                  description='test volume description',
                  status='available',
                  host='fake',
                  size=1,
                  attach_status='detached')
    if volume_id:
        params['volume_id'] = volume_id
    return self.volume_api.create_with_kwargs(self.context, **params)
def _snapshot_create(self, snapshot_id=None):
    """Create a test snapshot, optionally with a fixed snapshot id."""
    params = dict(volume_id='ccec42a2-c220-4806-b762-6b12fbb592e4',
                  status="available",
                  volume_size=1)
    if snapshot_id:
        params['snap_id'] = snapshot_id
    return self.volume_api.create_snapshot_with_kwargs(self.context,
                                                       **params)
def _create_snapshot(self, ec2_volume_id):
    """Snapshot the given EC2 volume and return the new snapshot id."""
    snapshot = self.cloud.create_snapshot(self.context,
                                          volume_id=ec2_volume_id)
    return snapshot['snapshotId']
def _do_test_create_image(self, no_reboot):
    """Make sure that CreateImage works.

    Boots an instance with an EBS root, stubs the image service and the
    virt driver's power_on/power_off, then asserts the created image's
    block device mapping and that the reboot behavior matches no_reboot.
    """
    # enforce periodic tasks run in short time to avoid wait for 60s.
    self._restart_compute_service(periodic_interval_max=0.3)
    (volumes, snapshots) = self._setUpImageSet(
        create_volumes_and_snapshots=True)
    kwargs = {'image_id': 'ami-1',
              'instance_type': CONF.default_flavor,
              'max_count': 1}
    ec2_instance_id = self._run_instance(**kwargs)

    def fake_show(meh, context, id):
        # Image metadata with an EBS root device mapping.
        bdm = [dict(snapshot_id=snapshots[0],
                    volume_size=1,
                    device_name='sda1',
                    delete_on_termination=False)]
        props = dict(kernel_id='cedef40a-ed67-4d10-800e-17455edce175',
                     ramdisk_id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                     root_device_name='/dev/sda1',
                     block_device_mapping=bdm)
        return dict(id=id,
                    properties=props,
                    container_format='ami',
                    status='active',
                    is_public=True)
    self.stubs.Set(fake._FakeImageService, 'show', fake_show)

    def fake_block_device_mapping_get_all_by_instance(context, inst_id):
        return [dict(id=1,
                     source_type='snapshot',
                     destination_type='volume',
                     instance_uuid=inst_id,
                     snapshot_id=snapshots[0],
                     volume_id=volumes[0],
                     volume_size=1,
                     device_name='sda1',
                     delete_on_termination=False,
                     no_device=None,
                     connection_info='{"foo":"bar"}')]
    self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                   fake_block_device_mapping_get_all_by_instance)
    # Spy dict recording whether the virt driver power-cycled the
    # instance during the snapshot.
    virt_driver = {}

    def fake_power_on(self, context, instance, network_info,
                      block_device_info):
        virt_driver['powered_on'] = True
    self.stubs.Set(fake_virt.FakeDriver, 'power_on', fake_power_on)

    def fake_power_off(self, instance):
        virt_driver['powered_off'] = True
    self.stubs.Set(fake_virt.FakeDriver, 'power_off', fake_power_off)
    result = self.cloud.create_image(self.context, ec2_instance_id,
                                     no_reboot=no_reboot)
    ec2_ids = [result['imageId']]
    created_image = self.cloud.describe_images(self.context,
                                               ec2_ids)['imagesSet'][0]
    self.assertTrue('blockDeviceMapping' in created_image)
    bdm = created_image['blockDeviceMapping'][0]
    self.assertEquals(bdm.get('deviceName'), 'sda1')
    self.assertTrue('ebs' in bdm)
    self.assertEquals(bdm['ebs'].get('snapshotId'),
                      ec2utils.id_to_ec2_snap_id(snapshots[0]))
    self.assertEquals(created_image.get('kernelId'), 'aki-00000001')
    self.assertEquals(created_image.get('ramdiskId'), 'ari-00000002')
    self.assertEquals(created_image.get('rootDeviceType'), 'ebs')
    # power_on/power_off must have happened iff a reboot was requested.
    self.assertNotEqual(virt_driver.get('powered_on'), no_reboot)
    self.assertNotEqual(virt_driver.get('powered_off'), no_reboot)
    self.cloud.terminate_instances(self.context, [ec2_instance_id])
    self._restart_compute_service()
def test_create_image_no_reboot(self):
    """CreateImage succeeds when no_reboot is requested."""
    self._do_test_create_image(True)
def test_create_image_with_reboot(self):
    """CreateImage succeeds when a reboot is allowed."""
    self._do_test_create_image(False)
def test_create_image_instance_store(self):
    """
    Ensure CreateImage fails as expected for an instance-store-backed
    instance
    """
    # enforce periodic tasks run in short time to avoid wait for 60s.
    self._restart_compute_service(periodic_interval_max=0.3)
    (volumes, snapshots) = self._setUpImageSet(
        create_volumes_and_snapshots=True)
    kwargs = {'image_id': 'ami-1',
              'instance_type': CONF.default_flavor,
              'max_count': 1}
    ec2_instance_id = self._run_instance(**kwargs)

    def fake_block_device_mapping_get_all_by_instance(context, inst_id):
        # Mapping on 'vda' only -- no '/dev/sda1' root volume, so the
        # instance counts as instance-store backed.
        return [dict(snapshot_id=snapshots[0],
                     volume_id=volumes[0],
                     source_type='snapshot',
                     destination_type='volume',
                     volume_size=1,
                     device_name='vda',
                     delete_on_termination=False,
                     no_device=None)]
    self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                   fake_block_device_mapping_get_all_by_instance)
    self.assertRaises(exception.InvalidParameterValue,
                      self.cloud.create_image,
                      self.context,
                      ec2_instance_id,
                      no_reboot=True)
@staticmethod
def _fake_bdm_get(ctxt, id):
    """Fixture stand-in for db.block_device_mapping_get_all_by_instance.

    Returns one attached volume, one snapshot-backed mapping, a
    no-device entry, and several local blank disks (including a swap
    disk on /dev/sdc).
    """
    return [{'volume_id': 87654321,
             'source_type': 'volume',
             'destination_type': 'volume',
             'snapshot_id': None,
             'no_device': None,
             'delete_on_termination': True,
             'device_name': '/dev/sdh'},
            {'volume_id': None,
             'snapshot_id': 98765432,
             'source_type': 'snapshot',
             'destination_type': 'volume',
             'no_device': None,
             'delete_on_termination': True,
             'device_name': '/dev/sdi'},
            {'volume_id': None,
             'snapshot_id': None,
             'no_device': True,
             'delete_on_termination': None,
             'device_name': None},
            {'volume_id': None,
             'snapshot_id': None,
             'no_device': None,
             'source_type': 'blank',
             'destination_type': 'local',
             'guest_format': None,
             'delete_on_termination': None,
             'device_name': '/dev/sdb'},
            {'volume_id': None,
             'snapshot_id': None,
             'no_device': None,
             'source_type': 'blank',
             'destination_type': 'local',
             'guest_format': 'swap',
             'delete_on_termination': None,
             'device_name': '/dev/sdc'},
            {'volume_id': None,
             'snapshot_id': None,
             'no_device': None,
             'source_type': 'blank',
             'destination_type': 'local',
             'guest_format': None,
             'delete_on_termination': None,
             'device_name': '/dev/sdd'},
            {'volume_id': None,
             'snapshot_id': None,
             'no_device': None,
             'source_type': 'blank',
             'destination_type': 'local',
             'guest_format': None,
             'delete_on_termination': None,
             'device_name': '/dev/sd3'},
            ]
def test_describe_instance_attribute(self):
    # Make sure that describe_instance_attribute works.
    self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                   self._fake_bdm_get)

    def fake_get(ctxt, instance_id):
        # Minimal stopped-instance record backing all attribute queries.
        inst_type = flavors.get_default_flavor()
        inst_type['name'] = 'fake_type'
        sys_meta = flavors.save_flavor_info({}, inst_type)
        sys_meta = utils.dict_to_metadata(sys_meta)
        return {
            'id': 0,
            'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
            'root_device_name': '/dev/sdh',
            'security_groups': [{'name': 'fake0'}, {'name': 'fake1'}],
            'vm_state': vm_states.STOPPED,
            'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
            'ramdisk_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
            'user_data': 'fake-user data',
            'shutdown_terminate': False,
            'disable_terminate': False,
            'system_metadata': sys_meta,
            }
    self.stubs.Set(self.cloud.compute_api, 'get', fake_get)

    def fake_get_instance_uuid_by_ec2_id(ctxt, int_id):
        # 305419896 == 0x12345678, i.e. the int form of 'i-12345678'.
        if int_id == 305419896:
            return 'e5fe5518-0288-4fa3-b0c4-c79764101b85'
        raise exception.InstanceNotFound(instance_id=int_id)
    self.stubs.Set(db, 'get_instance_uuid_by_ec2_id',
                   fake_get_instance_uuid_by_ec2_id)
    get_attribute = functools.partial(
        self.cloud.describe_instance_attribute,
        self.context, 'i-12345678')
    bdm = get_attribute('blockDeviceMapping')
    # sort() gives an order-insensitive comparison (dicts are
    # comparable/sortable on Python 2).
    bdm['blockDeviceMapping'].sort()
    expected_bdm = {'instance_id': 'i-12345678',
                    'rootDeviceType': 'ebs',
                    'blockDeviceMapping': [
                        {'deviceName': '/dev/sdh',
                         'ebs': {'status': 'attached',
                                 'deleteOnTermination': True,
                                 'volumeId': 'vol-05397fb1',
                                 'attachTime': '13:56:24'}}]}
    expected_bdm['blockDeviceMapping'].sort()
    self.assertEqual(bdm, expected_bdm)
    groupSet = get_attribute('groupSet')
    groupSet['groupSet'].sort()
    expected_groupSet = {'instance_id': 'i-12345678',
                         'groupSet': [{'groupId': 'fake0'},
                                      {'groupId': 'fake1'}]}
    expected_groupSet['groupSet'].sort()
    self.assertEqual(groupSet, expected_groupSet)
    self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'),
                     {'instance_id': 'i-12345678',
                      'instanceInitiatedShutdownBehavior': 'stop'})
    self.assertEqual(get_attribute('disableApiTermination'),
                     {'instance_id': 'i-12345678',
                      'disableApiTermination': False})
    self.assertEqual(get_attribute('instanceType'),
                     {'instance_id': 'i-12345678',
                      'instanceType': 'fake_type'})
    self.assertEqual(get_attribute('kernel'),
                     {'instance_id': 'i-12345678',
                      'kernel': 'aki-00000001'})
    self.assertEqual(get_attribute('ramdisk'),
                     {'instance_id': 'i-12345678',
                      'ramdisk': 'ari-00000002'})
    self.assertEqual(get_attribute('rootDeviceName'),
                     {'instance_id': 'i-12345678',
                      'rootDeviceName': '/dev/sdh'})
    # NOTE(yamahata): this isn't supported
    # get_attribute('sourceDestCheck')
    self.assertEqual(get_attribute('userData'),
                     {'instance_id': 'i-12345678',
                      'userData': '}\xa9\x1e\xba\xc7\xabu\xabZ'})
def test_instance_initiated_shutdown_behavior(self):
    """instanceInitiatedShutdownBehavior is 'stop' for every image kind.

    Exercises six image variants (no mappings, 'mappings',
    'block_device_mapping', and both) via the test_dia_iisb closure,
    which boots, checks the attribute, and terminates each instance.
    """
    def test_dia_iisb(expected_result, **kwargs):
        """test describe_instance_attribute
        attribute instance_initiated_shutdown_behavior
        """
        kwargs.update({'instance_type': CONF.default_flavor,
                       'max_count': 1})
        instance_id = self._run_instance(**kwargs)
        result = self.cloud.describe_instance_attribute(self.context,
                instance_id, 'instanceInitiatedShutdownBehavior')
        self.assertEqual(result['instanceInitiatedShutdownBehavior'],
                         expected_result)
        expected = {'instancesSet': [
                        {'instanceId': instance_id,
                         'previousState': {'code': 16,
                                           'name': 'running'},
                         'currentState': {'code': 48,
                                          'name': 'terminated'}}]}
        result = self.cloud.terminate_instances(self.context,
                                                [instance_id])
        self.assertEqual(result, expected)
        self._restart_compute_service()
    test_dia_iisb('stop', image_id='ami-1')
    block_device_mapping = [{'device_name': '/dev/vdb',
                             'virtual_name': 'ephemeral0'}]
    test_dia_iisb('stop', image_id='ami-2',
                  block_device_mapping=block_device_mapping)

    def fake_show(self, context, id_):
        # Per-image properties: ami-3 plain, ami-4 'mappings' only,
        # ami-5 'block_device_mapping' only, ami-6 both.
        LOG.debug("id_ %s", id_)
        prop = {}
        if id_ == 'ami-3':
            pass
        elif id_ == 'ami-4':
            prop = {'mappings': [{'device': 'sdb0',
                                  'virtual': 'ephemeral0'}]}
        elif id_ == 'ami-5':
            prop = {'block_device_mapping':
                    [{'device_name': '/dev/sdb0',
                      'virtual_name': 'ephemeral0'}]}
        elif id_ == 'ami-6':
            prop = {'mappings': [{'device': 'sdb0',
                                  'virtual': 'ephemeral0'}],
                    'block_device_mapping':
                    [{'device_name': '/dev/sdb0',
                      'virtual_name': 'ephemeral0'}]}
        prop_base = {'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                     'type': 'machine'}
        prop_base.update(prop)
        return {
            'id': id_,
            'name': 'fake_name',
            'properties': prop_base,
            'container_format': 'ami',
            'status': 'active'}
    # NOTE(yamahata): create ami-3 ... ami-6
    # ami-1 and ami-2 is already created by setUp()
    for i in range(3, 7):
        db.s3_image_create(self.context, 'ami-%d' % i)
    self.stubs.Set(fake._FakeImageService, 'show', fake_show)
    test_dia_iisb('stop', image_id='ami-3')
    test_dia_iisb('stop', image_id='ami-4')
    test_dia_iisb('stop', image_id='ami-5')
    test_dia_iisb('stop', image_id='ami-6')
def test_create_delete_tags(self):
    """Tags round-trip through instance metadata and are removable.

    The compute RPC is stubbed with a spy that records the metadata
    diff, so both the stored metadata and the propagated change are
    verified.
    """
    # We need to stub network calls
    self._stub_instance_get_with_fixed_ips('get_all')
    self._stub_instance_get_with_fixed_ips('get')
    # We need to stub out the MQ call - it won't succeed. We do want
    # to check that the method is called, though
    meta_changes = [None]

    def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
                                      instance_uuid=None):
        meta_changes[0] = diff
    self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
                   fake_change_instance_metadata)
    # Create a test image
    image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
    inst1_kwargs = {
        'reservation_id': 'a',
        'image_ref': image_uuid,
        'instance_type_id': 1,
        'vm_state': 'active',
        'launched_at': timeutils.utcnow(),
        'hostname': 'server-1111',
        'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1)
    }
    inst1 = db.instance_create(self.context, inst1_kwargs)
    ec2_id = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
    # Create some tags
    md = {'key': 'foo', 'value': 'bar'}
    md_result = {'foo': 'bar'}
    self.cloud.create_tags(self.context, resource_id=[ec2_id],
                           tag=[md])
    metadata = self.cloud.compute_api.get_instance_metadata(self.context,
                                                            inst1)
    self.assertEqual(metadata, md_result)
    # '+' in the diff means the key was added with the given value.
    self.assertEqual(meta_changes, [{'foo': ['+', 'bar']}])
    # Delete them
    self.cloud.delete_tags(self.context, resource_id=[ec2_id],
                           tag=[{'key': 'foo', 'value': 'bar'}])
    metadata = self.cloud.compute_api.get_instance_metadata(self.context,
                                                            inst1)
    self.assertEqual(metadata, {})
    # '-' in the diff means the key was removed.
    self.assertEqual(meta_changes, [{'foo': ['-']}])
def test_describe_tags(self):
    """Exercise describe_tags across every supported filter combination.

    Two instances are tagged with one shared and one distinct tag each,
    then the tagSet is queried with no filter, by resource-id, by
    resource-type, by key, by value, with wildcard patterns, with
    multiple values, and with ANDed filters.
    """
    # We need to stub network calls
    self._stub_instance_get_with_fixed_ips('get_all')
    self._stub_instance_get_with_fixed_ips('get')
    # We need to stub out the MQ call - it won't succeed. We do want
    # to check that the method is called, though
    meta_changes = [None]

    def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
                                      instance_uuid=None):
        meta_changes[0] = diff

    self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
                   fake_change_instance_metadata)
    # Create some test images
    image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
    inst1_kwargs = {
        'reservation_id': 'a',
        'image_ref': image_uuid,
        'instance_type_id': 1,
        'vm_state': 'active',
        'launched_at': timeutils.utcnow(),
        'hostname': 'server-1111',
        'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1)
    }
    inst2_kwargs = {
        'reservation_id': 'b',
        'image_ref': image_uuid,
        'instance_type_id': 1,
        'vm_state': 'active',
        'launched_at': timeutils.utcnow(),
        'hostname': 'server-1112',
        'created_at': datetime.datetime(2012, 5, 1, 1, 1, 2)
    }
    inst1 = db.instance_create(self.context, inst1_kwargs)
    ec2_id1 = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
    inst2 = db.instance_create(self.context, inst2_kwargs)
    ec2_id2 = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
    # Create some tags
    # We get one overlapping pair, and each has a different key value pair
    # inst1 : {'foo': 'bar', 'bax': 'wibble'}
    # inst2 : {'foo': 'bar', 'baz': 'quux'}
    md = {'key': 'foo', 'value': 'bar'}
    md_result = {'foo': 'bar'}
    self.cloud.create_tags(self.context, resource_id=[ec2_id1, ec2_id2],
                           tag=[md])
    self.assertEqual(meta_changes, [{'foo': ['+', 'bar']}])
    metadata = self.cloud.compute_api.get_instance_metadata(self.context,
                                                            inst1)
    self.assertEqual(metadata, md_result)
    metadata = self.cloud.compute_api.get_instance_metadata(self.context,
                                                            inst2)
    self.assertEqual(metadata, md_result)
    md2 = {'key': 'baz', 'value': 'quux'}
    md2_result = {'baz': 'quux'}
    md2_result.update(md_result)
    self.cloud.create_tags(self.context, resource_id=[ec2_id2],
                           tag=[md2])
    self.assertEqual(meta_changes, [{'baz': ['+', 'quux']}])
    metadata = self.cloud.compute_api.get_instance_metadata(self.context,
                                                            inst2)
    self.assertEqual(metadata, md2_result)
    md3 = {'key': 'bax', 'value': 'wibble'}
    md3_result = {'bax': 'wibble'}
    md3_result.update(md_result)
    self.cloud.create_tags(self.context, resource_id=[ec2_id1],
                           tag=[md3])
    self.assertEqual(meta_changes, [{'bax': ['+', 'wibble']}])
    metadata = self.cloud.compute_api.get_instance_metadata(self.context,
                                                            inst1)
    self.assertEqual(metadata, md3_result)
    # Expected tagSet entries for each (instance, key) pair.
    inst1_key_foo = {'key': u'foo', 'resource_id': 'i-00000001',
                     'resource_type': 'instance', 'value': u'bar'}
    inst1_key_bax = {'key': u'bax', 'resource_id': 'i-00000001',
                     'resource_type': 'instance', 'value': u'wibble'}
    inst2_key_foo = {'key': u'foo', 'resource_id': 'i-00000002',
                     'resource_type': 'instance', 'value': u'bar'}
    inst2_key_baz = {'key': u'baz', 'resource_id': 'i-00000002',
                     'resource_type': 'instance', 'value': u'quux'}
    # We should be able to search by:
    # No filter
    tags = self.cloud.describe_tags(self.context)['tagSet']
    self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo,
                                  inst2_key_baz, inst1_key_bax])
    # Resource ID
    tags = self.cloud.describe_tags(self.context,
                                    filter=[{'name': 'resource-id',
                                             'value': [ec2_id1]}])['tagSet']
    self.assertEqualSorted(tags, [inst1_key_foo, inst1_key_bax])
    # Resource Type
    tags = self.cloud.describe_tags(self.context,
                                    filter=[{'name': 'resource-type',
                                             'value': ['instance']}])['tagSet']
    self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo,
                                  inst2_key_baz, inst1_key_bax])
    # Key, either bare or with wildcards
    tags = self.cloud.describe_tags(self.context,
                                    filter=[{'name': 'key',
                                             'value': ['foo']}])['tagSet']
    self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo])
    tags = self.cloud.describe_tags(self.context,
                                    filter=[{'name': 'key',
                                             'value': ['baz']}])['tagSet']
    self.assertEqualSorted(tags, [inst2_key_baz])
    # Wildcard key patterns are expected to match nothing here.
    tags = self.cloud.describe_tags(self.context,
                                    filter=[{'name': 'key',
                                             'value': ['ba?']}])['tagSet']
    self.assertEqual(tags, [])
    tags = self.cloud.describe_tags(self.context,
                                    filter=[{'name': 'key',
                                             'value': ['b*']}])['tagSet']
    self.assertEqual(tags, [])
    # Value, either bare or with wildcards
    tags = self.cloud.describe_tags(self.context,
                                    filter=[{'name': 'value',
                                             'value': ['bar']}])['tagSet']
    self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo])
    # Wildcard value patterns are likewise expected to match nothing.
    tags = self.cloud.describe_tags(self.context,
                                    filter=[{'name': 'value',
                                             'value': ['wi*']}])['tagSet']
    self.assertEqual(tags, [])
    tags = self.cloud.describe_tags(self.context,
                                    filter=[{'name': 'value',
                                             'value': ['quu?']}])['tagSet']
    self.assertEqual(tags, [])
    # Multiple values
    tags = self.cloud.describe_tags(self.context,
                                    filter=[{'name': 'key',
                                             'value': ['baz', 'bax']}])['tagSet']
    self.assertEqualSorted(tags, [inst2_key_baz, inst1_key_bax])
    # Multiple filters (AND): no match
    tags = self.cloud.describe_tags(self.context,
                                    filter=[{'name': 'key',
                                             'value': ['baz']},
                                            {'name': 'value',
                                             'value': ['wibble']}])['tagSet']
    self.assertEqual(tags, [])
    # Multiple filters (AND): match
    tags = self.cloud.describe_tags(self.context,
                                    filter=[{'name': 'key',
                                             'value': ['baz']},
                                            {'name': 'value',
                                             'value': ['quux']}])['tagSet']
    self.assertEqualSorted(tags, [inst2_key_baz])
    # And we should fail on supported resource types
    self.assertRaises(exception.EC2APIError,
                      self.cloud.describe_tags,
                      self.context,
                      filter=[{'name': 'resource-type',
                               'value': ['instance', 'volume']}])
def test_resource_type_from_id(self):
    """Each EC2 id prefix maps to its expected resource type."""
    expected = [
        ('i-12345', 'instance'),
        ('r-12345', 'reservation'),
        ('vol-12345', 'volume'),
        ('snap-12345', 'snapshot'),
        ('ami-12345', 'image'),
        ('ari-12345', 'image'),
        ('aki-12345', 'image'),
        # Unknown prefixes resolve to None.
        ('x-12345', None),
    ]
    for ec2_id, resource_type in expected:
        self.assertEqual(
            ec2utils.resource_type_from_id(self.context, ec2_id),
            resource_type)
class CloudTestCaseNeutronProxy(test.TestCase):
    """Exercise the EC2 security-group API when proxied through Neutron."""

    def setUp(self):
        # Route security-group calls through Neutron and swap in the fake
        # Neutron client for the duration of the test.
        cfg.CONF.set_override('security_group_api', 'neutron')
        self.cloud = cloud.CloudController()
        self.original_client = neutronv2.get_client
        neutronv2.get_client = test_neutron.get_client
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)
        super(CloudTestCaseNeutronProxy, self).setUp()

    def tearDown(self):
        # Restore the real client factory and wipe the fake client's state.
        neutronv2.get_client = self.original_client
        test_neutron.get_client()._reset()
        super(CloudTestCaseNeutronProxy, self).tearDown()

    def test_describe_security_groups(self):
        """describe_security_groups lists groups and honours name filters."""
        group_name = 'test'
        self.cloud.create_security_group(self.context, group_name, 'test')
        listing = self.cloud.describe_security_groups(self.context)
        # NOTE(vish): should have the default group as well
        self.assertEqual(len(listing['securityGroupInfo']), 2)
        listing = self.cloud.describe_security_groups(
            self.context, group_name=[group_name])
        self.assertEqual(len(listing['securityGroupInfo']), 1)
        self.assertEqual(listing['securityGroupInfo'][0]['groupName'],
                         group_name)
        self.cloud.delete_security_group(self.context, group_name)

    def test_describe_security_groups_by_id(self):
        """Groups can be looked up by their Neutron-assigned id."""
        group_name = 'test'
        self.cloud.create_security_group(self.context, group_name, 'test')
        neutron = test_neutron.get_client()
        # create_security_group does not expose the id, so fetch it from
        # the (fake) Neutron client directly.
        groups = neutron.list_security_groups(
            **{'name': group_name})['security_groups']
        listing = self.cloud.describe_security_groups(
            self.context, group_id=[groups[0]['id']])
        self.assertEqual(len(listing['securityGroupInfo']), 1)
        self.assertEqual(listing['securityGroupInfo'][0]['groupName'],
                         group_name)
        self.cloud.delete_security_group(self.context, group_name)

    def test_create_delete_security_group(self):
        """create_security_group echoes the description; delete succeeds."""
        descript = 'test description'
        created = self.cloud.create_security_group(
            self.context, 'testgrp', descript)
        self.assertEqual(
            descript, created['securityGroupSet'][0]['groupDescription'])
        self.assertTrue(
            self.cloud.delete_security_group(self.context, 'testgrp'))
| 44.950319 | 79 | 0.545504 |
aceab44588aeb9cb693cb5bf82b35308508a4c73 | 312 | py | Python | test/tests/angrtest.py | radareorg/esilsolve | 0de8242c61e8dd751c561db32255f45b6878f3c8 | [
"MIT"
] | 67 | 2020-12-13T10:01:10.000Z | 2022-03-23T01:17:36.000Z | test/tests/angrtest.py | radareorg/esilsolve | 0de8242c61e8dd751c561db32255f45b6878f3c8 | [
"MIT"
] | 1 | 2021-04-07T14:33:13.000Z | 2022-02-24T19:16:39.000Z | test/tests/angrtest.py | radareorg/esilsolve | 0de8242c61e8dd751c561db32255f45b6878f3c8 | [
"MIT"
] | 9 | 2020-12-13T14:35:16.000Z | 2022-02-26T21:56:14.000Z | import angr
# Symbolically execute the two-byte x86 shellcode b")\xd8" (0x29 0xd8,
# i.e. `sub eax, ebx`) with eax=0 and ebx=1<<31, then print the flags
# register of the successor state.
proj = angr.load_shellcode(b")\xd8", arch="x86")
entry_state = proj.factory.blank_state()
entry_state.regs.eax = entry_state.solver.BVV(0, 32)
entry_state.regs.ebx = entry_state.solver.BVV(1 << 31, 32)
succ = entry_state.step()[0]
print(succ.regs.eflags)
aceab56ecc5d95031f785bac01f834010667b591 | 107,185 | py | Python | sdk/communication/azure-communication-callingserver/azure/communication/callingserver/_generated/operations/_call_connections_operations.py | zihzhan-msft/azure-sdk-for-python | f4b3484dbf75ec9db1f0ade2ca568c9bd538d62e | [
"MIT"
] | null | null | null | sdk/communication/azure-communication-callingserver/azure/communication/callingserver/_generated/operations/_call_connections_operations.py | zihzhan-msft/azure-sdk-for-python | f4b3484dbf75ec9db1f0ade2ca568c9bd538d62e | [
"MIT"
] | null | null | null | sdk/communication/azure-communication-callingserver/azure/communication/callingserver/_generated/operations/_call_connections_operations.py | zihzhan-msft/azure-sdk-for-python | f4b3484dbf75ec9db1f0ade2ca568c9bd538d62e | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_get_audio_routing_groups_request(
    call_connection_id,  # type: str
    audio_routing_group_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the GET request for one audio routing group of a call connection."""
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    accept = "application/json"

    # Expand the path template with the serialized path parameters.
    request_url = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}/audioRoutingGroups/{audioRoutingGroupId}')
    request_url = _format_url_section(
        request_url,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
        audioRoutingGroupId=_SERIALIZER.url("audio_routing_group_id", audio_routing_group_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="GET", url=request_url, params=params, headers=headers, **kwargs)
def build_delete_audio_routing_group_request(
    call_connection_id,  # type: str
    audio_routing_group_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the DELETE request for an audio routing group of a call connection."""
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    accept = "application/json"

    # Expand the path template with the serialized path parameters.
    request_url = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}/audioRoutingGroups/{audioRoutingGroupId}')
    request_url = _format_url_section(
        request_url,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
        audioRoutingGroupId=_SERIALIZER.url("audio_routing_group_id", audio_routing_group_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="DELETE", url=request_url, params=params, headers=headers, **kwargs)
def build_update_audio_routing_group_request(
    call_connection_id,  # type: str
    audio_routing_group_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the PATCH request that updates an audio routing group."""
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    accept = "application/json"

    # Expand the path template with the serialized path parameters.
    request_url = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}/audioRoutingGroups/{audioRoutingGroupId}')
    request_url = _format_url_section(
        request_url,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
        audioRoutingGroupId=_SERIALIZER.url("audio_routing_group_id", audio_routing_group_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    # Content-Type is only sent when the caller supplied one.
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="PATCH", url=request_url, params=params, headers=headers, **kwargs)
def build_create_call_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request that creates a new call connection."""
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    accept = "application/json"

    # This operation has no path parameters; the template is used as-is.
    request_url = kwargs.pop("template_url", '/calling/callConnections')

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    # Content-Type is only sent when the caller supplied one.
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="POST", url=request_url, params=params, headers=headers, **kwargs)
def build_get_call_request(
    call_connection_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the GET request for a single call connection."""
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    accept = "application/json"

    # Expand the path template with the serialized call connection id.
    request_url = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}')
    request_url = _format_url_section(
        request_url,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="GET", url=request_url, params=params, headers=headers, **kwargs)
def build_delete_call_request(
    call_connection_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the DELETE request for a call connection."""
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    accept = "application/json"

    # Expand the path template with the serialized call connection id.
    request_url = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}')
    request_url = _format_url_section(
        request_url,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="DELETE", url=request_url, params=params, headers=headers, **kwargs)
def build_hangup_call_request(
    call_connection_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request for the :hangup action on a call connection."""
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    accept = "application/json"

    # Expand the path template with the serialized call connection id.
    request_url = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}/:hangup')
    request_url = _format_url_section(
        request_url,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="POST", url=request_url, params=params, headers=headers, **kwargs)
def build_play_audio_request(
    call_connection_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request for the :playAudio action on a call connection."""
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    accept = "application/json"

    # Expand the path template with the serialized call connection id.
    request_url = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}/:playAudio')
    request_url = _format_url_section(
        request_url,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    # Content-Type is only sent when the caller supplied one.
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="POST", url=request_url, params=params, headers=headers, **kwargs)
def build_cancel_all_media_operations_request(
    call_connection_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request for the :cancelAllMediaOperations action."""
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    accept = "application/json"

    # Expand the path template with the serialized call connection id.
    request_url = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}/:cancelAllMediaOperations')
    request_url = _format_url_section(
        request_url,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="POST", url=request_url, params=params, headers=headers, **kwargs)
def build_keep_alive_request(
    call_connection_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request for the :keepAlive action on a call connection."""
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    accept = "application/json"

    # Expand the path template with the serialized call connection id.
    request_url = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}/:keepAlive')
    request_url = _format_url_section(
        request_url,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="POST", url=request_url, params=params, headers=headers, **kwargs)
def build_transfer_to_participant_request(
    call_connection_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request for the :transferToParticipant action."""
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    accept = "application/json"

    # Expand the path template with the serialized call connection id.
    request_url = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}/:transferToParticipant')
    request_url = _format_url_section(
        request_url,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    # Content-Type is only sent when the caller supplied one.
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="POST", url=request_url, params=params, headers=headers, **kwargs)
def build_transfer_to_call_request(
    call_connection_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request for the :transferToCall action."""
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    accept = "application/json"

    # Expand the path template with the serialized call connection id.
    request_url = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}/:transferToCall')
    request_url = _format_url_section(
        request_url,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    # Content-Type is only sent when the caller supplied one.
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="POST", url=request_url, params=params, headers=headers, **kwargs)
def build_create_audio_routing_group_request(
    call_connection_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request for the :createAudioRoutingGroup action."""
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    accept = "application/json"

    # Expand the path template with the serialized call connection id.
    request_url = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}/:createAudioRoutingGroup')
    request_url = _format_url_section(
        request_url,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    # Content-Type is only sent when the caller supplied one.
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="POST", url=request_url, params=params, headers=headers, **kwargs)
def build_get_participants_request(
    call_connection_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the GET request listing the participants of a call connection."""
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    accept = "application/json"

    # Expand the path template with the serialized call connection id.
    request_url = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}/participants')
    request_url = _format_url_section(
        request_url,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="GET", url=request_url, params=params, headers=headers, **kwargs)
def build_add_participant_request(
    call_connection_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request that adds a participant to a call connection."""
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    accept = "application/json"

    # Expand the path template with the serialized call connection id.
    request_url = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}/participants')
    request_url = _format_url_section(
        request_url,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    # Content-Type is only sent when the caller supplied one.
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="POST", url=request_url, params=params, headers=headers, **kwargs)
def build_remove_participant_request(
    call_connection_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request for the participants:remove action."""
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    accept = "application/json"

    # Expand the path template with the serialized call connection id.
    request_url = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}/participants:remove')
    request_url = _format_url_section(
        request_url,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    # Content-Type is only sent when the caller supplied one.
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="POST", url=request_url, params=params, headers=headers, **kwargs)
def build_get_participant_request(
    call_connection_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request for the participants:get action."""
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    accept = "application/json"

    # Expand the path template with the serialized call connection id.
    request_url = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}/participants:get')
    request_url = _format_url_section(
        request_url,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    # Content-Type is only sent when the caller supplied one.
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="POST", url=request_url, params=params, headers=headers, **kwargs)
def build_participant_play_audio_request(
    call_connection_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request for the participants:playAudio action."""
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    accept = "application/json"

    # Expand the path template with the serialized call connection id.
    request_url = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}/participants:playAudio')
    request_url = _format_url_section(
        request_url,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    # Content-Type is only sent when the caller supplied one.
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="POST", url=request_url, params=params, headers=headers, **kwargs)
def build_cancel_participant_media_operation_request(
    call_connection_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request for the participants:cancelMediaOperation action."""
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    accept = "application/json"

    # Expand the path template with the serialized call connection id.
    request_url = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}/participants:cancelMediaOperation')
    request_url = _format_url_section(
        request_url,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    # Content-Type is only sent when the caller supplied one.
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="POST", url=request_url, params=params, headers=headers, **kwargs)
def build_mute_participant_request(
    call_connection_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request that mutes a participant in a call.

    :param call_connection_id: The call connection id to substitute into the URL.
    :type call_connection_id: str
    :return: An :class:`HttpRequest` ready to be run through the pipeline.
    :rtype: HttpRequest
    """
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    # Substitute the call connection id into the URL template.
    template = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}/participants:mute')
    url = _format_url_section(
        template,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
    )

    # Query string: the api-version is always sent.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: Content-Type only when a body content type was supplied.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(method="POST", url=url, params=params, headers=headers, **kwargs)
def build_unmute_participant_request(
    call_connection_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request that unmutes a participant in a call.

    :param call_connection_id: The call connection id to substitute into the URL.
    :type call_connection_id: str
    :return: An :class:`HttpRequest` ready to be run through the pipeline.
    :rtype: HttpRequest
    """
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    # Substitute the call connection id into the URL template.
    template = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}/participants:unmute')
    url = _format_url_section(
        template,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
    )

    # Query string: the api-version is always sent.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: Content-Type only when a body content type was supplied.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(method="POST", url=url, params=params, headers=headers, **kwargs)
def build_hold_participant_meeting_audio_request(
    call_connection_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request that holds a participant's meeting audio.

    :param call_connection_id: The call connection id to substitute into the URL.
    :type call_connection_id: str
    :return: An :class:`HttpRequest` ready to be run through the pipeline.
    :rtype: HttpRequest
    """
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    # Substitute the call connection id into the URL template.
    template = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}/participants:holdMeetingAudio')
    url = _format_url_section(
        template,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
    )

    # Query string: the api-version is always sent.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: Content-Type only when a body content type was supplied.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(method="POST", url=url, params=params, headers=headers, **kwargs)
def build_resume_participant_meeting_audio_request(
    call_connection_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request that resumes a participant's meeting audio.

    :param call_connection_id: The call connection id to substitute into the URL.
    :type call_connection_id: str
    :return: An :class:`HttpRequest` ready to be run through the pipeline.
    :rtype: HttpRequest
    """
    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    # Substitute the call connection id into the URL template.
    template = kwargs.pop("template_url", '/calling/callConnections/{callConnectionId}/participants:resumeMeetingAudio')
    url = _format_url_section(
        template,
        callConnectionId=_SERIALIZER.url("call_connection_id", call_connection_id, 'str'),
    )

    # Query string: the api-version is always sent.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: Content-Type only when a body content type was supplied.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(method="POST", url=url, params=params, headers=headers, **kwargs)
# fmt: on
class CallConnectionsOperations(object):
"""CallConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.communication.callingserver.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    @distributed_trace
    def get_audio_routing_groups(
        self,
        call_connection_id,  # type: str
        audio_routing_group_id,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.AudioRoutingGroupResult"
        """Get audio routing groups from a call.

        Get audio routing groups from a call.

        :param call_connection_id: The call connection id.
        :type call_connection_id: str
        :param audio_routing_group_id: The audio routing group id.
        :type audio_routing_group_id: str
        :keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
         overriding this default value may result in unsupported behavior.
        :paramtype api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AudioRoutingGroupResult, or the result of cls(response)
        :rtype: ~azure.communication.callingserver.models.AudioRoutingGroupResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AudioRoutingGroupResult"]
        # Map error status codes to typed exceptions; the 4xx/5xx response body is
        # deserialized into CommunicationErrorResponse and attached to the raised error.
        error_map = {
            409: ResourceExistsError,
            400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
        }
        error_map.update(kwargs.pop('error_map', {}))  # caller-supplied overrides win

        api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str

        # Build the HTTP request from the operation's URL template.
        request = build_get_audio_routing_groups_request(
            call_connection_id=call_connection_id,
            audio_routing_group_id=audio_routing_group_id,
            api_version=api_version,
            template_url=self.get_audio_routing_groups.metadata['url'],
        )
        request = _convert_request(request)
        # The endpoint host is only known from the client configuration.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)

        # Send the request synchronously through the client pipeline.
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        deserialized = self._deserialize('AudioRoutingGroupResult', pipeline_response)

        if cls:
            # Hand the raw response to the caller-provided callback instead.
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get_audio_routing_groups.metadata = {'url': '/calling/callConnections/{callConnectionId}/audioRoutingGroups/{audioRoutingGroupId}'}  # type: ignore
    @distributed_trace
    def delete_audio_routing_group(
        self,
        call_connection_id,  # type: str
        audio_routing_group_id,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Delete audio routing group from a call.

        Delete audio routing group from a call.

        :param call_connection_id: The call connection id.
        :type call_connection_id: str
        :param audio_routing_group_id: The audio routing group id.
        :type audio_routing_group_id: str
        :keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
         overriding this default value may result in unsupported behavior.
        :paramtype api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map error status codes to typed exceptions; the 4xx/5xx response body is
        # deserialized into CommunicationErrorResponse and attached to the raised error.
        error_map = {
            409: ResourceExistsError,
            400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
        }
        error_map.update(kwargs.pop('error_map', {}))  # caller-supplied overrides win

        api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str

        # Build the HTTP request from the operation's URL template.
        request = build_delete_audio_routing_group_request(
            call_connection_id=call_connection_id,
            audio_routing_group_id=audio_routing_group_id,
            api_version=api_version,
            template_url=self.delete_audio_routing_group.metadata['url'],
        )
        request = _convert_request(request)
        # The endpoint host is only known from the client configuration.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)

        # Send the request synchronously through the client pipeline.
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202 Accepted is the only success status; no body is returned.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            # Hand the raw response to the caller-provided callback instead.
            return cls(pipeline_response, None, {})

    delete_audio_routing_group.metadata = {'url': '/calling/callConnections/{callConnectionId}/audioRoutingGroups/{audioRoutingGroupId}'}  # type: ignore
    @distributed_trace
    def update_audio_routing_group(
        self,
        call_connection_id,  # type: str
        audio_routing_group_id,  # type: str
        update_audio_routing_group_request,  # type: "_models.UpdateAudioRoutingGroupRequest"
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Update audio routing group.

        Update audio routing group.

        :param call_connection_id: The call connection id.
        :type call_connection_id: str
        :param audio_routing_group_id: The audio routing group id.
        :type audio_routing_group_id: str
        :param update_audio_routing_group_request: The update audio routing group request.
        :type update_audio_routing_group_request:
         ~azure.communication.callingserver.models.UpdateAudioRoutingGroupRequest
        :keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
         overriding this default value may result in unsupported behavior.
        :paramtype api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map error status codes to typed exceptions; the 4xx/5xx response body is
        # deserialized into CommunicationErrorResponse and attached to the raised error.
        error_map = {
            409: ResourceExistsError,
            400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
        }
        error_map.update(kwargs.pop('error_map', {}))  # caller-supplied overrides win

        api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        # Serialize the request model into the JSON body.
        json = self._serialize.body(update_audio_routing_group_request, 'UpdateAudioRoutingGroupRequest')

        # Build the HTTP request from the operation's URL template.
        request = build_update_audio_routing_group_request(
            call_connection_id=call_connection_id,
            audio_routing_group_id=audio_routing_group_id,
            api_version=api_version,
            content_type=content_type,
            json=json,
            template_url=self.update_audio_routing_group.metadata['url'],
        )
        request = _convert_request(request)
        # The endpoint host is only known from the client configuration.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)

        # Send the request synchronously through the client pipeline.
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            # Hand the raw response to the caller-provided callback instead.
            return cls(pipeline_response, None, {})

    update_audio_routing_group.metadata = {'url': '/calling/callConnections/{callConnectionId}/audioRoutingGroups/{audioRoutingGroupId}'}  # type: ignore
    @distributed_trace
    def create_call(
        self,
        call_request,  # type: "_models.CreateCallRequest"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.CreateCallResult"
        """Create a new call.

        Create a new call.

        :param call_request: Create call request.
        :type call_request: ~azure.communication.callingserver.models.CreateCallRequest
        :keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
         overriding this default value may result in unsupported behavior.
        :paramtype api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CreateCallResult, or the result of cls(response)
        :rtype: ~azure.communication.callingserver.models.CreateCallResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CreateCallResult"]
        # Map error status codes to typed exceptions. Note: unlike the per-connection
        # operations, 404 maps to the plain ResourceNotFoundError here (no error-body model).
        error_map = {
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
        }
        error_map.update(kwargs.pop('error_map', {}))  # caller-supplied overrides win

        api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        # Serialize the request model into the JSON body.
        json = self._serialize.body(call_request, 'CreateCallRequest')

        # Build the HTTP request from the operation's URL template.
        request = build_create_call_request(
            api_version=api_version,
            content_type=content_type,
            json=json,
            template_url=self.create_call.metadata['url'],
        )
        request = _convert_request(request)
        # The endpoint host is only known from the client configuration.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)

        # Send the request synchronously through the client pipeline.
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 201 Created is the only success status for call creation.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        deserialized = self._deserialize('CreateCallResult', pipeline_response)

        if cls:
            # Hand the raw response to the caller-provided callback instead.
            return cls(pipeline_response, deserialized, {})

        return deserialized

    create_call.metadata = {'url': '/calling/callConnections'}  # type: ignore
    @distributed_trace
    def get_call(
        self,
        call_connection_id,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.CallConnectionProperties"
        """Get call connection.

        Get call connection.

        :param call_connection_id: The call connection id.
        :type call_connection_id: str
        :keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
         overriding this default value may result in unsupported behavior.
        :paramtype api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CallConnectionProperties, or the result of cls(response)
        :rtype: ~azure.communication.callingserver.models.CallConnectionProperties
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CallConnectionProperties"]
        # Map error status codes to typed exceptions; the 4xx/5xx response body is
        # deserialized into CommunicationErrorResponse and attached to the raised error.
        error_map = {
            409: ResourceExistsError,
            400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
        }
        error_map.update(kwargs.pop('error_map', {}))  # caller-supplied overrides win

        api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str

        # Build the HTTP request from the operation's URL template.
        request = build_get_call_request(
            call_connection_id=call_connection_id,
            api_version=api_version,
            template_url=self.get_call.metadata['url'],
        )
        request = _convert_request(request)
        # The endpoint host is only known from the client configuration.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)

        # Send the request synchronously through the client pipeline.
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        deserialized = self._deserialize('CallConnectionProperties', pipeline_response)

        if cls:
            # Hand the raw response to the caller-provided callback instead.
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get_call.metadata = {'url': '/calling/callConnections/{callConnectionId}'}  # type: ignore
    @distributed_trace
    def delete_call(
        self,
        call_connection_id,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Delete the call.

        Delete the call.

        :param call_connection_id: The call connection id.
        :type call_connection_id: str
        :keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
         overriding this default value may result in unsupported behavior.
        :paramtype api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map error status codes to typed exceptions; the 4xx/5xx response body is
        # deserialized into CommunicationErrorResponse and attached to the raised error.
        error_map = {
            409: ResourceExistsError,
            400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
        }
        error_map.update(kwargs.pop('error_map', {}))  # caller-supplied overrides win

        api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str

        # Build the HTTP request from the operation's URL template.
        request = build_delete_call_request(
            call_connection_id=call_connection_id,
            api_version=api_version,
            template_url=self.delete_call.metadata['url'],
        )
        request = _convert_request(request)
        # The endpoint host is only known from the client configuration.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)

        # Send the request synchronously through the client pipeline.
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202 Accepted is the only success status; no body is returned.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            # Hand the raw response to the caller-provided callback instead.
            return cls(pipeline_response, None, {})

    delete_call.metadata = {'url': '/calling/callConnections/{callConnectionId}'}  # type: ignore
    @distributed_trace
    def hangup_call(
        self,
        call_connection_id,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Hangup the call.

        Hangup the call.

        :param call_connection_id: The call connection id.
        :type call_connection_id: str
        :keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
         overriding this default value may result in unsupported behavior.
        :paramtype api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map error status codes to typed exceptions; the 4xx/5xx response body is
        # deserialized into CommunicationErrorResponse and attached to the raised error.
        error_map = {
            409: ResourceExistsError,
            400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
        }
        error_map.update(kwargs.pop('error_map', {}))  # caller-supplied overrides win

        api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str

        # Build the HTTP request from the operation's URL template.
        request = build_hangup_call_request(
            call_connection_id=call_connection_id,
            api_version=api_version,
            template_url=self.hangup_call.metadata['url'],
        )
        request = _convert_request(request)
        # The endpoint host is only known from the client configuration.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)

        # Send the request synchronously through the client pipeline.
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202 Accepted is the only success status; no body is returned.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            # Hand the raw response to the caller-provided callback instead.
            return cls(pipeline_response, None, {})

    hangup_call.metadata = {'url': '/calling/callConnections/{callConnectionId}/:hangup'}  # type: ignore
    @distributed_trace
    def play_audio(
        self,
        call_connection_id,  # type: str
        play_audio_request,  # type: "_models.PlayAudioRequest"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PlayAudioResult"
        """Play audio in the call.

        Play audio in the call.

        :param call_connection_id: The call connection id.
        :type call_connection_id: str
        :param play_audio_request: The play audio request.
        :type play_audio_request: ~azure.communication.callingserver.models.PlayAudioRequest
        :keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
         overriding this default value may result in unsupported behavior.
        :paramtype api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PlayAudioResult, or the result of cls(response)
        :rtype: ~azure.communication.callingserver.models.PlayAudioResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PlayAudioResult"]
        # Map error status codes to typed exceptions; the 4xx/5xx response body is
        # deserialized into CommunicationErrorResponse and attached to the raised error.
        error_map = {
            409: ResourceExistsError,
            400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
        }
        error_map.update(kwargs.pop('error_map', {}))  # caller-supplied overrides win

        api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        # Serialize the request model into the JSON body.
        json = self._serialize.body(play_audio_request, 'PlayAudioRequest')

        # Build the HTTP request from the operation's URL template.
        request = build_play_audio_request(
            call_connection_id=call_connection_id,
            api_version=api_version,
            content_type=content_type,
            json=json,
            template_url=self.play_audio.metadata['url'],
        )
        request = _convert_request(request)
        # The endpoint host is only known from the client configuration.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)

        # Send the request synchronously through the client pipeline.
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202 Accepted is the only success status; the body still carries a result.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        deserialized = self._deserialize('PlayAudioResult', pipeline_response)

        if cls:
            # Hand the raw response to the caller-provided callback instead.
            return cls(pipeline_response, deserialized, {})

        return deserialized

    play_audio.metadata = {'url': '/calling/callConnections/{callConnectionId}/:playAudio'}  # type: ignore
    @distributed_trace
    def cancel_all_media_operations(
        self,
        call_connection_id,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Cancel all media operations.

        Cancel all media operations.

        :param call_connection_id: The call connection id.
        :type call_connection_id: str
        :keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
         overriding this default value may result in unsupported behavior.
        :paramtype api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map error status codes to typed exceptions; the 4xx/5xx response body is
        # deserialized into CommunicationErrorResponse and attached to the raised error.
        error_map = {
            409: ResourceExistsError,
            400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
        }
        error_map.update(kwargs.pop('error_map', {}))  # caller-supplied overrides win

        api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str

        # Build the HTTP request from the operation's URL template.
        request = build_cancel_all_media_operations_request(
            call_connection_id=call_connection_id,
            api_version=api_version,
            template_url=self.cancel_all_media_operations.metadata['url'],
        )
        request = _convert_request(request)
        # The endpoint host is only known from the client configuration.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)

        # Send the request synchronously through the client pipeline.
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            # Hand the raw response to the caller-provided callback instead.
            return cls(pipeline_response, None, {})

    cancel_all_media_operations.metadata = {'url': '/calling/callConnections/{callConnectionId}/:cancelAllMediaOperations'}  # type: ignore
    @distributed_trace
    def keep_alive(
        self,
        call_connection_id,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Keep the call alive.

        Keep the call alive.

        :param call_connection_id: The call connection id.
        :type call_connection_id: str
        :keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
         overriding this default value may result in unsupported behavior.
        :paramtype api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map error status codes to typed exceptions; the 4xx/5xx response body is
        # deserialized into CommunicationErrorResponse and attached to the raised error.
        error_map = {
            409: ResourceExistsError,
            400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
            500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
        }
        error_map.update(kwargs.pop('error_map', {}))  # caller-supplied overrides win

        api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str

        # Build the HTTP request from the operation's URL template.
        request = build_keep_alive_request(
            call_connection_id=call_connection_id,
            api_version=api_version,
            template_url=self.keep_alive.metadata['url'],
        )
        request = _convert_request(request)
        # The endpoint host is only known from the client configuration.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)

        # Send the request synchronously through the client pipeline.
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            # Hand the raw response to the caller-provided callback instead.
            return cls(pipeline_response, None, {})

    keep_alive.metadata = {'url': '/calling/callConnections/{callConnectionId}/:keepAlive'}  # type: ignore
@distributed_trace
def transfer_to_participant(
self,
call_connection_id, # type: str
transfer_to_participant_request, # type: "_models.TransferToParticipantRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.TransferCallResult"
"""Transfer the call to a participant.
Transfer the call to a participant.
:param call_connection_id: The call connection id.
:type call_connection_id: str
:param transfer_to_participant_request: The transfer to participant request.
:type transfer_to_participant_request:
~azure.communication.callingserver.models.TransferToParticipantRequest
:keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TransferCallResult, or the result of cls(response)
:rtype: ~azure.communication.callingserver.models.TransferCallResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TransferCallResult"]
error_map = {
409: ResourceExistsError,
400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
json = self._serialize.body(transfer_to_participant_request, 'TransferToParticipantRequest')
request = build_transfer_to_participant_request(
call_connection_id=call_connection_id,
api_version=api_version,
content_type=content_type,
json=json,
template_url=self.transfer_to_participant.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = self._deserialize('TransferCallResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
transfer_to_participant.metadata = {'url': '/calling/callConnections/{callConnectionId}/:transferToParticipant'} # type: ignore
@distributed_trace
def transfer_to_call(
self,
call_connection_id, # type: str
transfer_to_call_request, # type: "_models.TransferToCallRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.TransferCallResult"
"""Transfer the current call to another call.
Transfer the current call to another call.
:param call_connection_id: The call connection id.
:type call_connection_id: str
:param transfer_to_call_request: The transfer to call request.
:type transfer_to_call_request: ~azure.communication.callingserver.models.TransferToCallRequest
:keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TransferCallResult, or the result of cls(response)
:rtype: ~azure.communication.callingserver.models.TransferCallResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TransferCallResult"]
error_map = {
409: ResourceExistsError,
400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
json = self._serialize.body(transfer_to_call_request, 'TransferToCallRequest')
request = build_transfer_to_call_request(
call_connection_id=call_connection_id,
api_version=api_version,
content_type=content_type,
json=json,
template_url=self.transfer_to_call.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = self._deserialize('TransferCallResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
transfer_to_call.metadata = {'url': '/calling/callConnections/{callConnectionId}/:transferToCall'} # type: ignore
@distributed_trace
def create_audio_routing_group(
self,
call_connection_id, # type: str
audio_routing_group_request, # type: "_models.AudioRoutingGroupRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.CreateAudioRoutingGroupResult"
"""Create audio routing group in a call.
Create audio routing group in a call.
:param call_connection_id: The call connection id.
:type call_connection_id: str
:param audio_routing_group_request: The audio routing group request.
:type audio_routing_group_request:
~azure.communication.callingserver.models.AudioRoutingGroupRequest
:keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CreateAudioRoutingGroupResult, or the result of cls(response)
:rtype: ~azure.communication.callingserver.models.CreateAudioRoutingGroupResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CreateAudioRoutingGroupResult"]
error_map = {
409: ResourceExistsError,
400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
json = self._serialize.body(audio_routing_group_request, 'AudioRoutingGroupRequest')
request = build_create_audio_routing_group_request(
call_connection_id=call_connection_id,
api_version=api_version,
content_type=content_type,
json=json,
template_url=self.create_audio_routing_group.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = self._deserialize('CreateAudioRoutingGroupResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_audio_routing_group.metadata = {'url': '/calling/callConnections/{callConnectionId}/:createAudioRoutingGroup'} # type: ignore
@distributed_trace
def get_participants(
self,
call_connection_id, # type: str
**kwargs # type: Any
):
# type: (...) -> List["_models.CallParticipant"]
"""Get participants from a call.
Get participants from a call.
:param call_connection_id: The call connection id.
:type call_connection_id: str
:keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of CallParticipant, or the result of cls(response)
:rtype: list[~azure.communication.callingserver.models.CallParticipant]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.CallParticipant"]]
error_map = {
409: ResourceExistsError,
400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-15-preview") # type: str
request = build_get_participants_request(
call_connection_id=call_connection_id,
api_version=api_version,
template_url=self.get_participants.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = self._deserialize('[CallParticipant]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_participants.metadata = {'url': '/calling/callConnections/{callConnectionId}/participants'} # type: ignore
@distributed_trace
def add_participant(
self,
call_connection_id, # type: str
add_participant_request, # type: "_models.AddParticipantRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.AddParticipantResult"
"""Add a participant to the call.
Add a participant to the call.
:param call_connection_id: The call connection id.
:type call_connection_id: str
:param add_participant_request: Add participant request.
:type add_participant_request: ~azure.communication.callingserver.models.AddParticipantRequest
:keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AddParticipantResult, or the result of cls(response)
:rtype: ~azure.communication.callingserver.models.AddParticipantResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AddParticipantResult"]
error_map = {
409: ResourceExistsError,
400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
json = self._serialize.body(add_participant_request, 'AddParticipantRequest')
request = build_add_participant_request(
call_connection_id=call_connection_id,
api_version=api_version,
content_type=content_type,
json=json,
template_url=self.add_participant.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = self._deserialize('AddParticipantResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
add_participant.metadata = {'url': '/calling/callConnections/{callConnectionId}/participants'} # type: ignore
@distributed_trace
def remove_participant(
self,
call_connection_id, # type: str
remove_participant_request, # type: "_models.RemoveParticipantRequest"
**kwargs # type: Any
):
# type: (...) -> None
"""Remove participant from the call using identifier.
Remove participant from the call using identifier.
:param call_connection_id: The call connection id.
:type call_connection_id: str
:param remove_participant_request: The identifier of the participant to be removed from the
call.
:type remove_participant_request:
~azure.communication.callingserver.models.RemoveParticipantRequest
:keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
409: ResourceExistsError,
400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
json = self._serialize.body(remove_participant_request, 'RemoveParticipantRequest')
request = build_remove_participant_request(
call_connection_id=call_connection_id,
api_version=api_version,
content_type=content_type,
json=json,
template_url=self.remove_participant.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
remove_participant.metadata = {'url': '/calling/callConnections/{callConnectionId}/participants:remove'} # type: ignore
@distributed_trace
def get_participant(
self,
call_connection_id, # type: str
get_participant_request, # type: "_models.GetParticipantRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.CallParticipant"
"""Get participant from the call using identifier.
Get participant from the call using identifier.
:param call_connection_id: The call connection id.
:type call_connection_id: str
:param get_participant_request: The identifier of the participant to get from the call.
:type get_participant_request: ~azure.communication.callingserver.models.GetParticipantRequest
:keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CallParticipant, or the result of cls(response)
:rtype: ~azure.communication.callingserver.models.CallParticipant
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CallParticipant"]
error_map = {
409: ResourceExistsError,
400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
json = self._serialize.body(get_participant_request, 'GetParticipantRequest')
request = build_get_participant_request(
call_connection_id=call_connection_id,
api_version=api_version,
content_type=content_type,
json=json,
template_url=self.get_participant.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = self._deserialize('CallParticipant', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_participant.metadata = {'url': '/calling/callConnections/{callConnectionId}/participants:get'} # type: ignore
@distributed_trace
def participant_play_audio(
self,
call_connection_id, # type: str
play_audio_to_participant_request, # type: "_models.PlayAudioToParticipantRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.PlayAudioResult"
"""Play audio to a participant.
Play audio to a participant.
:param call_connection_id: The callConnectionId.
:type call_connection_id: str
:param play_audio_to_participant_request: The play audio to participant request.
:type play_audio_to_participant_request:
~azure.communication.callingserver.models.PlayAudioToParticipantRequest
:keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PlayAudioResult, or the result of cls(response)
:rtype: ~azure.communication.callingserver.models.PlayAudioResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PlayAudioResult"]
error_map = {
409: ResourceExistsError,
400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
json = self._serialize.body(play_audio_to_participant_request, 'PlayAudioToParticipantRequest')
request = build_participant_play_audio_request(
call_connection_id=call_connection_id,
api_version=api_version,
content_type=content_type,
json=json,
template_url=self.participant_play_audio.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = self._deserialize('PlayAudioResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
participant_play_audio.metadata = {'url': '/calling/callConnections/{callConnectionId}/participants:playAudio'} # type: ignore
@distributed_trace
def cancel_participant_media_operation(
self,
call_connection_id, # type: str
cancel_media_operation_request, # type: "_models.CancelParticipantMediaOperationRequest"
**kwargs # type: Any
):
# type: (...) -> None
"""Cancel media operation for a participant.
Cancel media operation for a participant.
:param call_connection_id: The callConnectionId.
:type call_connection_id: str
:param cancel_media_operation_request: The cancel media operation for participant request.
:type cancel_media_operation_request:
~azure.communication.callingserver.models.CancelParticipantMediaOperationRequest
:keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
409: ResourceExistsError,
400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
json = self._serialize.body(cancel_media_operation_request, 'CancelParticipantMediaOperationRequest')
request = build_cancel_participant_media_operation_request(
call_connection_id=call_connection_id,
api_version=api_version,
content_type=content_type,
json=json,
template_url=self.cancel_participant_media_operation.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
cancel_participant_media_operation.metadata = {'url': '/calling/callConnections/{callConnectionId}/participants:cancelMediaOperation'} # type: ignore
@distributed_trace
def mute_participant(
self,
call_connection_id, # type: str
mute_participant_request, # type: "_models.MuteParticipantRequest"
**kwargs # type: Any
):
# type: (...) -> None
"""Mute participant in the call.
Mute participant in the call.
:param call_connection_id: The call connection id.
:type call_connection_id: str
:param mute_participant_request: The identifier of the participant to mute in the call.
:type mute_participant_request:
~azure.communication.callingserver.models.MuteParticipantRequest
:keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
409: ResourceExistsError,
400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
json = self._serialize.body(mute_participant_request, 'MuteParticipantRequest')
request = build_mute_participant_request(
call_connection_id=call_connection_id,
api_version=api_version,
content_type=content_type,
json=json,
template_url=self.mute_participant.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
mute_participant.metadata = {'url': '/calling/callConnections/{callConnectionId}/participants:mute'} # type: ignore
@distributed_trace
def unmute_participant(
self,
call_connection_id, # type: str
unmute_participant_request, # type: "_models.UnmuteParticipantRequest"
**kwargs # type: Any
):
# type: (...) -> None
"""Unmute participant in the call.
Unmute participant in the call.
:param call_connection_id: The call connection id.
:type call_connection_id: str
:param unmute_participant_request: The identifier of the participant to unmute in the call.
:type unmute_participant_request:
~azure.communication.callingserver.models.UnmuteParticipantRequest
:keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
409: ResourceExistsError,
400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
json = self._serialize.body(unmute_participant_request, 'UnmuteParticipantRequest')
request = build_unmute_participant_request(
call_connection_id=call_connection_id,
api_version=api_version,
content_type=content_type,
json=json,
template_url=self.unmute_participant.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
unmute_participant.metadata = {'url': '/calling/callConnections/{callConnectionId}/participants:unmute'} # type: ignore
@distributed_trace
def hold_participant_meeting_audio(
    self,
    call_connection_id,  # type: str
    hold_meeting_audio_request,  # type: "_models.HoldMeetingAudioRequest"
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Hold meeting audio of a participant in the call.

    :param call_connection_id: The call connection id.
    :type call_connection_id: str
    :param hold_meeting_audio_request: The request payload for holding meeting audio for a
     participant.
    :type hold_meeting_audio_request:
     ~azure.communication.callingserver.models.HoldMeetingAudioRequest
    :keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]

    # Factory for handlers that deserialize the service's error payload
    # into the given exception type.
    def _typed_error(exc_type):
        return lambda response: exc_type(
            response=response,
            model=self._deserialize(_models.CommunicationErrorResponse, response))

    error_map = {
        409: ResourceExistsError,
        400: _typed_error(HttpResponseError),
        401: _typed_error(ClientAuthenticationError),
        403: _typed_error(HttpResponseError),
        404: _typed_error(ResourceNotFoundError),
        500: _typed_error(HttpResponseError),
    }
    # Caller-supplied mappings take precedence over the defaults above.
    error_map.update(kwargs.pop('error_map', {}))

    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

    # Serialize the request model into a JSON-ready body.
    body = self._serialize.body(hold_meeting_audio_request, 'HoldMeetingAudioRequest')

    http_request = build_hold_participant_meeting_audio_request(
        call_connection_id=call_connection_id,
        api_version=api_version,
        content_type=content_type,
        json=body,
        template_url=self.hold_participant_meeting_audio.metadata['url'],
    )
    http_request = _convert_request(http_request)
    # Substitute the {endpoint} placeholder in the template URL.
    http_request.url = self._client.format_url(
        http_request.url,
        endpoint=self._serialize.url(
            "self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    # Remaining kwargs flow through to the pipeline (transport options etc.).
    pipeline_response = self._client._pipeline.run(http_request, stream=False, **kwargs)
    raw_response = pipeline_response.http_response

    if raw_response.status_code != 200:
        map_error(status_code=raw_response.status_code, response=raw_response,
                  error_map=error_map)
        raise HttpResponseError(response=raw_response)

    if cls:
        return cls(pipeline_response, None, {})

hold_participant_meeting_audio.metadata = {'url': '/calling/callConnections/{callConnectionId}/participants:holdMeetingAudio'}  # type: ignore
@distributed_trace
def resume_participant_meeting_audio(
    self,
    call_connection_id,  # type: str
    resume_meeting_audio_request,  # type: "_models.ResumeMeetingAudioRequest"
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Resume meeting audio of a participant in the call.

    Resume meeting audio of a participant in the call.

    :param call_connection_id: The call connection id.
    :type call_connection_id: str
    :param resume_meeting_audio_request: The request payload for resuming meeting audio for a
     participant.
    :type resume_meeting_audio_request:
     ~azure.communication.callingserver.models.ResumeMeetingAudioRequest
    :keyword api_version: Api Version. The default value is "2021-11-15-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map HTTP status codes to exceptions; the 4xx/5xx handlers deserialize
    # the service's CommunicationErrorResponse payload into the raised error.
    error_map = {
        409: ResourceExistsError,
        400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
        401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
        403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
        404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
        500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),
    }
    # Caller-supplied 'error_map' entries override the defaults above.
    error_map.update(kwargs.pop('error_map', {}))

    api_version = kwargs.pop('api_version', "2021-11-15-preview")  # type: str
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

    # Serialize the request model into a JSON-ready body.
    json = self._serialize.body(resume_meeting_audio_request, 'ResumeMeetingAudioRequest')

    request = build_resume_participant_meeting_audio_request(
        call_connection_id=call_connection_id,
        api_version=api_version,
        content_type=content_type,
        json=json,
        template_url=self.resume_participant_meeting_audio.metadata['url'],
    )
    request = _convert_request(request)
    # Substitute the {endpoint} placeholder in the template URL.
    path_format_arguments = {
        "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)

    # Remaining kwargs flow through to the pipeline (transport options etc.).
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # When a custom 'cls' callback was supplied, hand it the raw pipeline
    # response; otherwise the method returns None.
    if cls:
        return cls(pipeline_response, None, {})

resume_participant_meeting_audio.metadata = {'url': '/calling/callConnections/{callConnectionId}/participants:resumeMeetingAudio'}  # type: ignore
| 44.960151 | 154 | 0.687055 |
aceab6dbfc8983d6ab20d6fe20951e8c79b17ee4 | 7,707 | py | Python | python/tvm/contrib/nnpack.py | zhanghaohit/incubator-tvm | ee0af843f3c5a3429e888079afb5f30789bd9bee | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 286 | 2020-06-23T06:40:44.000Z | 2022-03-30T01:27:49.000Z | python/tvm/contrib/nnpack.py | zhanghaohit/incubator-tvm | ee0af843f3c5a3429e888079afb5f30789bd9bee | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 10 | 2020-07-31T03:26:59.000Z | 2021-12-27T15:00:54.000Z | python/tvm/contrib/nnpack.py | zhanghaohit/incubator-tvm | ee0af843f3c5a3429e888079afb5f30789bd9bee | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 30 | 2020-07-17T01:04:14.000Z | 2021-12-27T14:05:19.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""External function interface to NNPACK libraries."""
from __future__ import absolute_import as _abs
from .. import api as _api
from .. import intrin as _intrin
from .._ffi.function import _init_api
def is_available():
    """Return True when the NNPACK runtime can be used.

    Equivalent to checking that `nnp_initialize()` returned
    `nnp_status_success` (status code 0).
    """
    status = _initialize()
    return status == 0
def fully_connected_inference(lhs, rhs, nthreads=1):
    """Create an extern op computing a fully connected layer with NNPACK.

    Parameters
    ----------
    lhs : Tensor
        1D input array input[input_channels] of FP32 elements.
    rhs : Tensor
        2D weight matrix kernel[output_channels][input_channels] of FP32
        elements.
    nthreads : int, optional
        Number of threads NNPACK may use (default 1).

    Returns
    -------
    C : Tensor
        1D output array out[output_channels] of FP32 elements.
    """
    output_channels = rhs.shape[0]

    def _compute(ins, outs):
        # Forward the input/weight/output buffers to the NNPACK runtime.
        return _intrin.call_packed(
            "tvm.contrib.nnpack.fully_connected_inference",
            ins[0], ins[1], outs[0], nthreads)

    return _api.extern((output_channels,), [lhs, rhs], _compute, name="C")
class ConvolutionAlgorithm:
    """Integer codes selecting the NNPACK convolution algorithm.

    Passed through to the packed runtime functions; the values presumably
    mirror NNPACK's ``nnp_convolution_algorithm`` enum — confirm against
    the C side before changing them.
    """
    AUTO = 0
    FFT_8x8 = 1
    FFT_16x16 = 2
    WT_8x8 = 3
    IMPLICIT_GEMM = 4
    DIRECT = 5
    WT_8x8_FP16 = 6
class ConvolutionTransformStrategy:
    """Integer codes for when the kernel transform happens.

    COMPUTE transforms at inference time; PRECOMPUTE uses a kernel
    transformed ahead of time (see
    ``convolution_inference_weight_transform``).
    """
    COMPUTE = 1
    PRECOMPUTE = 2
def convolution_inference(
        data, kernel, bias, padding, stride, nthreads=1,
        algorithm=ConvolutionAlgorithm.AUTO):
    """Create an extern op running NNPACK inference convolution.

    Parameters
    ----------
    data : Tensor
        4D input tensor input[batch][input_channels][input_height]
        [input_width] of FP32 elements.
    kernel : Tensor
        4D kernel tensor kernel[output_channels][input_channels]
        [kernel_height][kernel_width] of FP32 elements.
    bias : Tensor or None
        1D bias array bias[output_channels] of FP32 elements, or None for
        no bias.
    padding : list
        4-dim list [pad_top, pad_bottom, pad_left, pad_right] describing
        the padding around the feature map.
    stride : list
        2-dim list [stride_height, stride_width].
    nthreads : int, optional
        Number of threads NNPACK may use (default 1).
    algorithm : int, optional
        One of the ConvolutionAlgorithm codes (default AUTO).

    Returns
    -------
    output : Tensor
        4D output tensor output[batch][output_channels][output_height]
        [output_width] of FP32 elements.
    """
    assert isinstance(padding, list) and len(padding) == 4
    assert isinstance(stride, list) and len(stride) == 2
    batch, _, input_height, input_width = data.shape
    output_channels, _, kernel_height, kernel_width = kernel.shape
    idxdiv = _api.indexdiv
    output_height = idxdiv(
        input_height + padding[0] + padding[1] - kernel_height, stride[0]) + 1
    # BUGFIX: the width must use the left/right pads (indices 2 and 3); the
    # previous code reused the top/bottom pads, mis-sizing the output for
    # asymmetric padding (no effect when padding is symmetric).
    output_width = idxdiv(
        input_width + padding[2] + padding[3] - kernel_width, stride[1]) + 1
    return _api.extern(
        (batch, output_channels, output_height, output_width),
        [data, kernel, bias] if bias is not None else [data, kernel],
        lambda ins, outs: _intrin.call_packed(
            "tvm.contrib.nnpack.convolution_inference",
            ins[0],
            ins[1],
            # NNPACK still expects a bias slot; pass 0 when no bias tensor.
            ins[2] if bias is not None else 0,
            outs[0], padding[0], padding[1], padding[2], padding[3],
            stride[0], stride[1], nthreads, algorithm), name="C")
def convolution_inference_without_weight_transform(
        data, transformed_kernel, bias, padding, stride, nthreads=1,
        algorithm=ConvolutionAlgorithm.AUTO):
    """Create an extern op running NNPACK inference convolution with a
    kernel that was already transformed by
    ``convolution_inference_weight_transform``.

    Parameters
    ----------
    data : Tensor
        4D input tensor input[batch][input_channels][input_height]
        [input_width] of FP32 elements.
    transformed_kernel : Tensor
        4D pre-transformed kernel kernel[output_channels][input_channels]
        [tile][tile] of FP32 elements.
    bias : Tensor or None
        1D bias array bias[output_channels] of FP32 elements, or None for
        no bias.
    padding : list
        4-dim list [pad_top, pad_bottom, pad_left, pad_right] describing
        the padding around the feature map.
    stride : list
        2-dim list [stride_height, stride_width].
    nthreads : int, optional
        Number of threads NNPACK may use (default 1).
    algorithm : int
        Must be WT_8x8 or WT_8x8_FP16: only the Winograd algorithms
        consume a pre-transformed kernel.

    Returns
    -------
    output : Tensor
        4D output tensor output[batch][output_channels][output_height]
        [output_width] of FP32 elements.
    """
    assert algorithm in (ConvolutionAlgorithm.WT_8x8,
                         ConvolutionAlgorithm.WT_8x8_FP16)
    assert isinstance(padding, list) and len(padding) == 4
    assert isinstance(stride, list) and len(stride) == 2
    batch, _, input_height, input_width = data.shape
    output_channels, _, _, _ = transformed_kernel.shape
    # The transformed kernel no longer carries the spatial extent; the
    # Winograd path used here is fixed to 3x3 kernels.
    kernel_height, kernel_width = (3, 3)
    idxdiv = _api.indexdiv
    output_height = idxdiv(input_height + padding[0] + padding[1] - kernel_height, stride[0]) + 1
    # BUGFIX: the width must use the left/right pads (indices 2 and 3); the
    # previous code reused the top/bottom pads, mis-sizing the output for
    # asymmetric padding (no effect when padding is symmetric).
    output_width = idxdiv(input_width + padding[2] + padding[3] - kernel_width, stride[1]) + 1
    return _api.extern(
        (batch, output_channels, output_height, output_width),
        [data, transformed_kernel, bias] if bias is not None else [data, transformed_kernel],
        lambda ins, outs: _intrin.call_packed(
            "tvm.contrib.nnpack.convolution_inference_without_weight_transform",
            ins[0],
            ins[1],
            # NNPACK still expects a bias slot; pass 0 when no bias tensor.
            ins[2] if bias is not None else 0,
            outs[0], padding[0], padding[1], padding[2], padding[3],
            stride[0], stride[1], nthreads, algorithm), name="C", dtype='float32')
def convolution_inference_weight_transform(
        kernel, nthreads=1,
        algorithm=ConvolutionAlgorithm.AUTO,
        dtype='float32'):
    """Create an extern op pre-transforming a convolution kernel for the
    NNPACK Winograd inference path.

    Parameters
    ----------
    kernel : Tensor
        4D kernel tensor kernel[output_channels][input_channels]
        [kernel_height][kernel_width] of FP32 elements.

    Returns
    -------
    output : Tensor
        4D transformed kernel output[output_channels][input_channels]
        [tile][tile] of FP32 elements.
    """
    # Only the Winograd algorithms consume a pre-transformed kernel.
    assert algorithm in (ConvolutionAlgorithm.WT_8x8, ConvolutionAlgorithm.WT_8x8_FP16)
    output_channels, input_channels, _, _ = kernel.shape
    tile = 8  # spatial extent of the transformed kernel tiles

    # Accept either a string dtype or an object exposing a .dtype attribute.
    out_dtype = dtype if isinstance(dtype, str) else dtype.dtype

    def _transform(ins, outs):
        return _intrin.call_packed(
            "tvm.contrib.nnpack.convolution_inference_weight_transform",
            ins[0], outs[0], nthreads, algorithm)

    return _api.extern(
        (output_channels, input_channels, tile, tile),
        [kernel],
        _transform,
        name="transform_kernel",
        dtype=out_dtype)
# Hook the tvm.contrib.nnpack.* global PackedFuncs (such as _initialize used
# above) into this module's namespace at import time.
_init_api("tvm.contrib.nnpack")
| 37.412621 | 97 | 0.673414 |
aceab83e766b28b4fda0adee3b143bc3771f830e | 2,052 | py | Python | libica/openapi/libgds/test/test_object_store_settings.py | umccr-illumina/libica | 916d27eea499f29bee590268b84208effb0cc576 | [
"MIT"
] | null | null | null | libica/openapi/libgds/test/test_object_store_settings.py | umccr-illumina/libica | 916d27eea499f29bee590268b84208effb0cc576 | [
"MIT"
] | 4 | 2021-11-15T10:47:51.000Z | 2022-02-22T04:43:20.000Z | libica/openapi/libgds/test/test_object_store_settings.py | umccr-illumina/libica | 916d27eea499f29bee590268b84208effb0cc576 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Genomic Data Store Service
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import libica.openapi.libgds
from libica.openapi.libgds.models.object_store_settings import ObjectStoreSettings # noqa: E501
from libica.openapi.libgds.rest import ApiException
class TestObjectStoreSettings(unittest.TestCase):
    """ObjectStoreSettings unit test stubs"""

    def setUp(self):
        # No per-test setup required.
        pass

    def tearDown(self):
        # No per-test teardown required.
        pass

    def make_instance(self, include_optional):
        """Build an ObjectStoreSettings model instance for the test.

        include_optional is a boolean: when False only required params are
        meant to be included, when True both required and optional params.
        NOTE(review): the only optional field (platform_credentials_name) is
        commented out below, so both branches currently construct identical
        objects — confirm whether that field should be restored.
        """
        # model = libica.openapi.libgds.models.object_store_settings.ObjectStoreSettings()  # noqa: E501
        if include_optional:
            return ObjectStoreSettings(
                aws_s3=libica.openapi.libgds.models.awss3_object_store_setting.AWSS3ObjectStoreSetting(
                    bucket_name='012',
                    key_prefix='gds-volumes/', ),
                # platform_credentials_name = '0'
                secret_name='test',
            )
        else:
            return ObjectStoreSettings(
                aws_s3=libica.openapi.libgds.models.awss3_object_store_setting.AWSS3ObjectStoreSetting(
                    bucket_name='012',
                    key_prefix='gds-volumes/', ),
                # platform_credentials_name = '0',
                secret_name='test',
            )

    def testObjectStoreSettings(self):
        """Construction smoke test: both variants must build without raising."""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
# Allow running this test module directly with the stdlib runner.
if __name__ == '__main__':
    unittest.main()
| 33.096774 | 124 | 0.656433 |
aceab85ec2bf10f3c6ec35768c5a4a6de491d2d6 | 10,327 | gyp | Python | ui/aura/aura.gyp | sunjc53yy/chromium | 049b380040949089c2a6e447b0cd0ac3c4ece38e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | ui/aura/aura.gyp | sunjc53yy/chromium | 049b380040949089c2a6e447b0cd0ac3c4ece38e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | ui/aura/aura.gyp | sunjc53yy/chromium | 049b380040949089c2a6e447b0cd0ac3c4ece38e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
# GN version: //ui/aura
'target_name': 'aura',
'type': '<(component)',
'dependencies': [
'../../base/base.gyp:base',
'../../base/base.gyp:base_i18n',
'../../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../../skia/skia.gyp:skia',
'../base/ui_base.gyp:ui_base',
'../compositor/compositor.gyp:compositor',
'../events/events.gyp:events',
'../events/events.gyp:events_base',
'../events/platform/events_platform.gyp:events_platform',
'../gfx/gfx.gyp:gfx',
'../gfx/gfx.gyp:gfx_geometry',
],
'defines': [
'AURA_IMPLEMENTATION',
],
'sources': [
'client/aura_constants.cc',
'client/aura_constants.h',
'client/capture_client.cc',
'client/capture_client.h',
'client/capture_delegate.h',
'client/cursor_client.cc',
'client/cursor_client.h',
'client/cursor_client_observer.h',
'client/cursor_client_observer.cc',
'client/default_capture_client.cc',
'client/default_capture_client.h',
'client/event_client.cc',
'client/event_client.h',
'client/focus_change_observer.cc',
'client/focus_change_observer.h',
'client/focus_client.cc',
'client/focus_client.h',
'client/screen_position_client.cc',
'client/screen_position_client.h',
'client/visibility_client.cc',
'client/visibility_client.h',
'client/window_stacking_client.cc',
'client/window_stacking_client.h',
'client/window_tree_client.cc',
'client/window_tree_client.h',
'env.cc',
'env.h',
'env_observer.h',
'input_state_lookup.cc',
'input_state_lookup.h',
'input_state_lookup_win.cc',
'input_state_lookup_win.h',
'layout_manager.cc',
'layout_manager.h',
'remote_window_tree_host_win.cc',
'remote_window_tree_host_win.h',
'scoped_window_targeter.cc',
'scoped_window_targeter.h',
'window.cc',
'window.h',
'window_event_dispatcher.cc',
'window_event_dispatcher.h',
'window_delegate.h',
'window_layer_type.h',
'window_observer.cc',
'window_observer.h',
'window_targeter.cc',
'window_targeter.h',
'window_tracker.cc',
'window_tracker.h',
'window_tree_host.cc',
'window_tree_host.h',
'window_tree_host_mac.mm',
'window_tree_host_mac.h',
'window_tree_host_observer.h',
'window_tree_host_ozone.cc',
'window_tree_host_ozone.h',
'window_tree_host_win.cc',
'window_tree_host_win.h',
'window_tree_host_x11.cc',
'window_tree_host_x11.h',
'../wm/public/activation_change_observer.h',
'../wm/public/activation_change_observer.cc',
'../wm/public/activation_client.cc',
'../wm/public/activation_client.h',
'../wm/public/activation_delegate.cc',
'../wm/public/activation_delegate.h',
'../wm/public/animation_host.cc',
'../wm/public/animation_host.h',
'../wm/public/dispatcher_client.cc',
'../wm/public/dispatcher_client.h',
'../wm/public/drag_drop_client.cc',
'../wm/public/drag_drop_client.h',
'../wm/public/drag_drop_delegate.cc',
'../wm/public/drag_drop_delegate.h',
'../wm/public/scoped_tooltip_disabler.cc',
'../wm/public/scoped_tooltip_disabler.h',
'../wm/public/tooltip_client.cc',
'../wm/public/tooltip_client.h',
'../wm/public/transient_window_client.cc',
'../wm/public/transient_window_client.h',
'../wm/public/window_move_client.cc',
'../wm/public/window_move_client.h',
'../wm/public/window_types.h',
],
'conditions': [
['use_x11==1', {
'dependencies': [
'../../build/linux/system.gyp:x11',
'../../build/linux/system.gyp:xrandr',
'../../build/linux/system.gyp:xi',
'../events/platform/x11/x11_events_platform.gyp:x11_events_platform',
],
}],
['OS=="win"', {
'dependencies': [
'../metro_viewer/metro_viewer.gyp:metro_viewer_messages',
'../platform_window/win/win_window.gyp:win_window',
'../../ipc/ipc.gyp:ipc',
],
'sources!': [
'input_state_lookup.cc',
],
}],
['use_ozone==1', {
'dependencies': [
'../events/ozone/events_ozone.gyp:events_ozone',
'../ozone/ozone.gyp:ozone',
'../ozone/ozone.gyp:ozone_base',
],
}],
],
},
{
# GN version: //ui/aura:test_support
'target_name': 'aura_test_support',
'type': 'static_library',
'dependencies': [
'../../skia/skia.gyp:skia',
'../../testing/gtest.gyp:gtest',
'../base/ui_base.gyp:ui_base',
'../base/ui_base.gyp:ui_base_test_support',
'../compositor/compositor.gyp:compositor_test_support',
'../events/events.gyp:events',
'../events/events.gyp:events_base',
'../events/events.gyp:events_test_support',
'../gfx/gfx.gyp:gfx',
'../gfx/gfx.gyp:gfx_geometry',
'aura',
],
'include_dirs': [
'..',
],
'sources': [
'test/aura_test_base.cc',
'test/aura_test_base.h',
'test/aura_test_helper.cc',
'test/aura_test_helper.h',
'test/aura_test_utils.cc',
'test/aura_test_utils.h',
'test/env_test_helper.h',
'test/event_generator_delegate_aura.cc',
'test/event_generator_delegate_aura.h',
'test/test_cursor_client.cc',
'test/test_cursor_client.h',
'test/test_focus_client.cc',
'test/test_focus_client.h',
'test/test_screen.cc',
'test/test_screen.h',
'test/test_window_tree_client.cc',
'test/test_window_tree_client.h',
'test/test_windows.cc',
'test/test_windows.h',
'test/test_window_delegate.cc',
'test/test_window_delegate.h',
'test/ui_controls_factory_aura.h',
'test/ui_controls_factory_aurawin.cc',
'test/ui_controls_factory_aurax11.cc',
'test/ui_controls_factory_ozone.cc',
'test/window_event_dispatcher_test_api.cc',
'test/window_event_dispatcher_test_api.h',
'test/window_test_api.cc',
'test/window_test_api.h',
'test/x11_event_sender.cc',
'test/x11_event_sender.h',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
},
{
'target_name': 'aura_demo',
'type': 'executable',
'dependencies': [
'../../base/base.gyp:base',
'../../base/base.gyp:base_i18n',
'../../skia/skia.gyp:skia',
'../../third_party/icu/icu.gyp:icui18n',
'../../third_party/icu/icu.gyp:icuuc',
'../base/ui_base.gyp:ui_base',
'../compositor/compositor.gyp:compositor',
'../compositor/compositor.gyp:compositor_test_support',
'../events/events.gyp:events',
'../gfx/gfx.gyp:gfx',
'../gfx/gfx.gyp:gfx_geometry',
'aura',
'aura_test_support',
],
'include_dirs': [
'..',
],
'sources': [
'demo/demo_main.cc',
],
'conditions': [
['use_x11==1', {
'dependencies': [
'../gfx/x/gfx_x11.gyp:gfx_x11',
],
}],
]
},
{
'target_name': 'aura_bench',
'type': 'executable',
'dependencies': [
'../../base/base.gyp:base',
'../../base/base.gyp:base_i18n',
'../../cc/cc.gyp:cc',
'../../gpu/gpu.gyp:gles2_implementation',
'../../skia/skia.gyp:skia',
'../../third_party/icu/icu.gyp:icui18n',
'../../third_party/icu/icu.gyp:icuuc',
'../base/ui_base.gyp:ui_base',
'../compositor/compositor.gyp:compositor',
'../compositor/compositor.gyp:compositor_test_support',
'../events/events.gyp:events',
'../gfx/gfx.gyp:gfx',
'../gfx/gfx.gyp:gfx_geometry',
'aura',
'aura_test_support',
],
'include_dirs': [
'..',
],
'sources': [
'bench/bench_main.cc',
],
'conditions': [
['use_x11==1', {
'dependencies': [
'../gfx/x/gfx_x11.gyp:gfx_x11',
],
}],
]
},
{
'target_name': 'aura_unittests',
'type': 'executable',
'dependencies': [
'../../base/base.gyp:test_support_base',
'../../skia/skia.gyp:skia',
'../../testing/gtest.gyp:gtest',
'../base/ui_base.gyp:ui_base',
'../base/ui_base.gyp:ui_base_test_support',
'../compositor/compositor.gyp:compositor',
'../compositor/compositor.gyp:compositor_test_support',
'../events/events.gyp:events',
'../events/events.gyp:events_base',
'../events/events.gyp:gesture_detection',
'../gfx/gfx.gyp:gfx',
'../gfx/gfx.gyp:gfx_geometry',
'../gl/gl.gyp:gl',
'aura_test_support',
'aura',
],
'include_dirs': [
'..',
],
'sources': [
'gestures/gesture_recognizer_unittest.cc',
'window_event_dispatcher_unittest.cc',
'test/run_all_unittests.cc',
'window_targeter_unittest.cc',
'window_unittest.cc',
],
'conditions': [
# osmesa GL implementation is used on linux.
['OS=="linux"', {
'dependencies': [
'<(DEPTH)/third_party/mesa/mesa.gyp:osmesa',
],
}],
['OS=="linux" and use_allocator!="none"', {
'dependencies': [
# See http://crbug.com/162998#c4 for why this is needed.
'../../base/allocator/allocator.gyp:allocator',
],
}],
],
},
],
}
| 32.99361 | 97 | 0.551951 |
aceaba7f92a9df578087380861c2673d49c77a3c | 397 | py | Python | HelloWorld/HelloWorld/wsgi.py | Agoriro/HelloWorld | 95ed8e72cb4a49907d420295403e510ac18abc92 | [
"MIT"
] | null | null | null | HelloWorld/HelloWorld/wsgi.py | Agoriro/HelloWorld | 95ed8e72cb4a49907d420295403e510ac18abc92 | [
"MIT"
] | null | null | null | HelloWorld/HelloWorld/wsgi.py | Agoriro/HelloWorld | 95ed8e72cb4a49907d420295403e510ac18abc92 | [
"MIT"
] | null | null | null | """
WSGI config for HelloWorld project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module unless the environment
# already specifies one.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'HelloWorld.settings')

# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| 23.352941 | 78 | 0.788413 |
aceabab9c87876f4346e8159d9c3e2c542be34e1 | 798 | py | Python | goldenbraid/tests/test_sbol.py | bioinfcomav/goldebraid | 7a50c9c4e65308fb51abf4f236457d12e9d028d6 | [
"Apache-2.0"
] | null | null | null | goldenbraid/tests/test_sbol.py | bioinfcomav/goldebraid | 7a50c9c4e65308fb51abf4f236457d12e9d028d6 | [
"Apache-2.0"
] | null | null | null | goldenbraid/tests/test_sbol.py | bioinfcomav/goldebraid | 7a50c9c4e65308fb51abf4f236457d12e9d028d6 | [
"Apache-2.0"
] | null | null | null | import os
from django.test import TestCase
from Bio.SeqIO import read
import goldenbraid
from goldenbraid.tests.test_fixtures import FIXTURES_TO_LOAD, FIXTURES_TO_LOAD5
from goldenbraid.sbol import convert_to_sbol
# from goldenbraid.models import Feature
# Directory holding the on-disk test fixtures:
# <parent of the goldenbraid package>/goldenbraid/tests/data
TEST_DATA = os.path.join(os.path.split(goldenbraid.__path__[0])[0],
                         'goldenbraid', 'tests', 'data')
class TestSBOL(TestCase):
    """Checks that a GenBank record can be converted to SBOL XML."""
    # fixtures = FIXTURES_TO_LOAD5  # DB fixtures disabled; this test only reads a flat file
    multi_db = True

    def test_sbol(self):
        """Convert the pEGBAn11 GenBank fixture to SBOL and write it out."""
        seq = read(os.path.join(TEST_DATA, 'pEGBAn11.gb'), 'gb')
        # Context manager guarantees the handle is closed even if
        # convert_to_sbol() raises (the original leaked it on error).
        with open('/tmp/sbol.xml', 'w') as fhand:
            fhand.write(convert_to_sbol(seq))
| 30.692308 | 79 | 0.685464 |
aceabade5874c46fb13de05ca76b2a21bed1c203 | 14,754 | py | Python | modules/zenmapCore_Kvasir/UmitOptionParser.py | noraj/Kvasir | a5b3775184a8343240e1154a1f762f75df04dc0a | [
"BSD-3-Clause"
] | 194 | 2015-01-04T23:06:42.000Z | 2022-02-11T17:39:28.000Z | modules/zenmapCore_Kvasir/UmitOptionParser.py | The-L0st-Technocrat/Kvasir | ac04512546685cbe814d3eabf644ee3161b6052c | [
"BSD-3-Clause"
] | 26 | 2015-01-02T19:15:39.000Z | 2020-11-11T17:58:34.000Z | modules/zenmapCore_Kvasir/UmitOptionParser.py | The-L0st-Technocrat/Kvasir | ac04512546685cbe814d3eabf644ee3161b6052c | [
"BSD-3-Clause"
] | 65 | 2015-01-19T08:30:51.000Z | 2020-12-28T23:53:31.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ***********************IMPORTANT NMAP LICENSE TERMS************************
# * *
# * The Nmap Security Scanner is (C) 1996-2013 Insecure.Com LLC. Nmap is *
# * also a registered trademark of Insecure.Com LLC. This program is free *
# * software; you may redistribute and/or modify it under the terms of the *
# * GNU General Public License as published by the Free Software *
# * Foundation; Version 2 ("GPL"), BUT ONLY WITH ALL OF THE CLARIFICATIONS *
# * AND EXCEPTIONS DESCRIBED HEREIN. This guarantees your right to use, *
# * modify, and redistribute this software under certain conditions. If *
# * you wish to embed Nmap technology into proprietary software, we sell *
# * alternative licenses (contact sales@nmap.com). Dozens of software *
# * vendors already license Nmap technology such as host discovery, port *
# * scanning, OS detection, version detection, and the Nmap Scripting *
# * Engine. *
# * *
# * Note that the GPL places important restrictions on "derivative works", *
# * yet it does not provide a detailed definition of that term. To avoid *
# * misunderstandings, we interpret that term as broadly as copyright law *
# * allows. For example, we consider an application to constitute a *
# * derivative work for the purpose of this license if it does any of the *
# * following with any software or content covered by this license *
# * ("Covered Software"): *
# * *
# * o Integrates source code from Covered Software. *
# * *
# * o Reads or includes copyrighted data files, such as Nmap's nmap-os-db *
# * or nmap-service-probes. *
# * *
# * o Is designed specifically to execute Covered Software and parse the *
# * results (as opposed to typical shell or execution-menu apps, which will *
# * execute anything you tell them to). *
# * *
# * o Includes Covered Software in a proprietary executable installer. The *
# * installers produced by InstallShield are an example of this. Including *
# * Nmap with other software in compressed or archival form does not *
# * trigger this provision, provided appropriate open source decompression *
# * or de-archiving software is widely available for no charge. For the *
# * purposes of this license, an installer is considered to include Covered *
# * Software even if it actually retrieves a copy of Covered Software from *
# * another source during runtime (such as by downloading it from the *
# * Internet). *
# * *
# * o Links (statically or dynamically) to a library which does any of the *
# * above. *
# * *
# * o Executes a helper program, module, or script to do any of the above. *
# * *
# * This list is not exclusive, but is meant to clarify our interpretation *
# * of derived works with some common examples. Other people may interpret *
# * the plain GPL differently, so we consider this a special exception to *
# * the GPL that we apply to Covered Software. Works which meet any of *
# * these conditions must conform to all of the terms of this license, *
# * particularly including the GPL Section 3 requirements of providing *
# * source code and allowing free redistribution of the work as a whole. *
# * *
# * As another special exception to the GPL terms, Insecure.Com LLC grants *
# * permission to link the code of this program with any version of the *
# * OpenSSL library which is distributed under a license identical to that *
# * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
# * linked combinations including the two. *
# * *
# * Any redistribution of Covered Software, including any derived works, *
# * must obey and carry forward all of the terms of this license, including *
# * obeying all GPL rules and restrictions. For example, source code of *
# * the whole work must be provided and free redistribution must be *
# * allowed. All GPL references to "this License", are to be treated as *
# * including the terms and conditions of this license text as well. *
# * *
# * Because this license imposes special exceptions to the GPL, Covered *
# * Work may not be combined (even as part of a larger work) with plain GPL *
# * software. The terms, conditions, and exceptions of this license must *
# * be included as well. This license is incompatible with some other open *
# * source licenses as well. In some cases we can relicense portions of *
# * Nmap or grant special permissions to use it in other open source *
# * software. Please contact fyodor@nmap.org with any such requests. *
# * Similarly, we don't incorporate incompatible open source software into *
# * Covered Software without special permission from the copyright holders. *
# * *
# * If you have any questions about the licensing restrictions on using *
# * Nmap in other works, are happy to help. As mentioned above, we also *
# * offer alternative license to integrate Nmap into proprietary *
# * applications and appliances. These contracts have been sold to dozens *
# * of software vendors, and generally include a perpetual license as well *
# * as providing for priority support and updates. They also fund the *
# * continued development of Nmap. Please email sales@nmap.com for further *
# * information. *
# * *
# * If you have received a written license agreement or contract for *
# * Covered Software stating terms other than these, you may choose to use *
# * and redistribute Covered Software under those terms instead of these. *
# * *
# * Source is provided to this software because we believe users have a *
# * right to know exactly what a program is going to do before they run it. *
# * This also allows you to audit the software for security holes (none *
# * have been found so far). *
# * *
# * Source code also allows you to port Nmap to new platforms, fix bugs, *
# * and add new features. You are highly encouraged to send your changes *
# * to the dev@nmap.org mailing list for possible incorporation into the *
# * main distribution. By sending these changes to Fyodor or one of the *
# * Insecure.Org development mailing lists, or checking them into the Nmap *
# * source code repository, it is understood (unless you specify otherwise) *
# * that you are offering the Nmap Project (Insecure.Com LLC) the *
# * unlimited, non-exclusive right to reuse, modify, and relicense the *
# * code. Nmap will always be available Open Source, but this is important *
# * because the inability to relicense code has caused devastating problems *
# * for other Free Software projects (such as KDE and NASM). We also *
# * occasionally relicense the code to third parties as discussed above. *
# * If you wish to specify special license conditions of your *
# * contributions, just say so when you send them. *
# * *
# * This program is distributed in the hope that it will be useful, but *
# * WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Nmap *
# * license file for more details (it's in a COPYING file included with *
# * Nmap, and also available from https://svn.nmap.org/nmap/COPYING *
# * *
# ***************************************************************************/
import re
from optparse import OptionParser
from zenmapCore_Kvasir.Name import APP_NAME, NMAP_DISPLAY_NAME
from zenmapCore_Kvasir.Version import VERSION
import zenmapCore_Kvasir.I18N
from zenmapCore_Kvasir.BasePaths import base_paths
class UmitOptionParser(OptionParser):
    """Command-line option parser for the Zenmap GUI.

    All supported options are registered in ``__init__`` and the given
    argument list (or ``sys.argv`` when no explicit ``args`` is passed)
    is parsed immediately, leaving the results in ``self.options`` and
    ``self.args``.  The ``get_*`` accessors below normalize the parsed
    values for callers.
    """

    def __init__(self, args=False):
        """Register every option and parse ``args`` (or ``sys.argv``)."""
        OptionParser.__init__(self, version="%%prog %s" % VERSION)
        self.set_usage("%prog [options] [result files]")

        self.add_option("--confdir",
            default=base_paths["user_config_dir"],
            dest="confdir",
            metavar="DIR",
            help=_("\
Use DIR as the user configuration directory. Default: %default"))

        ## Open Scan Results (GUI)
        ### Run, opening the specified scan result file, which should be
        ### a nmap XML output file.
        ### This option should be verified if there is no options, and user
        ### specified some positional arguments, which should be considered as
        ### scan result files.
        self.add_option("-f", "--file",
            default=[],
            action="append",
            type="string",
            dest="result_files",
            help=_("Specify a scan result file in Nmap XML output \
format. Can be used more than once to specify several \
scan result files."))

        ## Run nmap with args (GUI)
        ### Open and run nmap with specified args. The positional
        ### args should be used to feed the nmap command
        self.add_option("-n", "--nmap",
            default=[],
            action="callback",
            callback=self.__nmap_callback,
            help=_("Run %s with the specified args.") % NMAP_DISPLAY_NAME)

        ## Execute a profile against a target (GUI)
        ### Positional args should be taken as targets to feed this scan
        self.add_option("-p", "--profile",
            default="",
            action="store",
            help=_("Begin with the specified profile \
selected. If combined with the -t (--target) option, \
automatically run the profile against the specified target."))

        ## Targets (GUI)
        ### Specify a target to be used along with other command line option
        ### or simply opens with the first tab target field filled with
        ### the target specified with this option
        self.add_option("-t", "--target",
            default=False,
            action="store",
            help=_("Specify a target to be used along with other \
options. If specified alone, open with the target field filled with the \
specified target"))

        ## Verbosity
        self.add_option("-v", "--verbose",
            default=0,
            action="count",
            help=_("Increase verbosity of the output. May be \
used more than once to get even more verbosity"))

        # Parsing options and arguments
        if args:
            self.options, self.args = self.parse_args(args)
        else:
            self.options, self.args = self.parse_args()

    def __nmap_callback(self, option, opt_str, value, parser):
        """optparse callback for -n/--nmap: consume every remaining
        command-line argument and store the list as ``values.nmap``."""
        nmap_args = []
        # Iterate over next arguments that were passed at the command line
        # that wasn't parsed yet.
        while parser.rargs:
            # Store the next argument in a specific list
            nmap_args.append(parser.rargs[0])
            # Remove the added argument from rargs to avoid its later
            # parsing by optparse
            del parser.rargs[0]
        # Set the variable nmap at parser.values, so you may call option.nmap
        # and have the nmap_args as result
        setattr(parser.values, "nmap", nmap_args)

    def get_confdir(self):
        """Return the user configuration directory."""
        return self.options.confdir

    def get_nmap(self):
        """Return a list of nmap arguments or False if this option was not
        called by the user"""
        try:
            nmap = self.options.nmap
        except AttributeError:
            return False
        if nmap:
            return nmap
        # BUG FIX: the original implementation fell off the end here and
        # implicitly returned None when the option value was empty; return
        # False explicitly, as the docstring promises.
        return False

    def get_profile(self):
        """Return a string with the profile name, or False if no profile
        option was specified by the user"""
        if self.options.profile != "":
            return self.options.profile
        return False

    def get_target(self):
        """Returns a string with the target specified, or False if this option
        was not called by the user"""
        return self.options.target

    def get_open_results(self):
        """Returns a list of strings with the name of the files specified with
        the -f (--file) option and every positional argument."""
        files = []
        # Add arguments given with -f.
        if self.options.result_files:
            files = self.options.result_files[:]
        # Add any other arguments.
        files += self.args
        return files

    def get_verbose(self):
        """Returns an integer representing the verbosity level of the
        application. Verbosity level starts in 40, which means that only
        messages above the ERROR level are going to be reported at the output.
        As this value gets lower, the verbosity increases.
        """
        return 40 - (self.options.verbose * 10)
# Module-level singleton parser shared by importers of this module.
option_parser = UmitOptionParser()
if __name__ == "__main__":
    # Manual smoke test: parse sys.argv with a fresh parser instance.
    opt = UmitOptionParser()
    options, args = opt.parse_args()
| 55.052239 | 86 | 0.569269 |
aceabd156ff1a68bdbb4bf500d0718e6167241ee | 2,892 | py | Python | examples/core_examples/radioactive_decay.py | Samanwaya1301/bilby-BHNS | 5670212daf48e5c54c26e1978f3e0ff66c5c3f83 | [
"MIT"
] | null | null | null | examples/core_examples/radioactive_decay.py | Samanwaya1301/bilby-BHNS | 5670212daf48e5c54c26e1978f3e0ff66c5c3f83 | [
"MIT"
] | null | null | null | examples/core_examples/radioactive_decay.py | Samanwaya1301/bilby-BHNS | 5670212daf48e5c54c26e1978f3e0ff66c5c3f83 | [
"MIT"
] | 1 | 2019-10-15T05:17:57.000Z | 2019-10-15T05:17:57.000Z | #!/usr/bin/env python
"""
An example of how to use bilby to perform parameter estimation for
non-gravitational wave data. In this case, fitting the half-life and
initial radionuclide number for Polonium 214.
"""
from __future__ import division
import bilby
import numpy as np
import matplotlib.pyplot as plt
from bilby.core.likelihood import PoissonLikelihood
from bilby.core.prior import LogUniform
# A few simple setup steps
label = 'radioactive_decay'
outdir = 'outdir'
bilby.utils.check_directory_exists_and_if_not_mkdir(outdir)
# generate a set of counts per minute for n_init atoms of
# Polonium 214 in atto-moles with a half-life of 20 minutes
n_avogadro = 6.02214078e23  # Avogadro's constant [atoms/mole]
halflife = 20  # half-life, in minutes
atto = 1e-18  # scale factor: 1 attomole = 1e-18 mole
n_init = 1e-19 / atto  # initial amount expressed in attomoles (0.1 attomole)
def decay_rate(delta_t, halflife, n_init):
    """
    Compute the expected decay rate of a radioactive substance over a
    sequence of consecutive time intervals (in minutes).

    Parameters
    ----------
    delta_t: float, array-like
        Length of each time interval, in minutes
    halflife: float
        Half-life of the atom, in minutes
    n_init: int, float
        Initial amount of the substance, in attomoles
    """
    # Interval edges: prepend t=0 to the cumulative interval lengths.
    edges = np.insert(np.cumsum(delta_t), 0, 0.0)
    # Convert the attomole amount to an absolute number of atoms
    # (atto and n_avogadro are module-level constants).
    atom_count = n_init * atto * n_avogadro
    # Surviving fraction at every edge; adjacent differences give the
    # fraction that decays inside each interval.
    survival = np.exp(-np.log(2) * (edges / halflife))
    return (survival[:-1] - survival[1:]) * atom_count / delta_t
# Now we define the injection parameters which we make simulated data with
injection_parameters = dict(halflife=halflife, n_init=n_init)
# These lines of code generate the fake data. Note the ** just unpacks the
# contents of the injection_parameters when calling the model function.
sampling_frequency = 1
time_duration = 300
time = np.arange(0, time_duration, 1 / sampling_frequency)
delta_t = np.diff(time)
rates = decay_rate(delta_t, **injection_parameters)
# get radioactive counts
# (one Poisson draw per interval simulates the detector's counting noise)
counts = np.random.poisson(rates)
theoretical = decay_rate(delta_t, **injection_parameters)
# We quickly plot the data to check it looks sensible
fig, ax = plt.subplots()
ax.semilogy(time[:-1], counts, 'o', label='data')
ax.semilogy(time[:-1], theoretical, '--r', label='signal')
ax.set_xlabel('time')
ax.set_ylabel('counts')
ax.legend()
fig.savefig('{}/{}_data.png'.format(outdir, label))
# Now lets instantiate a version of the Poisson Likelihood, giving it
# the time intervals, counts and rate model
likelihood = PoissonLikelihood(delta_t, counts, decay_rate)
# Make the prior
# (log-uniform priors span many decades around the injected values)
priors = dict()
priors['halflife'] = LogUniform(
    1e-5, 1e5, latex_label='$t_{1/2}$', unit='min')
priors['n_init'] = LogUniform(
    1e-25 / atto, 1e-10 / atto, latex_label='$N_0$', unit='attomole')
# And run sampler
result = bilby.run_sampler(
    likelihood=likelihood, priors=priors, sampler='dynesty',
    nlive=1000, sample='unif', injection_parameters=injection_parameters,
    outdir=outdir, label=label)
result.plot_corner()
aceabd9eac74f6cc5ac647c48ffbb75a11907686 | 16,956 | gyp | Python | third_party/webrtc/src/chromium/src/android_webview/android_webview.gyp | bopopescu/webrtc-streaming-node | 727a441204344ff596401b0253caac372b714d91 | [
"MIT"
] | 8 | 2016-02-08T11:59:31.000Z | 2020-05-31T15:19:54.000Z | third_party/webrtc/src/chromium/src/android_webview/android_webview.gyp | bopopescu/webrtc-streaming-node | 727a441204344ff596401b0253caac372b714d91 | [
"MIT"
] | 1 | 2021-05-05T11:11:31.000Z | 2021-05-05T11:11:31.000Z | third_party/webrtc/src/chromium/src/android_webview/android_webview.gyp | bopopescu/webrtc-streaming-node | 727a441204344ff596401b0253caac372b714d91 | [
"MIT"
] | 7 | 2016-02-09T09:28:14.000Z | 2020-07-25T19:03:36.000Z | # Copyright 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'variables': {
    'chromium_code': 1,
    # The '%' suffix marks a default that callers may override.
    'system_webview_package_name%': 'com.android.webview',
  },
  'targets': [
    # Generates/repacks the .pak resource and locale bundles, and renames the
    # V8 snapshot/natives blobs with a 32/64-bit architecture suffix.
    {
      'target_name': 'android_webview_pak',
      'type': 'none',
      'dependencies': [
        '<(DEPTH)/content/app/resources/content_resources.gyp:content_resources',
        '<(DEPTH)/net/net.gyp:net_resources',
        '<(DEPTH)/third_party/WebKit/public/blink_resources.gyp:blink_resources',
        '<(DEPTH)/ui/resources/ui_resources.gyp:ui_resources',
      ],
      'variables': {
        'conditions': [
          ['target_arch=="arm" or target_arch=="ia32" or target_arch=="mipsel"', {
            'arch_suffix':'32'
          }],
          ['target_arch=="arm64" or target_arch=="x64" or target_arch=="mips64el"', {
            'arch_suffix':'64'
          }],
        ],
        'grit_out_dir': '<(SHARED_INTERMEDIATE_DIR)/android_webview',
      },
      'actions': [
        {
          'action_name': 'generate_aw_resources',
          'variables': {
            'grit_grd_file': 'ui/aw_resources.grd',
          },
          'includes': [ '../build/grit_action.gypi' ],
        },
        {
          'action_name': 'repack_android_webview_pack',
          'variables': {
            'pak_inputs': [
              '<(SHARED_INTERMEDIATE_DIR)/blink/public/resources/blink_resources.pak',
              '<(SHARED_INTERMEDIATE_DIR)/blink/public/resources/blink_image_resources_100_percent.pak',
              '<(SHARED_INTERMEDIATE_DIR)/content/app/resources/content_resources_100_percent.pak',
              '<(SHARED_INTERMEDIATE_DIR)/content/content_resources.pak',
              '<(SHARED_INTERMEDIATE_DIR)/net/net_resources.pak',
              '<(SHARED_INTERMEDIATE_DIR)/ui/resources/ui_resources_100_percent.pak',
              '<(grit_out_dir)/aw_resources.pak',
            ],
            'pak_output': '<(PRODUCT_DIR)/android_webview_assets/webviewchromium.pak',
          },
          'includes': [ '../build/repack_action.gypi' ],
        },
        {
          'action_name': 'generate_aw_strings',
          'variables': {
            'grit_grd_file': 'ui/aw_strings.grd',
          },
          'includes': [ '../build/grit_action.gypi' ],
        },
        {
          'action_name': 'android_webview_repack_locales',
          'variables': {
            'repack_locales': 'tools/webview_repack_locales.py',
          },
          'inputs': [
            '<(repack_locales)',
            '<!@pymod_do_main(webview_repack_locales -i -p <(PRODUCT_DIR) -s <(SHARED_INTERMEDIATE_DIR) <(locales))'
          ],
          'outputs': [
            '<!@pymod_do_main(webview_repack_locales -o -p <(PRODUCT_DIR) -s <(SHARED_INTERMEDIATE_DIR) <(locales))'
          ],
          'action': [
            'python',
            '<(repack_locales)',
            '-p', '<(PRODUCT_DIR)',
            '-s', '<(SHARED_INTERMEDIATE_DIR)',
            '<@(locales)',
          ],
        },
        {
          'action_name': 'rename_snapshot_blob',
          'inputs': [
            '<(PRODUCT_DIR)/snapshot_blob.bin',
          ],
          'outputs': [
            '<(PRODUCT_DIR)/snapshot_blob_<(arch_suffix).bin',
          ],
          'action': [
            'python',
            '<(DEPTH)/build/cp.py',
            '<@(_inputs)',
            '<@(_outputs)',
          ],
        },
        {
          'action_name': 'rename_natives_blob',
          'inputs': [
            '<(PRODUCT_DIR)/natives_blob.bin',
          ],
          'outputs': [
            '<(PRODUCT_DIR)/natives_blob_<(arch_suffix).bin',
          ],
          'action': [
            'python',
            '<(DEPTH)/build/cp.py',
            '<@(_inputs)',
            '<@(_outputs)',
          ],
        },
      ],
    },
    # Wraps the per-locale .pak files as Android resources.
    {
      'target_name': 'android_webview_locale_paks',
      'type': 'none',
      'variables': {
        'locale_pak_files': [ '<@(webview_locales_input_paks)' ],
      },
      'includes': [
        'apk/system_webview_locales_paks.gypi',
        '../build/android/locale_pak_resources.gypi',
      ],
    },
    # Generates Java string resources from the .grd translation file.
    {
      'target_name': 'android_webview_strings_grd',
      'android_unmangled_name': 1,
      'type': 'none',
      'variables': {
        'grd_file': '../android_webview/java/strings/android_webview_strings.grd',
      },
      'includes': [
        '../build/java_strings_grd.gypi',
      ],
    },
    # Generates aw_version_info_values.h from VERSION/LASTCHANGE at build time.
    {
      'target_name': 'android_webview_version',
      'type': 'none',
      'direct_dependent_settings': {
        'include_dirs': [
          '<(SHARED_INTERMEDIATE_DIR)',
        ],
      },
      # Because generate_version generates a header, we must set the
      # hard_dependency flag.
      'hard_dependency': 1,
      'actions': [
        {
          'action_name': 'generate_version',
          'includes': [
            '../build/util/version.gypi',
          ],
          'variables': {
            'template_input_path': 'common/aw_version_info_values.h.version',
          },
          'inputs': [
            '<(version_py_path)',
            '<(template_input_path)',
            '<(version_path)',
            '<(lastchange_path)',
          ],
          'outputs': [
            '<(SHARED_INTERMEDIATE_DIR)/android_webview/common/aw_version_info_values.h',
          ],
          'action': [
            'python',
            '<(version_py_path)',
            '-f', '<(version_path)',
            '-f', '<(lastchange_path)',
            '<(template_input_path)',
            '<@(_outputs)',
          ],
          'message': 'Generating version information',
        },
      ],
    },
    # Core static library: the browser/renderer/common C++ implementation
    # shared by libwebviewchromium and the test targets.
    {
      'target_name': 'android_webview_common',
      'type': 'static_library',
      'dependencies': [
        '../android_webview/native/webview_native.gyp:webview_native',
        '../cc/cc.gyp:cc_surfaces',
        '../components/components.gyp:auto_login_parser',
        '../components/components.gyp:autofill_content_renderer',
        '../components/components.gyp:breakpad_host',
        '../components/components.gyp:cdm_browser',
        '../components/components.gyp:cdm_renderer',
        '../components/components.gyp:crash_component',
        '../components/components.gyp:data_reduction_proxy_core_browser',
        '../components/components.gyp:devtools_discovery',
        '../components/components.gyp:navigation_interception',
        '../components/components.gyp:printing_common',
        '../components/components.gyp:printing_browser',
        '../components/components.gyp:printing_renderer',
        '../components/components.gyp:visitedlink_browser',
        '../components/components.gyp:visitedlink_renderer',
        '../components/components.gyp:web_contents_delegate_android',
        '../content/content.gyp:content_app_both',
        '../content/content.gyp:content_browser',
        '../gin/gin.gyp:gin',
        '../gpu/blink/gpu_blink.gyp:gpu_blink',
        '../gpu/command_buffer/command_buffer.gyp:gles2_utils',
        '../gpu/gpu.gyp:command_buffer_service',
        '../gpu/gpu.gyp:gl_in_process_context',
        '../gpu/gpu.gyp:gles2_c_lib',
        '../gpu/gpu.gyp:gles2_implementation',
        '../gpu/skia_bindings/skia_bindings.gyp:gpu_skia_bindings',
        '../media/media.gyp:media',
        '../media/midi/midi.gyp:midi',
        '../printing/printing.gyp:printing',
        '../skia/skia.gyp:skia',
        '../third_party/WebKit/public/blink.gyp:blink',
        '../ui/events/events.gyp:gesture_detection',
        '../ui/gl/gl.gyp:gl',
        '../ui/shell_dialogs/shell_dialogs.gyp:shell_dialogs',
        '../v8/tools/gyp/v8.gyp:v8',
        'android_webview_pak',
        'android_webview_version',
        '../components/components.gyp:policy',
        '../components/components.gyp:policy_component',
        '../components/components.gyp:pref_registry',
        '../components/url_formatter/url_formatter.gyp:url_formatter',
      ],
      'include_dirs': [
        '..',
        '../skia/config',
        '<(SHARED_INTERMEDIATE_DIR)/ui/resources/',
      ],
      'sources': [
        'browser/aw_browser_context.cc',
        'browser/aw_browser_context.h',
        'browser/aw_browser_main_parts.cc',
        'browser/aw_browser_main_parts.h',
        'browser/aw_browser_permission_request_delegate.h',
        'browser/aw_browser_policy_connector.cc',
        'browser/aw_browser_policy_connector.h',
        'browser/aw_contents_client_bridge_base.cc',
        'browser/aw_contents_client_bridge_base.h',
        'browser/aw_content_browser_client.cc',
        'browser/aw_content_browser_client.h',
        'browser/aw_contents_io_thread_client.h',
        'browser/aw_cookie_access_policy.cc',
        'browser/aw_cookie_access_policy.h',
        'browser/aw_dev_tools_discovery_provider.cc',
        'browser/aw_dev_tools_discovery_provider.h',
        'browser/aw_download_manager_delegate.cc',
        'browser/aw_download_manager_delegate.h',
        'browser/aw_form_database_service.cc',
        'browser/aw_form_database_service.h',
        'browser/aw_gl_surface.cc',
        'browser/aw_gl_surface.h',
        'browser/aw_http_auth_handler_base.cc',
        'browser/aw_http_auth_handler_base.h',
        'browser/aw_javascript_dialog_manager.cc',
        'browser/aw_javascript_dialog_manager.h',
        'browser/aw_locale_manager.h',
        'browser/aw_login_delegate.cc',
        'browser/aw_login_delegate.h',
        'browser/aw_media_client_android.cc',
        'browser/aw_media_client_android.h',
        'browser/aw_message_port_message_filter.cc',
        'browser/aw_message_port_message_filter.h',
        'browser/aw_message_port_service.h',
        'browser/aw_permission_manager.cc',
        'browser/aw_permission_manager.h',
        'browser/aw_pref_store.cc',
        'browser/aw_pref_store.h',
        'browser/aw_print_manager.cc',
        'browser/aw_print_manager.h',
        'browser/aw_printing_message_filter.cc',
        'browser/aw_printing_message_filter.h',
        'browser/aw_quota_manager_bridge.cc',
        'browser/aw_quota_manager_bridge.h',
        'browser/aw_quota_permission_context.cc',
        'browser/aw_quota_permission_context.h',
        'browser/aw_render_thread_context_provider.cc',
        'browser/aw_render_thread_context_provider.h',
        'browser/aw_resource_context.cc',
        'browser/aw_resource_context.h',
        'browser/aw_ssl_host_state_delegate.cc',
        'browser/aw_ssl_host_state_delegate.h',
        'browser/aw_result_codes.h',
        'browser/aw_web_preferences_populater.cc',
        'browser/aw_web_preferences_populater.h',
        'browser/browser_view_renderer.cc',
        'browser/browser_view_renderer.h',
        'browser/browser_view_renderer_client.h',
        'browser/child_frame.cc',
        'browser/child_frame.h',
        'browser/deferred_gpu_command_service.cc',
        'browser/deferred_gpu_command_service.h',
        'browser/find_helper.cc',
        'browser/find_helper.h',
        'browser/hardware_renderer.cc',
        'browser/hardware_renderer.h',
        'browser/icon_helper.cc',
        'browser/icon_helper.h',
        'browser/input_stream.h',
        'browser/jni_dependency_factory.h',
        'browser/gl_view_renderer_manager.cc',
        'browser/gl_view_renderer_manager.h',
        'browser/net/android_stream_reader_url_request_job.cc',
        'browser/net/android_stream_reader_url_request_job.h',
        'browser/net/aw_http_user_agent_settings.h',
        'browser/net/aw_http_user_agent_settings.cc',
        'browser/net/aw_network_delegate.cc',
        'browser/net/aw_network_delegate.h',
        'browser/net/aw_request_interceptor.cc',
        'browser/net/aw_request_interceptor.h',
        'browser/net/aw_url_request_context_getter.cc',
        'browser/net/aw_url_request_context_getter.h',
        'browser/net/aw_url_request_job_factory.cc',
        'browser/net/aw_url_request_job_factory.h',
        'browser/net/aw_web_resource_response.h',
        'browser/net_disk_cache_remover.cc',
        'browser/net_disk_cache_remover.h',
        'browser/net/init_native_callback.h',
        'browser/net/input_stream_reader.cc',
        'browser/net/input_stream_reader.h',
        'browser/parent_compositor_draw_constraints.cc',
        'browser/parent_compositor_draw_constraints.h',
        'browser/parent_output_surface.cc',
        'browser/parent_output_surface.h',
        'browser/renderer_host/aw_render_view_host_ext.cc',
        'browser/renderer_host/aw_render_view_host_ext.h',
        'browser/renderer_host/aw_resource_dispatcher_host_delegate.cc',
        'browser/renderer_host/aw_resource_dispatcher_host_delegate.h',
        'browser/scoped_allow_wait_for_legacy_web_view_api.h',
        'browser/scoped_app_gl_state_restore.cc',
        'browser/scoped_app_gl_state_restore.h',
        'browser/shared_renderer_state.cc',
        'browser/shared_renderer_state.h',
        'common/android_webview_message_generator.cc',
        'common/android_webview_message_generator.h',
        'common/aw_content_client.cc',
        'common/aw_content_client.h',
        'common/aw_descriptors.h',
        'common/aw_hit_test_data.cc',
        'common/aw_hit_test_data.h',
        'common/aw_message_port_messages.h',
        'common/aw_resource.h',
        'common/aw_switches.cc',
        'common/aw_switches.h',
        'common/devtools_instrumentation.h',
        'common/render_view_messages.cc',
        'common/render_view_messages.h',
        'common/url_constants.cc',
        'common/url_constants.h',
        'crash_reporter/aw_microdump_crash_reporter.cc',
        'crash_reporter/aw_microdump_crash_reporter.h',
        'lib/aw_browser_dependency_factory_impl.cc',
        'lib/aw_browser_dependency_factory_impl.h',
        'lib/main/aw_main_delegate.cc',
        'lib/main/aw_main_delegate.h',
        'lib/main/webview_jni_onload.cc',
        'lib/main/webview_jni_onload.h',
        'public/browser/draw_gl.h',
        'renderer/aw_content_renderer_client.cc',
        'renderer/aw_content_renderer_client.h',
        'renderer/aw_content_settings_client.cc',
        'renderer/aw_content_settings_client.h',
        'renderer/aw_key_systems.cc',
        'renderer/aw_key_systems.h',
        'renderer/aw_message_port_client.cc',
        'renderer/aw_message_port_client.h',
        'renderer/aw_print_web_view_helper_delegate.cc',
        'renderer/aw_print_web_view_helper_delegate.h',
        'renderer/aw_render_process_observer.cc',
        'renderer/aw_render_process_observer.h',
        'renderer/aw_render_frame_ext.cc',
        'renderer/aw_render_frame_ext.h',
        'renderer/aw_render_view_ext.cc',
        'renderer/aw_render_view_ext.h',
        'renderer/print_render_frame_observer.cc',
        'renderer/print_render_frame_observer.h',
      ],
    },
    # Shared library entry point; the real settings live in the .gypi include.
    {
      'target_name': 'libwebviewchromium',
      'includes': [
        'libwebviewchromium.gypi',
      ],
    },
    # Java-side glue library plus its Android resources.
    {
      'target_name': 'android_webview_java',
      'type': 'none',
      'dependencies': [
        '../android_webview/native/webview_native.gyp:android_webview_aw_permission_request_resource',
        '../components/components.gyp:external_video_surface_java',
        '../components/components.gyp:navigation_interception_java',
        '../components/components.gyp:policy_java',
        '../components/components.gyp:web_contents_delegate_android_java',
        '../content/content.gyp:content_java',
        '../ui/android/ui_android.gyp:ui_java',
        'android_webview_locale_paks',
        'android_webview_strings_grd',
      ],
      'variables': {
        'java_in_dir': '../android_webview/java',
        'has_java_resources': 1,
        'R_package': 'org.chromium.android_webview',
        'R_package_relpath': 'org/chromium/android_webview',
        'android_manifest_path': '../android_webview/apk/java/AndroidManifest.xml', # for lint
      },
      'includes': [ '../build/java.gypi' ],
    },
    # Framework glue layer, built against the hidden-API framework jar.
    {
      'target_name': 'system_webview_glue_java',
      'variables': {
        'android_sdk_jar': '../third_party/android_platform/webview/frameworks_6.0.jar',
        'java_in_dir': 'glue/java',
      },
      'includes': [ 'apk/system_webview_glue_common.gypi' ],
    },
    # The installable SystemWebView.apk package.
    {
      'target_name': 'system_webview_apk',
      'dependencies': [
        'system_webview_glue_java',
      ],
      'variables': {
        'apk_name': 'SystemWebView',
        'android_sdk_jar': '../third_party/android_platform/webview/frameworks_6.0.jar',
        'java_in_dir': '../build/android/empty',
        'resource_dir': 'apk/java/res',
        'android_manifest_template_vars': ['package=<(system_webview_package_name)'],
      },
      'includes': [ 'apk/system_webview_apk_common.gypi' ],
    },
  ],
  # Test targets live in a separate .gypi shared with this file.
  'includes': [
    'android_webview_tests.gypi',
  ],
}
| 39.159353 | 116 | 0.615063 |
aceabdc249bea35ee73add238a2cae976a9dc2fc | 3,096 | py | Python | test_expecttest.py | johnpyp/ghstack | 64a1035cea2edb4676c50fffda06d2fda300474d | [
"MIT"
] | null | null | null | test_expecttest.py | johnpyp/ghstack | 64a1035cea2edb4676c50fffda06d2fda300474d | [
"MIT"
] | null | null | null | test_expecttest.py | johnpyp/ghstack | 64a1035cea2edb4676c50fffda06d2fda300474d | [
"MIT"
] | null | null | null | import doctest
import string
import textwrap
import unittest
from typing import Any, Dict, Tuple
import hypothesis
from hypothesis.strategies import (booleans, composite, integers, sampled_from,
text)
import ghstack.expecttest as expecttest
@composite
def text_lineno(draw: Any) -> Tuple[str, int]:
    """Hypothesis strategy: a text blob plus a valid 1-based line number."""
    blob = draw(text("a\n"))
    max_line = blob.count("\n") + 1
    chosen = draw(integers(min_value=1, max_value=max_line))
    return (blob, chosen)
class TestExpectTest(expecttest.TestCase):
    """Property-based and golden tests for the expecttest helper module."""
    @hypothesis.given(text_lineno())
    def test_nth_line_ref(self, t_lineno: Tuple[str, int]) -> None:
        """nth_line must agree with a straightforward reference implementation."""
        t, lineno = t_lineno
        hypothesis.event("lineno = {}".format(lineno))
        def nth_line_ref(src: str, lineno: int) -> int:
            # Reference: total length of everything before the requested line.
            xs = src.split("\n")[:lineno]
            xs[-1] = ''
            return len("\n".join(xs))
        self.assertEqual(expecttest.nth_line(t, lineno), nth_line_ref(t, lineno))
    @hypothesis.given(text(string.printable), booleans(), sampled_from(['"', "'"]))
    def test_replace_string_literal_roundtrip(self, t: str, raw: bool, quote: str) -> None:
        """Replacing the literal on line 2 must round-trip: the rewritten
        program's literal evaluates back to (the normalized) t, and the
        neighbouring literals are untouched."""
        if raw:
            hypothesis.assume(expecttest.ok_for_raw_triple_quoted_string(t, quote=quote))
        prog = """\
r = {r}{quote}placeholder{quote}
r2 = {r}{quote}placeholder2{quote}
r3 = {r}{quote}placeholder3{quote}
""".format(r='r' if raw else '', quote=quote * 3)
        new_prog = expecttest.replace_string_literal(textwrap.dedent(prog), 2, t)[0]
        ns: Dict[str, str] = {}
        exec(new_prog, ns)
        msg = "program was:\n{}".format(new_prog)
        self.assertEqual(ns['r'], 'placeholder', msg=msg)  # noqa: F821
        self.assertEqual(ns['r2'], expecttest.normalize_nl(t), msg=msg)  # noqa: F821
        self.assertEqual(ns['r3'], 'placeholder3', msg=msg)  # noqa: F821
    def test_sample(self) -> None:
        """Golden test: a sequence of edits applied through EditHistory, which
        adjusts later line numbers for lines inserted by earlier edits."""
        prog = r"""
single_single('''0''')
single_multi('''1''')
multi_single('''\
2
''')
multi_multi_less('''\
3
4
''')
multi_multi_same('''\
5
''')
multi_multi_more('''\
6
''')
"""
        # NB: These are the end of the statements, not beginning
        # TODO: Test other permutations of these edits
        edits = [(2, "a"),
                 (3, "b\n"),
                 (6, "c"),
                 (10, "d\n"),
                 (13, "e\n"),
                 (16, "f\ng\n")]
        history = expecttest.EditHistory()
        fn = 'not_a_real_file.py'
        for lineno, actual in edits:
            lineno = history.adjust_lineno(fn, lineno)
            prog, delta = expecttest.replace_string_literal(prog, lineno, actual)
            history.record_edit(fn, lineno, delta)
        self.assertExpected(prog, r"""
single_single('''a''')
single_multi('''\
b
''')
multi_single('''c''')
multi_multi_less('''\
d
''')
multi_multi_same('''\
e
''')
multi_multi_more('''\
f
g
''')
""")
def load_tests(loader: unittest.TestLoader, tests: unittest.TestSuite, ignore: Any) -> unittest.TestSuite:
    """unittest ``load_tests`` hook: also collect expecttest's doctests."""
    doctests = doctest.DocTestSuite(expecttest)
    tests.addTests(doctests)
    return tests
if __name__ == '__main__':
    # Allow running this file directly: `python test_expecttest.py`.
    unittest.main()
| 28.666667 | 106 | 0.599483 |
aceabf56324c0f803566b04e704ad5f49ff779ba | 305 | bzl | Python | interpreter.bzl | GabrielDougherty/bazel-rules_conda-pybind11_bazel-test | 3bd28a8db7bb00500728458752fb900618ca5c57 | [
"MIT-0"
] | null | null | null | interpreter.bzl | GabrielDougherty/bazel-rules_conda-pybind11_bazel-test | 3bd28a8db7bb00500728458752fb900618ca5c57 | [
"MIT-0"
] | null | null | null | interpreter.bzl | GabrielDougherty/bazel-rules_conda-pybind11_bazel-test | 3bd28a8db7bb00500728458752fb900618ca5c57 | [
"MIT-0"
] | null | null | null | '''interpreter'''
def interpreter(env_name):
    """Expose the conda environment's Python binary as a filegroup.

    Declares a filegroup named `<env_name>_interpreter` whose srcs are the
    environment's `bin/python` (Linux/macOS) and `python.exe` (Windows)
    labels inside the `@<env_name>` repository.
    """
    env_root = "@{}//:{}/".format(env_name, env_name)
    native.filegroup(
        name = env_name + "_interpreter",
        srcs = [env_root + "bin/python", env_root + "python.exe"],
    )
aceabfe2a24183e3a0b352c315793f2af2557581 | 538 | py | Python | nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py | abelalez/nipype | 878271bd906768f11c4cabd04e5d1895551ce8a7 | [
"Apache-2.0"
] | 8 | 2019-05-29T09:38:30.000Z | 2021-01-20T03:36:59.000Z | nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py | abelalez/nipype | 878271bd906768f11c4cabd04e5d1895551ce8a7 | [
"Apache-2.0"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py | abelalez/nipype | 878271bd906768f11c4cabd04e5d1895551ce8a7 | [
"Apache-2.0"
] | 1 | 2020-07-17T12:49:49.000Z | 2020-07-17T12:49:49.000Z | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..core import SEMLikeCommandLine
def test_SEMLikeCommandLine_inputs():
    # NOTE(review): this file is auto-generated by tools/checkspecs.py and
    # will be overwritten on regeneration; comments only, no code changes.
    input_map = dict(
        args=dict(argstr='%s', ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
    )
    # Every expected trait metadata pair must be present on the input spec.
    inputs = SEMLikeCommandLine.input_spec()
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
| 28.315789 | 66 | 0.644981 |
aceac06dd0dad8b7b84a565f34cc538359a8579f | 180 | py | Python | src/documentClassification/domain/Document.py | shaibachar/DocumentClassification | f592d1173d826e2b4e62deeba77cf58ec6f8095b | [
"MIT"
] | null | null | null | src/documentClassification/domain/Document.py | shaibachar/DocumentClassification | f592d1173d826e2b4e62deeba77cf58ec6f8095b | [
"MIT"
] | null | null | null | src/documentClassification/domain/Document.py | shaibachar/DocumentClassification | f592d1173d826e2b4e62deeba77cf58ec6f8095b | [
"MIT"
class Document(object):
    """Value object describing a document to classify.

    Attributes (set in __init__, no validation performed):
        name: identifier for the document (e.g. a file name).
        documentType: category/label assigned to the document.
        wordCount: number of words counted in the document.
    """

    def __init__(self, name, documentType, wordCount):
        self.name = name
        self.documentType = documentType
        self.wordCount = wordCount

    def __repr__(self):
        # Added for debuggability; purely additive, existing callers unaffected.
        return "Document(name=%r, documentType=%r, wordCount=%r)" % (
            self.name, self.documentType, self.wordCount)
| 25.714286 | 53 | 0.672222 |
aceac0a5d0ccfcb0cfdc539bc6a63018dcffc9b7 | 14,274 | py | Python | venv/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/basic_session_run_hooks.py | sunlum/Deep-Semantic-Space-NST | 468ac2590385f48e65df12c1a3c9db0ed8d49477 | [
"MIT"
] | 1 | 2021-06-10T23:59:44.000Z | 2021-06-10T23:59:44.000Z | tensorflow/contrib/learn/python/learn/basic_session_run_hooks.py | luyahan/tensorflow | a5f8f42cb29d890650c4ca5fac6654884856a8e1 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/learn/python/learn/basic_session_run_hooks.py | luyahan/tensorflow | a5f8f42cb29d890650c4ca5fac6654884856a8e1 | [
"Apache-2.0"
] | 1 | 2021-12-16T05:34:55.000Z | 2021-12-16T05:34:55.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Some common SessionRunHook classes.
@@
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import six
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import session_run_hook
from tensorflow.contrib.learn.python.learn.session_run_hook import SessionRunArgs
from tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
class LoggingTensorHook(session_run_hook.SessionRunHook):
  """Prints given tensors every N iteration.

  The tensors will be printed to the log, with `INFO` severity.
  """

  def __init__(self, tensors, every_n_iter=100):
    """Initializes a LoggingHook monitor.

    Args:
      tensors: `dict` of tag to tensors/names or
          `iterable` of tensors/names.
      every_n_iter: `int`, print every N iteration.
    """
    if isinstance(tensors, dict):
      self._tensors = tensors
    else:
      # Plain iterable of tensors/names: use each item as its own tag.
      self._tensors = dict((item, item) for item in tensors)
    self._every_n_iter = every_n_iter

  def begin(self):
    self._iter_count = 0
    # Resolve any string names to graph elements once, up front.
    self._current_tensors = {}
    for tag, tensor in self._tensors.items():
      self._current_tensors[tag] = _as_graph_element(tensor)

  def _should_log(self):
    # Logs on iterations 0, N, 2N, ...
    return self._iter_count % self._every_n_iter == 0

  def before_run(self, run_context):  # pylint: disable=unused-argument
    if self._should_log():
      return SessionRunArgs(self._current_tensors)
    return None

  def after_run(self, run_context, run_values):
    del run_context  # Unused.
    if self._should_log():
      formatted = ["%s = %s" % (tag, run_values.results[tag])
                   for tag in sorted(self._current_tensors.keys())]
      logging.info("%s", ", ".join(formatted))
    self._iter_count += 1
class StopAtStepHook(session_run_hook.SessionRunHook):
  """Monitor to request stop at a specified step."""

  def __init__(self, num_steps=None, last_step=None):
    """Create a StopAtStep Hook.

    This hook requests stop after either a number of steps have been
    executed or a last step has been reached. Exactly one of the two
    arguments may be given: `num_steps` counts steps executed after
    `begin()` is called, while `last_step` names the absolute global step
    (as seen by the `after_run()` call) at which to stop.

    Args:
      num_steps: Number of steps to execute.
      last_step: Step after which to stop.

    Raises:
      ValueError: If one of the arguments is invalid.
    """
    if num_steps is None:
      if last_step is None:
        raise ValueError("One of num_steps or last_step must be specified.")
    elif last_step is not None:
      raise ValueError("Only one of num_steps or last_step can be specified.")
    self._num_steps = num_steps
    self._last_step = last_step

  def begin(self):
    step_tensor = contrib_variables.get_global_step()
    if step_tensor is None:
      raise RuntimeError("Global step should be created to use StopAtStepHook.")
    self._global_step_tensor = step_tensor

  def before_run(self, run_context):  # pylint: disable=unused-argument
    # Fetch the global step so after_run can compare it to the stop step.
    return SessionRunArgs(self._global_step_tensor)

  def after_run(self, run_context, run_values):
    step = run_values.results
    if self._last_step is None:
      # First observed step: turn the relative num_steps budget into an
      # absolute last step.
      self._last_step = step + self._num_steps - 1
    if step >= self._last_step:
      run_context.request_stop()
class CheckpointSaverHook(session_run_hook.SessionRunHook):
  """Saves checkpoints every N steps or seconds."""
  def __init__(self,
               checkpoint_dir,
               save_secs=None,
               save_steps=None,
               saver=None,
               checkpoint_basename="model.ckpt",
               scaffold=None):
    """Initialize CheckpointSaverHook monitor.
    Args:
      checkpoint_dir: `str`, base directory for the checkpoint files.
      save_secs: `int`, save every N secs.
      save_steps: `int`, save every N steps.
      saver: `Saver` object, used for saving.
      checkpoint_basename: `str`, base name for the checkpoint files.
      scaffold: `Scaffold`, use to get saver object.
    Raises:
      ValueError: One of `save_steps` or `save_secs` should be set.
    """
    logging.info("Create CheckpointSaverHook")
    self._saver = saver
    self._checkpoint_dir = checkpoint_dir
    self._summary_writer = SummaryWriterCache.get(checkpoint_dir)
    self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
    self._scaffold = scaffold
    self._save_secs = save_secs
    self._save_steps = save_steps
    # Bookkeeping for the last checkpoint; None means "never saved yet".
    self._last_saved_time = None
    self._last_saved_step = None
    # Exactly one of save_steps / save_secs must be provided.
    if save_steps is None and save_secs is None:
      raise ValueError("Either save_steps or save_secs should be provided")
    if (save_steps is not None) and (save_secs is not None):
      raise ValueError("Can not provide both save_steps and save_secs.")
  def begin(self):
    # Reset save state so a reused hook starts fresh for each new session.
    self._last_saved_time = None
    self._last_saved_step = None
    self._global_step_tensor = contrib_variables.get_global_step()
    if self._global_step_tensor is None:
      raise RuntimeError(
          "Global step should be created to use CheckpointSaverHook.")
  def before_run(self, run_context): # pylint: disable=unused-argument
    if self._last_saved_time is None:
      # Write graph in the first call
      # (only once per session, before any checkpoint exists).
      training_util.write_graph(
          ops.get_default_graph().as_graph_def(add_shapes=True),
          self._checkpoint_dir,
          "graph.pbtxt")
      self._summary_writer.add_graph(ops.get_default_graph())
    # Fetch the global step alongside every run so after_run can decide.
    return SessionRunArgs(self._global_step_tensor)
  def after_run(self, run_context, run_values):
    global_step = run_values.results
    if self._last_saved_time is None:
      # Always save on the very first observed step.
      self._save(global_step, run_context.session)
    if self._save_steps is not None:
      if global_step >= self._last_saved_step + self._save_steps:
        self._save(global_step, run_context.session)
    if self._save_secs is not None:
      if time.time() >= self._last_saved_time + self._save_secs:
        self._save(global_step, run_context.session)
  def end(self, session):
    # Capture a final checkpoint at the last executed global step.
    last_step = session.run(contrib_variables.get_global_step())
    self._save(last_step, session)
  def _save(self, step, session):
    """Saves the latest checkpoint."""
    # Skip if this step was already checkpointed (e.g. end() after after_run).
    if step == self._last_saved_step:
      return
    logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
    self._last_saved_time = time.time()
    self._last_saved_step = step
    # Prefer an explicitly supplied saver; otherwise fall back to the scaffold's.
    if self._saver is None:
      self._scaffold.saver.save(session, self._save_path, global_step=step)
    else:
      self._saver.save(session, self._save_path, global_step=step)
    self._summary_writer.add_session_log(
        SessionLog(
            status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
        step)
class StepCounterHook(session_run_hook.SessionRunHook):
  """Steps per second monitor."""
  def __init__(self, every_n_steps=100, output_dir=None, summary_writer=None):
    self._summary_tag = "global_step/sec"
    self._every_n_steps = every_n_steps
    self._summary_writer = summary_writer
    # If no writer was supplied, obtain a (cached) one for the output dir.
    if summary_writer is None and output_dir:
      self._summary_writer = SummaryWriterCache.get(output_dir)
  def begin(self):
    # None marks "no report yet"; the first after_run only initializes state.
    self._last_reported_time = None
    self._last_reported_step = None
    self._global_step_tensor = contrib_variables.get_global_step()
    if self._global_step_tensor is None:
      raise RuntimeError(
          "Global step should be created to use StepCounterHook.")
  def before_run(self, run_context): # pylint: disable=unused-argument
    return SessionRunArgs(self._global_step_tensor)
  def after_run(self, run_context, run_values):
    _ = run_context
    # Without a writer there is nowhere to report to; do nothing.
    if not self._summary_writer:
      return
    global_step = run_values.results
    current_time = time.time()
    if self._last_reported_time is None:
      # First call: record the baseline, emit nothing.
      self._last_reported_step = global_step
      self._last_reported_time = current_time
    else:
      if global_step >= self._every_n_steps + self._last_reported_step:
        # Average rate over the whole interval since the last report.
        added_steps = global_step - self._last_reported_step
        elapsed_time = current_time - self._last_reported_time
        steps_per_sec = added_steps / elapsed_time
        summary = Summary(value=[Summary.Value(
            tag=self._summary_tag, simple_value=steps_per_sec)])
        self._summary_writer.add_summary(summary, global_step)
        self._last_reported_step = global_step
        self._last_reported_time = current_time
class NanLossDuringTrainingError(RuntimeError):
  """Raised when the training loss becomes NaN."""

  _MESSAGE = "NaN loss during training."

  def __str__(self):
    return self._MESSAGE
class NanTensorHook(session_run_hook.SessionRunHook):
  """Watches a loss tensor and reacts when it becomes NaN.

  Depending on `fail_on_nan_loss`, a NaN loss either raises
  `NanLossDuringTrainingError` or merely requests that training stop.
  """

  def __init__(self, loss_tensor, fail_on_nan_loss=True):
    """Create the NaN-loss monitor.

    Args:
      loss_tensor: `Tensor`, the loss tensor to watch.
      fail_on_nan_loss: `bool`, raise an exception on NaN loss when True;
        otherwise just request a graceful stop.
    """
    self._loss_tensor = loss_tensor
    self._fail_on_nan_loss = fail_on_nan_loss

  def before_run(self, run_context): # pylint: disable=unused-argument
    # Fetch the loss value alongside every training step.
    return SessionRunArgs(self._loss_tensor)

  def after_run(self, run_context, run_values):
    if not np.isnan(run_values.results):
      return
    failure_message = "Model diverged with loss = NaN."
    if self._fail_on_nan_loss:
      logging.error(failure_message)
      raise NanLossDuringTrainingError
    logging.warning(failure_message)
    # We don't raise an error but we request stop without an exception.
    run_context.request_stop()
class SummarySaverHook(session_run_hook.SessionRunHook):
  """Saves summaries every N steps."""
  def __init__(self,
               save_steps=100,
               output_dir=None,
               summary_writer=None,
               scaffold=None,
               summary_op=None):
    """Initializes a `SummarySaver` monitor.
    Args:
      save_steps: `int`, save summaries every N steps. See `EveryN`.
      output_dir: `string`, the directory to save the summaries to. Only used
          if no `summary_writer` is supplied.
      summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
          one will be created accordingly.
      scaffold: `Scaffold` to get summary_op if it's not provided.
      summary_op: `Tensor` of type `string`. A serialized `Summary` protocol
          buffer, as output by TF summary methods like `scalar_summary` or
          `merge_all_summaries`.
    """
    # TODO(ipolosukhin): Implement every N seconds.
    self._summary_op = summary_op
    self._summary_writer = summary_writer
    if summary_writer is None and output_dir:
      self._summary_writer = SummaryWriterCache.get(output_dir)
    self._scaffold = scaffold
    self._save_steps = save_steps
    # TODO(mdan): Throw an error if output_dir and summary_writer are None.
  def begin(self):
    # _request_summary controls whether before_run fetches the summary op;
    # requesting on the first run ensures a summary at the initial step.
    self._last_saved_step = None
    self._request_summary = True
    self._global_step_tensor = contrib_variables.get_global_step()
    if self._global_step_tensor is None:
      raise RuntimeError(
          "Global step should be created to use SummarySaverHook.")
  def before_run(self, run_context): # pylint: disable=unused-argument
    # Always fetch the global step; fetch the summary only when due.
    requests = {"global_step": self._global_step_tensor}
    if self._request_summary:
      # Prefer the explicit summary_op; fall back to the scaffold's op.
      if self._summary_op is not None:
        requests["summary"] = self._summary_op
      elif self._scaffold.summary_op is not None:
        requests["summary"] = self._scaffold.summary_op
    return SessionRunArgs(requests)
  def after_run(self, run_context, run_values):
    _ = run_context
    if not self._summary_writer:
      return
    global_step = run_values.results["global_step"]
    if self._last_saved_step is None:
      # First observed step: mark the start of the session in the event file.
      self._summary_writer.add_session_log(
          SessionLog(status=SessionLog.START), global_step)
    if self._request_summary:
      self._last_saved_step = global_step
      if "summary" in run_values.results:
        self._summary_writer.add_summary(run_values.results["summary"],
                                         global_step)
    # Decide whether the NEXT run should fetch a summary. The "- 1" requests
    # it one step early so the summary written corresponds to the due step.
    self._request_summary = (
        global_step >= self._last_saved_step + self._save_steps - 1)
  def end(self, session=None):
    # Make sure everything buffered reaches disk when the session finishes.
    if self._summary_writer:
      self._summary_writer.flush()
def _as_graph_element(obj):
  """Resolve *obj* to an element of the current default graph.

  Strings are looked up by name (":0" is appended to bare operation names,
  and names whose operation has multiple outputs are rejected as ambiguous);
  non-strings are returned as-is after verifying they belong to the default
  graph.
  """
  graph = ops.get_default_graph()
  if not isinstance(obj, six.string_types):
    # Already a graph object: just confirm it lives in the default graph.
    if not hasattr(obj, "graph") or obj.graph != graph:
      raise ValueError("Passed %s should have graph attribute that is equal "
                       "to current graph %s." % (obj, graph))
    return obj
  if ":" in obj:
    # A fully qualified tensor name: resolve it directly.
    return graph.as_graph_element(obj)
  element = graph.as_graph_element(obj + ":0")
  # Check that there is no :1 (e.g. it's single output).
  try:
    graph.as_graph_element(obj + ":1")
  except (KeyError, ValueError):
    return element
  raise ValueError("Name %s is ambiguous, "
                   "as this `Operation` has multiple outputs "
                   "(at least 2)." % obj)
| 36.045455 | 89 | 0.702396 |
aceac12bbabdfc73f8c3f5be72f9b6cf004609eb | 179 | py | Python | appstore/api/urls.py | helxplatform/appstore | 7df7e602582cb1783a918547608755325814394c | [
"MIT"
] | 1 | 2021-03-22T16:02:43.000Z | 2021-03-22T16:02:43.000Z | appstore/api/urls.py | helxplatform/appstore | 7df7e602582cb1783a918547608755325814394c | [
"MIT"
] | 34 | 2020-05-28T19:07:19.000Z | 2022-03-21T20:29:13.000Z | appstore/api/urls.py | helxplatform/appstore | 7df7e602582cb1783a918547608755325814394c | [
"MIT"
] | 2 | 2020-05-14T13:27:58.000Z | 2020-06-01T18:19:12.000Z | from django.conf.urls import re_path
from django.urls import include
from .v1.router import v1_urlpatterns
# Root URL configuration for the API app: everything under /api/v1/ is
# delegated to the version-1 router's URL patterns.
urlpatterns = [
    re_path(r"^api/v1/", include(v1_urlpatterns)),
]
| 17.9 | 50 | 0.75419 |
aceac18a6a30c28077d84a7589305d70c959eed6 | 8,197 | py | Python | external_algn_tools/bowtie_tools.py | costas821/seqtables | e4632898a912050fcd769d90e359fd6bee6d412b | [
"MIT"
] | 7 | 2017-07-24T18:06:23.000Z | 2021-06-03T18:34:54.000Z | external_algn_tools/bowtie_tools.py | cchrysostomou/seqtables | e4632898a912050fcd769d90e359fd6bee6d412b | [
"MIT"
] | 6 | 2018-07-10T18:28:40.000Z | 2021-06-01T23:15:45.000Z | external_algn_tools/bowtie_tools.py | costas821/seqtables | e4632898a912050fcd769d90e359fd6bee6d412b | [
"MIT"
] | 2 | 2017-01-13T19:03:16.000Z | 2018-02-28T21:02:09.000Z | import sys
import os
import subprocess
from subprocess import PIPE, Popen
from Bio import SeqIO
from datetime import datetime
import re
from seqtables.external_algn_tools.config import current_dir
# Select the bundled bowtie2 executable directory (and executable suffix)
# for the current platform.
if os.name == 'posix':
    # im on a linux machine
    bowtie_path = os.path.abspath(os.path.join(current_dir, 'executables', 'bowtie2-2.2.8-linux'))
    bowtie_suffix = ""
elif os.name == 'nt':
    # im on a windows machine
    bowtie_path = os.path.abspath(os.path.join(current_dir, 'executables', 'bowtie2-2.2.8-windows'))
    bowtie_suffix = ".exe"
elif os.name == 'mac':
    # NOTE(review): os.name is never 'mac' on modern Python (macOS reports
    # 'posix'), so this branch looks dead; it also leaves bowtie_path and
    # bowtie_suffix undefined -- confirm intended macOS support.
    print('oops ya')
else:
    raise Exception("I was not expecting this: " + os.name)
def build_reference(input_fasta, ref_name, ref_path=None):
    """Build a bowtie2 index for *input_fasta* using ``bowtie2-build``.

    Args:
        input_fasta: path to the FASTA file of reference sequences.
        ref_name: basename for the generated index files.
        ref_path: optional output directory for the index; defaults to the
            directory containing *input_fasta* (created if missing).

    Returns:
        Absolute path prefix of the generated index files.

    Raises:
        Exception: if *input_fasta* does not exist or bowtie2-build fails.
    """
    input_fasta = os.path.abspath(input_fasta)
    if not os.path.isfile(input_fasta):
        raise Exception('The provided input file does not exist')
    if ref_path is None:
        ref_name = os.path.join(os.path.dirname(input_fasta), os.path.basename(ref_name))
    else:
        if not os.path.isdir(ref_path):
            os.makedirs(ref_path)
        ref_name = os.path.join(ref_path, os.path.basename(ref_name))
    # With shell=False each argv element is passed to the program verbatim, so
    # paths must NOT be wrapped in quote characters and flags must not carry
    # whitespace (the previous "'{0}'" / ' --quiet' forms handed bowtie2-build
    # literally quoted paths and a leading-space argument).
    execute_call = [os.path.join(bowtie_path, "bowtie2-build"), input_fasta, ref_name, '--quiet']
    proc = Popen(execute_call, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    message, err = proc.communicate()
    if proc.returncode > 0:
        print('The following error was returned: ')
        print(err)
        raise Exception('Something went wrong when building the database')
    # ref_name is absolute at this point, so it already names the index prefix
    # (os.path.join with an absolute second argument returned it unchanged).
    return ref_name
def bowtie2(fastq_files, references, paired_seqs, sam_file_name, bowtie_ref_name=None, working_directory=None, include_phix=True, phix_filter=False, threads=2, options=[]):
    """
    Wrapper function for running bowtie2 from within python
    Args:
        fastq_files (list of strings): location of all the input seq files that will be aligned to references
        references (list of tuple OR a location to fasta file name):
            IF list of types then the format should be [(reference name, sequence), ...]
            IF string then assume that string refers to the location of a fasta file containing all reference sequences
        paired_seqs (boolean): If true then treat input files as R1/R2 FASTQ files
        sam_file_name (string): Name of the SAM file to be generated.
            ..note::
                The folder path should NOT be defined in this parameter. It should be defined in "working_directory" parameter
        bowtie_ref_name (string, default None): Name to refer to the reference index file generated by bowtie-build command. If None the filename will default to timestamp when function is called.
        working_directory (string, default None): Output path location to save the SAM file name. If None then default folder location will be the same as parent folder of input seq file
        include_phix (boolean, default True): If true then also include a search for phix in the input files
        phix_filter (boolean, default False): If true then after alignment, remove any sequences that show hits to phix control
        threads (int, default 2): Run bowtie using this many threads
        options (list of tuples): user can pass in any parameter/value pair recognized by bowtie program (i.e. [('--local'), ('--np', 10)])
    Returns:
        Location of the generated sam file
    """
    # remove comment line to allow for "fake" bowtie (debugging)
    # return os.path.abspath(os.path.join(working_directory, sam_file_name))
    if isinstance(references, str):
        # this is a horrible hack to allow refernces to be passed as a list or a link to a fasta file...consider cleaning up in the future
        barcodes = [[r.description, str(r.seq)] for r in SeqIO.parse(references, 'fasta')]
        references = barcodes
    output_ref_dir = working_directory
    if working_directory is None:
        # make the default working directory equal to input fastq file 1
        working_directory = os.path.dirname(fastq_files[0])
    if include_phix:
        # also include the phix sequence into bowtie
        # NOTE(review): this appends to the caller-supplied references list
        # (and to the shared mutable default `options=[]` is avoided here, but
        # the same aliasing caveat applies) -- confirm callers expect that.
        phix_fasta = os.path.join(current_dir, "phix.fasta")
        with open(phix_fasta) as r:
            r.readline()
            seq = ''.join([l.strip() for l in r])
        references.append(['phix', seq])
    if bowtie_ref_name is None:
        # make a default bowtie reference name based on timestamp
        bowtie_ref_name = re.sub("[\:\-\ \.]", "", str(datetime.now()))
    bowtie_ref_name = os.path.join(working_directory, os.path.basename(bowtie_ref_name))
    sam_file_name = os.path.join(working_directory, os.path.basename(sam_file_name))
    # write reference sequences as a fasta file (...yes seems slightly circuituous...consider improving)
    fastaname = bowtie_ref_name + '.fasta'
    if output_ref_dir is not None:
        if not os.path.isdir(output_ref_dir):
            os.makedirs(output_ref_dir)
        fastaname = os.path.join(output_ref_dir, fastaname)
    with open(fastaname, 'w') as out:
        for b in references:
            out.write('>{0}\n{1}\n'.format(b[0], b[1]))
    # run bowtie-build command
    ref = build_reference(fastaname, bowtie_ref_name, output_ref_dir)
    command_list = [os.path.join(bowtie_path, "bowtie2")]
    # add in user provided options
    if threads:
        command_list.extend(['--threads', threads])
    for o in options:
        if isinstance(o, tuple):
            command_list.extend([o[0], o[1]] if o[1] != '' else [o[0]])
        else:
            command_list.append(o)
    # add reference sequence
    command_list.extend(['-x', ref])
    # add input files (fastq/fasta/tab, etc)
    if paired_seqs is True:
        command_list.extend(['-1', os.path.abspath(fastq_files[0]), '-2', os.path.abspath(fastq_files[1])])
    else:
        for f in fastq_files:
            command_list.extend(['-U', os.path.abspath(f)])
    # add output SAM file name
    command_list.extend(['-S', os.path.abspath(sam_file_name)])
    command_list = [str(c) for c in command_list]
    # RUN subprocess
    proc = Popen(command_list, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    message, err = proc.communicate()
    if proc.returncode > 0:
        print('The following error was returned: ')
        print(err)
        # err is bytes (communicate() on a binary pipe); decode it before
        # concatenating into the message -- 'str' + 'bytes' raised TypeError
        # here, masking the real bowtie error.
        raise Exception('Something went wrong when running bowtie: ' + err.decode(errors='replace'))
    if not os.path.isfile(os.path.abspath(sam_file_name)):
        raise Exception('There was no error running the command, but the SAM file was not generated. Please look at parameters')
    print('bowtie successfully completed')
    print(err)
    return os.path.abspath(sam_file_name)
def remove_phix_sequences(fastq_files, result_prefix, threads=2, return_orignal_name=True, delete_sam=True, delete_original_files=False):
    """Filter PhiX control reads out of a paired-end FASTQ file pair.

    Runs bowtie2 against an empty reference (plus the built-in PhiX entry) and
    keeps only the read pairs that did NOT align concordantly, via bowtie2's
    --un-conc output. Returns the list of filtered FASTQ file paths.
    """
    # --un-conc makes bowtie2 write non-PhiX (unaligned) pairs to
    # result_prefix.1 / result_prefix.2.
    options = [
        ('--un-conc', result_prefix),
    ]
    if delete_original_files:
        return_orignal_name = True
    working_directory = os.path.dirname(fastq_files[0])
    refpath = os.path.join(working_directory, 'phix.ref')
    # Timestamp-based throwaway SAM name; the alignments themselves are unused.
    empty_sam = 'sam_file' + re.sub("[\:\-\ \.]", "", str(datetime.now())) + '.sam'
    bowtie2(fastq_files, references=[], sam_file_name=empty_sam, paired_seqs=True, working_directory=working_directory, include_phix=True, bowtie_ref_name=refpath, threads=threads, options=options)
    if delete_sam:
        os.remove(os.path.join(working_directory, empty_sam))
    if return_orignal_name:
        # Move the filtered outputs into place under the original filenames,
        # backing up (or deleting) the unfiltered inputs first.
        for i, f in enumerate(fastq_files):
            if delete_original_files:
                os.remove(f)
            else:
                os.rename(f, f + '.before-phix-filter.fastq')
            if i == 0:
                os.rename(result_prefix + '.1', f)
            elif i == 1:
                os.rename(result_prefix + '.2', f)
        return_files = fastq_files
    else:
        return_files = [result_prefix + '.1', result_prefix + '.2']
    return return_files
# Example/driver invocation kept around for manual testing; the actual calls
# are commented out below.
if __name__ == "__main__":
    # refs = [[s.description, str(s.seq)] for s in SeqIO.parse(sys.argv[1], 'fasta')]
    # NOTE(review): entries like ('--no-sq') are plain strings, not 1-tuples
    # (no trailing comma); the option loop in bowtie2() handles both forms,
    # so this works, but the inconsistent style is worth confirming.
    options = [
        ('--local', ''),
        ('--no-sq'),
        ('--very-sensitive-local'),
        ('--n-ceil', 'L,0,100'),
        ('--np', 0),
        ('--rfg', '20,4'),
        ('--rdg', '20,4'),
        ('--un-conc', 'discordinate_reads.sam'),
        ('--dovetail'),
        ('--no-discordant'),
        ('--no-mixed')
    ]
    # bowtie2(['testme/small_r1.fastq', 'testme/small_r2.fastq'], refs, True, 'out.sam', sys.argv[1] + '.ref', options=options, threads=4)
    # remove_phix_sequences(['testme/small_r1.fastq', 'testme/small_r2.fastq'], result_prefix='testme/stuff_goes_here')
| 38.848341 | 194 | 0.722459 |
aceac1bb1849d108e1731d0054ef0ea735b66b60 | 2,023 | py | Python | projects/water/image.py | kmckiern/scripts | acc8326ca653d804ee06752af9e7f5b011fc6e0e | [
"MIT"
] | 2 | 2015-04-27T01:57:43.000Z | 2015-05-01T18:18:56.000Z | projects/water/image.py | kmckiern/scripts | acc8326ca653d804ee06752af9e7f5b011fc6e0e | [
"MIT"
] | null | null | null | projects/water/image.py | kmckiern/scripts | acc8326ca653d804ee06752af9e7f5b011fc6e0e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
reimage a molecule to the center of a pbc and cut beyond a certain radius
example usage:
>> python /path/to/script/dir/image.py --p input.pdb --t input.dcd --m mol_num --cd cutdist -o out
"""
import argparse
import mdtraj
import copy
# Command-line interface; note args are parsed at import time, so importing
# this module from elsewhere would consume sys.argv.
parser = argparse.ArgumentParser(description='replicate crystal pdb to arbitrary size')
parser.add_argument('--p', type=str, help='input pdb file')
parser.add_argument('--b', type=str, help='big input pdb file')
parser.add_argument('--t', type=str, help='input trj')
parser.add_argument('--m', type=int, help='molecule to center')
parser.add_argument('--cd', type=str, help='distance beyond which molecules are cut')
parser.add_argument('--o', type=str, help='output pdb and trj name')
args = parser.parse_args()
# NOTE(review): --b, --m, --cd and --o are declared but never read by main();
# confirm whether they are still needed.
#def center_mol(trj, molnum):
def sandwich(trj, ndx):
    """Stack two shifted copies of *trj*: one displaced by minus one box
    vector along axis *ndx*, one by plus one box vector.

    The shift uses the unit-cell vectors of the first frame only; the input
    trajectory is left untouched (the copies are deep copies).
    """
    shift = trj.unitcell_vectors[0][:, ndx]
    minus_copy = copy.deepcopy(trj)
    minus_copy.xyz -= shift
    plus_copy = copy.deepcopy(trj)
    plus_copy.xyz += shift
    return minus_copy.stack(plus_copy)
def image_trj(trj):
    """Tile *trj* with its periodic images along x, y and z.

    Each pass stacks the current system with copies shifted by plus/minus one
    box vector along one axis (via `sandwich`), tripling the system per axis,
    for a full 3x3x3 supercell after all three passes.
    """
    tiled = trj
    for axis in (0, 1, 2):
        tiled = tiled.stack(sandwich(tiled, axis))
    return tiled
def indx_dists(trj, center_mol, cut):
    """Compute distances from *center_mol* to every atom/molecule index in
    *trj* and keep those below *cut*.
    """
    # The module only imports argparse/mdtraj/copy, so `np` was an undefined
    # name and every call raised NameError; import numpy locally to fix that
    # without touching the module's import block.
    import numpy as np
    nmols = trj.xyz.shape[1]
    pairs = np.zeros((nmols, 2))
    pairs[:, 0] = np.arange(nmols)
    pairs[:, 1] = center_mol * np.ones(nmols)
    ds = mdtraj.compute_distances(trj, pairs)
    # NOTE(review): compute_distances returns an (n_frames, n_pairs) array, so
    # each `i` below is a per-frame row rather than a scalar; `i < cut` is only
    # well-defined for single-pair rows -- confirm intended usage before
    # relying on this filter.
    return [i for i in ds if i < cut]
def main():
    """Load the input trajectory, build its 3x3x3 periodic image, and write a
    version with every frame shifted so the first atom sits at the origin."""
    # get input
    p = mdtraj.load(args.t, top=args.p)
    # image
    p_im = image_trj(p)
    p_im[0].save_pdb('zero.pdb')
    p_im.save_dcd('big.dcd')
    # couldn't figure out how to merge chains w/o chimera
    # NOTE(review): 'ftlog.pdb' is a hard-coded topology that must exist in the
    # working directory (presumably produced externally, e.g. via chimera) --
    # confirm before running.
    p_big = mdtraj.load('big.dcd', top='ftlog.pdb')
    # fix first oxygen
    msel = p_big.xyz[:,0,:]
    # shift coords
    # NOTE(review): msel is a view into p_big.xyz, so the in-place subtraction
    # below aliases its own operand (atom 0 is both source and target) --
    # confirm the resulting shift is the intended one.
    for t in range(p_big.xyz.shape[0]):
        p_big.xyz[t] -= msel[t]
    # write new dcd
    p_big.save_dcd('shifted.dcd')
# Script entry point: run the imaging/shifting pipeline when invoked directly.
if __name__ == '__main__':
    main()
| 29.318841 | 102 | 0.653979 |
aceac239532e2bc4594548d68ed9cbcf69734d55 | 1,343 | py | Python | openpype/auth/utils.py | pypeclub/openpype4-backend | a0abe2ed66887c6529b01bbb9cb00278bbff41e4 | [
"Apache-2.0"
] | 2 | 2022-03-09T08:02:52.000Z | 2022-03-15T00:34:01.000Z | openpype/auth/utils.py | pypeclub/openpype4-backend | a0abe2ed66887c6529b01bbb9cb00278bbff41e4 | [
"Apache-2.0"
] | 1 | 2022-03-08T16:22:34.000Z | 2022-03-08T16:22:34.000Z | openpype/auth/utils.py | pypeclub/openpype4-backend | a0abe2ed66887c6529b01bbb9cb00278bbff41e4 | [
"Apache-2.0"
] | null | null | null | from openpype.config import pypeconfig
from openpype.utils import create_hash, hash_data
def ensure_password_complexity(password: str) -> bool:
    """Check *password* against the configured password policy.

    The password must be at least ``auth_pass_min_length`` characters long.
    When ``auth_pass_complex`` is enabled it must additionally contain at
    least one letter, one digit and one special character.
    """
    if len(password) < pypeconfig.auth_pass_min_length:
        return False
    if not pypeconfig.auth_pass_complex:
        return True
    # Complexity rules: letters, digits and specials must all be present.
    has_letter = any(ch.isalpha() for ch in password)
    has_digit = any(ch.isdigit() for ch in password)
    has_special = any(ch in ".-!@#$%^&*()_+" for ch in password)
    return has_letter and has_digit and has_special
def hash_password(password: str, salt: str) -> str:
    """Hash *password* with the given *salt* and the config-wide pepper.

    The values are joined as ``password:salt:pepper`` before hashing, so the
    same password yields different hashes under different salts/peppers.
    """
    peppered = f"{password}:{salt}:{pypeconfig.auth_pass_pepper}"
    return hash_data(peppered)
def create_password(password: str) -> str:
    """Return a ``hash:salt`` credential string for *password*.

    A fresh random salt is generated for every call.
    """
    salt = create_hash()
    return f"{hash_password(password, salt)}:{salt}"
| 33.575 | 72 | 0.684289 |
aceac3cef5df6a1db2d4116614296dd46a37a593 | 4,691 | py | Python | parameters.py | chart21/Verification-of-Outsourced-Object-Detection | ed1c27e5de6e6c98098969262aa8bd41da140c8b | [
"MIT"
] | null | null | null | parameters.py | chart21/Verification-of-Outsourced-Object-Detection | ed1c27e5de6e6c98098969262aa8bd41da140c8b | [
"MIT"
] | null | null | null | parameters.py | chart21/Verification-of-Outsourced-Object-Detection | ed1c27e5de6e6c98098969262aa8bd41da140c8b | [
"MIT"
] | null | null | null | #Class for setting all kinds of differents parameters
import hashlib
# The outsource contract defines the agreement between outsourcer and contractor and has to be set identically for both parties
# If this machine is a verifier, this class is a dummy class and parameters inside are never accessed
# NOTE: deliberately no class docstring here -- Helperfunctions.hashContract()
# hashes str(vars(OutsourceContract)), and adding a __doc__ entry would change
# every contract hash. Attribute names (including the 'deposit_verfier' typo)
# must likewise stay fixed for the same reason.
class OutsourceContract:
    # Identity and parties.
    contract_uid = 0 #contracts have to have a unique id to esnure that each contract hash is unique
    public_key_outsourcer = b'e\x0fy\xfd\xe6\x16\x1f\xe0\x16B\xf2\xdb\x1d\x7f\xc9\xbcLCo\xa7\xa6c\x17\xbf\x8fo\xc8[\x07|bL'
    public_key_contractor = b'\xe9\x919rce\xc9\x1a\xcfJ}\xa3\xee\x17q\x19\xbd\x0eu\xf4\xe0\xd5\x8a<\xc0\x81\x0c\xdbD\xf5;G'
    # Economic terms: rewards, deposits, fines and cheating bounties.
    reward_per_image = 1
    deposit_outsourcer = 10000000 #deposit of outsourcer to ensure paying fine and reward is possible
    deposit_contractor = 10000000 #deposit of contractor to ensure paying fine
    fine_outsourcer = 500000 #fine if a party is detected cheating
    fine_contractor = 500000
    bounty_contractor = 250000 #bounty if a oarty decetcs another party cheating
    bounty_verifier = 250000
    # Agreed model/verification settings.
    model = 'yolov4' #model to use, possible choices are yolov4, yolov3
    tiny = True #whether to use tiny weigths for higher performance
    merkle_tree_interval = 0 # 0: Do not use Merkle Tree but sign every output image, >0: Specifies the intervals at wich a Merkle Tree root is signed and sent
    criteria = 'all' #Specifies if all outputs should be sent back or only outputs that fulfill a certain criteria (e.g certain event happens), criterias should be combined with Merkle Trees to to maximize efficiency
    # Verifier terms (set here because the contractor selects eligible verifiers).
    deposit_verfier = 10000000 #verifier details are also set in outsource contract because the contractor creates a list of all available verifier that meet requirements of the outsourcer
    fine_verifier = 500000
    reward_per_image_verifier = 1
# The outsourcre contract defines the agreement between outsourcer and verifier and has to be set identically for both parties
# If this machine is a contractor, this class is a dummy class and parameters inside are never accessed
# NOTE: no class docstring on purpose -- Helperfunctions.hashVerifierContract()
# hashes str(vars(VerifierContract)); adding a __doc__ entry would change the
# contract hash. Attribute names must stay fixed for the same reason.
class VerifierContract:
    # Identity and parties.
    contract_uid = 0 #contracts have to have a unique id to esnure that each contract hash is unique
    public_key_outsourcer = b'e\x0fy\xfd\xe6\x16\x1f\xe0\x16B\xf2\xdb\x1d\x7f\xc9\xbcLCo\xa7\xa6c\x17\xbf\x8fo\xc8[\x07|bL'
    public_key_verifier = b'\xe9\x919rce\xc9\x1a\xcfJ}\xa3\xee\x17q\x19\xbd\x0eu\xf4\xe0\xd5\x8a<\xc0\x81\x0c\xdbD\xf5;G'
    # Economic terms.
    deposit_verfier = 10000000
    fine_verifier = 500000
    bounty_verifier = 250000
    reward_per_image_verifier = 1
    # Agreed model settings.
    model = 'yolov4' #model to use, possible choices are yolov4, yolov3
    tiny = True #whether to use tiny weigths for higher performance
# class for setting non-contract-related information
class Parameters:
    # Role and networking configuration for this machine.
    is_contractor = True #if this machine should act as a verifier or a contractor
    private_key_self = b'b\xc8\x8c\xa4\xd5\x82\x18cU\xfa\xdb\x0cg"\x06K\xa7\x01@\x9a\xf7\xa5Yn\x1b>|\x9a\xb6\x02\xaf&' #private Key of contractor
    sendingPort = 1234 #Port to send responses to
    port_outsourcer = 5555 #Port to listen to images
    ip_outsourcer = "192.168.178.34" #Ip address of the outsourcer
    framework = '' #tflite, tfRT, tf
    minimum_receive_rate_from_contractor = 0.9 #outsourcer has to recive and acknowledge atleast x% of resonses. Otherwise contract is aborted.
    # Yolo specific parameters
    # NOTE(review): `framework` below re-assigns the attribute declared above
    # (the later value wins at class-creation time) -- confirm which one is
    # intended.
    framework = ''
    tiny = True
    input_size = 416
    iou = 0.45
    score = 0.5
    weights = './checkpoints/yolov4-tiny-416'
    count = False
    dont_show = False
    info = True
    crop = False
    # Edge TPU specific parameters
    edgeTPU_model_path = 'models_edgetpu/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite'
    edgeTPU_label_Path = 'labels_edgetpu/coco_labels.txt'
    EdgeTPU_confidence_level = 0.3
# Dummy class
# Placeholder balances for all parties (no real payment system is attached).
class ParticipantData:
    balance_outsourcer = 10000000000000000000
    balance_contractor = 10000000000000000000
    balance_verifier = 100000000000000000000
#Helper class to calculate contract hashes with SHA3-256
class Helperfunctions:
    """Helper routines for hashing contracts with SHA3-256 and for crafting a
    tampered (dishonest) response used in cheating experiments.

    All helpers are stateless; they were previously plain functions inside the
    class body (callable only as class-level attributes), so they are marked
    @staticmethod to make that explicit and keep instance calls safe too.
    """

    @staticmethod
    def hashContract():
        """Return the SHA3-256 hex digest of the OutsourceContract attributes."""
        contractHash = hashlib.sha3_256(str(vars(OutsourceContract)).encode('latin1'))
        return contractHash.hexdigest()

    @staticmethod
    def hashVerifierContract():
        """Return the SHA3-256 hex digest of the VerifierContract attributes."""
        contractHash = hashlib.sha3_256(str(vars(VerifierContract)).encode('latin1'))
        return contractHash.hexdigest()

    @staticmethod
    def dishonestResponse(boxtext, sig, sk, contractHash):
        """Append a fake detection to *boxtext* and sign it with signing key *sk*.

        The incoming *sig* argument is ignored and replaced by the fresh
        signature. Returns the tampered text and the signature decoded as
        latin-1.
        """
        boxtext += 'Object found: Person'
        sig = sk.sign(boxtext.encode('latin1') + contractHash).signature
        sig = sig.decode('latin1')
        return boxtext, sig
| 43.841121 | 218 | 0.743338 |
aceac3d3ba8700e9e1252ec5889cd84f76a92d8c | 869 | py | Python | fbone/user/views.py | eamonnbell/tep-home | b358ba26c9244dd1c02745ccc9df02e87d024993 | [
"BSD-3-Clause"
] | 1 | 2015-12-14T17:53:34.000Z | 2015-12-14T17:53:34.000Z | fbone/user/views.py | eamonnbell/tep-home | b358ba26c9244dd1c02745ccc9df02e87d024993 | [
"BSD-3-Clause"
] | null | null | null | fbone/user/views.py | eamonnbell/tep-home | b358ba26c9244dd1c02745ccc9df02e87d024993 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import os
from flask import Blueprint, render_template, send_from_directory, abort
from flask import current_app as APP
from flask.ext.login import login_required, current_user
from .models import User
# Blueprint grouping all /user/* routes; registered by the application factory.
user = Blueprint('user', __name__, url_prefix='/user')
@user.route('/')
@login_required
def index():
    """Render the logged-in user's own landing page."""
    # NOTE(review): @login_required should already reject anonymous users, so
    # this explicit check looks redundant -- confirm before removing it.
    if not current_user.is_authenticated():
        abort(403)
    return render_template('user/index.html', user=current_user)
@user.route('/<int:user_id>/profile')
def profile(user_id):
    """Render the (public -- no @login_required) profile page for *user_id*."""
    # NOTE(review): if no user with this id exists, the template receives the
    # lookup's fallback value rather than a 404 -- confirm intended behavior.
    user = User.get_by_id(user_id)
    return render_template('user/profile.html', user=user)
@user.route('/<int:user_id>/avatar/<path:filename>')
@login_required
def avatar(user_id, filename):
    """Serve an avatar file from the user's upload directory as a download."""
    dir_path = os.path.join(APP.config['UPLOAD_FOLDER'], 'user_%s' % user_id)
    # send_from_directory refuses paths escaping dir_path, which guards the
    # <path:filename> parameter against directory traversal.
    return send_from_directory(dir_path, filename, as_attachment=True)
| 25.558824 | 77 | 0.735328 |
aceac4241a645673929c7378022da08461ded528 | 6,741 | py | Python | R_2_LR_clim_clim_conv.py | GunnarBehrens/CBRAIN-CAM | 1b59b5b4731ada7c077c181b543339412d25e5c3 | [
"MIT"
] | null | null | null | R_2_LR_clim_clim_conv.py | GunnarBehrens/CBRAIN-CAM | 1b59b5b4731ada7c077c181b543339412d25e5c3 | [
"MIT"
] | null | null | null | R_2_LR_clim_clim_conv.py | GunnarBehrens/CBRAIN-CAM | 1b59b5b4731ada7c077c181b543339412d25e5c3 | [
"MIT"
] | null | null | null | """
This notebook computes the R² of 700hPa predicted dT/dt and dq/dt of LR_clim_clim_conv
"""
from tensorflow.keras.layers import Input, Dense
from cbrain.layers import *
from tensorflow.keras.models import Model
from tensorflow.keras.losses import mse, binary_crossentropy
from tensorflow.keras.utils import plot_model
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import LearningRateScheduler,Callback
import numpy as np
import matplotlib.pyplot as plt
import argparse
import os
import tensorflow as tf
from cbrain.imports import *
from cbrain.utils import *
import pandas as ps
# ---- Network dimensions -------------------------------------------------
original_dim_input=64 # CAM variables node size
original_dim_output=int(65+64) # SP + CAM variables node size
# network parameters
input_shape = (original_dim_input,)
out_shape=(original_dim_output,)
intermediate_dim = 463 # number of first hidden layers of linear Encoder or last hidden layers of linear Decoder
batch_size = 714
latent_dim = 5 # latent space width
epochs = 40
## Linear Encoder
# Funnel of purely linear Dense layers: 463 -> 463 -> 231 -> 115 -> 57 -> 28 -> 5.
inputs =Input(shape=input_shape, name='encoder_input')
x_0 =Dense(intermediate_dim, activation='linear')(inputs)
x_1 =Dense(intermediate_dim, activation='linear')(x_0)
x_2 =Dense(int(np.round(intermediate_dim/2)), activation='linear')(x_1)
x_3 =Dense(int(np.round(intermediate_dim/4)), activation='linear')(x_2)
x_4 =Dense(int(np.round(intermediate_dim/8)), activation='linear')(x_3)
x_5 =Dense(int(np.round(intermediate_dim/16)), activation='linear')(x_4)
z_lin = Dense(latent_dim, activation='linear', name='z_lin')(x_5)
# instantiate encoder model
encoder_lin = Model(inputs, [z_lin], name='encoder_lin')
## linear Decoder
# Mirror of the encoder: 5 -> 28 -> 57 -> 115 -> 231 -> 463 -> 463 -> 129.
decoder_inputs =Input(shape=(latent_dim,), name='decoder_input')
x_1 =Dense(int(np.round(intermediate_dim/16)), activation='linear')(decoder_inputs)
x_2 =Dense(int(np.round(intermediate_dim/8)), activation='linear')(x_1)
x_3 =Dense(int(np.round(intermediate_dim/4)), activation='linear')(x_2)
x_4 =Dense(int(np.round(intermediate_dim/2)), activation='linear')(x_3)
x_5 =Dense(intermediate_dim, activation='linear')(x_4)
x_6 =Dense(intermediate_dim, activation='linear')(x_5)
outputs = Dense(original_dim_output, activation='linear')(x_6)
decoder_lin = Model(decoder_inputs, outputs, name='decoder')
# End-to-end model: encoder composed with decoder.
emul_outputs=decoder_lin(encoder_lin(inputs))
LR_clim_clim_conv=Model(inputs,emul_outputs)
#loading scaling dictionary of SP variables
scale_array=ps.read_csv('nn_config/scale_dicts/Scaling_cond_VAE.csv')
PHQ_std_surf=scale_array.PHQ_std.values[-1]
TPHYSTND_std_23=scale_array.TPHYSTND_std.values[-1]
PRECT_std=scale_array.PRECT_std.values
FSNS_std=scale_array.FSNS_std.values
FSNT_std=scale_array.FSNT_std.values
FLNS_std=scale_array.FLNS_std.values
FLNT_std=scale_array.FLNT_std.values
# loading scaling dictionaries of CAM variables
scale_array_2D=ps.read_csv('nn_config/scale_dicts/Scaling_enc_II_range_profiles.csv')
scale_array_1D=ps.read_csv('nn_config/scale_dicts/Scaling_enc_II_range.csv')
TBP_std_surf=scale_array_2D.TBP_std.values[-1]
QBP_std_surf=scale_array_2D.QBP_std.values[-1]
Q_lat_std_surf=scale_array_1D.Q_lat_std.values
Q_sens_std_surf=scale_array_1D.Q_sens_std.values
Q_solar_std_surf=scale_array_1D.Q_sol_std.values
PS_std_surf=scale_array_1D.PS_std.values
# resulting output normalization dictionary
scale_dict_II = {
'PHQ': 1/PHQ_std_surf,
'QBP':1/QBP_std_surf,
'TPHYSTND': 1/TPHYSTND_std_23,
'TBP':1/TBP_std_surf,
'FSNT': 1/FSNT_std,
'FSNS': 1/FSNS_std,
'FLNT': 1/FLNT_std,
'FLNS': 1/FLNS_std,
'PRECT': 1/PRECT_std,
'LHFLX': 1/Q_lat_std_surf,
'SHFLX': 1/Q_sens_std_surf,
'SOLIN': 1/Q_solar_std_surf,
'PS':1/PS_std_surf
}
in_vars = ['QBP', 'TBP','PS', 'SOLIN', 'SHFLX', 'LHFLX']
out_vars = ['PHQ','TPHYSTND','FSNT', 'FSNS', 'FLNT', 'FLNS', 'PRECT','QBP', 'TBP','PS', 'SOLIN', 'SHFLX', 'LHFLX']
# Takes representative value for PS since purpose is normalization
PS = 1e5; P0 = 1e5;
P = P0*hyai+PS*hybi; # Total pressure [Pa]
dP = P[1:]-P[:-1];
from cbrain.data_generator import DataGenerator
val_gen_II = DataGenerator(
data_fn = '../preprocessed_data/1918_train_3_month_OND.nc',
input_vars = in_vars,
output_vars = out_vars,
norm_fn = '../preprocessed_data/000_norm_1_month.nc',
input_transform = ('mean', 'maxrs'),
output_transform = scale_dict_II,
batch_size=8192,
shuffle=True
)
LR_clim_clim_conv.load_weights('./saved_models/LR_clim_clim_conv/LR_clim_clim_conv_40_opt_net.h5')
lat=np.arange(-90,90,180/64)
lon=np.arange(-180,180,360/128)
time=4415
X=np.nan*np.zeros((4415,8192,64))
Y=np.nan*np.zeros((4415,8192,129))
for i in np.arange(X[:,1,1].size):
X[i],Y[i]=val_gen_II[i]
X_=np.reshape(X,(4415*8192,64))
Y_=np.reshape(Y,(4415*8192,129))
Y_emul=val_gen_II.output_transform.inverse_transform(LR_clim_clim_conv.predict(X_))
Y_real=val_gen_II.output_transform.inverse_transform(Y_)
lat=np.arange(-90,90,180/64)
lon=np.arange(-180,180,360/128)
time=4415
print('compute_r²')
T_tend_real=np.reshape(Y_real[:,30:60],(time,lat.size,lon.size,Y_real[1,30:60].size))
T_tend_emul=np.reshape(Y_emul[:,30:60],(time,lat.size,lon.size,Y_real[1,30:60].size))
Q_tend_real=np.reshape(Y_real[:,0:30],(time,lat.size,lon.size,Y_real[1,30:60].size))
Q_tend_emul=np.reshape(Y_emul[:,0:30],(time,lat.size,lon.size,Y_real[1,30:60].size))
T_tend_real_long_mean=np.mean(T_tend_real,2)
T_tend_emul_long_mean=np.mean(T_tend_emul,2)
Q_tend_real_long_mean=np.mean(Q_tend_real,2)
Q_tend_emul_long_mean=np.mean(Q_tend_emul,2)
## dT/dt ID: 30:60
## dq/dt ID: 0:30
T_tend_real=np.reshape(Y_real[:,30:60],(time,lat.size,lon.size,Y_real[1,30:60].size))
T_tend_emul=np.reshape(Y_emul[:,30:60],(time,lat.size,lon.size,Y_real[1,30:60].size))
Q_tend_real=np.reshape(Y_real[:,0:30],(time,lat.size,lon.size,Y_real[1,30:60].size))
Q_tend_emul=np.reshape(Y_emul[:,0:30],(time,lat.size,lon.size,Y_real[1,30:60].size))
T_tend_real_long_mean=np.mean(T_tend_real,2)
T_tend_emul_long_mean=np.mean(T_tend_emul,2)
Q_tend_real_long_mean=np.mean(Q_tend_real,2)
Q_tend_emul_long_mean=np.mean(Q_tend_emul,2)
lat=np.arange(-90,90,180/64)
lon=np.arange(-180,180,360/128)
time=4415
T_tend_R_2_700=np.nan*np.zeros((lat.size,lon.size))
Q_tend_R_2_700=np.nan*np.zeros((lat.size,lon.size))
# compute R2 values on level 20 ~ 700hPa
T_tend_R_2_700=1-np.mean((np.squeeze(T_tend_real[:,:,:,20])-np.squeeze(T_tend_emul[:,:,:,20]))**2,0)/np.var(np.squeeze(T_tend_real[:,:,:,20]),0)
Q_tend_R_2_700=1-np.mean((np.squeeze(Q_tend_real[:,:,:,20])-np.squeeze(Q_tend_emul[:,:,:,20]))**2,0)/np.var(np.squeeze(Q_tend_real[:,:,:,20]),0)
np.save('R_2_val/LR_clim_clim_conv_T_tend_R_2_700',T_tend_R_2_700)
np.save('R_2_val/LR_clim_clim_conv_Q_tend_R_2_700',Q_tend_R_2_700)
| 30.364865 | 144 | 0.757751 |
aceac78515be08c193ce2300b23e6faab9d261fa | 9,504 | py | Python | 2.ReinforcementLearning/RL_Book/1-gridworld/1.policy-iteration/environment.py | link-kut/deeplink_public | 688c379bfeb63156e865d78d0428f97d7d203cc1 | [
"MIT"
] | null | null | null | 2.ReinforcementLearning/RL_Book/1-gridworld/1.policy-iteration/environment.py | link-kut/deeplink_public | 688c379bfeb63156e865d78d0428f97d7d203cc1 | [
"MIT"
] | 11 | 2020-01-28T22:33:49.000Z | 2022-03-11T23:41:08.000Z | 2.ReinforcementLearning/RL_Book/1-gridworld/1.policy-iteration/environment.py | link-kut/deeplink_public | 688c379bfeb63156e865d78d0428f97d7d203cc1 | [
"MIT"
] | 2 | 2019-06-01T04:14:52.000Z | 2020-05-31T08:13:23.000Z | from environment import *
import tkinter as tk
from tkinter import Button
import time
import numpy as np
from PIL import ImageTk, Image
PhotoImage = ImageTk.PhotoImage
UNIT = 100  # pixels
HEIGHT = 5  # grid height
WIDTH = 5  # grid width
# Deterministic environment: the chosen action always succeeds.
TRANSITION_PROB = 1
POSSIBLE_ACTIONS = [0, 1, 2, 3]  # up, down, left, right
ACTIONS = [(-1, 0), (1, 0), (0, -1), (0, 1)]  # actions in coordinates
# NOTE(review): REWARDS is never used in this module — presumably kept for
# API parity with sibling examples; verify before removing.
REWARDS = []
class GraphicDisplay(tk.Tk):
    """Tkinter visualisation of the 5x5 grid world for a policy-iteration agent.

    Draws the grid, the agent rectangle, reward cells and policy arrows, and
    wires four buttons to the agent: Evaluate (policy evaluation), Improve
    (policy improvement), move (follow the current policy) and reset.
    """

    def __init__(self, agent, title):
        super(GraphicDisplay, self).__init__()
        self.title(title)
        self.geometry('{0}x{1}'.format(HEIGHT * UNIT, HEIGHT * UNIT + 50))
        self.texts = []   # canvas ids of value/reward text items
        self.arrows = []  # canvas ids of policy-arrow images
        self.env = Env()
        self.agent = agent
        self.iter_count = 0         # how many policy evaluations have run
        self.improvement_count = 0  # how many policy improvements have run
        self.is_moving = 0          # guard against re-entrant agent moves
        (self.up, self.down, self.left, self.right), self.shapes = self.load_images()
        self.btn_1_text = "Evaluate"
        self.btn_2_text = "Improve"
        self.btn_1_func = self.evaluate_policy
        self.btn_2_func = self.improve_policy
        self.btn_3_func = self.move_by_policy_iteration
        self.canvas = self._build_canvas()
        self.text_reward(2, 2, "R : 1.0")
        self.text_reward(1, 2, "R : -1.0")
        self.text_reward(2, 1, "R : -1.0")

    def _build_canvas(self):
        """Create the canvas with grid lines, buttons and shape images."""
        canvas = tk.Canvas(self, bg='white', height=HEIGHT * UNIT, width=WIDTH * UNIT)
        # buttons
        iteration_button = Button(self, text=self.btn_1_text, command=self.btn_1_func)
        iteration_button.configure(width=10, activebackground="#33B5E5")
        canvas.create_window(WIDTH * UNIT * 0.13, HEIGHT * UNIT + 10, window=iteration_button)

        policy_button = Button(self, text=self.btn_2_text, command=self.btn_2_func)
        policy_button.configure(width=10, activebackground="#33B5E5")
        canvas.create_window(WIDTH * UNIT * 0.37, HEIGHT * UNIT + 10, window=policy_button)

        move_button = Button(self, text="move", command=self.btn_3_func)
        move_button.configure(width=10, activebackground="#33B5E5")
        canvas.create_window(WIDTH * UNIT * 0.62, HEIGHT * UNIT + 10, window=move_button)

        reset_button = Button(self, text="reset", command=self.reset)
        reset_button.configure(width=10, activebackground="#33B5E5")
        canvas.create_window(WIDTH * UNIT * 0.87, HEIGHT * UNIT + 10, window=reset_button)

        # create grids
        for col in range(0, WIDTH * UNIT, UNIT):  # 0~400 by 80
            x0, y0, x1, y1 = col, 0, col, HEIGHT * UNIT
            canvas.create_line(x0, y0, x1, y1)
        for row in range(0, HEIGHT * UNIT, UNIT):  # 0~400 by 80
            x0, y0, x1, y1 = 0, row, HEIGHT * UNIT, row
            canvas.create_line(x0, y0, x1, y1)

        # add img to canvas: agent rectangle, two triangle traps, goal circle
        self.rectangle = canvas.create_image(50, 50, image=self.shapes[0])
        canvas.create_image(250, 150, image=self.shapes[1])
        canvas.create_image(150, 250, image=self.shapes[1])
        canvas.create_image(250, 250, image=self.shapes[2])

        # pack all
        canvas.pack()

        return canvas

    def load_images(self):
        """Load arrow and shape images; returns ((up, down, left, right), shapes)."""
        up = PhotoImage(Image.open("../img/up.png").resize((13, 13)))
        right = PhotoImage(Image.open("../img/right.png").resize((13, 13)))
        left = PhotoImage(Image.open("../img/left.png").resize((13, 13)))
        down = PhotoImage(Image.open("../img/down.png").resize((13, 13)))
        rectangle = PhotoImage(Image.open("../img/rectangle.png").resize((65, 65)))
        triangle = PhotoImage(Image.open("../img/triangle.png").resize((65, 65)))
        circle = PhotoImage(Image.open("../img/circle.png").resize((65, 65)))
        return (up, down, left, right), (rectangle, triangle, circle)

    def reset(self):
        """Zero both counters, wipe drawings and put the agent back at start."""
        if self.is_moving == 0:
            # Bug fix: this previously assigned `self.evaluation_count`, an
            # attribute nothing else reads, so resetting never actually
            # cleared the evaluation counter used by evaluate_policy().
            self.iter_count = 0
            self.improvement_count = 0
            for i in self.texts:
                self.canvas.delete(i)

            for i in self.arrows:
                self.canvas.delete(i)
            self.agent.value_table = [[0.0] * WIDTH for _ in range(HEIGHT)]
            self.agent.policy_table = ([[[0.25, 0.25, 0.25, 0.25]] * WIDTH
                                        for _ in range(HEIGHT)])
            # Terminal (goal) state keeps an empty policy.
            self.agent.policy_table[2][2] = []
            x, y = self.canvas.coords(self.rectangle)
            self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)

    def text_value(self, row, col, contents, font='Helvetica', size=15, style='normal', anchor="nw"):
        """Draw a state-value label inside cell (row, col)."""
        origin_x, origin_y = 37, 39
        x, y = origin_y + (UNIT * col), origin_x + (UNIT * row)
        font = (font, str(size), style)
        text = self.canvas.create_text(x, y, fill="black", text=contents, font=font, anchor=anchor)
        return self.texts.append(text)

    def text_reward(self, row, col, contents, font='Helvetica', size=10, style='normal', anchor="nw"):
        """Draw a reward label in the top-left corner of cell (row, col)."""
        origin_x, origin_y = 5, 5
        x, y = origin_y + (UNIT * col), origin_x + (UNIT * row)
        font = (font, str(size), style)
        text = self.canvas.create_text(x, y, fill="black", text=contents, font=font, anchor=anchor)
        return self.texts.append(text)

    def rectangle_move(self, action):
        """Move the agent rectangle one cell in the given direction, clamped to the grid."""
        base_action = np.array([0, 0])
        location = self.find_rectangle()
        self.render()
        if action == 0 and location[0] > 0:  # up
            base_action[1] -= UNIT
        elif action == 1 and location[0] < HEIGHT - 1:  # down
            base_action[1] += UNIT
        elif action == 2 and location[1] > 0:  # left
            base_action[0] -= UNIT
        elif action == 3 and location[1] < WIDTH - 1:  # right
            base_action[0] += UNIT
        # move agent
        self.canvas.move(self.rectangle, base_action[0], base_action[1])

    def find_rectangle(self):
        """Return the agent's current grid cell as (row, col)."""
        temp = self.canvas.coords(self.rectangle)
        x = (temp[0] / 100) - 0.5
        y = (temp[1] / 100) - 0.5
        return int(y), int(x)

    def print_value_table(self, value_table):
        """Render the whole value table onto the grid."""
        for i in range(WIDTH):
            for j in range(HEIGHT):
                self.text_value(i, j, value_table[i][j])

    def render(self):
        """Refresh the window, keeping the agent image on top."""
        time.sleep(0.1)
        self.canvas.tag_raise(self.rectangle)
        self.update()

    def move_by_policy_iteration(self):
        """Walk the agent from the start cell by greedily following the policy."""
        if self.improvement_count != 0 and self.is_moving != 1:
            self.is_moving = 1

            x, y = self.canvas.coords(self.rectangle)
            self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)

            x, y = self.find_rectangle()
            # The goal state's policy list is empty, which ends the walk.
            while len(self.agent.policy_table[x][y]) != 0:
                self.after(100, self.rectangle_move(self.agent.get_action([x, y])))
                x, y = self.find_rectangle()
            self.is_moving = 0

    def draw_one_arrow(self, col, row, policy):
        """Draw one arrow per action with non-zero probability in cell (col, row)."""
        if col == 2 and row == 2:
            return
        if policy[0] > 0:  # up
            origin_x, origin_y = 50 + (UNIT * row), 10 + (UNIT * col)
            self.arrows.append(self.canvas.create_image(origin_x, origin_y, image=self.up))
        if policy[1] > 0:  # down
            origin_x, origin_y = 50 + (UNIT * row), 90 + (UNIT * col)
            self.arrows.append(self.canvas.create_image(origin_x, origin_y, image=self.down))
        if policy[2] > 0:  # left
            origin_x, origin_y = 10 + (UNIT * row), 50 + (UNIT * col)
            self.arrows.append(self.canvas.create_image(origin_x, origin_y, image=self.left))
        if policy[3] > 0:  # right
            origin_x, origin_y = 90 + (UNIT * row), 50 + (UNIT * col)
            self.arrows.append(self.canvas.create_image(origin_x, origin_y, image=self.right))

    def draw_from_policy(self, policy_table):
        """Draw policy arrows for every cell of the grid."""
        for i in range(HEIGHT):
            for j in range(WIDTH):
                self.draw_one_arrow(i, j, policy_table[i][j])

    def evaluate_policy(self):
        """Run one policy-evaluation sweep and redraw the value table."""
        self.iter_count += 1
        for i in self.texts:
            self.canvas.delete(i)
        self.agent.policy_evaluation()
        self.print_value_table(self.agent.value_table)

    def improve_policy(self):
        """Run one policy-improvement step and redraw the policy arrows."""
        self.improvement_count += 1
        for i in self.arrows:
            self.canvas.delete(i)
        self.agent.policy_improvement()
        self.draw_from_policy(self.agent.policy_table)
class Env:
    """Deterministic 5x5 grid world used by the policy-iteration demo.

    Rewards: +1 at the goal cell (2, 2) and -1 at the two trap cells
    (1, 2) and (2, 1); everything else is 0.
    """

    def __init__(self):
        self.transition_probability = TRANSITION_PROB
        self.width = WIDTH
        self.height = HEIGHT
        self.possible_actions = POSSIBLE_ACTIONS
        # Reward grid, indexed as reward[x][y].
        self.reward = [[0] * WIDTH for _ in range(HEIGHT)]
        self.reward[2][2] = 1    # reward 1 for circle
        self.reward[1][2] = -1   # reward -1 for triangle
        self.reward[2][1] = -1   # reward -1 for triangle
        # Enumerate every [x, y] cell exactly once.
        self.all_states = [[x, y] for x in range(WIDTH) for y in range(HEIGHT)]

    def get_next_state_and_reward(self, state, action_index):
        """Apply ACTIONS[action_index] to *state*; return (next_state, reward)."""
        dx, dy = ACTIONS[action_index]
        next_state = self.check_boundary([state[0] + dx, state[1] + dy])
        return next_state, self.reward[next_state[0]][next_state[1]]

    @staticmethod
    def check_boundary(state):
        """Clamp *state* (mutated in place) to the grid and return it."""
        state[0] = min(max(state[0], 0), WIDTH - 1)
        state[1] = min(max(state[1], 0), HEIGHT - 1)
        return state

    def get_transition_prob(self, state, action):
        """Transitions are deterministic, so this is a constant."""
        return self.transition_probability
| 39.932773 | 102 | 0.592698 |
aceac7ddbd43ca237d6e1d82141d40b9d3e83a26 | 8,918 | py | Python | sdk/python/pulumi_azure_nextgen/network/v20190601/network_profile.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/network/v20190601/network_profile.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/network/v20190601/network_profile.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['NetworkProfile']
class NetworkProfile(pulumi.CustomResource):
    """Network profile resource (Azure network, API version 2019-06-01).

    NOTE: this file is auto-generated by the Pulumi SDK Generator (see the
    header warning) — prefer fixing the generator over hand-editing.
    """

    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 container_network_interface_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContainerNetworkInterfaceConfigurationArgs']]]]] = None,
                 container_network_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContainerNetworkInterfaceArgs']]]]] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 network_profile_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Network profile resource.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContainerNetworkInterfaceConfigurationArgs']]]] container_network_interface_configurations: List of chid container network interface configurations.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContainerNetworkInterfaceArgs']]]] container_network_interfaces: List of child container network interfaces.
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] network_profile_name: The name of the network profile.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        """
        # Backwards-compatible handling of the deprecated __name__/__opts__ args.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate inputs and build the prop map.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['container_network_interface_configurations'] = container_network_interface_configurations
            __props__['container_network_interfaces'] = container_network_interfaces
            __props__['etag'] = etag
            __props__['id'] = id
            __props__['location'] = location
            __props__['network_profile_name'] = network_profile_name
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['tags'] = tags
            # Output-only properties start as None and are filled by the engine.
            __props__['name'] = None
            __props__['provisioning_state'] = None
            __props__['resource_guid'] = None
            __props__['type'] = None
        # Aliases for every other API version of this resource type.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network:NetworkProfile"), pulumi.Alias(type_="azure-nextgen:network/latest:NetworkProfile"), pulumi.Alias(type_="azure-nextgen:network/v20180801:NetworkProfile"), pulumi.Alias(type_="azure-nextgen:network/v20181001:NetworkProfile"), pulumi.Alias(type_="azure-nextgen:network/v20181101:NetworkProfile"), pulumi.Alias(type_="azure-nextgen:network/v20181201:NetworkProfile"), pulumi.Alias(type_="azure-nextgen:network/v20190201:NetworkProfile"), pulumi.Alias(type_="azure-nextgen:network/v20190401:NetworkProfile"), pulumi.Alias(type_="azure-nextgen:network/v20190701:NetworkProfile"), pulumi.Alias(type_="azure-nextgen:network/v20190801:NetworkProfile"), pulumi.Alias(type_="azure-nextgen:network/v20190901:NetworkProfile"), pulumi.Alias(type_="azure-nextgen:network/v20191101:NetworkProfile"), pulumi.Alias(type_="azure-nextgen:network/v20191201:NetworkProfile"), pulumi.Alias(type_="azure-nextgen:network/v20200301:NetworkProfile"), pulumi.Alias(type_="azure-nextgen:network/v20200401:NetworkProfile"), pulumi.Alias(type_="azure-nextgen:network/v20200501:NetworkProfile"), pulumi.Alias(type_="azure-nextgen:network/v20200601:NetworkProfile"), pulumi.Alias(type_="azure-nextgen:network/v20200701:NetworkProfile"), pulumi.Alias(type_="azure-nextgen:network/v20200801:NetworkProfile")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(NetworkProfile, __self__).__init__(
            'azure-nextgen:network/v20190601:NetworkProfile',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'NetworkProfile':
        """
        Get an existing NetworkProfile resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()

        return NetworkProfile(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="containerNetworkInterfaceConfigurations")
    def container_network_interface_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.ContainerNetworkInterfaceConfigurationResponse']]]:
        """
        List of chid container network interface configurations.
        """
        return pulumi.get(self, "container_network_interface_configurations")

    @property
    @pulumi.getter(name="containerNetworkInterfaces")
    def container_network_interfaces(self) -> pulumi.Output[Optional[Sequence['outputs.ContainerNetworkInterfaceResponse']]]:
        """
        List of child container network interfaces.
        """
        return pulumi.get(self, "container_network_interfaces")

    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[Optional[str]]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> pulumi.Output[str]:
        """
        The resource GUID property of the network interface resource.
        """
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")

    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 49.270718 | 1,370 | 0.682216 |
aceac815f6b1d315b6fd83959f421e121ad82029 | 2,261 | py | Python | backup/overwatch.py | katlings/adafruit | dd906382a40bf1f83997be508c04c8372a7a250d | [
"MIT"
] | null | null | null | backup/overwatch.py | katlings/adafruit | dd906382a40bf1f83997be508c04c8372a7a250d | [
"MIT"
] | null | null | null | backup/overwatch.py | katlings/adafruit | dd906382a40bf1f83997be508c04c8372a7a250d | [
"MIT"
] | null | null | null | # Gemma IO demo
# Welcome to CircuitPython 2.2.4 :)
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keycode import Keycode
from digitalio import DigitalInOut, Direction, Pull
from analogio import AnalogIn, AnalogOut
from touchio import TouchIn
import adafruit_dotstar as dotstar
import microcontroller
import board
import time
import neopixel
# screen /dev/ttyusbmodem1
# One pixel connected internally!
dot = dotstar.DotStar(board.APA102_SCK, board.APA102_MOSI, 1)
# NOTE(review): brightness 0 should blank the DotStar even though the main
# loop writes colours to it — confirm this is intended.
dot.brightness = 0
dot.show()

# Built in red LED
led = DigitalInOut(board.D13)
led.direction = Direction.OUTPUT

# Analog output on A0
aout = AnalogOut(board.A0)

# Analog input on A1
analog1in = AnalogIn(board.A1)

# Capacitive touch on A2
touch2 = TouchIn(board.A2)

# Used if we do HID output, see below
kbd = Keyboard()
######################### HELPERS ##############################
# Helper to convert analog input to voltage
def getVoltage(pin):
    """Return the voltage on *pin*, scaling its 16-bit reading to the 3.3 V range."""
    raw = pin.value
    return (raw * 3.3) / 65536
# Helper to give us a nice color swirl
def wheel(pos):
    """Map 0-255 onto an [r, g, b] colour wheel (r -> g -> b -> back to r).

    Values outside 0-255 give black, matching the classic Adafruit demo.
    """
    if pos < 0 or pos > 255:
        return [0, 0, 0]
    if pos < 85:
        # First third: red ramps up while green ramps down.
        return [int(pos * 3), int(255 - (pos * 3)), 0]
    if pos < 170:
        # Second third: red ramps down while blue ramps up.
        shifted = pos - 85
        return [int(255 - shifted * 3), 0, int(shifted * 3)]
    # Final third: green ramps up while blue ramps down.
    shifted = pos - 170
    return [0, int(shifted * 3), int(255 - shifted * 3)]
######################### MAIN LOOP ##############################
# Main demo loop: cycles the DotStar colour, sweeps the DAC output, and
# samples the inputs once per 256-tick cycle.
i = 0
while True:
    # spin internal LED around!
    dot[0] = wheel(i)
    dot.show()

    # set analog output to 0-3.3V (0-65535 in increments)
    aout.value = i * 256

    # once every 256 ticks, so it doesn't rush by!
    if i == 0:
        # Read analog voltage on A1
        print("A1: %0.2f" % getVoltage(analog1in))
        # Print the temperature
        print("Temp: %0.1f" % microcontroller.cpu.temperature)

    # use A2 as capacitive touch to turn on internal LED
    if touch2.value:
        print("A2 touched!")
        # send an 'a' keypress over USB HID (remove these three lines if
        # keyboard output is not wanted)
        kbd.press(Keycode.A)
        kbd.release_all()
        time.sleep(1)
    led.value = touch2.value

    i = (i+1) % 256  # run from 0 to 255
| 25.122222 | 69 | 0.620964 |
aceac83d2bf17f60b9735ac6907939a8452ce5f3 | 1,164 | py | Python | test/network/explorers/bitcoin/bitaps_explorer_test.py | khand100/CypherpunkPay | 855c01df1093f21b33d1f096d737be1472df4985 | [
"MIT",
"Unlicense"
] | 44 | 2021-06-25T16:40:52.000Z | 2022-03-24T08:17:49.000Z | test/network/explorers/bitcoin/bitaps_explorer_test.py | khand100/CypherpunkPay | 855c01df1093f21b33d1f096d737be1472df4985 | [
"MIT",
"Unlicense"
] | 25 | 2021-07-20T04:36:08.000Z | 2022-03-10T14:00:53.000Z | test/network/explorers/bitcoin/bitaps_explorer_test.py | khand100/CypherpunkPay | 855c01df1093f21b33d1f096d737be1472df4985 | [
"MIT",
"Unlicense"
] | 5 | 2021-06-29T23:17:06.000Z | 2022-01-25T22:55:43.000Z | from test.network.explorers.bitcoin.block_explorer_test import BlockExplorerTest
from cypherpunkpay.explorers.bitcoin.bitaps_explorer import BitapsExplorer
class BitapsExplorerTest(BlockExplorerTest):
    """Integration tests for BitapsExplorer.

    These hit the live bitaps.com API through the Tor HTTP client provided
    by the BlockExplorerTest base class, so they need network access.
    """

    def test_get_height_mainnet(self):
        """Mainnet chain height is fetched and sane."""
        explorer = BitapsExplorer(self.tor_http_client, btc_network='mainnet')
        self.assert_btc_mainnet_height(explorer)

    def test_get_height_testnet(self):
        """Testnet chain height is fetched and sane."""
        explorer = BitapsExplorer(self.tor_http_client, btc_network='testnet')
        self.assert_btc_testnet_height(explorer)

    def test_get_address_credits_mainnet(self):
        """A known mainnet address has at least one credit."""
        explorer = BitapsExplorer(self.tor_http_client, btc_network='mainnet')
        # Renamed from `credits`, which shadowed the `credits` builtin.
        address_credits = explorer.get_address_credits(
            address='bc1qwqdg6squsna38e46795at95yu9atm8azzmyvckulcc7kytlcckxswvvzej',
            current_height=0
        )
        self.assertNotEmpty(address_credits.any())

    def test_get_address_credits_testnet(self):
        """A known testnet address has exactly one credit."""
        explorer = BitapsExplorer(self.tor_http_client, btc_network='testnet')
        address_credits = explorer.get_address_credits(
            address='tb1q4cnvakxhuwrlfesn5uvj4haqp83t6zvpsxwzv8',
            current_height=0
        )
        self.assertEqual(1, len(address_credits.any()))
| 38.8 | 85 | 0.733677 |
aceacace7b1b5a47c00ddb2c8a0621d51b65654b | 2,534 | py | Python | pay-api/migrations/versions/92fed98da61c_create_statement_tables.py | stevenc987/sbc-pay | 04f02f362f88a30c082b0643583b8d0ebff6063f | [
"Apache-2.0"
] | null | null | null | pay-api/migrations/versions/92fed98da61c_create_statement_tables.py | stevenc987/sbc-pay | 04f02f362f88a30c082b0643583b8d0ebff6063f | [
"Apache-2.0"
] | null | null | null | pay-api/migrations/versions/92fed98da61c_create_statement_tables.py | stevenc987/sbc-pay | 04f02f362f88a30c082b0643583b8d0ebff6063f | [
"Apache-2.0"
] | null | null | null | """create statement tables
Revision ID: 92fed98da61c
Revises: 7231303dabdf
Create Date: 2020-08-13 06:39:51.780897
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '92fed98da61c'  # id of this migration
down_revision = '7231303dabdf'  # migration this one applies on top of
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``statement`` and ``statement_invoices`` tables with their indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('statement',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('frequency', sa.String(length=50), nullable=True),
    sa.Column('payment_account_id', sa.Integer(), nullable=True),
    sa.Column('from_date', sa.Date(), nullable=False),
    sa.Column('to_date', sa.Date(), nullable=True),
    sa.Column('status', sa.String(length=50), nullable=True),
    sa.ForeignKeyConstraint(['payment_account_id'], ['payment_account.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_statement_payment_account_id'), 'statement', ['payment_account_id'], unique=False)
    op.create_index(op.f('ix_statement_frequency'), 'statement', ['frequency'], unique=False)
    op.create_index(op.f('ix_statement_status'), 'statement', ['status'], unique=False)
    # NOTE(review): 'inovice_id' below is a typo for 'invoice_id', but it is
    # what this migration created — do not rename it here; if the schema
    # should change, do it in a new follow-up migration.
    op.create_table('statement_invoices',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('statement_id', sa.Integer(), nullable=False),
    sa.Column('inovice_id', sa.Integer(), nullable=False),
    sa.Column('status', sa.String(length=50), nullable=True),
    sa.ForeignKeyConstraint(['inovice_id'], ['invoice.id'], ),
    sa.ForeignKeyConstraint(['statement_id'], ['statement.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_statement_invoices_statement_id'), 'statement_invoices', ['statement_id'], unique=False)
    op.create_index(op.f('ix_statement_invoices_status'), 'statement_invoices', ['status'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``statement_invoices`` and ``statement`` tables (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Indexes are dropped first, then the tables, children before parents.
    op.drop_index(op.f('ix_statement_invoices_status'), table_name='statement_invoices')
    op.drop_index(op.f('ix_statement_invoices_statement_id'), table_name='statement_invoices')
    op.drop_table('statement_invoices')
    op.drop_index(op.f('ix_statement_status'), table_name='statement')
    op.drop_index(op.f('ix_statement_frequency'), table_name='statement')
    op.drop_index(op.f('ix_statement_payment_account_id'), table_name='statement')
    op.drop_table('statement')
    # ### end Alembic commands ###
| 43.689655 | 117 | 0.71547 |
aceacaef58046537fb391fb7f8af9a242bf508df | 2,306 | py | Python | src/oci/operator_access_control/models/reject_access_request_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/operator_access_control/models/reject_access_request_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/operator_access_control/models/reject_access_request_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class RejectAccessRequestDetails(object):
    """
    Summary of why the access request was rejected.

    NOTE(review): this appears to be generated OCI SDK model code (swagger
    type/attribute maps); keep hand edits minimal.
    """

    def __init__(self, **kwargs):
        """
        Initializes a new RejectAccessRequestDetails object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param approver_comment:
            The value to assign to the approver_comment property of this RejectAccessRequestDetails.
        :type approver_comment: str

        """
        # Maps each model attribute to its swagger type name.
        self.swagger_types = {
            'approver_comment': 'str'
        }

        # Maps each model attribute to its wire (JSON) field name.
        self.attribute_map = {
            'approver_comment': 'approverComment'
        }

        self._approver_comment = None

    @property
    def approver_comment(self):
        """
        Gets the approver_comment of this RejectAccessRequestDetails.
        Comment by the approver explaining why the request is rejected.


        :return: The approver_comment of this RejectAccessRequestDetails.
        :rtype: str
        """
        return self._approver_comment

    @approver_comment.setter
    def approver_comment(self, approver_comment):
        """
        Sets the approver_comment of this RejectAccessRequestDetails.
        Comment by the approver explaining why the request is rejected.


        :param approver_comment: The approver_comment of this RejectAccessRequestDetails.
        :type: str
        """
        self._approver_comment = approver_comment

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        if other is None:
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| 32.478873 | 245 | 0.688205 |
aceacbe9fbb3a779a47652d254c617263b76474d | 712 | py | Python | webapp/core/migrations/0002_auto_20210120_1612.py | Bruno-Odinukweze/License-Parser | 247898e424a66a4d86f90b9db6f822ea4e836f2d | [
"BSD-Source-Code"
] | null | null | null | webapp/core/migrations/0002_auto_20210120_1612.py | Bruno-Odinukweze/License-Parser | 247898e424a66a4d86f90b9db6f822ea4e836f2d | [
"BSD-Source-Code"
] | null | null | null | webapp/core/migrations/0002_auto_20210120_1612.py | Bruno-Odinukweze/License-Parser | 247898e424a66a4d86f90b9db6f822ea4e836f2d | [
"BSD-Source-Code"
] | null | null | null | # Generated by Django 3.1.5 on 2021-01-20 15:12
import core.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='license',
name='driver_license_image',
field=models.ImageField(upload_to=core.models.PathRename('b6fadd24491247d1b11f625c1d36f1e7', 'driver_image')),
),
migrations.AlterField(
model_name='license',
name='pco_license_image',
field=models.ImageField(upload_to=core.models.PathRename('b6fadd24491247d1b11f625c1d36f1e7', 'pco_image')),
),
]
| 28.48 | 122 | 0.644663 |
aceacc0d175b286bf7ce90590cb58e0138ab166a | 12,153 | py | Python | src/olympia/translations/fields.py | Osmose/olympia | 774c3b927ec05ef971e4206e2669b4291b8b4f17 | [
"BSD-3-Clause"
] | 1 | 2020-04-07T07:21:25.000Z | 2020-04-07T07:21:25.000Z | src/olympia/translations/fields.py | Osmose/olympia | 774c3b927ec05ef971e4206e2669b4291b8b4f17 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/translations/fields.py | Osmose/olympia | 774c3b927ec05ef971e4206e2669b4291b8b4f17 | [
"BSD-3-Clause"
] | 2 | 2018-03-04T00:11:22.000Z | 2019-12-14T09:45:55.000Z | from django import forms
from django.conf import settings
from django.db import models
from django.db.models.fields import related
from django.utils import translation as translation_utils
from django.utils.translation.trans_real import to_language
from .hold import add_translation, make_key, save_translations
from .models import (Translation, PurifiedTranslation, LinkifiedTranslation,
NoLinksTranslation, NoLinksNoMarkupTranslation)
from .widgets import TransInput, TransTextarea
class TranslatedField(models.ForeignKey):
    """
    A foreign key to the translations table.
    If require_locale=False, the fallback join will not use a locale. Instead,
    we will look for 1) a translation in the current locale and 2) fallback
    with any translation matching the foreign key.
    """
    # Related model; subclasses override this with the Purified/Linkified/
    # NoLinks variants below.
    to = Translation
    # The target id is shared by one Translation row per locale (see
    # TranslationDescriptor.translation_from_string), so it is not unique.
    requires_unique_target = False

    def __init__(self, **kwargs):
        # to_field: The field on the related object that the relation is to.
        # Django wants to default to translations.autoid, but we need id.
        options = dict(null=True, to_field='id', unique=True, blank=True,
                       on_delete=models.SET_NULL)
        kwargs.update(options)
        # short=True renders as a single-line input, False as a textarea
        # (see formfield below).
        self.short = kwargs.pop('short', True)
        self.require_locale = kwargs.pop('require_locale', True)
        # "to" is passed here from the migration framework; we ignore it
        # since it's the same for every instance.
        kwargs.pop('to', None)
        super(TranslatedField, self).__init__(self.to, **kwargs)

    def deconstruct(self):
        # Expose the custom kwargs so makemigrations can faithfully
        # reconstruct this field.
        name, path, args, kwargs = super(TranslatedField, self).deconstruct()
        kwargs['to'] = self.to
        kwargs['short'] = self.short
        kwargs['require_locale'] = self.require_locale
        return (name, path, args, kwargs)

    @property
    def db_column(self):
        # Django wants to call the db_column ('%s_id' % self.name), but our
        # translations foreign keys aren't set up that way.
        return self._db_column if hasattr(self, '_db_column') else self.name

    @db_column.setter
    def db_column(self, value):
        # Django sets db_column=None to initialize it. I don't think anyone
        # would set the db_column otherwise.
        if value is not None:
            self._db_column = value

    def contribute_to_class(self, cls, name):
        """Add this Translation to ``cls._meta.translated_fields``."""
        super(TranslatedField, self).contribute_to_class(cls, name)
        # Add self to the list of translated fields.
        if hasattr(cls._meta, 'translated_fields'):
            cls._meta.translated_fields.append(self)
        else:
            cls._meta.translated_fields = [self]
        # Set up a unique related name. The + means it's hidden.
        self.rel.related_name = '%s_%s_set+' % (cls.__name__, name)
        # Replace the normal descriptor with our custom descriptor.
        setattr(cls, self.name, TranslationDescriptor(self))

    def formfield(self, **kw):
        # Widget follows the `short` flag; callers may still override via kw.
        widget = TransInput if self.short else TransTextarea
        defaults = {'form_class': TransField, 'widget': widget}
        defaults.update(kw)
        return super(TranslatedField, self).formfield(**defaults)

    def validate(self, value, model_instance):
        # Skip ForeignKey.validate since that expects only one Translation when
        # doing .get(id=id)
        return models.Field.validate(self, value, model_instance)
class PurifiedField(TranslatedField):
    # Variant of TranslatedField backed by PurifiedTranslation rows.
    to = PurifiedTranslation
class LinkifiedField(TranslatedField):
    # Variant of TranslatedField backed by LinkifiedTranslation rows.
    to = LinkifiedTranslation
class NoLinksField(TranslatedField):
    # Variant of TranslatedField backed by NoLinksTranslation rows.
    to = NoLinksTranslation
class NoLinksNoMarkupField(TranslatedField):
    # Variant of TranslatedField backed by NoLinksNoMarkupTranslation rows.
    to = NoLinksNoMarkupTranslation
def switch(obj, new_model):
    """Switch between Translation and Purified/Linkified Translations.

    Builds a fresh ``new_model`` instance, copying over every field that
    ``new_model`` declares from ``obj``.  The two models share the same
    columns, so this effectively re-types the object.
    """
    values = {field.name: getattr(obj, field.name)
              for field in new_model._meta.fields}
    return new_model(**values)
def save_on_signal(obj, trans):
    """Connect signals so the translation gets saved during obj.save()."""
    signal = models.signals.pre_save

    def cb(sender, instance, **kw):
        # Only act when the instance being saved is the one we were asked
        # about; insert vs. update is decided by whether the translation
        # already has an autoid.  Once saved, detach so this runs at most
        # once.
        if instance is obj:
            is_new = trans.autoid is None
            trans.save(force_insert=is_new, force_update=not is_new)
            signal.disconnect(cb)

    # weak=False keeps the closure alive until the save actually happens.
    signal.connect(cb, sender=obj.__class__, weak=False)
class TranslationDescriptor(related.ReverseSingleRelatedObjectDescriptor):
    """
    Descriptor that handles creating and updating Translations given strings.

    Installed on the model by TranslatedField.contribute_to_class; accepts
    plain strings and {locale: string} dicts on assignment.
    """

    def __init__(self, field):
        super(TranslationDescriptor, self).__init__(field)
        self.model = field.rel.to

    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        # If Django doesn't find the value in the cache (which would only
        # happen if the field was set or accessed already), it does a db query
        # to follow the foreign key. We expect translations to be set by
        # queryset transforms, so doing a query is the wrong thing here.
        try:
            return getattr(instance, self.field.get_cache_name())
        except AttributeError:
            return None

    def __set__(self, instance, value):
        lang = translation_utils.get_language()
        if isinstance(value, basestring):
            value = self.translation_from_string(instance, lang, value)
        elif hasattr(value, 'items'):
            value = self.translation_from_dict(instance, lang, value)
        # Don't let this be set to None, because Django will then blank out the
        # foreign key for this object. That's incorrect for translations.
        if value is not None:
            # We always get these back from the database as Translations, but
            # we may want them to be a more specific Purified/Linkified child
            # class.
            if not isinstance(value, self.model):
                value = switch(value, self.model)
            super(TranslationDescriptor, self).__set__(instance, value)
        elif getattr(instance, self.field.attname, None) is None:
            super(TranslationDescriptor, self).__set__(instance, None)

    def translation_from_string(self, instance, lang, string):
        """Create, save, and return a Translation from a string.

        Reuses the existing translation id so all locales of one field share
        the same id; updates in place when the locale matches `lang`.
        """
        try:
            trans = getattr(instance, self.field.name)
            trans_id = getattr(instance, self.field.attname)
            if trans is None and trans_id is not None:
                # This locale doesn't have a translation set, but there are
                # translations in another locale, so we have an id already.
                translation = self.model.new(string, lang, id=trans_id)
            elif to_language(trans.locale) == lang.lower():
                # Replace the translation in the current language.
                trans.localized_string = string
                translation = trans
            else:
                # We already have a translation in a different language.
                translation = self.model.new(string, lang, id=trans.id)
        except AttributeError:
            # Create a brand new translation.
            translation = self.model.new(string, lang)
        # A new translation has been created and it might need to be saved.
        # This adds the translation to the queue of translation that need
        # to be saved for this instance.
        add_translation(make_key(instance), translation)
        return translation

    def translation_from_dict(self, instance, lang, dict_):
        """
        Create Translations from a {'locale': 'string'} mapping.
        If one of the locales matches lang, that Translation will be returned.
        """
        # Imported here to avoid a circular import at module load time.
        from olympia.amo.utils import to_language as amo_to_language
        rv = None
        for locale, string in dict_.items():
            loc = amo_to_language(locale)
            # Silently skip locales the site doesn't know about.
            if loc not in settings.AMO_LANGUAGES + settings.HIDDEN_LANGUAGES:
                continue
            # The Translation is created and saved in here.
            trans = self.translation_from_string(instance, locale, string)
            # Set the Translation on the object because translation_from_string
            # doesn't expect Translations to be created but not attached.
            self.__set__(instance, trans)
            # If we're setting the current locale, set it to the object so
            # callers see the expected effect.
            if to_language(locale) == lang:
                rv = trans
        return rv
class _TransField(object):
    """
    Mixin that makes a form field accept a ``{locale: value}`` mapping.

    Validators run once per locale; only the default locale is treated as
    required (all other locales are automatically optional).  Errors are
    collected per-locale into a LocaleList and raised together as a
    LocaleValidationError.
    """

    def __init__(self, *args, **kwargs):
        self.default_locale = settings.LANGUAGE_CODE
        # Drop ModelChoiceField-style kwargs that CharField and friends do
        # not understand.
        for k in ('queryset', 'to_field_name', 'limit_choices_to'):
            if k in kwargs:
                del kwargs[k]
        self.widget = kwargs.pop('widget', TransInput)
        # XXX: Figure out why this is being forwarded here (cgrebs)
        # It's empty and not supported by CharField (-> TransField)
        kwargs.pop('limit_choices_to', None)
        super(_TransField, self).__init__(*args, **kwargs)

    def clean(self, value):
        """Strip and validate each locale's value.

        :param value: mapping of locale -> raw string.
        :raises LocaleValidationError: with every per-locale failure.
        :return: the cleaned mapping.
        """
        errors = LocaleList()
        value = dict((k, v.strip() if v else v) for (k, v) in value.items())
        # Raise an exception if the default locale is required and not present
        if self.default_locale.lower() not in value:
            value[self.default_locale.lower()] = None
        # Now, loop through them and validate them separately.
        for locale, val in value.items():
            try:
                # Only the default locale can be required; all non-default
                # fields are automatically optional.
                if self.default_locale.lower() == locale:
                    super(_TransField, self).validate(val)
                super(_TransField, self).run_validators(val)
            # 'except X as e' replaces the Python-2-only 'except X, e'
            # comma syntax; it is valid on Python 2.6+ and required on 3.x.
            except forms.ValidationError as e:
                errors.extend(e.messages, locale)
        if errors:
            raise LocaleValidationError(errors)
        return value

    def _has_changed(self, initial, data):
        # This used to be called on the field's widget and always returned
        # False!
        return False
class LocaleValidationError(forms.ValidationError):
    """ValidationError whose ``messages`` is a LocaleList of per-locale
    errors.

    NOTE(review): the base ValidationError.__init__ is deliberately not
    called; only the message list is stored, exposed via the ``messages``
    property.
    """

    def __init__(self, messages, code=None, params=None):
        self.msgs = messages

    @property
    def messages(self):
        return self.msgs
class TransField(_TransField, forms.CharField):
    """
    A CharField subclass that can deal with multiple locales.
    Most validators are run over the data for each locale. The required
    validator is only run on the default_locale, which is hooked up to the
    instance with TranslationFormMixin.
    """

    @staticmethod
    def adapt(cls, opts=None):
        """Get a new TransField that subclasses cls instead of CharField."""
        if opts is None:
            opts = {}
        # Build e.g. a Trans<URLField> on the fly by mixing _TransField
        # into the given field class.
        return type('Trans%s' % cls.__name__, (_TransField, cls), opts)
# Subclass dict so that isinstance checks in Django treat this as a mapping
# while the real data lives in two parallel lists.
class LocaleList(dict):
    """
    List-like object that maps each element to a locale.

    Useful for validation error lists where we want to associate each error
    with the locale it was raised for, e.g.::

        errors = LocaleList(['too long'], 'en')
        list(errors.zip())  # -> [('en', 'too long')]
    """

    def __init__(self, seq=None, locale=None):
        # Elements and their locales are stored in parallel lists.
        self.seq, self.locales = [], []
        if seq:
            # A sequence without a locale is meaningless here.
            assert seq and locale
            self.extend(seq, locale)

    def __iter__(self):
        # Iterate over (locale, element) pairs.
        return iter(self.zip())

    def extend(self, seq, locale):
        """Append all elements of ``seq``, tagging each with ``locale``."""
        self.seq.extend(seq)
        self.locales.extend([locale] * len(seq))

    def __nonzero__(self):  # Python 2 truthiness
        return bool(self.seq)

    # Python 3 looks for __bool__; without this alias a non-empty LocaleList
    # would be falsy, because dict.__len__ of the (always empty) dict wins.
    __bool__ = __nonzero__

    def __contains__(self, item):
        return item in self.seq

    def zip(self):
        """Return the (locale, element) pairs."""
        return zip(self.locales, self.seq)
def save_signal(sender, instance, **kw):
    """
    Use this signal on a model to iterate through all the translations added
    to the hold queue and save them all. Hook this up to the pre_save signal
    on the model.
    """
    # Skip fixture/raw loading: no translations are queued in that path.
    if not kw.get('raw'):
        save_translations(make_key(instance))
| 36.716012 | 79 | 0.647988 |
aceacc4eec5ea74de791a213e90bc644be1243d9 | 6,716 | py | Python | t/test_020_api.py | bodgery/doorbot-server | aa7f4b02ff083d11675b542ba71af970d01cbd62 | [
"BSD-2-Clause"
] | null | null | null | t/test_020_api.py | bodgery/doorbot-server | aa7f4b02ff083d11675b542ba71af970d01cbd62 | [
"BSD-2-Clause"
] | 3 | 2021-10-14T15:21:26.000Z | 2021-10-14T15:31:30.000Z | t/test_020_api.py | bodgery/doorbot-server | aa7f4b02ff083d11675b542ba71af970d01cbd62 | [
"BSD-2-Clause"
] | null | null | null | import unittest
import flask_unittest
import flask.globals
from flask import json
import os
import psycopg2
import re
import sqlite3
import Doorbot.Config
import Doorbot.DB as DB
import Doorbot.DBSqlite3
import Doorbot.API
class TestAPI( flask_unittest.ClientTestCase ):
    """Integration tests for the Doorbot HTTP API.

    A single DB connection is shared for the whole class: PostgreSQL when the
    DB environment variable is 'PG', otherwise an in-memory SQLite database.
    """
    app = Doorbot.API.app

    @classmethod
    def setUpClass( cls ):
        # Pick the backend from the environment and point the DB module at it.
        if 'PG' == os.environ.get( 'DB' ):
            pg_conf = Doorbot.Config.get( 'postgresql' )
            user = pg_conf[ 'username' ]
            passwd = pg_conf[ 'passwd' ]
            database = pg_conf[ 'database' ]
            conn_str = ' '.join([
                'dbname=' + database,
                'user=' + user,
                'password=' + passwd,
            ])
            conn = psycopg2.connect( conn_str )
            conn.set_session( autocommit = True )
            DB.set_db( conn )
        else:
            conn = sqlite3.connect( ':memory:', isolation_level = None )
            DB.set_db( conn )
            DB.set_sqlite()
            Doorbot.DBSqlite3.create()

    @classmethod
    def tearDownClass( cls ):
        DB.close()

    def test_check_tag( self, client ):
        # 200 active tag, 403 deactivated, 404 unknown, 400 malformed.
        DB.add_member( "Foo Bar", "1234" )
        DB.add_member( "Foo Baz", "4321" )
        DB.deactivate_member( "4321" )
        rv = client.get( '/check_tag/1234' )
        self.assertStatus( rv, 200 )
        rv = client.get( '/check_tag/4321' )
        self.assertStatus( rv, 403 )
        rv = client.get( '/check_tag/1111' )
        self.assertStatus( rv, 404 )
        rv = client.get( '/check_tag/foobar' )
        self.assertStatus( rv, 400 )

    def test_entry_location( self, client ):
        # Same status matrix as check_tag, but via the entry-logging route.
        DB.add_member( "Bar Baz", "5678" )
        DB.add_member( "Bar Qux", "8765" )
        DB.deactivate_member( "8765" )
        rv = client.get( '/entry/5678/cleanroom.door' )
        self.assertStatus( rv, 200 )
        rv = client.get( '/entry/8765/cleanroom.door' )
        self.assertStatus( rv, 403 )
        rv = client.get( '/entry/1111/cleanroom.door' )
        self.assertStatus( rv, 404 )
        rv = client.get( '/entry/foobar/cleanroom.door' )
        self.assertStatus( rv, 400 )

    def test_add_tag( self, client ):
        # Unknown tag becomes known after PUTting it.
        rv = client.get( '/check_tag/9012' )
        self.assertStatus( rv, 404 )
        rv = client.put( '/secure/new_tag/9012/foo' )
        self.assertStatus( rv, 201 )
        rv = client.get( '/check_tag/9012' )
        self.assertStatus( rv, 200 )

    def test_activate_deactivate_member( self, client ):
        # Round-trip: active -> deactivated -> reactivated.
        DB.add_member( "Qux Quux", "0123" )
        rv = client.get( '/check_tag/0123' )
        self.assertStatus( rv, 200 )
        rv = client.post( '/secure/deactivate_tag/0123' )
        self.assertStatus( rv, 200 )
        rv = client.get( '/check_tag/0123' )
        self.assertStatus( rv, 403 )
        rv = client.post( '/secure/reactivate_tag/0123' )
        self.assertStatus( rv, 200 )
        rv = client.get( '/check_tag/0123' )
        self.assertStatus( rv, 200 )

    def test_search_tags( self, client ):
        DB.add_member( "Bar Quuux", "09865" )
        DB.add_member( "Bar Quuuux", "98764" )
        DB.add_member( "Baz Quuux", "87653" )
        DB.add_member( "Baz Quuuux", "76542" )
        match_bar = re.compile( '.*,.*Bar.*', re.MULTILINE | re.DOTALL | re.I )
        # NOTE(review): unlike match_bar, this pattern has no
        # MULTILINE/DOTALL flags -- confirm that is intended.
        match_quuux = re.compile( '.*,.*quuux.*', flags = re.I )
        rv = client.get( '/secure/search_tags?name=Bar&offset=0&limit=1' )
        data = rv.data.decode( "UTF-8" )
        self.assertTrue(
            match_bar.match( data ),
            "Matched bar",
        )
        rv = client.get( '/secure/search_tags?name=bar&offset=0&limit=1' )
        data = rv.data.decode( "UTF-8" )
        self.assertTrue(
            match_bar.match( data ),
            "Matched bar in a case insensitive way",
        )
        rv = client.get( '/secure/search_tags?name=quuux&offset=0&limit=1' )
        data = rv.data.decode( "UTF-8" )
        self.assertTrue(
            match_quuux.match( data ),
            "Matched quuux in a case insensitive way",
        )

    def test_search_entry_log( self, client ):
        DB.add_member( "Bar Quuux", "09876" )
        DB.log_entry( "09876", "cleanroom.door", True, True )
        match_cleanroom = re.compile( '.*,cleanroom\.door.*',
            re.MULTILINE | re.DOTALL )
        rv = client.get( '/secure/search_entry_log?tag=09876&offset=0&limit=1' )
        data = rv.data.decode( "UTF-8" )
        self.assertTrue(
            match_cleanroom.match( data ),
            "Matched RFID tag",
        )
        # Test for blank location
        DB.log_entry( "09876", None, True, True )
        rv = client.get( '/secure/search_entry_log' )
        data = rv.data.decode( "UTF-8" )
        # NOTE(review): the assertion message below says "Matched bar" but
        # this actually checks the cleanroom.door pattern.
        self.assertTrue(
            match_cleanroom.match( data ),
            "Matched bar",
        )

    def test_dump_tags( self, client ):
        # Only active members appear in the dump.
        DB.add_member( "Qux Quuux", "45321" )
        DB.add_member( "Qux Quuuux", "12354" )
        DB.deactivate_member( "12354" )
        rv = client.get( '/secure/dump_active_tags' )
        data = rv.data.decode( "UTF-8" )
        data = json.loads( data )
        self.assertTrue(
            "45321" in data,
            "Fetched active member",
        )
        self.assertFalse(
            "12354" in data,
            "Did not fetch deactivated member",
        )

    def test_edit_tag( self, client ):
        rv = client.get( '/check_tag/09017' )
        self.assertStatus( rv, 404 )
        rv = client.get( '/check_tag/19017' )
        self.assertStatus( rv, 404 )
        # Create tag
        rv = client.put( '/secure/new_tag/09017/foo' )
        self.assertStatus( rv, 201 )
        rv = client.get( '/check_tag/09017' )
        self.assertStatus( rv, 200 )
        rv = client.get( '/check_tag/19017' )
        self.assertStatus( rv, 404 )
        # Edit tag: the old tag disappears, the new one takes over.
        rv = client.post( '/secure/edit_tag/09017/19017' )
        self.assertStatus( rv, 201 )
        rv = client.get( '/check_tag/09017' )
        self.assertStatus( rv, 404 )
        rv = client.get( '/check_tag/19017' )
        self.assertStatus( rv, 200 )

    def test_edit_name( self, client ):
        rv = client.get( '/check_tag/29017' )
        self.assertStatus( rv, 404 )
        rv = client.get( '/check_tag/29017' )
        self.assertStatus( rv, 404 )
        # Create tag
        rv = client.put( '/secure/new_tag/29017/foo' )
        self.assertStatus( rv, 201 )
        rv = client.get( '/check_tag/29017' )
        self.assertStatus( rv, 200 )
        # Edit tag
        rv = client.post( '/secure/edit_name/29017/bar' )
        self.assertStatus( rv, 201 )
# Allow running this test module directly (python test_020_api.py).
if __name__ == '__main__':
    unittest.main()
| 29.716814 | 80 | 0.556432 |
aceaccdddc73519c01245db40493715382641d85 | 685 | py | Python | tests/journal.ext/info_file.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | tests/journal.ext/info_file.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | tests/journal.ext/info_file.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <michael.aivazis@para-sim.com>
# (c) 1998-2022 all rights reserved
def test():
    """
    Send info channel output to a log file
    """
    # get the channel
    from journal.ext.journal import Informational as info
    # send all output to a file
    info.logfile("info_file.log")
    # make an info channel
    channel = info(name="tests.journal.info")
    # add some metadata
    channel.notes["time"] = "now"
    # inject a couple of lines; {line} buffers, {log} flushes the entry
    channel.line("info channel:")
    channel.log(" hello world!")
    # all done
    return
# main
if __name__ == "__main__":
    # run the test
    test()

# end of file
| 18.026316 | 57 | 0.613139 |
aceace4d2edd5d838147ac08fdc008677898ac3a | 8,338 | py | Python | infer/lib/python/inferlib/capture/gradle.py | sangwoo-joh/infer | 598a4d5a5579a92bf51ee5cad5dbe5a36edfa2d0 | [
"MIT"
] | 30 | 2019-06-28T15:20:25.000Z | 2021-08-25T11:37:30.000Z | infer/lib/python/inferlib/capture/gradle.py | sangwoo-joh/infer | 598a4d5a5579a92bf51ee5cad5dbe5a36edfa2d0 | [
"MIT"
] | 39 | 2019-06-28T15:08:12.000Z | 2020-07-17T01:44:33.000Z | infer/lib/python/inferlib/capture/gradle.py | sangwoo-joh/infer | 598a4d5a5579a92bf51ee5cad5dbe5a36edfa2d0 | [
"MIT"
] | 17 | 2019-06-28T14:40:41.000Z | 2021-03-11T19:26:13.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import os
import util
import tempfile
# Identifier under which this capture module is registered.
MODULE_NAME = __name__
# Help text shown for this capture integration.
MODULE_DESCRIPTION = '''Run analysis of code built with a command like:
gradle [options] [task]
Analysis examples:
infer -- gradle build
infer -- ./gradlew build'''
# Languages this capture module produces input for.
LANG = ['java']
def gen_instance(*args):
    """Module factory hook: forward (args, cmd) to GradleCapture."""
    return GradleCapture(*args)
# This creates an empty argparser for the module, which provides only
# description/usage information and no arguments.
create_argparser = util.base_argparser(MODULE_DESCRIPTION, MODULE_NAME)
def extract_filepath(parts):
    """Split ``parts`` into (remaining_args, file_path).

    Javac file paths may contain spaces and thus be split across several
    argv entries.  Starting from the shortest suffix, join trailing entries
    with spaces until the result names an existing file; return the
    untouched prefix and that path.  If no suffix is a file, return the
    whole list and None.
    """
    for cut in range(len(parts) - 1, -1, -1):
        candidate = ' '.join(parts[cut:])
        if os.path.isfile(candidate):
            return parts[:cut], candidate
    return parts, None
def pop(the_list):
    """Remove and return the last element, or None when the list is empty."""
    return the_list.pop() if the_list else None
def extract_argfiles_from_rev(javac_arguments):
    """Extract class names and @argfiles from the reversed list."""
    # Reverse the list, so it's in a natural order now
    javac_arguments = list(reversed(javac_arguments))
    java_opts = []
    # Accumulates tokens that may be pieces of an @argfile path containing
    # spaces, until the joined path names an existing file.
    saved = []
    java_arg = pop(javac_arguments)
    while java_arg is not None:
        if java_arg.startswith('@'):
            # Probably got an @argfile
            path = ' '.join([java_arg[1:]] + saved)
            if os.path.isfile(path):
                java_opts.insert(0, '@' + path)
                saved = []
            else:
                # @ at the middle of the path
                saved.insert(0, java_arg)
        else:
            # Either a class name or a part of the @argfile path
            saved.insert(0, java_arg)
        java_arg = pop(javac_arguments)
    # Only class names left
    java_opts[0:0] = saved
    return java_opts
# Please run the doctests using:
# $ python -m doctest -v gradle.py
def extract_all(javac_arguments):
    """Extract Java filenames and Javac options from the Javac arguments.

    Works backwards through the argument list, re-joining space-split file
    paths via extract_filepath and collecting everything else as options.

    >>> os.path.isfile = lambda s: s[1:].startswith('path/to/')
    >>> extract_all([])
    {'files': [], 'opts': []}
    >>> extract_all(['-opt1', 'optval1', '/path/to/1.java'])
    {'files': ['/path/to/1.java'], 'opts': ['-opt1', 'optval1']}
    >>> extract_all(['-opt1', 'optval1', '/path/to/a', 'b/1.java'])
    {'files': ['/path/to/a b/1.java'], 'opts': ['-opt1', 'optval1']}
    >>> extract_all(['-opt1', 'opt', 'val1', '/path/to/1.java'])
    {'files': ['/path/to/1.java'], 'opts': ['-opt1', 'opt val1']}
    >>> extract_all(['-opt1', '/path/to/a', 'b/c', 'd/1.java', '-opt2'])
    {'files': ['/path/to/a b/c d/1.java'], 'opts': ['-opt1', '-opt2']}
    >>> extract_all(['-opt1', 'optval1', '-path/to/1.java'])
    {'files': ['-path/to/1.java'], 'opts': ['-opt1', 'optval1']}
    >>> extract_all(['-opt1', 'optval1', '/path/to/', '-1.java'])
    {'files': ['/path/to/ -1.java'], 'opts': ['-opt1', 'optval1']}
    >>> extract_all(['undef1', 'undef2'])
    {'files': [], 'opts': ['undef1', 'undef2']}
    >>> extract_all(['-o', '/path/to/1.java', 'cls.class', '@/path/to/1'])
    {'files': ['/path/to/1.java'], 'opts': ['-o', 'cls.class', '@/path/to/1']}
    >>> extract_all(['-opt1', 'optval1', '/path/to/1.java', 'cls.class'])
    {'files': ['/path/to/1.java'], 'opts': ['-opt1', 'optval1', 'cls.class']}
    >>> extract_all(['cls.class', '@/path/to/a', 'b.txt'])
    {'files': [], 'opts': ['cls.class', '@/path/to/a b.txt']}
    >>> extract_all(['cls.class', '@/path/to/a', '@b.txt'])
    {'files': [], 'opts': ['cls.class', '@/path/to/a @b.txt']}
    >>> v = extract_all(['-opt1', 'optval1'] * 1000 + ['/path/to/1.java'])
    >>> len(v['opts'])
    2000
    """
    java_files = []
    java_opts = []
    # Reversed Javac options parameters
    rev_opt_params = []
    java_arg = pop(javac_arguments)
    while java_arg is not None:
        if java_arg.endswith('.java'):
            # Probably got a file
            remainder, path = extract_filepath(javac_arguments + [java_arg])
            if path is not None:
                java_files.append(path)
                javac_arguments = remainder
                # The file name can't be in the middle of the option
                java_opts.extend(extract_argfiles_from_rev(rev_opt_params))
                rev_opt_params = []
            else:
                # A use-case here: *.java dir as an option parameter
                rev_opt_params.append(java_arg)
        elif java_arg.startswith('-'):
            # Got a Javac option
            option = [java_arg]
            if len(rev_opt_params) > 0:
                option.append(' '.join(reversed(rev_opt_params)))
                rev_opt_params = []
            java_opts[0:0] = option
        else:
            # Got Javac option parameter
            rev_opt_params.append(java_arg)
        java_arg = pop(javac_arguments)
    # We may have class names and @argfiles besides java files and options
    java_opts.extend(extract_argfiles_from_rev(rev_opt_params))
    return {'files': java_files, 'opts': java_opts}
def normalize(path):
    """Quote (if needed) and encode a source path for a javac @argfile."""
    # Imported lazily to avoid a hard dependency at module import time.
    from inferlib import utils
    # From Javac docs: If a filename contains embedded spaces,
    # put the whole filename in double quotes
    quoted_path = path
    if ' ' in path:
        quoted_path = '"' + path + '"'
    return utils.encode(quoted_path)
class GradleCapture:
    """Runs a gradle build with --debug and turns every javac invocation
    found in the output into an infer capture command."""

    def __init__(self, args, cmd):
        from inferlib import config, utils
        self.args = args
        # TODO: make the extraction of targets smarter
        # --debug makes gradle log the full compiler argument lines parsed
        # in get_infer_commands.
        self.build_cmd = [cmd[0], '--debug'] + cmd[1:]
        # That contains javac version as well
        version_str = util.run_cmd_ignore_fail([cmd[0], '--version'])
        path = os.path.join(self.args.infer_out,
                            config.JAVAC_FILELISTS_FILENAME)
        if not os.path.exists(path):
            os.mkdir(path)
        logging.info('Running with:\n' + utils.decode(version_str))

    def get_infer_commands(self, verbose_output):
        """Parse gradle --debug output; return one capture command per
        unique javac compilation found."""
        from inferlib import config, jwlib
        argument_start_pattern = ' Compiler arguments: '
        calls = []
        seen_build_cmds = set([])
        for line in verbose_output.split('\n'):
            if argument_start_pattern in line:
                content = line.partition(argument_start_pattern)[2].strip()
                # if we're building both the debug and release configuration
                # and the build commands are identical up to "release/debug",
                # only do capture for one set of commands
                build_agnostic_cmd = content.replace('release', 'debug')
                if build_agnostic_cmd in seen_build_cmds:
                    continue
                seen_build_cmds.add(build_agnostic_cmd)
                arguments = content.split(' ')
                # Note: do *not* try to filter out empty strings from the arguments (as was done
                # here previously)! It will make compilation commands like
                # `javac -classpath '' -Xmaxerrs 1000` fail with "Unrecognized option 1000"
                extracted = extract_all(arguments)
                java_files = extracted['files']
                java_args = extracted['opts']
                # Write the source file list to an @argfile kept on disk
                # (delete=False) so the capture command can read it later.
                with tempfile.NamedTemporaryFile(
                        mode='w',
                        suffix='.txt',
                        prefix='gradle_',
                        dir=os.path.join(self.args.infer_out,
                                         config.JAVAC_FILELISTS_FILENAME),
                        delete=False) as sources:
                    sources.write('\n'.join(map(normalize, java_files)))
                    sources.flush()
                    java_args.append('@' + sources.name)
                capture = jwlib.create_infer_command(java_args)
                calls.append(capture)
        return calls

    def capture(self):
        """Run the build, then all capture commands; a failing build exit
        code takes precedence over the capture exit code."""
        print('Running and capturing gradle compilation...')
        (build_code, (verbose_out, _)) = util.get_build_output(self.build_cmd)
        cmds = self.get_infer_commands(verbose_out)
        capture_code = util.run_compilation_commands(cmds)
        if build_code != os.EX_OK:
            return build_code
        return capture_code
| 38.247706 | 96 | 0.578676 |
aceacede0f1cdbb9e3a46a71d213ee9cd10291bf | 1,452 | py | Python | similar_search/request_doc2vec.py | jphacks/B_2117 | f4fbf05576b02512c747c1c9c5328c7829027de2 | [
"MIT"
] | null | null | null | similar_search/request_doc2vec.py | jphacks/B_2117 | f4fbf05576b02512c747c1c9c5328c7829027de2 | [
"MIT"
] | null | null | null | similar_search/request_doc2vec.py | jphacks/B_2117 | f4fbf05576b02512c747c1c9c5328c7829027de2 | [
"MIT"
] | null | null | null | import json
from gensim.models.doc2vec import Doc2Vec
import numpy as np
#クエリID
query_id = '11610212' #ここに検索する対象の講義のIDを入れて
#返り値
score_list = [] #類似スコア
similar_class_id_list = [] #類似講義
#WMD用のデータjsonファイル読み込み
with open('data/div_corpus_data.json', mode='r') as f:
data = json.load(f)
en_data = data['en']
ja_data = data['ja']
en_data_keys = en_data.keys()
ja_data_keys = ja_data.keys()
#日本語講義か英語講義かチェック
if query_id in en_data_keys:
#英語講義
#Doc2vecモデル読み込み
en_doc2vec_model = Doc2Vec.load('model/en_doc2vec.model')
tmp_similar_list = en_doc2vec_model.docvecs.most_similar(query_id, topn=10) #類似講義上位100件
for similar_class in tmp_similar_list:
similar_class_id = similar_class[0]
score = similar_class[1]
similar_class_id_list.append(similar_class_id)
score_list.append(score)
elif query_id in ja_data_keys:
#日本語講義
#Doc2vecモデル読み込み
ja_doc2vec_model = Doc2Vec.load('model/ja_doc2vec.model')
tmp_similar_list = ja_doc2vec_model.docvecs.most_similar(query_id, topn=10) #類似講義上位100件
for similar_class in tmp_similar_list:
similar_class_id = similar_class[0]
score = similar_class[1]
similar_class_id_list.append(similar_class_id)
score_list.append(score)
else:
#エラー該当講義なし
pass
# 確認用
print(similar_class_id_list)
print(score_list)
#返り値
# similar_class_id_list : 類似講義上位10件の講義ID(0番目から順に最も類似している)
# score_list : 類似講義上位10件の類似度(1が最も良い(たぶん)) | 25.473684 | 91 | 0.739669 |
aceacf1678fb3c3ee99310a9cb03bcf18df889fc | 1,299 | py | Python | release/stubs.min/System/Windows/Controls/Primitives_parts/ItemsChangedEventArgs.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | [
"MIT"
] | 182 | 2017-06-27T02:26:15.000Z | 2022-03-30T18:53:43.000Z | release/stubs.min/System/Windows/Controls/Primitives_parts/ItemsChangedEventArgs.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | [
"MIT"
] | 28 | 2017-06-27T13:38:23.000Z | 2022-03-15T11:19:44.000Z | release/stubs.min/System/Windows/Controls/Primitives_parts/ItemsChangedEventArgs.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | [
"MIT"
] | 67 | 2017-06-28T09:43:59.000Z | 2022-03-20T21:17:10.000Z | class ItemsChangedEventArgs(EventArgs):
""" Provides data for the System.Windows.Controls.ItemContainerGenerator.ItemsChanged event. """
Action=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the action that occurred on the items collection.
Get: Action(self: ItemsChangedEventArgs) -> NotifyCollectionChangedAction
"""
ItemCount=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the number of items that were involved in the change.
Get: ItemCount(self: ItemsChangedEventArgs) -> int
"""
ItemUICount=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the number of user interface (UI) elements involved in the change.
Get: ItemUICount(self: ItemsChangedEventArgs) -> int
"""
OldPosition=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the position in the collection before the change occurred.
Get: OldPosition(self: ItemsChangedEventArgs) -> GeneratorPosition
"""
Position=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the position in the collection where the change occurred.
Get: Position(self: ItemsChangedEventArgs) -> GeneratorPosition
"""
| 22.016949 | 98 | 0.716705 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.